1 /*
2  *  linux/kernel/fork.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 /*
8  *  'fork.c' contains the help-routines for the 'fork' system call
9  * (see also entry.S and others).
10  * Fork is rather simple, once you get the hang of it, but the memory
11  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
12  */
13
14 #include <linux/slab.h>
15 #include <linux/sched/autogroup.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/coredump.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/numa_balancing.h>
20 #include <linux/sched/stat.h>
21 #include <linux/sched/task.h>
22 #include <linux/sched/task_stack.h>
23 #include <linux/sched/cputime.h>
24 #include <linux/rtmutex.h>
25 #include <linux/init.h>
26 #include <linux/unistd.h>
27 #include <linux/module.h>
28 #include <linux/vmalloc.h>
29 #include <linux/completion.h>
30 #include <linux/personality.h>
31 #include <linux/mempolicy.h>
32 #include <linux/sem.h>
33 #include <linux/file.h>
34 #include <linux/fdtable.h>
35 #include <linux/iocontext.h>
36 #include <linux/key.h>
37 #include <linux/binfmts.h>
38 #include <linux/mman.h>
39 #include <linux/mmu_notifier.h>
40 #include <linux/hmm.h>
41 #include <linux/fs.h>
42 #include <linux/mm.h>
43 #include <linux/vmacache.h>
44 #include <linux/nsproxy.h>
45 #include <linux/capability.h>
46 #include <linux/cpu.h>
47 #include <linux/cgroup.h>
48 #include <linux/security.h>
49 #include <linux/hugetlb.h>
50 #include <linux/seccomp.h>
51 #include <linux/swap.h>
52 #include <linux/syscalls.h>
53 #include <linux/jiffies.h>
54 #include <linux/futex.h>
55 #include <linux/compat.h>
56 #include <linux/kthread.h>
57 #include <linux/task_io_accounting_ops.h>
58 #include <linux/rcupdate.h>
59 #include <linux/ptrace.h>
60 #include <linux/mount.h>
61 #include <linux/audit.h>
62 #include <linux/memcontrol.h>
63 #include <linux/ftrace.h>
64 #include <linux/proc_fs.h>
65 #include <linux/profile.h>
66 #include <linux/rmap.h>
67 #include <linux/ksm.h>
68 #include <linux/acct.h>
69 #include <linux/userfaultfd_k.h>
70 #include <linux/tsacct_kern.h>
71 #include <linux/cn_proc.h>
72 #include <linux/freezer.h>
73 #include <linux/delayacct.h>
74 #include <linux/taskstats_kern.h>
75 #include <linux/random.h>
76 #include <linux/tty.h>
77 #include <linux/blkdev.h>
78 #include <linux/fs_struct.h>
79 #include <linux/magic.h>
80 #include <linux/sched/mm.h>
81 #include <linux/perf_event.h>
82 #include <linux/posix-timers.h>
83 #include <linux/user-return-notifier.h>
84 #include <linux/oom.h>
85 #include <linux/khugepaged.h>
86 #include <linux/signalfd.h>
87 #include <linux/uprobes.h>
88 #include <linux/aio.h>
89 #include <linux/compiler.h>
90 #include <linux/sysctl.h>
91 #include <linux/kcov.h>
92 #include <linux/livepatch.h>
93 #include <linux/thread_info.h>
94
95 #include <asm/pgtable.h>
96 #include <asm/pgalloc.h>
97 #include <linux/uaccess.h>
98 #include <asm/mmu_context.h>
99 #include <asm/cacheflush.h>
100 #include <asm/tlbflush.h>
101
102 #include <trace/events/sched.h>
103
104 #define CREATE_TRACE_POINTS
105 #include <trace/events/task.h>
106
107 /*
108  * Minimum number of threads to boot the kernel
109  */
110 #define MIN_THREADS 20
111
112 /*
113  * Maximum number of threads
114  */
115 #define MAX_THREADS FUTEX_TID_MASK
116
117 /*
118  * Counters protected by write_lock_irq(&tasklist_lock)
119  */
120 unsigned long total_forks;      /* Handle normal Linux uptimes. */
121 int nr_threads;                 /* The idle threads do not count.. */
122
123 int max_threads;                /* tunable limit on nr_threads */
124
125 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
126
127 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
128
129 #ifdef CONFIG_PROVE_RCU
130 int lockdep_tasklist_lock_is_held(void)
131 {
132         return lockdep_is_held(&tasklist_lock);
133 }
134 EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
135 #endif /* #ifdef CONFIG_PROVE_RCU */
136
137 int nr_processes(void)
138 {
139         int cpu;
140         int total = 0;
141
142         for_each_possible_cpu(cpu)
143                 total += per_cpu(process_counts, cpu);
144
145         return total;
146 }
147
148 void __weak arch_release_task_struct(struct task_struct *tsk)
149 {
150 }
151
152 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
153 static struct kmem_cache *task_struct_cachep;
154
155 static inline struct task_struct *alloc_task_struct_node(int node)
156 {
157         return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
158 }
159
160 static inline void free_task_struct(struct task_struct *tsk)
161 {
162         kmem_cache_free(task_struct_cachep, tsk);
163 }
164 #endif
165
166 void __weak arch_release_thread_stack(unsigned long *stack)
167 {
168 }
169
170 #ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
171
172 /*
173  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
174  * kmemcache based allocator.
175  */
176 # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
177
178 #ifdef CONFIG_VMAP_STACK
179 /*
180  * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
181  * flush.  Try to minimize the number of calls by caching stacks.
182  */
183 #define NR_CACHED_STACKS 2
184 static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
185
186 static int free_vm_stack_cache(unsigned int cpu)
187 {
188         struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
189         int i;
190
191         for (i = 0; i < NR_CACHED_STACKS; i++) {
192                 struct vm_struct *vm_stack = cached_vm_stacks[i];
193
194                 if (!vm_stack)
195                         continue;
196
197                 vfree(vm_stack->addr);
198                 cached_vm_stacks[i] = NULL;
199         }
200
201         return 0;
202 }
203 #endif
204
205 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
206 {
207 #ifdef CONFIG_VMAP_STACK
208         void *stack;
209         int i;
210
211         for (i = 0; i < NR_CACHED_STACKS; i++) {
212                 struct vm_struct *s;
213
214                 s = this_cpu_xchg(cached_stacks[i], NULL);
215
216                 if (!s)
217                         continue;
218
219                 /* Clear stale pointers from reused stack. */
220                 memset(s->addr, 0, THREAD_SIZE);
221
222                 tsk->stack_vm_area = s;
223                 return s->addr;
224         }
225
226         stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
227                                      VMALLOC_START, VMALLOC_END,
228                                      THREADINFO_GFP,
229                                      PAGE_KERNEL,
230                                      0, node, __builtin_return_address(0));
231
232         /*
233          * We can't call find_vm_area() in interrupt context, and
234          * free_thread_stack() can be called in interrupt context,
235          * so cache the vm_struct.
236          */
237         if (stack)
238                 tsk->stack_vm_area = find_vm_area(stack);
239         return stack;
240 #else
241         struct page *page = alloc_pages_node(node, THREADINFO_GFP,
242                                              THREAD_SIZE_ORDER);
243
244         return page ? page_address(page) : NULL;
245 #endif
246 }
247
248 static inline void free_thread_stack(struct task_struct *tsk)
249 {
250 #ifdef CONFIG_VMAP_STACK
251         if (task_stack_vm_area(tsk)) {
252                 int i;
253
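                /*
                 * Try to park the stack in an empty per-cpu cache slot so a
                 * later alloc_thread_stack_node() can reuse it without
                 * another vmalloc().  If every slot is already taken, fall
                 * back to vfree_atomic(), which is safe even in interrupt
                 * context.
                 */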
254                 for (i = 0; i < NR_CACHED_STACKS; i++) {
255                         if (this_cpu_cmpxchg(cached_stacks[i],
256                                         NULL, tsk->stack_vm_area) != NULL)
257                                 continue;
258
259                         return;
260                 }
261
262                 vfree_atomic(tsk->stack);
263                 return;
264         }
265 #endif
266
267         __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
268 }
269 # else
270 static struct kmem_cache *thread_stack_cache;
271
272 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
273                                                   int node)
274 {
275         return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
276 }
277
278 static void free_thread_stack(struct task_struct *tsk)
279 {
280         kmem_cache_free(thread_stack_cache, tsk->stack);
281 }
282
283 void thread_stack_cache_init(void)
284 {
285         thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
286                                         THREAD_SIZE, THREAD_SIZE, 0, 0,
287                                         THREAD_SIZE, NULL);
288         BUG_ON(thread_stack_cache == NULL);
289 }
290 # endif
291 #endif
292
293 /* SLAB cache for signal_struct structures (tsk->signal) */
294 static struct kmem_cache *signal_cachep;
295
296 /* SLAB cache for sighand_struct structures (tsk->sighand) */
297 struct kmem_cache *sighand_cachep;
298
299 /* SLAB cache for files_struct structures (tsk->files) */
300 struct kmem_cache *files_cachep;
301
302 /* SLAB cache for fs_struct structures (tsk->fs) */
303 struct kmem_cache *fs_cachep;
304
305 /* SLAB cache for vm_area_struct structures */
306 struct kmem_cache *vm_area_cachep;
307
308 /* SLAB cache for mm_struct structures (tsk->mm) */
309 static struct kmem_cache *mm_cachep;
310
311 static void account_kernel_stack(struct task_struct *tsk, int account)
312 {
313         void *stack = task_stack_page(tsk);
314         struct vm_struct *vm = task_stack_vm_area(tsk);
315
316         BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
317
318         if (vm) {
319                 int i;
320
321                 BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
322
323                 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
324                         mod_zone_page_state(page_zone(vm->pages[i]),
325                                             NR_KERNEL_STACK_KB,
326                                             PAGE_SIZE / 1024 * account);
327                 }
328
329                 /* All stack pages belong to the same memcg. */
330                 mod_memcg_page_state(vm->pages[0], MEMCG_KERNEL_STACK_KB,
331                                      account * (THREAD_SIZE / 1024));
332         } else {
333                 /*
334                  * All stack pages are in the same zone and belong to the
335                  * same memcg.
336                  */
337                 struct page *first_page = virt_to_page(stack);
338
339                 mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
340                                     THREAD_SIZE / 1024 * account);
341
342                 mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
343                                      account * (THREAD_SIZE / 1024));
344         }
345 }
346
347 static void release_task_stack(struct task_struct *tsk)
348 {
349         if (WARN_ON(tsk->state != TASK_DEAD))
350                 return;  /* Better to leak the stack than to free prematurely */
351
352         account_kernel_stack(tsk, -1);
353         arch_release_thread_stack(tsk->stack);
354         free_thread_stack(tsk);
355         tsk->stack = NULL;
356 #ifdef CONFIG_VMAP_STACK
357         tsk->stack_vm_area = NULL;
358 #endif
359 }
360
361 #ifdef CONFIG_THREAD_INFO_IN_TASK
362 void put_task_stack(struct task_struct *tsk)
363 {
364         if (atomic_dec_and_test(&tsk->stack_refcount))
365                 release_task_stack(tsk);
366 }
367 #endif
368
369 void free_task(struct task_struct *tsk)
370 {
371 #ifndef CONFIG_THREAD_INFO_IN_TASK
372         /*
373          * The task is finally done with both the stack and thread_info,
374          * so free both.
375          */
376         release_task_stack(tsk);
377 #else
378         /*
379          * If the task had a separate stack allocation, it should be gone
380          * by now.
381          */
382         WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
383 #endif
384         rt_mutex_debug_task_free(tsk);
385         ftrace_graph_exit_task(tsk);
386         put_seccomp_filter(tsk);
387         arch_release_task_struct(tsk);
388         if (tsk->flags & PF_KTHREAD)
389                 free_kthread_struct(tsk);
390         free_task_struct(tsk);
391 }
392 EXPORT_SYMBOL(free_task);
393
394 #ifdef CONFIG_MMU
395 static __latent_entropy int dup_mmap(struct mm_struct *mm,
396                                         struct mm_struct *oldmm)
397 {
398         struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
399         struct rb_node **rb_link, *rb_parent;
400         int retval;
401         unsigned long charge;
402         LIST_HEAD(uf);
403
404         uprobe_start_dup_mmap();
405         if (down_write_killable(&oldmm->mmap_sem)) {
406                 retval = -EINTR;
407                 goto fail_uprobe_end;
408         }
409         flush_cache_dup_mm(oldmm);
410         uprobe_dup_mmap(oldmm, mm);
411         /*
412          * Not linked in yet - no deadlock potential:
413          */
414         down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
415
416         /* No ordering required: file already has been exposed. */
417         RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
418
419         mm->total_vm = oldmm->total_vm;
420         mm->data_vm = oldmm->data_vm;
421         mm->exec_vm = oldmm->exec_vm;
422         mm->stack_vm = oldmm->stack_vm;
423
424         rb_link = &mm->mm_rb.rb_node;
425         rb_parent = NULL;
426         pprev = &mm->mmap;
427         retval = ksm_fork(mm, oldmm);
428         if (retval)
429                 goto out;
430         retval = khugepaged_fork(mm, oldmm);
431         if (retval)
432                 goto out;
433
434         prev = NULL;
435         for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
436                 struct file *file;
437
438                 if (mpnt->vm_flags & VM_DONTCOPY) {
439                         vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
440                         continue;
441                 }
442                 charge = 0;
443                 if (mpnt->vm_flags & VM_ACCOUNT) {
444                         unsigned long len = vma_pages(mpnt);
445
446                         if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
447                                 goto fail_nomem;
448                         charge = len;
449                 }
450                 tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
451                 if (!tmp)
452                         goto fail_nomem;
453                 *tmp = *mpnt;
454                 INIT_LIST_HEAD(&tmp->anon_vma_chain);
455                 retval = vma_dup_policy(mpnt, tmp);
456                 if (retval)
457                         goto fail_nomem_policy;
458                 tmp->vm_mm = mm;
459                 retval = dup_userfaultfd(tmp, &uf);
460                 if (retval)
461                         goto fail_nomem_anon_vma_fork;
462                 if (tmp->vm_flags & VM_WIPEONFORK) {
463                         /* VM_WIPEONFORK gets a clean slate in the child. */
464                         tmp->anon_vma = NULL;
465                         if (anon_vma_prepare(tmp))
466                                 goto fail_nomem_anon_vma_fork;
467                 } else if (anon_vma_fork(tmp, mpnt))
468                         goto fail_nomem_anon_vma_fork;
469                 tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
470                 tmp->vm_next = tmp->vm_prev = NULL;
471                 file = tmp->vm_file;
472                 if (file) {
473                         struct inode *inode = file_inode(file);
474                         struct address_space *mapping = file->f_mapping;
475
476                         get_file(file);
477                         if (tmp->vm_flags & VM_DENYWRITE)
478                                 atomic_dec(&inode->i_writecount);
479                         i_mmap_lock_write(mapping);
480                         if (tmp->vm_flags & VM_SHARED)
481                                 atomic_inc(&mapping->i_mmap_writable);
482                         flush_dcache_mmap_lock(mapping);
483                         /* insert tmp into the share list, just after mpnt */
484                         vma_interval_tree_insert_after(tmp, mpnt,
485                                         &mapping->i_mmap);
486                         flush_dcache_mmap_unlock(mapping);
487                         i_mmap_unlock_write(mapping);
488                 }
489
490                 /*
491                  * Clear hugetlb-related page reserves for children. This only
492                  * affects MAP_PRIVATE mappings. Faults generated by the child
493                  * are not guaranteed to succeed, even if read-only
494                  */
495                 if (is_vm_hugetlb_page(tmp))
496                         reset_vma_resv_huge_pages(tmp);
497
498                 /*
499                  * Link in the new vma and copy the page table entries.
500                  */
501                 *pprev = tmp;
502                 pprev = &tmp->vm_next;
503                 tmp->vm_prev = prev;
504                 prev = tmp;
505
506                 __vma_link_rb(mm, tmp, rb_link, rb_parent);
507                 rb_link = &tmp->vm_rb.rb_right;
508                 rb_parent = &tmp->vm_rb;
509
510                 mm->map_count++;
511                 if (!(tmp->vm_flags & VM_WIPEONFORK))
512                         retval = copy_page_range(mm, oldmm, mpnt);
513
514                 if (tmp->vm_ops && tmp->vm_ops->open)
515                         tmp->vm_ops->open(tmp);
516
517                 if (retval)
518                         goto out;
519         }
520         /* a new mm has just been created */
521         arch_dup_mmap(oldmm, mm);
522         retval = 0;
523 out:
524         up_write(&mm->mmap_sem);
525         flush_tlb_mm(oldmm);
526         up_write(&oldmm->mmap_sem);
527         dup_userfaultfd_complete(&uf);
528 fail_uprobe_end:
529         uprobe_end_dup_mmap();
530         return retval;
531 fail_nomem_anon_vma_fork:
532         mpol_put(vma_policy(tmp));
533 fail_nomem_policy:
534         kmem_cache_free(vm_area_cachep, tmp);
535 fail_nomem:
536         retval = -ENOMEM;
537         vm_unacct_memory(charge);
538         goto out;
539 }
540
541 static inline int mm_alloc_pgd(struct mm_struct *mm)
542 {
543         mm->pgd = pgd_alloc(mm);
544         if (unlikely(!mm->pgd))
545                 return -ENOMEM;
546         return 0;
547 }
548
549 static inline void mm_free_pgd(struct mm_struct *mm)
550 {
551         pgd_free(mm, mm->pgd);
552 }
553 #else
554 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
555 {
556         down_write(&oldmm->mmap_sem);
557         RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
558         up_write(&oldmm->mmap_sem);
559         return 0;
560 }
561 #define mm_alloc_pgd(mm)        (0)
562 #define mm_free_pgd(mm)
563 #endif /* CONFIG_MMU */
564
565 static void check_mm(struct mm_struct *mm)
566 {
567         int i;
568
569         for (i = 0; i < NR_MM_COUNTERS; i++) {
570                 long x = atomic_long_read(&mm->rss_stat.count[i]);
571
572                 if (unlikely(x))
573                         printk(KERN_ALERT "BUG: Bad rss-counter state "
574                                           "mm:%p idx:%d val:%ld\n", mm, i, x);
575         }
576
577         if (mm_pgtables_bytes(mm))
578                 pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
579                                 mm_pgtables_bytes(mm));
580
581 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
582         VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
583 #endif
584 }
585
586 #define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
587 #define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))
588
589 /*
590  * Called when the last reference to the mm
591  * is dropped: either by a lazy thread or by
592  * mmput. Free the page directory and the mm.
593  */
594 void __mmdrop(struct mm_struct *mm)
595 {
596         BUG_ON(mm == &init_mm);
597         WARN_ON_ONCE(mm == current->mm);
598         WARN_ON_ONCE(mm == current->active_mm);
599         mm_free_pgd(mm);
600         destroy_context(mm);
601         hmm_mm_destroy(mm);
602         mmu_notifier_mm_destroy(mm);
603         check_mm(mm);
604         put_user_ns(mm->user_ns);
605         free_mm(mm);
606 }
607 EXPORT_SYMBOL_GPL(__mmdrop);
608
609 static void mmdrop_async_fn(struct work_struct *work)
610 {
611         struct mm_struct *mm;
612
613         mm = container_of(work, struct mm_struct, async_put_work);
614         __mmdrop(mm);
615 }
616
617 static void mmdrop_async(struct mm_struct *mm)
618 {
619         if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
620                 INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
621                 schedule_work(&mm->async_put_work);
622         }
623 }
624
625 static inline void free_signal_struct(struct signal_struct *sig)
626 {
627         taskstats_tgid_free(sig);
628         sched_autogroup_exit(sig);
629         /*
630          * __mmdrop is not safe to call from softirq context on x86 due to
631          * pgd_dtor so postpone it to the async context
632          */
633         if (sig->oom_mm)
634                 mmdrop_async(sig->oom_mm);
635         kmem_cache_free(signal_cachep, sig);
636 }
637
638 static inline void put_signal_struct(struct signal_struct *sig)
639 {
640         if (atomic_dec_and_test(&sig->sigcnt))
641                 free_signal_struct(sig);
642 }
643
644 void __put_task_struct(struct task_struct *tsk)
645 {
646         WARN_ON(!tsk->exit_state);
647         WARN_ON(atomic_read(&tsk->usage));
648         WARN_ON(tsk == current);
649
650         cgroup_free(tsk);
651         task_numa_free(tsk);
652         security_task_free(tsk);
653         exit_creds(tsk);
654         delayacct_tsk_free(tsk);
655         put_signal_struct(tsk->signal);
656
657         if (!profile_handoff_task(tsk))
658                 free_task(tsk);
659 }
660 EXPORT_SYMBOL_GPL(__put_task_struct);
661
662 void __init __weak arch_task_cache_init(void) { }
663
664 /*
665  * set_max_threads
666  */
667 static void set_max_threads(unsigned int max_threads_suggested)
668 {
669         u64 threads;
670
671         /*
672          * The number of threads shall be limited such that the thread
673          * structures may only consume a small part of the available memory.
674          */
675         if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
676                 threads = MAX_THREADS;
677         else
678                 threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
679                                     (u64) THREAD_SIZE * 8UL);
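        /*
         * Worked example (illustrative numbers, not taken from this file):
         * with 4 GiB of RAM and 16 KiB kernel stacks the division above
         * gives 4 GiB / (16 KiB * 8) = 32768 threads, i.e. even a fully
         * populated set of thread stacks is capped at roughly 1/8th of
         * memory.
         */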
680
681         if (threads > max_threads_suggested)
682                 threads = max_threads_suggested;
683
684         max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
685 }
686
687 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
688 /* Initialized by the architecture: */
689 int arch_task_struct_size __read_mostly;
690 #endif
691
692 static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
693 {
694         /* Fetch thread_struct whitelist for the architecture. */
695         arch_thread_struct_whitelist(offset, size);
696
697         /*
698          * Handle zero-sized whitelist or empty thread_struct, otherwise
699          * adjust offset to position of thread_struct in task_struct.
700          */
701         if (unlikely(*size == 0))
702                 *offset = 0;
703         else
704                 *offset += offsetof(struct task_struct, thread);
705 }
706
707 void __init fork_init(void)
708 {
709         int i;
710 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
711 #ifndef ARCH_MIN_TASKALIGN
712 #define ARCH_MIN_TASKALIGN      0
713 #endif
714         int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
715         unsigned long useroffset, usersize;
716
717         /* create a slab on which task_structs can be allocated */
718         task_struct_whitelist(&useroffset, &usersize);
719         task_struct_cachep = kmem_cache_create_usercopy("task_struct",
720                         arch_task_struct_size, align,
721                         SLAB_PANIC|SLAB_ACCOUNT,
722                         useroffset, usersize, NULL);
723 #endif
724
725         /* do the arch specific task caches init */
726         arch_task_cache_init();
727
728         set_max_threads(MAX_THREADS);
729
730         init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
731         init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
732         init_task.signal->rlim[RLIMIT_SIGPENDING] =
733                 init_task.signal->rlim[RLIMIT_NPROC];
734
735         for (i = 0; i < UCOUNT_COUNTS; i++) {
736                 init_user_ns.ucount_max[i] = max_threads/2;
737         }
738
739 #ifdef CONFIG_VMAP_STACK
740         cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
741                           NULL, free_vm_stack_cache);
742 #endif
743
744         lockdep_init_task(&init_task);
745 }
746
747 int __weak arch_dup_task_struct(struct task_struct *dst,
748                                                struct task_struct *src)
749 {
750         *dst = *src;
751         return 0;
752 }
753
754 void set_task_stack_end_magic(struct task_struct *tsk)
755 {
756         unsigned long *stackend;
757
758         stackend = end_of_stack(tsk);
759         *stackend = STACK_END_MAGIC;    /* for overflow detection */
760 }
761
762 static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
763 {
764         struct task_struct *tsk;
765         unsigned long *stack;
766         struct vm_struct *stack_vm_area;
767         int err;
768
769         if (node == NUMA_NO_NODE)
770                 node = tsk_fork_get_node(orig);
771         tsk = alloc_task_struct_node(node);
772         if (!tsk)
773                 return NULL;
774
775         stack = alloc_thread_stack_node(tsk, node);
776         if (!stack)
777                 goto free_tsk;
778
779         stack_vm_area = task_stack_vm_area(tsk);
780
781         err = arch_dup_task_struct(tsk, orig);
782
783         /*
784          * arch_dup_task_struct() clobbers the stack-related fields.  Make
785          * sure they're properly initialized before using any stack-related
786          * functions again.
787          */
788         tsk->stack = stack;
789 #ifdef CONFIG_VMAP_STACK
790         tsk->stack_vm_area = stack_vm_area;
791 #endif
792 #ifdef CONFIG_THREAD_INFO_IN_TASK
793         atomic_set(&tsk->stack_refcount, 1);
794 #endif
795
796         if (err)
797                 goto free_stack;
798
799 #ifdef CONFIG_SECCOMP
800         /*
801          * We must handle setting up seccomp filters once we're under
802          * the sighand lock in case orig has changed between now and
803          * then. Until then, filter must be NULL to avoid messing up
804          * the usage counts on the error path calling free_task.
805          */
806         tsk->seccomp.filter = NULL;
807 #endif
808
809         setup_thread_stack(tsk, orig);
810         clear_user_return_notifier(tsk);
811         clear_tsk_need_resched(tsk);
812         set_task_stack_end_magic(tsk);
813
814 #ifdef CONFIG_CC_STACKPROTECTOR
815         tsk->stack_canary = get_random_canary();
816 #endif
817
818         /*
819          * One for us, one for whoever does the "release_task()" (usually
820          * parent)
821          */
822         atomic_set(&tsk->usage, 2);
823 #ifdef CONFIG_BLK_DEV_IO_TRACE
824         tsk->btrace_seq = 0;
825 #endif
826         tsk->splice_pipe = NULL;
827         tsk->task_frag.page = NULL;
828         tsk->wake_q.next = NULL;
829
830         account_kernel_stack(tsk, 1);
831
832         kcov_task_init(tsk);
833
834 #ifdef CONFIG_FAULT_INJECTION
835         tsk->fail_nth = 0;
836 #endif
837
838         return tsk;
839
840 free_stack:
841         free_thread_stack(tsk);
842 free_tsk:
843         free_task_struct(tsk);
844         return NULL;
845 }
846
847 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
848
849 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
850
851 static int __init coredump_filter_setup(char *s)
852 {
853         default_dump_filter =
854                 (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
855                 MMF_DUMP_FILTER_MASK;
856         return 1;
857 }
858
859 __setup("coredump_filter=", coredump_filter_setup);
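/*
 * Usage sketch (assumed, following the parsing above): booting with
 * coredump_filter=0x23 on the kernel command line replaces
 * MMF_DUMP_FILTER_DEFAULT, and the value later shows up as the default
 * in each process's /proc/<pid>/coredump_filter.
 */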
860
861 #include <linux/init_task.h>
862
863 static void mm_init_aio(struct mm_struct *mm)
864 {
865 #ifdef CONFIG_AIO
866         spin_lock_init(&mm->ioctx_lock);
867         mm->ioctx_table = NULL;
868 #endif
869 }
870
871 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
872 {
873 #ifdef CONFIG_MEMCG
874         mm->owner = p;
875 #endif
876 }
877
878 static void mm_init_uprobes_state(struct mm_struct *mm)
879 {
880 #ifdef CONFIG_UPROBES
881         mm->uprobes_state.xol_area = NULL;
882 #endif
883 }
884
885 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
886         struct user_namespace *user_ns)
887 {
888         mm->mmap = NULL;
889         mm->mm_rb = RB_ROOT;
890         mm->vmacache_seqnum = 0;
891         atomic_set(&mm->mm_users, 1);
892         atomic_set(&mm->mm_count, 1);
893         init_rwsem(&mm->mmap_sem);
894         INIT_LIST_HEAD(&mm->mmlist);
895         mm->core_state = NULL;
896         mm_pgtables_bytes_init(mm);
897         mm->map_count = 0;
898         mm->locked_vm = 0;
899         mm->pinned_vm = 0;
900         memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
901         spin_lock_init(&mm->page_table_lock);
902         spin_lock_init(&mm->arg_lock);
903         mm_init_cpumask(mm);
904         mm_init_aio(mm);
905         mm_init_owner(mm, p);
906         RCU_INIT_POINTER(mm->exe_file, NULL);
907         mmu_notifier_mm_init(mm);
908         hmm_mm_init(mm);
909         init_tlb_flush_pending(mm);
910 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
911         mm->pmd_huge_pte = NULL;
912 #endif
913         mm_init_uprobes_state(mm);
914
915         if (current->mm) {
916                 mm->flags = current->mm->flags & MMF_INIT_MASK;
917                 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
918         } else {
919                 mm->flags = default_dump_filter;
920                 mm->def_flags = 0;
921         }
922
923         if (mm_alloc_pgd(mm))
924                 goto fail_nopgd;
925
926         if (init_new_context(p, mm))
927                 goto fail_nocontext;
928
929         mm->user_ns = get_user_ns(user_ns);
930         return mm;
931
932 fail_nocontext:
933         mm_free_pgd(mm);
934 fail_nopgd:
935         free_mm(mm);
936         return NULL;
937 }
938
939 /*
940  * Allocate and initialize an mm_struct.
941  */
942 struct mm_struct *mm_alloc(void)
943 {
944         struct mm_struct *mm;
945
946         mm = allocate_mm();
947         if (!mm)
948                 return NULL;
949
950         memset(mm, 0, sizeof(*mm));
951         return mm_init(mm, current, current_user_ns());
952 }
953
954 static inline void __mmput(struct mm_struct *mm)
955 {
956         VM_BUG_ON(atomic_read(&mm->mm_users));
957
958         uprobe_clear_state(mm);
959         exit_aio(mm);
960         ksm_exit(mm);
961         khugepaged_exit(mm); /* must run before exit_mmap */
962         exit_mmap(mm);
963         mm_put_huge_zero_page(mm);
964         set_mm_exe_file(mm, NULL);
965         if (!list_empty(&mm->mmlist)) {
966                 spin_lock(&mmlist_lock);
967                 list_del(&mm->mmlist);
968                 spin_unlock(&mmlist_lock);
969         }
970         if (mm->binfmt)
971                 module_put(mm->binfmt->module);
972         mmdrop(mm);
973 }
974
975 /*
976  * Decrement the use count and release all resources for an mm.
977  */
978 void mmput(struct mm_struct *mm)
979 {
980         might_sleep();
981
982         if (atomic_dec_and_test(&mm->mm_users))
983                 __mmput(mm);
984 }
985 EXPORT_SYMBOL_GPL(mmput);
986
987 #ifdef CONFIG_MMU
988 static void mmput_async_fn(struct work_struct *work)
989 {
990         struct mm_struct *mm = container_of(work, struct mm_struct,
991                                             async_put_work);
992
993         __mmput(mm);
994 }
995
996 void mmput_async(struct mm_struct *mm)
997 {
998         if (atomic_dec_and_test(&mm->mm_users)) {
999                 INIT_WORK(&mm->async_put_work, mmput_async_fn);
1000                 schedule_work(&mm->async_put_work);
1001         }
1002 }
1003 #endif
1004
1005 /**
1006  * set_mm_exe_file - change a reference to the mm's executable file
1007  *
1008  * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
1009  *
1010  * Main users are mmput() and sys_execve(). Callers prevent concurrent
1011  * invocations: in mmput() nobody alive left, in execve task is single
1012  * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
1013  * mm->exe_file, but does so without using set_mm_exe_file() in order
1014  * to avoid the need for any locks.
1015  */
1016 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
1017 {
1018         struct file *old_exe_file;
1019
1020         /*
1021          * It is safe to dereference the exe_file without RCU as
1022          * this function is only called if nobody else can access
1023          * this mm -- see comment above for justification.
1024          */
1025         old_exe_file = rcu_dereference_raw(mm->exe_file);
1026
1027         if (new_exe_file)
1028                 get_file(new_exe_file);
1029         rcu_assign_pointer(mm->exe_file, new_exe_file);
1030         if (old_exe_file)
1031                 fput(old_exe_file);
1032 }
1033
1034 /**
1035  * get_mm_exe_file - acquire a reference to the mm's executable file
1036  *
1037  * Returns %NULL if mm has no associated executable file.
1038  * User must release file via fput().
1039  */
1040 struct file *get_mm_exe_file(struct mm_struct *mm)
1041 {
1042         struct file *exe_file;
1043
1044         rcu_read_lock();
1045         exe_file = rcu_dereference(mm->exe_file);
1046         if (exe_file && !get_file_rcu(exe_file))
1047                 exe_file = NULL;
1048         rcu_read_unlock();
1049         return exe_file;
1050 }
1051 EXPORT_SYMBOL(get_mm_exe_file);
1052
1053 /**
1054  * get_task_exe_file - acquire a reference to the task's executable file
1055  *
1056  * Returns %NULL if task's mm (if any) has no associated executable file or
1057  * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
1058  * User must release file via fput().
1059  */
1060 struct file *get_task_exe_file(struct task_struct *task)
1061 {
1062         struct file *exe_file = NULL;
1063         struct mm_struct *mm;
1064
1065         task_lock(task);
1066         mm = task->mm;
1067         if (mm) {
1068                 if (!(task->flags & PF_KTHREAD))
1069                         exe_file = get_mm_exe_file(mm);
1070         }
1071         task_unlock(task);
1072         return exe_file;
1073 }
1074 EXPORT_SYMBOL(get_task_exe_file);
1075
1076 /**
1077  * get_task_mm - acquire a reference to the task's mm
1078  *
1079  * Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
1080  * this kernel thread has transiently adopted a user mm with use_mm,
1081  * to do its AIO); otherwise returns a reference to the mm after
1082  * bumping up the use count.  User must release the mm via mmput()
1083  * after use.  Typically used by /proc and ptrace.
1084  */
1085 struct mm_struct *get_task_mm(struct task_struct *task)
1086 {
1087         struct mm_struct *mm;
1088
1089         task_lock(task);
1090         mm = task->mm;
1091         if (mm) {
1092                 if (task->flags & PF_KTHREAD)
1093                         mm = NULL;
1094                 else
1095                         mmget(mm);
1096         }
1097         task_unlock(task);
1098         return mm;
1099 }
1100 EXPORT_SYMBOL_GPL(get_task_mm);
1101
1102 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
1103 {
1104         struct mm_struct *mm;
1105         int err;
1106
1107         err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
1108         if (err)
1109                 return ERR_PTR(err);
1110
1111         mm = get_task_mm(task);
1112         if (mm && mm != current->mm &&
1113                         !ptrace_may_access(task, mode)) {
1114                 mmput(mm);
1115                 mm = ERR_PTR(-EACCES);
1116         }
1117         mutex_unlock(&task->signal->cred_guard_mutex);
1118
1119         return mm;
1120 }
1121
1122 static void complete_vfork_done(struct task_struct *tsk)
1123 {
1124         struct completion *vfork;
1125
1126         task_lock(tsk);
1127         vfork = tsk->vfork_done;
1128         if (likely(vfork)) {
1129                 tsk->vfork_done = NULL;
1130                 complete(vfork);
1131         }
1132         task_unlock(tsk);
1133 }
1134
1135 static int wait_for_vfork_done(struct task_struct *child,
1136                                 struct completion *vfork)
1137 {
1138         int killed;
1139
1140         freezer_do_not_count();
1141         killed = wait_for_completion_killable(vfork);
1142         freezer_count();
1143
1144         if (killed) {
1145                 task_lock(child);
1146                 child->vfork_done = NULL;
1147                 task_unlock(child);
1148         }
1149
1150         put_task_struct(child);
1151         return killed;
1152 }
1153
1154 /* Please note the differences between mmput and mm_release.
1155  * mmput is called whenever we stop holding onto a mm_struct,
1156  * whether on an error path or on success.
1157  *
1158  * mm_release is called after a mm_struct has been removed
1159  * from the current process.
1160  *
1161  * This difference is important for error handling, when we
1162  * only half set up a mm_struct for a new process and need to restore
1163  * the old one.  Because we mmput the new mm_struct before
1164  * restoring the old one. . .
1165  * Eric Biederman 10 January 1998
1166  */
1167 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
1168 {
1169         /* Get rid of any futexes when releasing the mm */
1170 #ifdef CONFIG_FUTEX
1171         if (unlikely(tsk->robust_list)) {
1172                 exit_robust_list(tsk);
1173                 tsk->robust_list = NULL;
1174         }
1175 #ifdef CONFIG_COMPAT
1176         if (unlikely(tsk->compat_robust_list)) {
1177                 compat_exit_robust_list(tsk);
1178                 tsk->compat_robust_list = NULL;
1179         }
1180 #endif
1181         if (unlikely(!list_empty(&tsk->pi_state_list)))
1182                 exit_pi_state_list(tsk);
1183 #endif
1184
1185         uprobe_free_utask(tsk);
1186
1187         /* Get rid of any cached register state */
1188         deactivate_mm(tsk, mm);
1189
1190         /*
1191          * Signal userspace if we're not exiting with a core dump
1192          * because we want to leave the value intact for debugging
1193          * purposes.
1194          */
1195         if (tsk->clear_child_tid) {
1196                 if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
1197                     atomic_read(&mm->mm_users) > 1) {
1198                         /*
1199                          * We don't check the error code - if userspace has
1200                          * not set up a proper pointer then tough luck.
1201                          */
1202                         put_user(0, tsk->clear_child_tid);
1203                         do_futex(tsk->clear_child_tid, FUTEX_WAKE,
1204                                         1, NULL, NULL, 0, 0);
1205                 }
1206                 tsk->clear_child_tid = NULL;
1207         }
1208
1209         /*
1210          * All done, finally we can wake up parent and return this mm to him.
1211          * Also kthread_stop() uses this completion for synchronization.
1212          */
1213         if (tsk->vfork_done)
1214                 complete_vfork_done(tsk);
1215 }
1216
1217 /*
1218  * Allocate a new mm structure and copy contents from the
1219  * mm structure of the passed in task structure.
1220  */
1221 static struct mm_struct *dup_mm(struct task_struct *tsk)
1222 {
1223         struct mm_struct *mm, *oldmm = current->mm;
1224         int err;
1225
1226         mm = allocate_mm();
1227         if (!mm)
1228                 goto fail_nomem;
1229
1230         memcpy(mm, oldmm, sizeof(*mm));
1231
1232         if (!mm_init(mm, tsk, mm->user_ns))
1233                 goto fail_nomem;
1234
1235         err = dup_mmap(mm, oldmm);
1236         if (err)
1237                 goto free_pt;
1238
1239         mm->hiwater_rss = get_mm_rss(mm);
1240         mm->hiwater_vm = mm->total_vm;
1241
1242         if (mm->binfmt && !try_module_get(mm->binfmt->module))
1243                 goto free_pt;
1244
1245         return mm;
1246
1247 free_pt:
1248         /* don't put binfmt in mmput, we haven't got module yet */
1249         mm->binfmt = NULL;
1250         mmput(mm);
1251
1252 fail_nomem:
1253         return NULL;
1254 }
1255
1256 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
1257 {
1258         struct mm_struct *mm, *oldmm;
1259         int retval;
1260
1261         tsk->min_flt = tsk->maj_flt = 0;
1262         tsk->nvcsw = tsk->nivcsw = 0;
1263 #ifdef CONFIG_DETECT_HUNG_TASK
1264         tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
1265 #endif
1266
1267         tsk->mm = NULL;
1268         tsk->active_mm = NULL;
1269
1270         /*
1271          * Are we cloning a kernel thread?
1272          *
1273  * We need to steal an active VM for that.
1274          */
1275         oldmm = current->mm;
1276         if (!oldmm)
1277                 return 0;
1278
1279         /* initialize the new vmacache entries */
1280         vmacache_flush(tsk);
1281
1282         if (clone_flags & CLONE_VM) {
1283                 mmget(oldmm);
1284                 mm = oldmm;
1285                 goto good_mm;
1286         }
1287
1288         retval = -ENOMEM;
1289         mm = dup_mm(tsk);
1290         if (!mm)
1291                 goto fail_nomem;
1292
1293 good_mm:
1294         tsk->mm = mm;
1295         tsk->active_mm = mm;
1296         return 0;
1297
1298 fail_nomem:
1299         return retval;
1300 }
1301
1302 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
1303 {
1304         struct fs_struct *fs = current->fs;
1305         if (clone_flags & CLONE_FS) {
1306                 /* tsk->fs is already what we want */
1307                 spin_lock(&fs->lock);
1308                 if (fs->in_exec) {
1309                         spin_unlock(&fs->lock);
1310                         return -EAGAIN;
1311                 }
1312                 fs->users++;
1313                 spin_unlock(&fs->lock);
1314                 return 0;
1315         }
1316         tsk->fs = copy_fs_struct(fs);
1317         if (!tsk->fs)
1318                 return -ENOMEM;
1319         return 0;
1320 }
1321
1322 static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
1323 {
1324         struct files_struct *oldf, *newf;
1325         int error = 0;
1326
1327         /*
1328          * A background process may not have any files ...
1329          */
1330         oldf = current->files;
1331         if (!oldf)
1332                 goto out;
1333
1334         if (clone_flags & CLONE_FILES) {
1335                 atomic_inc(&oldf->count);
1336                 goto out;
1337         }
1338
1339         newf = dup_fd(oldf, &error);
1340         if (!newf)
1341                 goto out;
1342
1343         tsk->files = newf;
1344         error = 0;
1345 out:
1346         return error;
1347 }
1348
1349 static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
1350 {
1351 #ifdef CONFIG_BLOCK
1352         struct io_context *ioc = current->io_context;
1353         struct io_context *new_ioc;
1354
1355         if (!ioc)
1356                 return 0;
1357         /*
1358          * Share io context with parent, if CLONE_IO is set
1359          */
1360         if (clone_flags & CLONE_IO) {
1361                 ioc_task_link(ioc);
1362                 tsk->io_context = ioc;
1363         } else if (ioprio_valid(ioc->ioprio)) {
1364                 new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
1365                 if (unlikely(!new_ioc))
1366                         return -ENOMEM;
1367
1368                 new_ioc->ioprio = ioc->ioprio;
1369                 put_io_context(new_ioc);
1370         }
1371 #endif
1372         return 0;
1373 }
1374
1375 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
1376 {
1377         struct sighand_struct *sig;
1378
1379         if (clone_flags & CLONE_SIGHAND) {
1380                 atomic_inc(&current->sighand->count);
1381                 return 0;
1382         }
1383         sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1384         rcu_assign_pointer(tsk->sighand, sig);
1385         if (!sig)
1386                 return -ENOMEM;
1387
1388         atomic_set(&sig->count, 1);
1389         memcpy(sig->action, current->sighand->action, sizeof(sig->action));
1390         return 0;
1391 }
1392
1393 void __cleanup_sighand(struct sighand_struct *sighand)
1394 {
1395         if (atomic_dec_and_test(&sighand->count)) {
1396                 signalfd_cleanup(sighand);
1397                 /*
1398                  * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
1399                  * without an RCU grace period, see __lock_task_sighand().
1400                  */
1401                 kmem_cache_free(sighand_cachep, sighand);
1402         }
1403 }
1404
1405 #ifdef CONFIG_POSIX_TIMERS
1406 /*
1407  * Initialize POSIX timer handling for a thread group.
1408  */
1409 static void posix_cpu_timers_init_group(struct signal_struct *sig)
1410 {
1411         unsigned long cpu_limit;
1412
1413         cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1414         if (cpu_limit != RLIM_INFINITY) {
1415                 sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC;
1416                 sig->cputimer.running = true;
1417         }
1418
1419         /* The timer lists. */
1420         INIT_LIST_HEAD(&sig->cpu_timers[0]);
1421         INIT_LIST_HEAD(&sig->cpu_timers[1]);
1422         INIT_LIST_HEAD(&sig->cpu_timers[2]);
1423 }
1424 #else
1425 static inline void posix_cpu_timers_init_group(struct signal_struct *sig) { }
1426 #endif
1427
1428 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1429 {
1430         struct signal_struct *sig;
1431
1432         if (clone_flags & CLONE_THREAD)
1433                 return 0;
1434
1435         sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
1436         tsk->signal = sig;
1437         if (!sig)
1438                 return -ENOMEM;
1439
1440         sig->nr_threads = 1;
1441         atomic_set(&sig->live, 1);
1442         atomic_set(&sig->sigcnt, 1);
1443
1444         /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
1445         sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
1446         tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
1447
1448         init_waitqueue_head(&sig->wait_chldexit);
1449         sig->curr_target = tsk;
1450         init_sigpending(&sig->shared_pending);
1451         seqlock_init(&sig->stats_lock);
1452         prev_cputime_init(&sig->prev_cputime);
1453
1454 #ifdef CONFIG_POSIX_TIMERS
1455         INIT_LIST_HEAD(&sig->posix_timers);
1456         hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1457         sig->real_timer.function = it_real_fn;
1458 #endif
1459
1460         task_lock(current->group_leader);
1461         memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1462         task_unlock(current->group_leader);
1463
1464         posix_cpu_timers_init_group(sig);
1465
1466         tty_audit_fork(sig);
1467         sched_autogroup_fork(sig);
1468
1469         sig->oom_score_adj = current->signal->oom_score_adj;
1470         sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1471
1472         mutex_init(&sig->cred_guard_mutex);
1473
1474         return 0;
1475 }
1476
1477 static void copy_seccomp(struct task_struct *p)
1478 {
1479 #ifdef CONFIG_SECCOMP
1480         /*
1481          * Must be called with sighand->lock held, which is common to
1482          * all threads in the group. Holding cred_guard_mutex is not
1483          * needed because this new task is not yet running and cannot
1484          * be racing exec.
1485          */
1486         assert_spin_locked(&current->sighand->siglock);
1487
1488         /* Ref-count the new filter user, and assign it. */
1489         get_seccomp_filter(current);
1490         p->seccomp = current->seccomp;
1491
1492         /*
1493          * Explicitly enable no_new_privs here in case it got set
1494          * between the task_struct being duplicated and holding the
1495          * sighand lock. The seccomp state and nnp must be in sync.
1496          */
1497         if (task_no_new_privs(current))
1498                 task_set_no_new_privs(p);
1499
1500         /*
1501          * If the parent gained a seccomp mode after copying thread
1502          * flags but before we took the sighand lock, we have
1503          * to manually enable the seccomp thread flag here.
1504          */
1505         if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
1506                 set_tsk_thread_flag(p, TIF_SECCOMP);
1507 #endif
1508 }
1509
1510 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1511 {
1512         current->clear_child_tid = tidptr;
1513
1514         return task_pid_vnr(current);
1515 }
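/*
 * See mm_release() above: the pointer stored here is the word that gets
 * zeroed and futex-woken when the task releases its mm, so another thread
 * can wait on it to observe this task's exit.
 */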
1516
1517 static void rt_mutex_init_task(struct task_struct *p)
1518 {
1519         raw_spin_lock_init(&p->pi_lock);
1520 #ifdef CONFIG_RT_MUTEXES
1521         p->pi_waiters = RB_ROOT_CACHED;
1522         p->pi_top_task = NULL;
1523         p->pi_blocked_on = NULL;
1524 #endif
1525 }
1526
1527 #ifdef CONFIG_POSIX_TIMERS
1528 /*
1529  * Initialize POSIX timer handling for a single task.
1530  */
1531 static void posix_cpu_timers_init(struct task_struct *tsk)
1532 {
1533         tsk->cputime_expires.prof_exp = 0;
1534         tsk->cputime_expires.virt_exp = 0;
1535         tsk->cputime_expires.sched_exp = 0;
1536         INIT_LIST_HEAD(&tsk->cpu_timers[0]);
1537         INIT_LIST_HEAD(&tsk->cpu_timers[1]);
1538         INIT_LIST_HEAD(&tsk->cpu_timers[2]);
1539 }
1540 #else
1541 static inline void posix_cpu_timers_init(struct task_struct *tsk) { }
1542 #endif
1543
1544 static inline void
1545 init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
1546 {
1547          task->pids[type].pid = pid;
1548 }
1549
1550 static inline void rcu_copy_process(struct task_struct *p)
1551 {
1552 #ifdef CONFIG_PREEMPT_RCU
1553         p->rcu_read_lock_nesting = 0;
1554         p->rcu_read_unlock_special.s = 0;
1555         p->rcu_blocked_node = NULL;
1556         INIT_LIST_HEAD(&p->rcu_node_entry);
1557 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1558 #ifdef CONFIG_TASKS_RCU
1559         p->rcu_tasks_holdout = false;
1560         INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
1561         p->rcu_tasks_idle_cpu = -1;
1562 #endif /* #ifdef CONFIG_TASKS_RCU */
1563 }
1564
1565 /*
1566  * This creates a new process as a copy of the old one,
1567  * but does not actually start it yet.
1568  *
1569  * It copies the registers, and all the appropriate
1570  * parts of the process environment (as per the clone
1571  * flags). The actual kick-off is left to the caller.
1572  */
1573 static __latent_entropy struct task_struct *copy_process(
1574                                         unsigned long clone_flags,
1575                                         unsigned long stack_start,
1576                                         unsigned long stack_size,
1577                                         int __user *child_tidptr,
1578                                         struct pid *pid,
1579                                         int trace,
1580                                         unsigned long tls,
1581                                         int node)
1582 {
1583         int retval;
1584         struct task_struct *p;
1585
1586         /*
1587          * Don't allow sharing the root directory with processes in a different
1588          * namespace
1589          */
1590         if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1591                 return ERR_PTR(-EINVAL);
1592
1593         if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
1594                 return ERR_PTR(-EINVAL);
1595
1596         /*
1597          * Thread groups must share signals as well, and detached threads
1598          * can only be started up within the thread group.
1599          */
1600         if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
1601                 return ERR_PTR(-EINVAL);
1602
1603         /*
1604          * Shared signal handlers imply shared VM. By way of the above,
1605          * thread groups also imply shared VM. Blocking this case allows
1606          * for various simplifications in other code.
1607          */
1608         if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
1609                 return ERR_PTR(-EINVAL);
1610
1611         /*
1612          * Siblings of global init remain as zombies on exit since they are
1613          * not reaped by their parent (swapper). To solve this and to avoid
1614          * multi-rooted process trees, prevent global and container-inits
1615          * from creating siblings.
1616          */
1617         if ((clone_flags & CLONE_PARENT) &&
1618                                 current->signal->flags & SIGNAL_UNKILLABLE)
1619                 return ERR_PTR(-EINVAL);
1620
1621         /*
1622          * If the new process will be in a different pid or user namespace
1623          * do not allow it to share a thread group with the forking task.
1624          */
1625         if (clone_flags & CLONE_THREAD) {
1626                 if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
1627                     (task_active_pid_ns(current) !=
1628                                 current->nsproxy->pid_ns_for_children))
1629                         return ERR_PTR(-EINVAL);
1630         }
1631
1632         retval = -ENOMEM;
1633         p = dup_task_struct(current, node);
1634         if (!p)
1635                 goto fork_out;
1636
1637         /*
1638          * This _must_ happen before we call free_task(), i.e. before we jump
1639          * to any of the bad_fork_* labels. This is to avoid freeing
1640          * p->set_child_tid which is (ab)used as a kthread's data pointer for
1641          * kernel threads (PF_KTHREAD).
1642          */
1643         p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1644         /*
1645          * Clear TID on mm_release()?
1646          */
1647         p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1648
1649         ftrace_graph_init_task(p);
1650
1651         rt_mutex_init_task(p);
1652
1653 #ifdef CONFIG_PROVE_LOCKING
1654         DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
1655         DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
1656 #endif
1657         retval = -EAGAIN;
1658         if (atomic_read(&p->real_cred->user->processes) >=
1659                         task_rlimit(p, RLIMIT_NPROC)) {
1660                 if (p->real_cred->user != INIT_USER &&
1661                     !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
1662                         goto bad_fork_free;
1663         }
1664         current->flags &= ~PF_NPROC_EXCEEDED;
1665
1666         retval = copy_creds(p, clone_flags);
1667         if (retval < 0)
1668                 goto bad_fork_free;
1669
1670         /*
1671          * If multiple threads are within copy_process(), then this check
1672          * triggers too late. This doesn't hurt, the check is only there
1673          * to stop root fork bombs.
1674          */
1675         retval = -EAGAIN;
1676         if (nr_threads >= max_threads)
1677                 goto bad_fork_cleanup_count;
1678
1679         delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
1680         p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE);
1681         p->flags |= PF_FORKNOEXEC;
1682         INIT_LIST_HEAD(&p->children);
1683         INIT_LIST_HEAD(&p->sibling);
1684         rcu_copy_process(p);
1685         p->vfork_done = NULL;
1686         spin_lock_init(&p->alloc_lock);
1687
1688         init_sigpending(&p->pending);
1689
1690         p->utime = p->stime = p->gtime = 0;
1691 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1692         p->utimescaled = p->stimescaled = 0;
1693 #endif
1694         prev_cputime_init(&p->prev_cputime);
1695
1696 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1697         seqcount_init(&p->vtime.seqcount);
1698         p->vtime.starttime = 0;
1699         p->vtime.state = VTIME_INACTIVE;
1700 #endif
1701
1702 #if defined(SPLIT_RSS_COUNTING)
1703         memset(&p->rss_stat, 0, sizeof(p->rss_stat));
1704 #endif
1705
1706         p->default_timer_slack_ns = current->timer_slack_ns;
1707
1708         task_io_accounting_init(&p->ioac);
1709         acct_clear_integrals(p);
1710
1711         posix_cpu_timers_init(p);
1712
1713         p->start_time = ktime_get_ns();
1714         p->real_start_time = ktime_get_boot_ns();
1715         p->io_context = NULL;
1716         audit_set_context(p, NULL);
1717         cgroup_fork(p);
1718 #ifdef CONFIG_NUMA
1719         p->mempolicy = mpol_dup(p->mempolicy);
1720         if (IS_ERR(p->mempolicy)) {
1721                 retval = PTR_ERR(p->mempolicy);
1722                 p->mempolicy = NULL;
1723                 goto bad_fork_cleanup_threadgroup_lock;
1724         }
1725 #endif
1726 #ifdef CONFIG_CPUSETS
1727         p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
1728         p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
1729         seqcount_init(&p->mems_allowed_seq);
1730 #endif
1731 #ifdef CONFIG_TRACE_IRQFLAGS
1732         p->irq_events = 0;
1733         p->hardirqs_enabled = 0;
1734         p->hardirq_enable_ip = 0;
1735         p->hardirq_enable_event = 0;
1736         p->hardirq_disable_ip = _THIS_IP_;
1737         p->hardirq_disable_event = 0;
1738         p->softirqs_enabled = 1;
1739         p->softirq_enable_ip = _THIS_IP_;
1740         p->softirq_enable_event = 0;
1741         p->softirq_disable_ip = 0;
1742         p->softirq_disable_event = 0;
1743         p->hardirq_context = 0;
1744         p->softirq_context = 0;
1745 #endif
1746
1747         p->pagefault_disabled = 0;
1748
1749 #ifdef CONFIG_LOCKDEP
1750         p->lockdep_depth = 0; /* no locks held yet */
1751         p->curr_chain_key = 0;
1752         p->lockdep_recursion = 0;
1753         lockdep_init_task(p);
1754 #endif
1755
1756 #ifdef CONFIG_DEBUG_MUTEXES
1757         p->blocked_on = NULL; /* not blocked yet */
1758 #endif
1759 #ifdef CONFIG_BCACHE
1760         p->sequential_io        = 0;
1761         p->sequential_io_avg    = 0;
1762 #endif
1763
1764         /* Perform scheduler related setup. Assign this task to a CPU. */
1765         retval = sched_fork(clone_flags, p);
1766         if (retval)
1767                 goto bad_fork_cleanup_policy;
1768
1769         retval = perf_event_init_task(p);
1770         if (retval)
1771                 goto bad_fork_cleanup_policy;
1772         retval = audit_alloc(p);
1773         if (retval)
1774                 goto bad_fork_cleanup_perf;
1775         /* copy all the process information */
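        /*
         * Each copy_*() call below is paired with a bad_fork_cleanup_*
         * label: on failure we jump to the label matching the last piece
         * that was set up successfully, so the partially constructed task
         * is torn down in reverse order.
         */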
1776         shm_init_task(p);
1777         retval = security_task_alloc(p, clone_flags);
1778         if (retval)
1779                 goto bad_fork_cleanup_audit;
1780         retval = copy_semundo(clone_flags, p);
1781         if (retval)
1782                 goto bad_fork_cleanup_security;
1783         retval = copy_files(clone_flags, p);
1784         if (retval)
1785                 goto bad_fork_cleanup_semundo;
1786         retval = copy_fs(clone_flags, p);
1787         if (retval)
1788                 goto bad_fork_cleanup_files;
1789         retval = copy_sighand(clone_flags, p);
1790         if (retval)
1791                 goto bad_fork_cleanup_fs;
1792         retval = copy_signal(clone_flags, p);
1793         if (retval)
1794                 goto bad_fork_cleanup_sighand;
1795         retval = copy_mm(clone_flags, p);
1796         if (retval)
1797                 goto bad_fork_cleanup_signal;
1798         retval = copy_namespaces(clone_flags, p);
1799         if (retval)
1800                 goto bad_fork_cleanup_mm;
1801         retval = copy_io(clone_flags, p);
1802         if (retval)
1803                 goto bad_fork_cleanup_namespaces;
1804         retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
1805         if (retval)
1806                 goto bad_fork_cleanup_io;
1807
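        /*
         * Only the boot-time idle threads created via fork_idle() pass
         * &init_struct_pid in here; all other callers pass NULL and get a
         * freshly allocated struct pid in the child's pid namespace.
         */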
1808         if (pid != &init_struct_pid) {
1809                 pid = alloc_pid(p->nsproxy->pid_ns_for_children);
1810                 if (IS_ERR(pid)) {
1811                         retval = PTR_ERR(pid);
1812                         goto bad_fork_cleanup_thread;
1813                 }
1814         }
1815
1816 #ifdef CONFIG_BLOCK
1817         p->plug = NULL;
1818 #endif
1819 #ifdef CONFIG_FUTEX
1820         p->robust_list = NULL;
1821 #ifdef CONFIG_COMPAT
1822         p->compat_robust_list = NULL;
1823 #endif
1824         INIT_LIST_HEAD(&p->pi_state_list);
1825         p->pi_state_cache = NULL;
1826 #endif
1827         /*
1828          * sigaltstack should be cleared when sharing the same VM
1829          */
1830         if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
1831                 sas_ss_reset(p);
1832
1833         /*
1834          * Syscall tracing and stepping should be turned off in the
1835          * child regardless of CLONE_PTRACE.
1836          */
1837         user_disable_single_step(p);
1838         clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1839 #ifdef TIF_SYSCALL_EMU
1840         clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
1841 #endif
1842         clear_all_latency_tracing(p);
1843
1844         /* ok, now we should be set up.. */
1845         p->pid = pid_nr(pid);
1846         if (clone_flags & CLONE_THREAD) {
1847                 p->exit_signal = -1;
1848                 p->group_leader = current->group_leader;
1849                 p->tgid = current->tgid;
1850         } else {
1851                 if (clone_flags & CLONE_PARENT)
1852                         p->exit_signal = current->group_leader->exit_signal;
1853                 else
1854                         p->exit_signal = (clone_flags & CSIGNAL);
1855                 p->group_leader = p;
1856                 p->tgid = p->pid;
1857         }
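        /*
         * Example (informal): a plain fork() arrives here with
         * clone_flags == SIGCHLD, so the child becomes its own group
         * leader (tgid == pid) and exit_signal is set to SIGCHLD, which
         * is what the parent is sent when this child exits.
         */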
1858
1859         p->nr_dirtied = 0;
1860         p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
1861         p->dirty_paused_when = 0;
1862
1863         p->pdeath_signal = 0;
1864         INIT_LIST_HEAD(&p->thread_group);
1865         p->task_works = NULL;
1866
1867         cgroup_threadgroup_change_begin(current);
1868         /*
1869          * Ensure that the cgroup subsystem policies allow the new process to be
1870          * forked. It should be noted that the new process's css_set can be changed
1871          * between here and cgroup_post_fork() if an organisation operation is in
1872          * progress.
1873          */
1874         retval = cgroup_can_fork(p);
1875         if (retval)
1876                 goto bad_fork_free_pid;
1877
1878         /*
1879          * Make it visible to the rest of the system, but don't wake it up yet.
1880          * Need the tasklist lock for parent etc. handling!
1881          */
1882         write_lock_irq(&tasklist_lock);
1883
1884         /* CLONE_PARENT re-uses the old parent */
1885         if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
1886                 p->real_parent = current->real_parent;
1887                 p->parent_exec_id = current->parent_exec_id;
1888         } else {
1889                 p->real_parent = current;
1890                 p->parent_exec_id = current->self_exec_id;
1891         }
1892
1893         klp_copy_process(p);
1894
1895         spin_lock(&current->sighand->siglock);
1896
1897         /*
1898          * Copy seccomp details explicitly here, in case they were changed
1899          * before holding sighand lock.
1900          */
1901         copy_seccomp(p);
1902
1903         rseq_fork(p, clone_flags);
1904
1905         /*
1906          * Process group and session signals need to be delivered to just the
1907          * parent before the fork or both the parent and the child after the
1908          * fork. Restart if a signal comes in before we add the new process to
1909          * its process group.
1910          * A fatal signal pending means that current will exit, so the new
1911          * thread can't slip out of an OOM kill (or normal SIGKILL).
1912          */
1913         recalc_sigpending();
1914         if (signal_pending(current)) {
1915                 retval = -ERESTARTNOINTR;
1916                 goto bad_fork_cancel_cgroup;
1917         }
1918         if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
1919                 retval = -ENOMEM;
1920                 goto bad_fork_cancel_cgroup;
1921         }
1922
1923         if (likely(p->pid)) {
1924                 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
1925
1926                 init_task_pid(p, PIDTYPE_PID, pid);
1927                 if (thread_group_leader(p)) {
1928                         init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
1929                         init_task_pid(p, PIDTYPE_SID, task_session(current));
1930
1931                         if (is_child_reaper(pid)) {
1932                                 ns_of_pid(pid)->child_reaper = p;
1933                                 p->signal->flags |= SIGNAL_UNKILLABLE;
1934                         }
1935
1936                         p->signal->leader_pid = pid;
1937                         p->signal->tty = tty_kref_get(current->signal->tty);
1938                         /*
1939                          * Inherit has_child_subreaper flag under the same
1940                          * tasklist_lock with adding child to the process tree
1941                          * for propagate_has_child_subreaper optimization.
1942                          */
1943                         p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
1944                                                          p->real_parent->signal->is_child_subreaper;
1945                         list_add_tail(&p->sibling, &p->real_parent->children);
1946                         list_add_tail_rcu(&p->tasks, &init_task.tasks);
1947                         attach_pid(p, PIDTYPE_PGID);
1948                         attach_pid(p, PIDTYPE_SID);
1949                         __this_cpu_inc(process_counts);
1950                 } else {
1951                         current->signal->nr_threads++;
1952                         atomic_inc(&current->signal->live);
1953                         atomic_inc(&current->signal->sigcnt);
1954                         list_add_tail_rcu(&p->thread_group,
1955                                           &p->group_leader->thread_group);
1956                         list_add_tail_rcu(&p->thread_node,
1957                                           &p->signal->thread_head);
1958                 }
1959                 attach_pid(p, PIDTYPE_PID);
1960                 nr_threads++;
1961         }
1962
1963         total_forks++;
1964         spin_unlock(&current->sighand->siglock);
1965         syscall_tracepoint_update(p);
1966         write_unlock_irq(&tasklist_lock);
1967
1968         proc_fork_connector(p);
1969         cgroup_post_fork(p);
1970         cgroup_threadgroup_change_end(current);
1971         perf_event_fork(p);
1972
1973         trace_task_newtask(p, clone_flags);
1974         uprobe_copy_process(p, clone_flags);
1975
1976         return p;
1977
1978 bad_fork_cancel_cgroup:
1979         spin_unlock(&current->sighand->siglock);
1980         write_unlock_irq(&tasklist_lock);
1981         cgroup_cancel_fork(p);
1982 bad_fork_free_pid:
1983         cgroup_threadgroup_change_end(current);
1984         if (pid != &init_struct_pid)
1985                 free_pid(pid);
1986 bad_fork_cleanup_thread:
1987         exit_thread(p);
1988 bad_fork_cleanup_io:
1989         if (p->io_context)
1990                 exit_io_context(p);
1991 bad_fork_cleanup_namespaces:
1992         exit_task_namespaces(p);
1993 bad_fork_cleanup_mm:
1994         if (p->mm)
1995                 mmput(p->mm);
1996 bad_fork_cleanup_signal:
1997         if (!(clone_flags & CLONE_THREAD))
1998                 free_signal_struct(p->signal);
1999 bad_fork_cleanup_sighand:
2000         __cleanup_sighand(p->sighand);
2001 bad_fork_cleanup_fs:
2002         exit_fs(p); /* blocking */
2003 bad_fork_cleanup_files:
2004         exit_files(p); /* blocking */
2005 bad_fork_cleanup_semundo:
2006         exit_sem(p);
2007 bad_fork_cleanup_security:
2008         security_task_free(p);
2009 bad_fork_cleanup_audit:
2010         audit_free(p);
2011 bad_fork_cleanup_perf:
2012         perf_event_free_task(p);
2013 bad_fork_cleanup_policy:
2014         lockdep_free_task(p);
2015 #ifdef CONFIG_NUMA
2016         mpol_put(p->mempolicy);
2017 bad_fork_cleanup_threadgroup_lock:
2018 #endif
2019         delayacct_tsk_free(p);
2020 bad_fork_cleanup_count:
2021         atomic_dec(&p->cred->user->processes);
2022         exit_creds(p);
2023 bad_fork_free:
2024         p->state = TASK_DEAD;
2025         put_task_stack(p);
2026         free_task(p);
2027 fork_out:
2028         return ERR_PTR(retval);
2029 }
2030
2031 static inline void init_idle_pids(struct pid_link *links)
2032 {
2033         enum pid_type type;
2034
2035         for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
2036                 INIT_HLIST_NODE(&links[type].node); /* not really needed */
2037                 links[type].pid = &init_struct_pid;
2038         }
2039 }
2040
2041 struct task_struct *fork_idle(int cpu)
2042 {
2043         struct task_struct *task;
2044         task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
2045                             cpu_to_node(cpu));
2046         if (!IS_ERR(task)) {
2047                 init_idle_pids(task->pids);
2048                 init_idle(task, cpu);
2049         }
2050
2051         return task;
2052 }
2053
2054 /*
2055  *  Ok, this is the main fork-routine.
2056  *
2057  * It copies the process, and if successful kick-starts
2058  * it and waits for it to finish using the VM if required.
2059  */
2060 long _do_fork(unsigned long clone_flags,
2061               unsigned long stack_start,
2062               unsigned long stack_size,
2063               int __user *parent_tidptr,
2064               int __user *child_tidptr,
2065               unsigned long tls)
2066 {
2067         struct completion vfork;
2068         struct pid *pid;
2069         struct task_struct *p;
2070         int trace = 0;
2071         long nr;
2072
2073         /*
2074          * Determine whether and which event to report to ptracer.  When
2075          * called from kernel_thread or CLONE_UNTRACED is explicitly
2076          * requested, no event is reported; otherwise, report if the event
2077          * for the type of forking is enabled.
2078          */
2079         if (!(clone_flags & CLONE_UNTRACED)) {
2080                 if (clone_flags & CLONE_VFORK)
2081                         trace = PTRACE_EVENT_VFORK;
2082                 else if ((clone_flags & CSIGNAL) != SIGCHLD)
2083                         trace = PTRACE_EVENT_CLONE;
2084                 else
2085                         trace = PTRACE_EVENT_FORK;
2086
2087                 if (likely(!ptrace_event_enabled(current, trace)))
2088                         trace = 0;
2089         }
2090
2091         p = copy_process(clone_flags, stack_start, stack_size,
2092                          child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
2093         add_latent_entropy();
2094
2095         if (IS_ERR(p))
2096                 return PTR_ERR(p);
2097
2098         /*
2099          * Do this prior waking up the new thread - the thread pointer
2100          * might get invalid after that point, if the thread exits quickly.
2101          */
2102         trace_sched_process_fork(current, p);
2103
2104         pid = get_task_pid(p, PIDTYPE_PID);
2105         nr = pid_vnr(pid);
2106
2107         if (clone_flags & CLONE_PARENT_SETTID)
2108                 put_user(nr, parent_tidptr);
2109
2110         if (clone_flags & CLONE_VFORK) {
2111                 p->vfork_done = &vfork;
2112                 init_completion(&vfork);
2113                 get_task_struct(p);
2114         }
2115
2116         wake_up_new_task(p);
2117
2118         /* forking complete and child started to run, tell ptracer */
2119         if (unlikely(trace))
2120                 ptrace_event_pid(trace, pid);
2121
2122         if (clone_flags & CLONE_VFORK) {
2123                 if (!wait_for_vfork_done(p, &vfork))
2124                         ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
2125         }
2126
2127         put_pid(pid);
2128         return nr;
2129 }
2130
2131 #ifndef CONFIG_HAVE_COPY_THREAD_TLS
2132 /* For compatibility with architectures that call do_fork directly rather than
2133  * using the syscall entry points below. */
2134 long do_fork(unsigned long clone_flags,
2135               unsigned long stack_start,
2136               unsigned long stack_size,
2137               int __user *parent_tidptr,
2138               int __user *child_tidptr)
2139 {
2140         return _do_fork(clone_flags, stack_start, stack_size,
2141                         parent_tidptr, child_tidptr, 0);
2142 }
2143 #endif
2144
2145 /*
2146  * Create a kernel thread.
2147  */
2148 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
2149 {
2150         return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
2151                 (unsigned long)arg, NULL, NULL, 0);
2152 }
2153
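/*
 * Typical usage (illustrative): early boot brings up the first kernel
 * threads with something like
 *
 *      pid = kernel_thread(kernel_init, NULL, CLONE_FS);
 *      pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
 *
 * Note that fn and arg ride through the stack_start/stack_size arguments
 * of _do_fork() and are picked up by copy_thread_tls() for PF_KTHREAD
 * tasks.
 */
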
2154 #ifdef __ARCH_WANT_SYS_FORK
2155 SYSCALL_DEFINE0(fork)
2156 {
2157 #ifdef CONFIG_MMU
2158         return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0);
2159 #else
2160         /* cannot support in nommu mode */
2161         return -EINVAL;
2162 #endif
2163 }
2164 #endif
2165
2166 #ifdef __ARCH_WANT_SYS_VFORK
2167 SYSCALL_DEFINE0(vfork)
2168 {
2169         return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
2170                         0, NULL, NULL, 0);
2171 }
2172 #endif
2173
2174 #ifdef __ARCH_WANT_SYS_CLONE
2175 #ifdef CONFIG_CLONE_BACKWARDS
2176 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2177                  int __user *, parent_tidptr,
2178                  unsigned long, tls,
2179                  int __user *, child_tidptr)
2180 #elif defined(CONFIG_CLONE_BACKWARDS2)
2181 SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
2182                  int __user *, parent_tidptr,
2183                  int __user *, child_tidptr,
2184                  unsigned long, tls)
2185 #elif defined(CONFIG_CLONE_BACKWARDS3)
2186 SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
2187                 int, stack_size,
2188                 int __user *, parent_tidptr,
2189                 int __user *, child_tidptr,
2190                 unsigned long, tls)
2191 #else
2192 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2193                  int __user *, parent_tidptr,
2194                  int __user *, child_tidptr,
2195                  unsigned long, tls)
2196 #endif
2197 {
2198         return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls);
2199 }
2200 #endif
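
/*
 * For illustration: with none of the CONFIG_CLONE_BACKWARDS* options set
 * (x86-64, for example), the raw argument order userspace sees is
 *
 *      clone(flags, newsp, parent_tidptr, child_tidptr, tls);
 *
 * the variants above merely reshuffle these arguments for architectures
 * whose legacy ABIs fixed a different order.
 */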
2201
2202 void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
2203 {
2204         struct task_struct *leader, *parent, *child;
2205         int res;
2206
2207         read_lock(&tasklist_lock);
2208         leader = top = top->group_leader;
2209 down:
2210         for_each_thread(leader, parent) {
2211                 list_for_each_entry(child, &parent->children, sibling) {
2212                         res = visitor(child, data);
2213                         if (res) {
2214                                 if (res < 0)
2215                                         goto out;
2216                                 leader = child;
2217                                 goto down;
2218                         }
2219 up:
2220                         ;
2221                 }
2222         }
2223
2224         if (leader != top) {
2225                 child = leader;
2226                 parent = child->real_parent;
2227                 leader = parent->group_leader;
2228                 goto up;
2229         }
2230 out:
2231         read_unlock(&tasklist_lock);
2232 }
2233
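/*
 * Example visitor (illustrative, not a caller in this file): count every
 * descendant process of @top.
 *
 *      static int count_visitor(struct task_struct *t, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 1;       // keep walking into t's children
 *      }
 *
 * Returning 0 skips t's subtree, and a negative value aborts the walk.
 */
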
2234 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
2235 #define ARCH_MIN_MMSTRUCT_ALIGN 0
2236 #endif
2237
2238 static void sighand_ctor(void *data)
2239 {
2240         struct sighand_struct *sighand = data;
2241
2242         spin_lock_init(&sighand->siglock);
2243         init_waitqueue_head(&sighand->signalfd_wqh);
2244 }
2245
2246 void __init proc_caches_init(void)
2247 {
2248         sighand_cachep = kmem_cache_create("sighand_cache",
2249                         sizeof(struct sighand_struct), 0,
2250                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
2251                         SLAB_ACCOUNT, sighand_ctor);
2252         signal_cachep = kmem_cache_create("signal_cache",
2253                         sizeof(struct signal_struct), 0,
2254                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
2255                         NULL);
2256         files_cachep = kmem_cache_create("files_cache",
2257                         sizeof(struct files_struct), 0,
2258                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
2259                         NULL);
2260         fs_cachep = kmem_cache_create("fs_cache",
2261                         sizeof(struct fs_struct), 0,
2262                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
2263                         NULL);
2264         /*
2265          * FIXME! The "sizeof(struct mm_struct)" currently includes the
2266          * whole struct cpumask for the OFFSTACK case. We could change
2267          * this to *only* allocate as much of it as required by the
2268          * maximum number of CPUs we can ever have.  The cpumask_allocation
2269          * is at the end of the structure, exactly for that reason.
2270          */
2271         mm_cachep = kmem_cache_create_usercopy("mm_struct",
2272                         sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
2273                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
2274                         offsetof(struct mm_struct, saved_auxv),
2275                         sizeof_field(struct mm_struct, saved_auxv),
2276                         NULL);
2277         vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
2278         mmap_init();
2279         nsproxy_cache_init();
2280 }
2281
2282 /*
2283  * Check constraints on flags passed to the unshare system call.
2284  */
2285 static int check_unshare_flags(unsigned long unshare_flags)
2286 {
2287         if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
2288                                 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
2289                                 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
2290                                 CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP))
2291                 return -EINVAL;
2292         /*
2293          * Not implemented, but pretend it works if there is nothing
2294          * to unshare.  Note that unsharing the address space or the
2295          * signal handlers also requires unsharing the signal queues (aka
2296          * CLONE_THREAD).
2297          */
2298         if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
2299                 if (!thread_group_empty(current))
2300                         return -EINVAL;
2301         }
2302         if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
2303                 if (atomic_read(&current->sighand->count) > 1)
2304                         return -EINVAL;
2305         }
2306         if (unshare_flags & CLONE_VM) {
2307                 if (!current_is_single_threaded())
2308                         return -EINVAL;
2309         }
2310
2311         return 0;
2312 }
2313
2314 /*
2315  * Unshare the filesystem structure if it is being shared
2316  */
2317 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
2318 {
2319         struct fs_struct *fs = current->fs;
2320
2321         if (!(unshare_flags & CLONE_FS) || !fs)
2322                 return 0;
2323
2324         /* don't need lock here; in the worst case we'll do useless copy */
2325         if (fs->users == 1)
2326                 return 0;
2327
2328         *new_fsp = copy_fs_struct(fs);
2329         if (!*new_fsp)
2330                 return -ENOMEM;
2331
2332         return 0;
2333 }
2334
2335 /*
2336  * Unshare file descriptor table if it is being shared
2337  */
2338 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
2339 {
2340         struct files_struct *fd = current->files;
2341         int error = 0;
2342
2343         if ((unshare_flags & CLONE_FILES) &&
2344             (fd && atomic_read(&fd->count) > 1)) {
2345                 *new_fdp = dup_fd(fd, &error);
2346                 if (!*new_fdp)
2347                         return error;
2348         }
2349
2350         return 0;
2351 }
2352
2353 /*
2354  * unshare allows a process to 'unshare' part of the process
2355  * context which was originally shared using clone.  copy_*
2356  * functions used by do_fork() cannot be used here directly
2357  * because they modify an inactive task_struct that is being
2358  * constructed. Here we are modifying the current, active,
2359  * task_struct.
2360  */
2361 int ksys_unshare(unsigned long unshare_flags)
2362 {
2363         struct fs_struct *fs, *new_fs = NULL;
2364         struct files_struct *fd, *new_fd = NULL;
2365         struct cred *new_cred = NULL;
2366         struct nsproxy *new_nsproxy = NULL;
2367         int do_sysvsem = 0;
2368         int err;
2369
2370         /*
2371          * If unsharing a user namespace, we must also unshare the thread group
2372          * and the filesystem root and working directories.
2373          */
2374         if (unshare_flags & CLONE_NEWUSER)
2375                 unshare_flags |= CLONE_THREAD | CLONE_FS;
2376         /*
2377          * If unsharing the VM, we must also unshare the signal handlers.
2378          */
2379         if (unshare_flags & CLONE_VM)
2380                 unshare_flags |= CLONE_SIGHAND;
2381         /*
2382          * If unsharing the signal handlers, we must also unshare the signal queues.
2383          */
2384         if (unshare_flags & CLONE_SIGHAND)
2385                 unshare_flags |= CLONE_THREAD;
2386         /*
2387          * If unsharing the mount namespace, we must also unshare filesystem information.
2388          */
2389         if (unshare_flags & CLONE_NEWNS)
2390                 unshare_flags |= CLONE_FS;
2391
2392         err = check_unshare_flags(unshare_flags);
2393         if (err)
2394                 goto bad_unshare_out;
2395         /*
2396          * CLONE_NEWIPC must also detach from the undolist: after switching
2397          * to a new ipc namespace, the semaphore arrays from the old
2398          * namespace are unreachable.
2399          */
2400         if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
2401                 do_sysvsem = 1;
2402         err = unshare_fs(unshare_flags, &new_fs);
2403         if (err)
2404                 goto bad_unshare_out;
2405         err = unshare_fd(unshare_flags, &new_fd);
2406         if (err)
2407                 goto bad_unshare_cleanup_fs;
2408         err = unshare_userns(unshare_flags, &new_cred);
2409         if (err)
2410                 goto bad_unshare_cleanup_fd;
2411         err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
2412                                          new_cred, new_fs);
2413         if (err)
2414                 goto bad_unshare_cleanup_cred;
2415
2416         if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
2417                 if (do_sysvsem) {
2418                         /*
2419                          * CLONE_SYSVSEM is equivalent to sys_exit(): detach from the undo list.
2420                          */
2421                         exit_sem(current);
2422                 }
2423                 if (unshare_flags & CLONE_NEWIPC) {
2424                         /* Orphan segments in old ns (see sem above). */
2425                         exit_shm(current);
2426                         shm_init_task(current);
2427                 }
2428
2429                 if (new_nsproxy)
2430                         switch_task_namespaces(current, new_nsproxy);
2431
2432                 task_lock(current);
2433
2434                 if (new_fs) {
2435                         fs = current->fs;
2436                         spin_lock(&fs->lock);
2437                         current->fs = new_fs;
2438                         if (--fs->users)
2439                                 new_fs = NULL;
2440                         else
2441                                 new_fs = fs;
2442                         spin_unlock(&fs->lock);
2443                 }
2444
2445                 if (new_fd) {
2446                         fd = current->files;
2447                         current->files = new_fd;
2448                         new_fd = fd;
2449                 }
2450
2451                 task_unlock(current);
2452
2453                 if (new_cred) {
2454                         /* Install the new user namespace */
2455                         commit_creds(new_cred);
2456                         new_cred = NULL;
2457                 }
2458         }
2459
2460         perf_event_namespaces(current);
2461
2462 bad_unshare_cleanup_cred:
2463         if (new_cred)
2464                 put_cred(new_cred);
2465 bad_unshare_cleanup_fd:
2466         if (new_fd)
2467                 put_files_struct(new_fd);
2468
2469 bad_unshare_cleanup_fs:
2470         if (new_fs)
2471                 free_fs_struct(new_fs);
2472
2473 bad_unshare_out:
2474         return err;
2475 }
2476
2477 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
2478 {
2479         return ksys_unshare(unshare_flags);
2480 }
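
/*
 * Typical userspace use (illustrative): a task that wants a private mount
 * namespace calls
 *
 *      unshare(CLONE_NEWNS);           // needs CAP_SYS_ADMIN
 *
 * which, through the implication rules in ksys_unshare(), also unshares
 * the fs_struct, so root and working directory changes in the new
 * namespace do not affect tasks still using the old fs_struct.
 */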
2481
2482 /*
2483  *      Helper to unshare the files of the current task.
2484  *      We don't want to expose copy_files internals to
2485  *      the exec layer of the kernel.
2486  */
2487
2488 int unshare_files(struct files_struct **displaced)
2489 {
2490         struct task_struct *task = current;
2491         struct files_struct *copy = NULL;
2492         int error;
2493
2494         error = unshare_fd(CLONE_FILES, &copy);
2495         if (error || !copy) {
2496                 *displaced = NULL;
2497                 return error;
2498         }
2499         *displaced = task->files;
2500         task_lock(task);
2501         task->files = copy;
2502         task_unlock(task);
2503         return 0;
2504 }
2505
2506 int sysctl_max_threads(struct ctl_table *table, int write,
2507                        void __user *buffer, size_t *lenp, loff_t *ppos)
2508 {
2509         struct ctl_table t;
2510         int ret;
2511         int threads = max_threads;
2512         int min = MIN_THREADS;
2513         int max = MAX_THREADS;
2514
2515         t = *table;
2516         t.data = &threads;
2517         t.extra1 = &min;
2518         t.extra2 = &max;
2519
2520         ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2521         if (ret || !write)
2522                 return ret;
2523
2524         set_max_threads(threads);
2525
2526         return 0;
2527 }
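
/*
 * This handler backs the kernel.threads-max sysctl; for illustration:
 *
 *      # sysctl kernel.threads-max=100000
 *
 * Values outside [MIN_THREADS, MAX_THREADS] are rejected by
 * proc_dointvec_minmax() before set_max_threads() is applied.
 */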