1 /*
2  *  linux/kernel/fork.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 /*
8  *  'fork.c' contains the help-routines for the 'fork' system call
9  * (see also entry.S and others).
10  * Fork is rather simple, once you get the hang of it, but the memory
11  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
12  */
13
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/unistd.h>
17 #include <linux/module.h>
18 #include <linux/vmalloc.h>
19 #include <linux/completion.h>
20 #include <linux/personality.h>
21 #include <linux/mempolicy.h>
22 #include <linux/sem.h>
23 #include <linux/file.h>
24 #include <linux/fdtable.h>
25 #include <linux/iocontext.h>
26 #include <linux/key.h>
27 #include <linux/binfmts.h>
28 #include <linux/mman.h>
29 #include <linux/mmu_notifier.h>
30 #include <linux/fs.h>
31 #include <linux/nsproxy.h>
32 #include <linux/capability.h>
33 #include <linux/cpu.h>
34 #include <linux/cgroup.h>
35 #include <linux/security.h>
36 #include <linux/hugetlb.h>
37 #include <linux/seccomp.h>
38 #include <linux/swap.h>
39 #include <linux/syscalls.h>
40 #include <linux/jiffies.h>
41 #include <linux/futex.h>
42 #include <linux/compat.h>
43 #include <linux/kthread.h>
44 #include <linux/task_io_accounting_ops.h>
45 #include <linux/rcupdate.h>
46 #include <linux/ptrace.h>
47 #include <linux/mount.h>
48 #include <linux/audit.h>
49 #include <linux/memcontrol.h>
50 #include <linux/ftrace.h>
51 #include <linux/proc_fs.h>
52 #include <linux/profile.h>
53 #include <linux/rmap.h>
54 #include <linux/ksm.h>
55 #include <linux/acct.h>
56 #include <linux/tsacct_kern.h>
57 #include <linux/cn_proc.h>
58 #include <linux/freezer.h>
59 #include <linux/delayacct.h>
60 #include <linux/taskstats_kern.h>
61 #include <linux/random.h>
62 #include <linux/tty.h>
63 #include <linux/blkdev.h>
64 #include <linux/fs_struct.h>
65 #include <linux/magic.h>
66 #include <linux/perf_event.h>
67 #include <linux/posix-timers.h>
68 #include <linux/user-return-notifier.h>
69 #include <linux/oom.h>
70 #include <linux/khugepaged.h>
71 #include <linux/signalfd.h>
72 #include <linux/uprobes.h>
73
74 #include <asm/pgtable.h>
75 #include <asm/pgalloc.h>
76 #include <asm/uaccess.h>
77 #include <asm/mmu_context.h>
78 #include <asm/cacheflush.h>
79 #include <asm/tlbflush.h>
80
81 #include <trace/events/sched.h>
82
83 #define CREATE_TRACE_POINTS
84 #include <trace/events/task.h>
85
86 /*
87  * Counters protected by write_lock_irq(&tasklist_lock)
88  */
89 unsigned long total_forks;      /* Handle normal Linux uptimes. */
90 int nr_threads;                 /* The idle threads do not count.. */
91
92 int max_threads;                /* tunable limit on nr_threads */
93
94 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
95
96 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
97
98 #ifdef CONFIG_PROVE_RCU
99 int lockdep_tasklist_lock_is_held(void)
100 {
101         return lockdep_is_held(&tasklist_lock);
102 }
103 EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
104 #endif /* #ifdef CONFIG_PROVE_RCU */
105
106 int nr_processes(void)
107 {
108         int cpu;
109         int total = 0;
110
111         for_each_possible_cpu(cpu)
112                 total += per_cpu(process_counts, cpu);
113
114         return total;
115 }
116
117 void __weak arch_release_task_struct(struct task_struct *tsk)
118 {
119 }
120
121 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
122 static struct kmem_cache *task_struct_cachep;
123
124 static inline struct task_struct *alloc_task_struct_node(int node)
125 {
126         return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
127 }
128
129 static inline void free_task_struct(struct task_struct *tsk)
130 {
131         kmem_cache_free(task_struct_cachep, tsk);
132 }
133 #endif
134
135 void __weak arch_release_thread_info(struct thread_info *ti)
136 {
137 }
138
139 #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
140
141 /*
142  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
143  * kmem_cache based allocator.
144  */
145 # if THREAD_SIZE >= PAGE_SIZE
146 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
147                                                   int node)
148 {
149         struct page *page = alloc_pages_node(node, THREADINFO_GFP,
150                                              THREAD_SIZE_ORDER);
151
152         return page ? page_address(page) : NULL;
153 }
154
155 static inline void free_thread_info(struct thread_info *ti)
156 {
157         free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
158 }
159 # else
160 static struct kmem_cache *thread_info_cache;
161
162 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
163                                                   int node)
164 {
165         return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
166 }
167
168 static void free_thread_info(struct thread_info *ti)
169 {
170         kmem_cache_free(thread_info_cache, ti);
171 }
172
173 void thread_info_cache_init(void)
174 {
175         thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
176                                               THREAD_SIZE, 0, NULL);
177         BUG_ON(thread_info_cache == NULL);
178 }
179 # endif
180 #endif
181
182 /* SLAB cache for signal_struct structures (tsk->signal) */
183 static struct kmem_cache *signal_cachep;
184
185 /* SLAB cache for sighand_struct structures (tsk->sighand) */
186 struct kmem_cache *sighand_cachep;
187
188 /* SLAB cache for files_struct structures (tsk->files) */
189 struct kmem_cache *files_cachep;
190
191 /* SLAB cache for fs_struct structures (tsk->fs) */
192 struct kmem_cache *fs_cachep;
193
194 /* SLAB cache for vm_area_struct structures */
195 struct kmem_cache *vm_area_cachep;
196
197 /* SLAB cache for mm_struct structures (tsk->mm) */
198 static struct kmem_cache *mm_cachep;
199
200 static void account_kernel_stack(struct thread_info *ti, int account)
201 {
202         struct zone *zone = page_zone(virt_to_page(ti));
203
204         mod_zone_page_state(zone, NR_KERNEL_STACK, account);
205 }
206
207 void free_task(struct task_struct *tsk)
208 {
209         account_kernel_stack(tsk->stack, -1);
210         arch_release_thread_info(tsk->stack);
211         free_thread_info(tsk->stack);
212         rt_mutex_debug_task_free(tsk);
213         ftrace_graph_exit_task(tsk);
214         put_seccomp_filter(tsk);
215         arch_release_task_struct(tsk);
216         free_task_struct(tsk);
217 }
218 EXPORT_SYMBOL(free_task);
219
220 static inline void free_signal_struct(struct signal_struct *sig)
221 {
222         taskstats_tgid_free(sig);
223         sched_autogroup_exit(sig);
224         kmem_cache_free(signal_cachep, sig);
225 }
226
227 static inline void put_signal_struct(struct signal_struct *sig)
228 {
229         if (atomic_dec_and_test(&sig->sigcnt))
230                 free_signal_struct(sig);
231 }
232
233 void __put_task_struct(struct task_struct *tsk)
234 {
235         WARN_ON(!tsk->exit_state);
236         WARN_ON(atomic_read(&tsk->usage));
237         WARN_ON(tsk == current);
238
239         security_task_free(tsk);
240         exit_creds(tsk);
241         delayacct_tsk_free(tsk);
242         put_signal_struct(tsk->signal);
243
244         if (!profile_handoff_task(tsk))
245                 free_task(tsk);
246 }
247 EXPORT_SYMBOL_GPL(__put_task_struct);
248
249 void __init __weak arch_task_cache_init(void) { }
250
251 void __init fork_init(unsigned long mempages)
252 {
253 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
254 #ifndef ARCH_MIN_TASKALIGN
255 #define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
256 #endif
257         /* create a slab on which task_structs can be allocated */
258         task_struct_cachep =
259                 kmem_cache_create("task_struct", sizeof(struct task_struct),
260                         ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
261 #endif
262
263         /* do the arch specific task caches init */
264         arch_task_cache_init();
265
266         /*
267          * The default maximum number of threads is set to a safe
268          * value: the per-thread kernel stacks (THREAD_SIZE each) can
269          * take up at most one eighth of memory.
270          */
271         max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
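        /*
         * Worked example (illustrative, assuming 4 GiB of RAM and 8 KiB
         * kernel stacks): mempages ~= 2^20 pages of 4 KiB, so
         * max_threads = 2^20 / (8 * 8192 / 4096) = 65536, i.e. all kernel
         * stacks together are capped at roughly 1/8 of memory.
         */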
272
273         /*
274          * we need to allow at least 20 threads to boot a system
275          */
276         if (max_threads < 20)
277                 max_threads = 20;
278
279         init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
280         init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
281         init_task.signal->rlim[RLIMIT_SIGPENDING] =
282                 init_task.signal->rlim[RLIMIT_NPROC];
283 }
284
285 int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
286                                                struct task_struct *src)
287 {
288         *dst = *src;
289         return 0;
290 }
291
292 static struct task_struct *dup_task_struct(struct task_struct *orig)
293 {
294         struct task_struct *tsk;
295         struct thread_info *ti;
296         unsigned long *stackend;
297         int node = tsk_fork_get_node(orig);
298         int err;
299
300         tsk = alloc_task_struct_node(node);
301         if (!tsk)
302                 return NULL;
303
304         ti = alloc_thread_info_node(tsk, node);
305         if (!ti)
306                 goto free_tsk;
307
308         err = arch_dup_task_struct(tsk, orig);
309         if (err)
310                 goto free_ti;
311
312         tsk->stack = ti;
313
314         setup_thread_stack(tsk, orig);
315         clear_user_return_notifier(tsk);
316         clear_tsk_need_resched(tsk);
317         stackend = end_of_stack(tsk);
318         *stackend = STACK_END_MAGIC;    /* for overflow detection */
319
320 #ifdef CONFIG_CC_STACKPROTECTOR
321         tsk->stack_canary = get_random_int();
322 #endif
323
324         /*
325          * One for us, one for whoever does the "release_task()" (usually
326          * parent)
327          */
328         atomic_set(&tsk->usage, 2);
329 #ifdef CONFIG_BLK_DEV_IO_TRACE
330         tsk->btrace_seq = 0;
331 #endif
332         tsk->splice_pipe = NULL;
333         tsk->task_frag.page = NULL;
334
335         account_kernel_stack(ti, 1);
336
337         return tsk;
338
339 free_ti:
340         free_thread_info(ti);
341 free_tsk:
342         free_task_struct(tsk);
343         return NULL;
344 }
345
346 #ifdef CONFIG_MMU
347 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
348 {
349         struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
350         struct rb_node **rb_link, *rb_parent;
351         int retval;
352         unsigned long charge;
353         struct mempolicy *pol;
354
355         uprobe_start_dup_mmap();
356         down_write(&oldmm->mmap_sem);
357         flush_cache_dup_mm(oldmm);
358         uprobe_dup_mmap(oldmm, mm);
359         /*
360          * Not linked in yet - no deadlock potential:
361          */
362         down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
363
364         mm->locked_vm = 0;
365         mm->mmap = NULL;
366         mm->mmap_cache = NULL;
367         mm->free_area_cache = oldmm->mmap_base;
368         mm->cached_hole_size = ~0UL;
369         mm->map_count = 0;
370         cpumask_clear(mm_cpumask(mm));
371         mm->mm_rb = RB_ROOT;
372         rb_link = &mm->mm_rb.rb_node;
373         rb_parent = NULL;
374         pprev = &mm->mmap;
375         retval = ksm_fork(mm, oldmm);
376         if (retval)
377                 goto out;
378         retval = khugepaged_fork(mm, oldmm);
379         if (retval)
380                 goto out;
381
382         prev = NULL;
383         for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
384                 struct file *file;
385
386                 if (mpnt->vm_flags & VM_DONTCOPY) {
387                         vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
388                                                         -vma_pages(mpnt));
389                         continue;
390                 }
391                 charge = 0;
392                 if (mpnt->vm_flags & VM_ACCOUNT) {
393                         unsigned long len = vma_pages(mpnt);
394
395                         if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
396                                 goto fail_nomem;
397                         charge = len;
398                 }
399                 tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
400                 if (!tmp)
401                         goto fail_nomem;
402                 *tmp = *mpnt;
403                 INIT_LIST_HEAD(&tmp->anon_vma_chain);
404                 pol = mpol_dup(vma_policy(mpnt));
405                 retval = PTR_ERR(pol);
406                 if (IS_ERR(pol))
407                         goto fail_nomem_policy;
408                 vma_set_policy(tmp, pol);
409                 tmp->vm_mm = mm;
410                 if (anon_vma_fork(tmp, mpnt))
411                         goto fail_nomem_anon_vma_fork;
412                 tmp->vm_flags &= ~VM_LOCKED;
413                 tmp->vm_next = tmp->vm_prev = NULL;
414                 file = tmp->vm_file;
415                 if (file) {
416                         struct inode *inode = file->f_path.dentry->d_inode;
417                         struct address_space *mapping = file->f_mapping;
418
419                         get_file(file);
420                         if (tmp->vm_flags & VM_DENYWRITE)
421                                 atomic_dec(&inode->i_writecount);
422                         mutex_lock(&mapping->i_mmap_mutex);
423                         if (tmp->vm_flags & VM_SHARED)
424                                 mapping->i_mmap_writable++;
425                         flush_dcache_mmap_lock(mapping);
426                         /* insert tmp into the share list, just after mpnt */
427                         if (unlikely(tmp->vm_flags & VM_NONLINEAR))
428                                 vma_nonlinear_insert(tmp,
429                                                 &mapping->i_mmap_nonlinear);
430                         else
431                                 vma_interval_tree_insert_after(tmp, mpnt,
432                                                         &mapping->i_mmap);
433                         flush_dcache_mmap_unlock(mapping);
434                         mutex_unlock(&mapping->i_mmap_mutex);
435                 }
436
437                 /*
438                  * Clear hugetlb-related page reserves for children. This only
439                  * affects MAP_PRIVATE mappings. Faults generated by the child
440                  * are not guaranteed to succeed, even if read-only
441                  */
442                 if (is_vm_hugetlb_page(tmp))
443                         reset_vma_resv_huge_pages(tmp);
444
445                 /*
446                  * Link in the new vma and copy the page table entries.
447                  */
448                 *pprev = tmp;
449                 pprev = &tmp->vm_next;
450                 tmp->vm_prev = prev;
451                 prev = tmp;
452
453                 __vma_link_rb(mm, tmp, rb_link, rb_parent);
454                 rb_link = &tmp->vm_rb.rb_right;
455                 rb_parent = &tmp->vm_rb;
456
457                 mm->map_count++;
458                 retval = copy_page_range(mm, oldmm, mpnt);
459
460                 if (tmp->vm_ops && tmp->vm_ops->open)
461                         tmp->vm_ops->open(tmp);
462
463                 if (retval)
464                         goto out;
465         }
466         /* a new mm has just been created */
467         arch_dup_mmap(oldmm, mm);
468         retval = 0;
469 out:
470         up_write(&mm->mmap_sem);
471         flush_tlb_mm(oldmm);
472         up_write(&oldmm->mmap_sem);
473         uprobe_end_dup_mmap();
474         return retval;
475 fail_nomem_anon_vma_fork:
476         mpol_put(pol);
477 fail_nomem_policy:
478         kmem_cache_free(vm_area_cachep, tmp);
479 fail_nomem:
480         retval = -ENOMEM;
481         vm_unacct_memory(charge);
482         goto out;
483 }
484
485 static inline int mm_alloc_pgd(struct mm_struct *mm)
486 {
487         mm->pgd = pgd_alloc(mm);
488         if (unlikely(!mm->pgd))
489                 return -ENOMEM;
490         return 0;
491 }
492
493 static inline void mm_free_pgd(struct mm_struct *mm)
494 {
495         pgd_free(mm, mm->pgd);
496 }
497 #else
498 #define dup_mmap(mm, oldmm)     (0)
499 #define mm_alloc_pgd(mm)        (0)
500 #define mm_free_pgd(mm)
501 #endif /* CONFIG_MMU */
502
503 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
504
505 #define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
506 #define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))
507
508 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
509
510 static int __init coredump_filter_setup(char *s)
511 {
512         default_dump_filter =
513                 (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
514                 MMF_DUMP_FILTER_MASK;
515         return 1;
516 }
517
518 __setup("coredump_filter=", coredump_filter_setup);
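/*
 * Usage sketch (illustrative; bit meanings as documented for
 * /proc/<pid>/coredump_filter): booting with "coredump_filter=0x23" keeps
 * anonymous private, anonymous shared and private hugetlb mappings in core
 * dumps.  The value is parsed by simple_strtoul(s, NULL, 0), so hex with a
 * 0x prefix works, and is then shifted into the MMF_DUMP_FILTER bits used
 * as the default mm->flags in mm_init().
 */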
519
520 #include <linux/init_task.h>
521
522 static void mm_init_aio(struct mm_struct *mm)
523 {
524 #ifdef CONFIG_AIO
525         spin_lock_init(&mm->ioctx_lock);
526         INIT_HLIST_HEAD(&mm->ioctx_list);
527 #endif
528 }
529
530 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
531 {
532         atomic_set(&mm->mm_users, 1);
533         atomic_set(&mm->mm_count, 1);
534         init_rwsem(&mm->mmap_sem);
535         INIT_LIST_HEAD(&mm->mmlist);
536         mm->flags = (current->mm) ?
537                 (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
538         mm->core_state = NULL;
539         mm->nr_ptes = 0;
540         memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
541         spin_lock_init(&mm->page_table_lock);
542         mm->free_area_cache = TASK_UNMAPPED_BASE;
543         mm->cached_hole_size = ~0UL;
544         mm_init_aio(mm);
545         mm_init_owner(mm, p);
546
547         if (likely(!mm_alloc_pgd(mm))) {
548                 mm->def_flags = 0;
549                 mmu_notifier_mm_init(mm);
550                 return mm;
551         }
552
553         free_mm(mm);
554         return NULL;
555 }
556
557 static void check_mm(struct mm_struct *mm)
558 {
559         int i;
560
561         for (i = 0; i < NR_MM_COUNTERS; i++) {
562                 long x = atomic_long_read(&mm->rss_stat.count[i]);
563
564                 if (unlikely(x))
565                         printk(KERN_ALERT "BUG: Bad rss-counter state "
566                                           "mm:%p idx:%d val:%ld\n", mm, i, x);
567         }
568
569 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
570         VM_BUG_ON(mm->pmd_huge_pte);
571 #endif
572 }
573
574 /*
575  * Allocate and initialize an mm_struct.
576  */
577 struct mm_struct *mm_alloc(void)
578 {
579         struct mm_struct *mm;
580
581         mm = allocate_mm();
582         if (!mm)
583                 return NULL;
584
585         memset(mm, 0, sizeof(*mm));
586         mm_init_cpumask(mm);
587         return mm_init(mm, current);
588 }
589
590 /*
591  * Called when the last reference to the mm
592  * is dropped: either by a lazy thread or by
593  * mmput. Free the page directory and the mm.
594  */
595 void __mmdrop(struct mm_struct *mm)
596 {
597         BUG_ON(mm == &init_mm);
598         mm_free_pgd(mm);
599         destroy_context(mm);
600         mmu_notifier_mm_destroy(mm);
601         check_mm(mm);
602         free_mm(mm);
603 }
604 EXPORT_SYMBOL_GPL(__mmdrop);
605
606 /*
607  * Decrement the use count and release all resources for an mm.
608  */
609 void mmput(struct mm_struct *mm)
610 {
611         might_sleep();
612
613         if (atomic_dec_and_test(&mm->mm_users)) {
614                 uprobe_clear_state(mm);
615                 exit_aio(mm);
616                 ksm_exit(mm);
617                 khugepaged_exit(mm); /* must run before exit_mmap */
618                 exit_mmap(mm);
619                 set_mm_exe_file(mm, NULL);
620                 if (!list_empty(&mm->mmlist)) {
621                         spin_lock(&mmlist_lock);
622                         list_del(&mm->mmlist);
623                         spin_unlock(&mmlist_lock);
624                 }
625                 if (mm->binfmt)
626                         module_put(mm->binfmt->module);
627                 mmdrop(mm);
628         }
629 }
630 EXPORT_SYMBOL_GPL(mmput);
631
632 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
633 {
634         if (new_exe_file)
635                 get_file(new_exe_file);
636         if (mm->exe_file)
637                 fput(mm->exe_file);
638         mm->exe_file = new_exe_file;
639 }
640
641 struct file *get_mm_exe_file(struct mm_struct *mm)
642 {
643         struct file *exe_file;
644
645         /* We need mmap_sem to protect against races with removal of exe_file */
646         down_read(&mm->mmap_sem);
647         exe_file = mm->exe_file;
648         if (exe_file)
649                 get_file(exe_file);
650         up_read(&mm->mmap_sem);
651         return exe_file;
652 }
653
654 static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
655 {
656         /* It's safe to write the exe_file pointer without exe_file_lock because
657          * this is called during fork when the task is not yet in /proc */
658         newmm->exe_file = get_mm_exe_file(oldmm);
659 }
660
661 /**
662  * get_task_mm - acquire a reference to the task's mm
663  *
664  * Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
665  * this kernel workthread has transiently adopted a user mm with use_mm,
666  * to do its AIO).  Otherwise returns a reference to the mm after bumping
667  * up its use count.  The caller must release the mm via mmput() after
668  * use.  Typically used by /proc and ptrace.
669  */
670 struct mm_struct *get_task_mm(struct task_struct *task)
671 {
672         struct mm_struct *mm;
673
674         task_lock(task);
675         mm = task->mm;
676         if (mm) {
677                 if (task->flags & PF_KTHREAD)
678                         mm = NULL;
679                 else
680                         atomic_inc(&mm->mm_users);
681         }
682         task_unlock(task);
683         return mm;
684 }
685 EXPORT_SYMBOL_GPL(get_task_mm);
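/*
 * Usage sketch (illustrative caller, e.g. a /proc handler; not a function
 * from this file):
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *	if (mm) {
 *		down_read(&mm->mmap_sem);
 *		... walk mm->mmap or read counters such as mm->total_vm ...
 *		up_read(&mm->mmap_sem);
 *		mmput(mm);
 *	}
 *
 * mmput() drops the reference that get_task_mm() took.
 */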
686
687 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
688 {
689         struct mm_struct *mm;
690         int err;
691
692         err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
693         if (err)
694                 return ERR_PTR(err);
695
696         mm = get_task_mm(task);
697         if (mm && mm != current->mm &&
698                         !ptrace_may_access(task, mode)) {
699                 mmput(mm);
700                 mm = ERR_PTR(-EACCES);
701         }
702         mutex_unlock(&task->signal->cred_guard_mutex);
703
704         return mm;
705 }
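/*
 * For orientation (illustrative): in this kernel, callers such as
 * /proc/<pid>/mem and process_vm_readv()/process_vm_writev() fetch the
 * target's mm with mm_access(task, PTRACE_MODE_ATTACH), so the access is
 * gated by the same checks a ptrace attach would perform.
 */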
706
707 static void complete_vfork_done(struct task_struct *tsk)
708 {
709         struct completion *vfork;
710
711         task_lock(tsk);
712         vfork = tsk->vfork_done;
713         if (likely(vfork)) {
714                 tsk->vfork_done = NULL;
715                 complete(vfork);
716         }
717         task_unlock(tsk);
718 }
719
720 static int wait_for_vfork_done(struct task_struct *child,
721                                 struct completion *vfork)
722 {
723         int killed;
724
725         freezer_do_not_count();
726         killed = wait_for_completion_killable(vfork);
727         freezer_count();
728
729         if (killed) {
730                 task_lock(child);
731                 child->vfork_done = NULL;
732                 task_unlock(child);
733         }
734
735         put_task_struct(child);
736         return killed;
737 }
738
739 /* Please note the differences between mmput and mm_release.
740  * mmput is called whenever we stop holding onto a mm_struct,
741  * whether that is on an error path or on success.
742  *
743  * mm_release is called after a mm_struct has been removed
744  * from the current process.
745  *
746  * This difference is important for error handling, when we
747  * only half set up a mm_struct for a new process and need to restore
748  * the old one.  Because we mmput the new mm_struct before
749  * restoring the old one. . .
750  * Eric Biederman 10 January 1998
751  */
752 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
753 {
754         /* Get rid of any futexes when releasing the mm */
755 #ifdef CONFIG_FUTEX
756         if (unlikely(tsk->robust_list)) {
757                 exit_robust_list(tsk);
758                 tsk->robust_list = NULL;
759         }
760 #ifdef CONFIG_COMPAT
761         if (unlikely(tsk->compat_robust_list)) {
762                 compat_exit_robust_list(tsk);
763                 tsk->compat_robust_list = NULL;
764         }
765 #endif
766         if (unlikely(!list_empty(&tsk->pi_state_list)))
767                 exit_pi_state_list(tsk);
768 #endif
769
770         uprobe_free_utask(tsk);
771
772         /* Get rid of any cached register state */
773         deactivate_mm(tsk, mm);
774
775         /*
776          * If we're exiting normally, clear a user-space tid field if
777          * requested.  We leave this alone when dying by signal, to leave
778          * the value intact in a core dump, and to avoid unnecessary work;
779          * for example, a killed vfork parent shouldn't touch this mm.
780          * Userland only wants this done for a sys_exit.
781          */
782         if (tsk->clear_child_tid) {
783                 if (!(tsk->flags & PF_SIGNALED) &&
784                     atomic_read(&mm->mm_users) > 1) {
785                         /*
786                          * We don't check the error code - if userspace has
787                          * not set up a proper pointer then tough luck.
788                          */
789                         put_user(0, tsk->clear_child_tid);
790                         sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
791                                         1, NULL, NULL, 0);
792                 }
793                 tsk->clear_child_tid = NULL;
794         }
795
796         /*
797          * All done, finally we can wake up the parent and return this mm to it.
798          * Also kthread_stop() uses this completion for synchronization.
799          */
800         if (tsk->vfork_done)
801                 complete_vfork_done(tsk);
802 }
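/*
 * Userspace view (illustrative): glibc's NPTL creates threads with
 * CLONE_CHILD_CLEARTID pointing at the new thread's tid field, so
 * pthread_join() can simply FUTEX_WAIT on that field; the
 * put_user()/sys_futex() pair above is what clears the field and wakes the
 * joiner when the thread exits.
 */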
803
804 /*
805  * Allocate a new mm structure and copy contents from the
806  * mm structure of the passed in task structure.
807  */
808 struct mm_struct *dup_mm(struct task_struct *tsk)
809 {
810         struct mm_struct *mm, *oldmm = current->mm;
811         int err;
812
813         if (!oldmm)
814                 return NULL;
815
816         mm = allocate_mm();
817         if (!mm)
818                 goto fail_nomem;
819
820         memcpy(mm, oldmm, sizeof(*mm));
821         mm_init_cpumask(mm);
822
823 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
824         mm->pmd_huge_pte = NULL;
825 #endif
826         if (!mm_init(mm, tsk))
827                 goto fail_nomem;
828
829         if (init_new_context(tsk, mm))
830                 goto fail_nocontext;
831
832         dup_mm_exe_file(oldmm, mm);
833
834         err = dup_mmap(mm, oldmm);
835         if (err)
836                 goto free_pt;
837
838         mm->hiwater_rss = get_mm_rss(mm);
839         mm->hiwater_vm = mm->total_vm;
840
841         if (mm->binfmt && !try_module_get(mm->binfmt->module))
842                 goto free_pt;
843
844         return mm;
845
846 free_pt:
847         /* don't put binfmt in mmput, we haven't got module yet */
848         mm->binfmt = NULL;
849         mmput(mm);
850
851 fail_nomem:
852         return NULL;
853
854 fail_nocontext:
855         /*
856          * If init_new_context() failed, we cannot use mmput() to free the mm
857          * because it calls destroy_context()
858          */
859         mm_free_pgd(mm);
860         free_mm(mm);
861         return NULL;
862 }
863
864 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
865 {
866         struct mm_struct *mm, *oldmm;
867         int retval;
868
869         tsk->min_flt = tsk->maj_flt = 0;
870         tsk->nvcsw = tsk->nivcsw = 0;
871 #ifdef CONFIG_DETECT_HUNG_TASK
872         tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
873 #endif
874
875         tsk->mm = NULL;
876         tsk->active_mm = NULL;
877
878         /*
879          * Are we cloning a kernel thread?
880          *
881          * We need to steal an active VM for that.
882          */
883         oldmm = current->mm;
884         if (!oldmm)
885                 return 0;
886
887         if (clone_flags & CLONE_VM) {
888                 atomic_inc(&oldmm->mm_users);
889                 mm = oldmm;
890                 goto good_mm;
891         }
892
893         retval = -ENOMEM;
894         mm = dup_mm(tsk);
895         if (!mm)
896                 goto fail_nomem;
897
898 good_mm:
899         tsk->mm = mm;
900         tsk->active_mm = mm;
901         return 0;
902
903 fail_nomem:
904         return retval;
905 }
906
907 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
908 {
909         struct fs_struct *fs = current->fs;
910         if (clone_flags & CLONE_FS) {
911                 /* tsk->fs is already what we want */
912                 spin_lock(&fs->lock);
913                 if (fs->in_exec) {
914                         spin_unlock(&fs->lock);
915                         return -EAGAIN;
916                 }
917                 fs->users++;
918                 spin_unlock(&fs->lock);
919                 return 0;
920         }
921         tsk->fs = copy_fs_struct(fs);
922         if (!tsk->fs)
923                 return -ENOMEM;
924         return 0;
925 }
926
927 static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
928 {
929         struct files_struct *oldf, *newf;
930         int error = 0;
931
932         /*
933          * A background process may not have any files ...
934          */
935         oldf = current->files;
936         if (!oldf)
937                 goto out;
938
939         if (clone_flags & CLONE_FILES) {
940                 atomic_inc(&oldf->count);
941                 goto out;
942         }
943
944         newf = dup_fd(oldf, &error);
945         if (!newf)
946                 goto out;
947
948         tsk->files = newf;
949         error = 0;
950 out:
951         return error;
952 }
953
954 static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
955 {
956 #ifdef CONFIG_BLOCK
957         struct io_context *ioc = current->io_context;
958         struct io_context *new_ioc;
959
960         if (!ioc)
961                 return 0;
962         /*
963          * Share io context with parent, if CLONE_IO is set
964          */
965         if (clone_flags & CLONE_IO) {
966                 ioc_task_link(ioc);
967                 tsk->io_context = ioc;
968         } else if (ioprio_valid(ioc->ioprio)) {
969                 new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
970                 if (unlikely(!new_ioc))
971                         return -ENOMEM;
972
973                 new_ioc->ioprio = ioc->ioprio;
974                 put_io_context(new_ioc);
975         }
976 #endif
977         return 0;
978 }
979
980 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
981 {
982         struct sighand_struct *sig;
983
984         if (clone_flags & CLONE_SIGHAND) {
985                 atomic_inc(&current->sighand->count);
986                 return 0;
987         }
988         sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
989         rcu_assign_pointer(tsk->sighand, sig);
990         if (!sig)
991                 return -ENOMEM;
992         atomic_set(&sig->count, 1);
993         memcpy(sig->action, current->sighand->action, sizeof(sig->action));
994         return 0;
995 }
996
997 void __cleanup_sighand(struct sighand_struct *sighand)
998 {
999         if (atomic_dec_and_test(&sighand->count)) {
1000                 signalfd_cleanup(sighand);
1001                 kmem_cache_free(sighand_cachep, sighand);
1002         }
1003 }
1004
1005
1006 /*
1007  * Initialize POSIX timer handling for a thread group.
1008  */
1009 static void posix_cpu_timers_init_group(struct signal_struct *sig)
1010 {
1011         unsigned long cpu_limit;
1012
1013         /* Thread group counters. */
1014         thread_group_cputime_init(sig);
1015
1016         cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1017         if (cpu_limit != RLIM_INFINITY) {
1018                 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
1019                 sig->cputimer.running = 1;
1020         }
1021
1022         /* The timer lists. */
1023         INIT_LIST_HEAD(&sig->cpu_timers[0]);
1024         INIT_LIST_HEAD(&sig->cpu_timers[1]);
1025         INIT_LIST_HEAD(&sig->cpu_timers[2]);
1026 }
1027
1028 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1029 {
1030         struct signal_struct *sig;
1031
1032         if (clone_flags & CLONE_THREAD)
1033                 return 0;
1034
1035         sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
1036         tsk->signal = sig;
1037         if (!sig)
1038                 return -ENOMEM;
1039
1040         sig->nr_threads = 1;
1041         atomic_set(&sig->live, 1);
1042         atomic_set(&sig->sigcnt, 1);
1043         init_waitqueue_head(&sig->wait_chldexit);
1044         if (clone_flags & CLONE_NEWPID)
1045                 sig->flags |= SIGNAL_UNKILLABLE;
1046         sig->curr_target = tsk;
1047         init_sigpending(&sig->shared_pending);
1048         INIT_LIST_HEAD(&sig->posix_timers);
1049
1050         hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1051         sig->real_timer.function = it_real_fn;
1052
1053         task_lock(current->group_leader);
1054         memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1055         task_unlock(current->group_leader);
1056
1057         posix_cpu_timers_init_group(sig);
1058
1059         tty_audit_fork(sig);
1060         sched_autogroup_fork(sig);
1061
1062 #ifdef CONFIG_CGROUPS
1063         init_rwsem(&sig->group_rwsem);
1064 #endif
1065
1066         sig->oom_score_adj = current->signal->oom_score_adj;
1067         sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1068
1069         sig->has_child_subreaper = current->signal->has_child_subreaper ||
1070                                    current->signal->is_child_subreaper;
1071
1072         mutex_init(&sig->cred_guard_mutex);
1073
1074         return 0;
1075 }
1076
1077 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
1078 {
1079         unsigned long new_flags = p->flags;
1080
1081         new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
1082         new_flags |= PF_FORKNOEXEC;
1083         p->flags = new_flags;
1084 }
1085
1086 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1087 {
1088         current->clear_child_tid = tidptr;
1089
1090         return task_pid_vnr(current);
1091 }
1092
1093 static void rt_mutex_init_task(struct task_struct *p)
1094 {
1095         raw_spin_lock_init(&p->pi_lock);
1096 #ifdef CONFIG_RT_MUTEXES
1097         plist_head_init(&p->pi_waiters);
1098         p->pi_blocked_on = NULL;
1099 #endif
1100 }
1101
1102 #ifdef CONFIG_MM_OWNER
1103 void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
1104 {
1105         mm->owner = p;
1106 }
1107 #endif /* CONFIG_MM_OWNER */
1108
1109 /*
1110  * Initialize POSIX timer handling for a single task.
1111  */
1112 static void posix_cpu_timers_init(struct task_struct *tsk)
1113 {
1114         tsk->cputime_expires.prof_exp = 0;
1115         tsk->cputime_expires.virt_exp = 0;
1116         tsk->cputime_expires.sched_exp = 0;
1117         INIT_LIST_HEAD(&tsk->cpu_timers[0]);
1118         INIT_LIST_HEAD(&tsk->cpu_timers[1]);
1119         INIT_LIST_HEAD(&tsk->cpu_timers[2]);
1120 }
1121
1122 /*
1123  * This creates a new process as a copy of the old one,
1124  * but does not actually start it yet.
1125  *
1126  * It copies the registers, and all the appropriate
1127  * parts of the process environment (as per the clone
1128  * flags). The actual kick-off is left to the caller.
1129  */
1130 static struct task_struct *copy_process(unsigned long clone_flags,
1131                                         unsigned long stack_start,
1132                                         struct pt_regs *regs,
1133                                         unsigned long stack_size,
1134                                         int __user *child_tidptr,
1135                                         struct pid *pid,
1136                                         int trace)
1137 {
1138         int retval;
1139         struct task_struct *p;
1140         int cgroup_callbacks_done = 0;
1141
1142         if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1143                 return ERR_PTR(-EINVAL);
1144
1145         /*
1146          * Thread groups must share signals as well, and detached threads
1147          * can only be started up within the thread group.
1148          */
1149         if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
1150                 return ERR_PTR(-EINVAL);
1151
1152         /*
1153          * Shared signal handlers imply shared VM. By way of the above,
1154          * thread groups also imply shared VM. Blocking this case allows
1155          * for various simplifications in other code.
1156          */
1157         if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
1158                 return ERR_PTR(-EINVAL);
1159
1160         /*
1161          * Siblings of global init remain as zombies on exit since they are
1162          * not reaped by their parent (swapper). To solve this and to avoid
1163          * multi-rooted process trees, prevent global and container-inits
1164          * from creating siblings.
1165          */
1166         if ((clone_flags & CLONE_PARENT) &&
1167                                 current->signal->flags & SIGNAL_UNKILLABLE)
1168                 return ERR_PTR(-EINVAL);
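        /*
         * Example of a flag set that satisfies the constraints above
         * (illustrative; roughly what glibc's NPTL passes for
         * pthread_create()):
         *
         *	CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
         *	CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
         *	CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID
         */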
1169
1170         retval = security_task_create(clone_flags);
1171         if (retval)
1172                 goto fork_out;
1173
1174         retval = -ENOMEM;
1175         p = dup_task_struct(current);
1176         if (!p)
1177                 goto fork_out;
1178
1179         ftrace_graph_init_task(p);
1180         get_seccomp_filter(p);
1181
1182         rt_mutex_init_task(p);
1183
1184 #ifdef CONFIG_PROVE_LOCKING
1185         DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
1186         DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
1187 #endif
1188         retval = -EAGAIN;
1189         if (atomic_read(&p->real_cred->user->processes) >=
1190                         task_rlimit(p, RLIMIT_NPROC)) {
1191                 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
1192                     p->real_cred->user != INIT_USER)
1193                         goto bad_fork_free;
1194         }
1195         current->flags &= ~PF_NPROC_EXCEEDED;
1196
1197         retval = copy_creds(p, clone_flags);
1198         if (retval < 0)
1199                 goto bad_fork_free;
1200
1201         /*
1202          * If multiple threads are within copy_process(), then this check
1203          * triggers too late.  This doesn't hurt; the check is only there
1204          * to stop root fork bombs.
1205          */
1206         retval = -EAGAIN;
1207         if (nr_threads >= max_threads)
1208                 goto bad_fork_cleanup_count;
1209
1210         if (!try_module_get(task_thread_info(p)->exec_domain->module))
1211                 goto bad_fork_cleanup_count;
1212
1213         p->did_exec = 0;
1214         delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
1215         copy_flags(clone_flags, p);
1216         INIT_LIST_HEAD(&p->children);
1217         INIT_LIST_HEAD(&p->sibling);
1218         rcu_copy_process(p);
1219         p->vfork_done = NULL;
1220         spin_lock_init(&p->alloc_lock);
1221
1222         init_sigpending(&p->pending);
1223
1224         p->utime = p->stime = p->gtime = 0;
1225         p->utimescaled = p->stimescaled = 0;
1226 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
1227         p->prev_cputime.utime = p->prev_cputime.stime = 0;
1228 #endif
1229 #if defined(SPLIT_RSS_COUNTING)
1230         memset(&p->rss_stat, 0, sizeof(p->rss_stat));
1231 #endif
1232
1233         p->default_timer_slack_ns = current->timer_slack_ns;
1234
1235         task_io_accounting_init(&p->ioac);
1236         acct_clear_integrals(p);
1237
1238         posix_cpu_timers_init(p);
1239
1240         do_posix_clock_monotonic_gettime(&p->start_time);
1241         p->real_start_time = p->start_time;
1242         monotonic_to_bootbased(&p->real_start_time);
1243         p->io_context = NULL;
1244         p->audit_context = NULL;
1245         if (clone_flags & CLONE_THREAD)
1246                 threadgroup_change_begin(current);
1247         cgroup_fork(p);
1248 #ifdef CONFIG_NUMA
1249         p->mempolicy = mpol_dup(p->mempolicy);
1250         if (IS_ERR(p->mempolicy)) {
1251                 retval = PTR_ERR(p->mempolicy);
1252                 p->mempolicy = NULL;
1253                 goto bad_fork_cleanup_cgroup;
1254         }
1255         mpol_fix_fork_child_flag(p);
1256 #endif
1257 #ifdef CONFIG_CPUSETS
1258         p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
1259         p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
1260         seqcount_init(&p->mems_allowed_seq);
1261 #endif
1262 #ifdef CONFIG_TRACE_IRQFLAGS
1263         p->irq_events = 0;
1264         p->hardirqs_enabled = 0;
1265         p->hardirq_enable_ip = 0;
1266         p->hardirq_enable_event = 0;
1267         p->hardirq_disable_ip = _THIS_IP_;
1268         p->hardirq_disable_event = 0;
1269         p->softirqs_enabled = 1;
1270         p->softirq_enable_ip = _THIS_IP_;
1271         p->softirq_enable_event = 0;
1272         p->softirq_disable_ip = 0;
1273         p->softirq_disable_event = 0;
1274         p->hardirq_context = 0;
1275         p->softirq_context = 0;
1276 #endif
1277 #ifdef CONFIG_LOCKDEP
1278         p->lockdep_depth = 0; /* no locks held yet */
1279         p->curr_chain_key = 0;
1280         p->lockdep_recursion = 0;
1281 #endif
1282
1283 #ifdef CONFIG_DEBUG_MUTEXES
1284         p->blocked_on = NULL; /* not blocked yet */
1285 #endif
1286 #ifdef CONFIG_MEMCG
1287         p->memcg_batch.do_batch = 0;
1288         p->memcg_batch.memcg = NULL;
1289 #endif
1290
1291         /* Perform scheduler related setup. Assign this task to a CPU. */
1292         sched_fork(p);
1293
1294         retval = perf_event_init_task(p);
1295         if (retval)
1296                 goto bad_fork_cleanup_policy;
1297         retval = audit_alloc(p);
1298         if (retval)
1299                 goto bad_fork_cleanup_policy;
1300         /* copy all the process information */
1301         retval = copy_semundo(clone_flags, p);
1302         if (retval)
1303                 goto bad_fork_cleanup_audit;
1304         retval = copy_files(clone_flags, p);
1305         if (retval)
1306                 goto bad_fork_cleanup_semundo;
1307         retval = copy_fs(clone_flags, p);
1308         if (retval)
1309                 goto bad_fork_cleanup_files;
1310         retval = copy_sighand(clone_flags, p);
1311         if (retval)
1312                 goto bad_fork_cleanup_fs;
1313         retval = copy_signal(clone_flags, p);
1314         if (retval)
1315                 goto bad_fork_cleanup_sighand;
1316         retval = copy_mm(clone_flags, p);
1317         if (retval)
1318                 goto bad_fork_cleanup_signal;
1319         retval = copy_namespaces(clone_flags, p);
1320         if (retval)
1321                 goto bad_fork_cleanup_mm;
1322         retval = copy_io(clone_flags, p);
1323         if (retval)
1324                 goto bad_fork_cleanup_namespaces;
1325         retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
1326         if (retval)
1327                 goto bad_fork_cleanup_io;
1328
1329         if (pid != &init_struct_pid) {
1330                 retval = -ENOMEM;
1331                 pid = alloc_pid(p->nsproxy->pid_ns);
1332                 if (!pid)
1333                         goto bad_fork_cleanup_io;
1334         }
1335
1336         p->pid = pid_nr(pid);
1337         p->tgid = p->pid;
1338         if (clone_flags & CLONE_THREAD)
1339                 p->tgid = current->tgid;
1340
1341         p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1342         /*
1343          * Clear TID on mm_release()?
1344          */
1345         p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1346 #ifdef CONFIG_BLOCK
1347         p->plug = NULL;
1348 #endif
1349 #ifdef CONFIG_FUTEX
1350         p->robust_list = NULL;
1351 #ifdef CONFIG_COMPAT
1352         p->compat_robust_list = NULL;
1353 #endif
1354         INIT_LIST_HEAD(&p->pi_state_list);
1355         p->pi_state_cache = NULL;
1356 #endif
1357         uprobe_copy_process(p);
1358         /*
1359          * sigaltstack should be cleared when sharing the same VM
1360          */
1361         if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
1362                 p->sas_ss_sp = p->sas_ss_size = 0;
1363
1364         /*
1365          * Syscall tracing and stepping should be turned off in the
1366          * child regardless of CLONE_PTRACE.
1367          */
1368         user_disable_single_step(p);
1369         clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1370 #ifdef TIF_SYSCALL_EMU
1371         clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
1372 #endif
1373         clear_all_latency_tracing(p);
1374
1375         /* ok, now we should be set up.. */
1376         if (clone_flags & CLONE_THREAD)
1377                 p->exit_signal = -1;
1378         else if (clone_flags & CLONE_PARENT)
1379                 p->exit_signal = current->group_leader->exit_signal;
1380         else
1381                 p->exit_signal = (clone_flags & CSIGNAL);
1382
1383         p->pdeath_signal = 0;
1384         p->exit_state = 0;
1385
1386         p->nr_dirtied = 0;
1387         p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
1388         p->dirty_paused_when = 0;
1389
1390         /*
1391          * Ok, make it visible to the rest of the system.
1392          * We don't wake it up yet.
1393          */
1394         p->group_leader = p;
1395         INIT_LIST_HEAD(&p->thread_group);
1396         p->task_works = NULL;
1397
1398         /* Now that the task is set up, run cgroup callbacks if
1399          * necessary. We need to run them before the task is visible
1400          * on the tasklist. */
1401         cgroup_fork_callbacks(p);
1402         cgroup_callbacks_done = 1;
1403
1404         /* Need tasklist lock for parent etc handling! */
1405         write_lock_irq(&tasklist_lock);
1406
1407         /* CLONE_PARENT re-uses the old parent */
1408         if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
1409                 p->real_parent = current->real_parent;
1410                 p->parent_exec_id = current->parent_exec_id;
1411         } else {
1412                 p->real_parent = current;
1413                 p->parent_exec_id = current->self_exec_id;
1414         }
1415
1416         spin_lock(&current->sighand->siglock);
1417
1418         /*
1419          * Process group and session signals need to be delivered to just the
1420          * parent before the fork or both the parent and the child after the
1421          * fork. Restart if a signal comes in before we add the new process to
1422          * its process group.
1423          * A fatal signal pending means that current will exit, so the new
1424          * thread can't slip out of an OOM kill (or normal SIGKILL).
1425          */
1426         recalc_sigpending();
1427         if (signal_pending(current)) {
1428                 spin_unlock(&current->sighand->siglock);
1429                 write_unlock_irq(&tasklist_lock);
1430                 retval = -ERESTARTNOINTR;
1431                 goto bad_fork_free_pid;
1432         }
1433
1434         if (clone_flags & CLONE_THREAD) {
1435                 current->signal->nr_threads++;
1436                 atomic_inc(&current->signal->live);
1437                 atomic_inc(&current->signal->sigcnt);
1438                 p->group_leader = current->group_leader;
1439                 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1440         }
1441
1442         if (likely(p->pid)) {
1443                 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
1444
1445                 if (thread_group_leader(p)) {
1446                         if (is_child_reaper(pid))
1447                                 p->nsproxy->pid_ns->child_reaper = p;
1448
1449                         p->signal->leader_pid = pid;
1450                         p->signal->tty = tty_kref_get(current->signal->tty);
1451                         attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
1452                         attach_pid(p, PIDTYPE_SID, task_session(current));
1453                         list_add_tail(&p->sibling, &p->real_parent->children);
1454                         list_add_tail_rcu(&p->tasks, &init_task.tasks);
1455                         __this_cpu_inc(process_counts);
1456                 }
1457                 attach_pid(p, PIDTYPE_PID, pid);
1458                 nr_threads++;
1459         }
1460
1461         total_forks++;
1462         spin_unlock(&current->sighand->siglock);
1463         write_unlock_irq(&tasklist_lock);
1464         proc_fork_connector(p);
1465         cgroup_post_fork(p);
1466         if (clone_flags & CLONE_THREAD)
1467                 threadgroup_change_end(current);
1468         perf_event_fork(p);
1469
1470         trace_task_newtask(p, clone_flags);
1471
1472         return p;
1473
1474 bad_fork_free_pid:
1475         if (pid != &init_struct_pid)
1476                 free_pid(pid);
1477 bad_fork_cleanup_io:
1478         if (p->io_context)
1479                 exit_io_context(p);
1480 bad_fork_cleanup_namespaces:
1481         if (unlikely(clone_flags & CLONE_NEWPID))
1482                 pid_ns_release_proc(p->nsproxy->pid_ns);
1483         exit_task_namespaces(p);
1484 bad_fork_cleanup_mm:
1485         if (p->mm)
1486                 mmput(p->mm);
1487 bad_fork_cleanup_signal:
1488         if (!(clone_flags & CLONE_THREAD))
1489                 free_signal_struct(p->signal);
1490 bad_fork_cleanup_sighand:
1491         __cleanup_sighand(p->sighand);
1492 bad_fork_cleanup_fs:
1493         exit_fs(p); /* blocking */
1494 bad_fork_cleanup_files:
1495         exit_files(p); /* blocking */
1496 bad_fork_cleanup_semundo:
1497         exit_sem(p);
1498 bad_fork_cleanup_audit:
1499         audit_free(p);
1500 bad_fork_cleanup_policy:
1501         perf_event_free_task(p);
1502 #ifdef CONFIG_NUMA
1503         mpol_put(p->mempolicy);
1504 bad_fork_cleanup_cgroup:
1505 #endif
1506         if (clone_flags & CLONE_THREAD)
1507                 threadgroup_change_end(current);
1508         cgroup_exit(p, cgroup_callbacks_done);
1509         delayacct_tsk_free(p);
1510         module_put(task_thread_info(p)->exec_domain->module);
1511 bad_fork_cleanup_count:
1512         atomic_dec(&p->cred->user->processes);
1513         exit_creds(p);
1514 bad_fork_free:
1515         free_task(p);
1516 fork_out:
1517         return ERR_PTR(retval);
1518 }
1519
1520 noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
1521 {
1522         memset(regs, 0, sizeof(struct pt_regs));
1523         return regs;
1524 }
1525
1526 static inline void init_idle_pids(struct pid_link *links)
1527 {
1528         enum pid_type type;
1529
1530         for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
1531                 INIT_HLIST_NODE(&links[type].node); /* not really needed */
1532                 links[type].pid = &init_struct_pid;
1533         }
1534 }
1535
1536 struct task_struct * __cpuinit fork_idle(int cpu)
1537 {
1538         struct task_struct *task;
1539         struct pt_regs regs;
1540
1541         task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
1542                             &init_struct_pid, 0);
1543         if (!IS_ERR(task)) {
1544                 init_idle_pids(task->pids);
1545                 init_idle(task, cpu);
1546         }
1547
1548         return task;
1549 }
1550
1551 /*
1552  *  Ok, this is the main fork-routine.
1553  *
1554  * It copies the process, and if successful kick-starts
1555  * it and waits for it to finish using the VM if required.
1556  */
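/*
 * For orientation (illustrative flag conventions, not code from this file):
 * on most architectures sys_fork() reaches do_fork() with just SIGCHLD in
 * clone_flags, while sys_vfork() passes CLONE_VFORK | CLONE_VM | SIGCHLD so
 * that the parent blocks in wait_for_vfork_done() until the child execs or
 * exits.
 */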
1557 long do_fork(unsigned long clone_flags,
1558               unsigned long stack_start,
1559               struct pt_regs *regs,
1560               unsigned long stack_size,
1561               int __user *parent_tidptr,
1562               int __user *child_tidptr)
1563 {
1564         struct task_struct *p;
1565         int trace = 0;
1566         long nr;
1567
1568         /*
1569          * Do some preliminary argument and permissions checking before we
1570          * actually start allocating stuff
1571          */
1572         if (clone_flags & CLONE_NEWUSER) {
1573                 if (clone_flags & CLONE_THREAD)
1574                         return -EINVAL;
1575                 /* hopefully this check will go away when userns support is
1576                  * complete
1577                  */
1578                 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
1579                                 !capable(CAP_SETGID))
1580                         return -EPERM;
1581         }
1582
1583         /*
1584          * Determine whether and which event to report to ptracer.  When
1585          * called from kernel_thread or CLONE_UNTRACED is explicitly
1586          * requested, no event is reported; otherwise, report if the event
1587          * for the type of forking is enabled.
1588          */
1589         if (!(clone_flags & CLONE_UNTRACED) && likely(user_mode(regs))) {
1590                 if (clone_flags & CLONE_VFORK)
1591                         trace = PTRACE_EVENT_VFORK;
1592                 else if ((clone_flags & CSIGNAL) != SIGCHLD)
1593                         trace = PTRACE_EVENT_CLONE;
1594                 else
1595                         trace = PTRACE_EVENT_FORK;
1596
1597                 if (likely(!ptrace_event_enabled(current, trace)))
1598                         trace = 0;
1599         }
1600
1601         p = copy_process(clone_flags, stack_start, regs, stack_size,
1602                          child_tidptr, NULL, trace);
1603         /*
1604          * Do this prior to waking up the new thread - the thread pointer
1605          * might become invalid after that point, if the thread exits quickly.
1606          */
1607         if (!IS_ERR(p)) {
1608                 struct completion vfork;
1609
1610                 trace_sched_process_fork(current, p);
1611
1612                 nr = task_pid_vnr(p);
1613
1614                 if (clone_flags & CLONE_PARENT_SETTID)
1615                         put_user(nr, parent_tidptr);
1616
1617                 if (clone_flags & CLONE_VFORK) {
1618                         p->vfork_done = &vfork;
1619                         init_completion(&vfork);
1620                         get_task_struct(p);
1621                 }
1622
1623                 wake_up_new_task(p);
1624
1625                 /* forking complete and child started to run, tell ptracer */
1626                 if (unlikely(trace))
1627                         ptrace_event(trace, nr);
1628
1629                 if (clone_flags & CLONE_VFORK) {
1630                         if (!wait_for_vfork_done(p, &vfork))
1631                                 ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
1632                 }
1633         } else {
1634                 nr = PTR_ERR(p);
1635         }
1636         return nr;
1637 }
1638
1639 #ifdef CONFIG_GENERIC_KERNEL_THREAD
1640 /*
1641  * Create a kernel thread.
1642  */
1643 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
1644 {
1645         return do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn, NULL,
1646                 (unsigned long)arg, NULL, NULL);
1647 }
1648 #endif
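/*
 * Usage sketch (illustrative; see for instance rest_init() in init/main.c):
 *
 *	kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
 *
 * The function pointer travels in the stack_start argument and its argument
 * in stack_size; the architecture's copy_thread() recognizes the
 * kernel-thread case and arranges for the child to call fn(arg).
 */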
1649
1650 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
1651 #define ARCH_MIN_MMSTRUCT_ALIGN 0
1652 #endif
1653
1654 static void sighand_ctor(void *data)
1655 {
1656         struct sighand_struct *sighand = data;
1657
1658         spin_lock_init(&sighand->siglock);
1659         init_waitqueue_head(&sighand->signalfd_wqh);
1660 }
1661
1662 void __init proc_caches_init(void)
1663 {
1664         sighand_cachep = kmem_cache_create("sighand_cache",
1665                         sizeof(struct sighand_struct), 0,
1666                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
1667                         SLAB_NOTRACK, sighand_ctor);
1668         signal_cachep = kmem_cache_create("signal_cache",
1669                         sizeof(struct signal_struct), 0,
1670                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1671         files_cachep = kmem_cache_create("files_cache",
1672                         sizeof(struct files_struct), 0,
1673                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1674         fs_cachep = kmem_cache_create("fs_cache",
1675                         sizeof(struct fs_struct), 0,
1676                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1677         /*
1678          * FIXME! The "sizeof(struct mm_struct)" currently includes the
1679          * whole struct cpumask for the OFFSTACK case. We could change
1680          * this to *only* allocate as much of it as required by the
1681          * maximum number of CPUs we can ever have.  The cpumask_allocation
1682          * is at the end of the structure, exactly for that reason.
1683          */
1684         mm_cachep = kmem_cache_create("mm_struct",
1685                         sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
1686                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1687         vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
1688         mmap_init();
1689         nsproxy_cache_init();
1690 }
1691
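/*
 * A hedged sketch of how objects from the caches above are obtained and
 * released; example_sighand_roundtrip() is a made-up helper, not part of
 * this file.  Note that sighand_ctor() runs when an object's memory is
 * first constructed, not on every allocation, so siglock and signalfd_wqh
 * are already initialized by the time kmem_cache_alloc() hands the object out.
 */
static void example_sighand_roundtrip(void)
{
        struct sighand_struct *sig;

        sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
        if (!sig)
                return;
        /* siglock and signalfd_wqh were set up by sighand_ctor() */
        kmem_cache_free(sighand_cachep, sig);
}
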
1692 /*
1693  * Check constraints on flags passed to the unshare system call.
1694  */
1695 static int check_unshare_flags(unsigned long unshare_flags)
1696 {
1697         if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1698                                 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1699                                 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
1700                 return -EINVAL;
1701         /*
1702          * Not implemented, but pretend it works if there is nothing to
1703          * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
1704          * also requires unsharing the VM.
1705          */
1706         if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
1707                 /* FIXME: get_task_mm() increments ->mm_users */
1708                 if (atomic_read(&current->mm->mm_users) > 1)
1709                         return -EINVAL;
1710         }
1711
1712         return 0;
1713 }
1714
1715 /*
1716  * Unshare the filesystem structure if it is being shared
1717  */
1718 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
1719 {
1720         struct fs_struct *fs = current->fs;
1721
1722         if (!(unshare_flags & CLONE_FS) || !fs)
1723                 return 0;
1724
1725         /* no need for a lock here; in the worst case we'll do a useless copy */
1726         if (fs->users == 1)
1727                 return 0;
1728
1729         *new_fsp = copy_fs_struct(fs);
1730         if (!*new_fsp)
1731                 return -ENOMEM;
1732
1733         return 0;
1734 }
1735
1736 /*
1737  * Unshare file descriptor table if it is being shared
1738  */
1739 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
1740 {
1741         struct files_struct *fd = current->files;
1742         int error = 0;
1743
1744         if ((unshare_flags & CLONE_FILES) &&
1745             (fd && atomic_read(&fd->count) > 1)) {
1746                 *new_fdp = dup_fd(fd, &error);
1747                 if (!*new_fdp)
1748                         return error;
1749         }
1750
1751         return 0;
1752 }
1753
1754 /*
1755  * unshare allows a process to 'unshare' part of the process
1756  * context which was originally shared using clone.  copy_*
1757  * functions used by do_fork() cannot be used here directly
1758  * because they modify an inactive task_struct that is being
1759  * constructed. Here we are modifying the current, active
1760  * task_struct.
1761  */
1762 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1763 {
1764         struct fs_struct *fs, *new_fs = NULL;
1765         struct files_struct *fd, *new_fd = NULL;
1766         struct nsproxy *new_nsproxy = NULL;
1767         int do_sysvsem = 0;
1768         int err;
1769
1770         err = check_unshare_flags(unshare_flags);
1771         if (err)
1772                 goto bad_unshare_out;
1773
1774         /*
1775          * If unsharing namespace, must also unshare filesystem information.
1776          */
1777         if (unshare_flags & CLONE_NEWNS)
1778                 unshare_flags |= CLONE_FS;
1779         /*
1780          * CLONE_NEWIPC must also detach from the undolist: after switching
1781          * to a new ipc namespace, the semaphore arrays from the old
1782          * namespace are unreachable.
1783          */
1784         if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
1785                 do_sysvsem = 1;
1786         err = unshare_fs(unshare_flags, &new_fs);
1787         if (err)
1788                 goto bad_unshare_out;
1789         err = unshare_fd(unshare_flags, &new_fd);
1790         if (err)
1791                 goto bad_unshare_cleanup_fs;
1792         err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_fs);
1793         if (err)
1794                 goto bad_unshare_cleanup_fd;
1795
1796         if (new_fs || new_fd || do_sysvsem || new_nsproxy) {
1797                 if (do_sysvsem) {
1798                         /*
1799                          * CLONE_SYSVSEM: as on exit, drop the undo lists via exit_sem().
1800                          */
1801                         exit_sem(current);
1802                 }
1803
1804                 if (new_nsproxy) {
1805                         switch_task_namespaces(current, new_nsproxy);
1806                         new_nsproxy = NULL;
1807                 }
1808
1809                 task_lock(current);
1810
1811                 if (new_fs) {
1812                         fs = current->fs;
1813                         spin_lock(&fs->lock);
1814                         current->fs = new_fs;
1815                         if (--fs->users)
1816                                 new_fs = NULL;
1817                         else
1818                                 new_fs = fs;
1819                         spin_unlock(&fs->lock);
1820                 }
1821
1822                 if (new_fd) {
1823                         fd = current->files;
1824                         current->files = new_fd;
1825                         new_fd = fd;
1826                 }
1827
1828                 task_unlock(current);
1829         }
1830
1831         if (new_nsproxy)
1832                 put_nsproxy(new_nsproxy);
1833
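/*
 * On success we also fall through the labels below: new_fd and new_fs now
 * point at the structures displaced above (or are NULL), so the "cleanup"
 * releases the old copies rather than the new ones.
 */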
1834 bad_unshare_cleanup_fd:
1835         if (new_fd)
1836                 put_files_struct(new_fd);
1837
1838 bad_unshare_cleanup_fs:
1839         if (new_fs)
1840                 free_fs_struct(new_fs);
1841
1842 bad_unshare_out:
1843         return err;
1844 }
1845
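/*
 * A hypothetical user-space sketch (not part of this file): sys_unshare()
 * above is reached through the unshare(2) wrapper declared in <sched.h>.
 * Unsharing the mount namespace needs CAP_SYS_ADMIN and, as enforced at the
 * top of the syscall, implies unsharing the filesystem information as well.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        if (unshare(CLONE_NEWNS) == -1) {
                perror("unshare");
                return 1;
        }
        printf("now in a private mount namespace\n");
        return 0;
}
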
1846 /*
1847  *      Helper to unshare the files of the current task.
1848  *      We don't want to expose copy_files internals to
1849  *      the exec layer of the kernel.
1850  */
1851
1852 int unshare_files(struct files_struct **displaced)
1853 {
1854         struct task_struct *task = current;
1855         struct files_struct *copy = NULL;
1856         int error;
1857
1858         error = unshare_fd(CLONE_FILES, &copy);
1859         if (error || !copy) {
1860                 *displaced = NULL;
1861                 return error;
1862         }
1863         *displaced = task->files;
1864         task_lock(task);
1865         task->files = copy;
1866         task_unlock(task);
1867         return 0;
1868 }
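
/*
 * A hedged sketch of the expected caller pattern for unshare_files(); the
 * example_with_private_files() name is made up, but the shape follows how
 * the exec path uses this helper: swap in a private file table, then drop
 * the displaced one once it is no longer needed.
 */
static int example_with_private_files(void)
{
        struct files_struct *displaced;
        int ret;

        ret = unshare_files(&displaced);
        if (ret)
                return ret;
        /* ... work with a file table not shared with any other task ... */
        if (displaced)
                put_files_struct(displaced);
        return 0;
}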