4 * Copyright (C) 1991, 1992 Linus Torvalds
8 * 'fork.c' contains the help-routines for the 'fork' system call
9 * (see also entry.S and others).
10 * Fork is rather simple, once you get the hang of it, but the memory
11 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/unistd.h>
17 #include <linux/module.h>
18 #include <linux/vmalloc.h>
19 #include <linux/completion.h>
20 #include <linux/personality.h>
21 #include <linux/mempolicy.h>
22 #include <linux/sem.h>
23 #include <linux/file.h>
24 #include <linux/fdtable.h>
25 #include <linux/iocontext.h>
26 #include <linux/key.h>
27 #include <linux/binfmts.h>
28 #include <linux/mman.h>
29 #include <linux/mmu_notifier.h>
32 #include <linux/vmacache.h>
33 #include <linux/nsproxy.h>
34 #include <linux/capability.h>
35 #include <linux/cpu.h>
36 #include <linux/cgroup.h>
37 #include <linux/security.h>
38 #include <linux/hugetlb.h>
39 #include <linux/seccomp.h>
40 #include <linux/swap.h>
41 #include <linux/syscalls.h>
42 #include <linux/jiffies.h>
43 #include <linux/futex.h>
44 #include <linux/compat.h>
45 #include <linux/kthread.h>
46 #include <linux/task_io_accounting_ops.h>
47 #include <linux/rcupdate.h>
48 #include <linux/ptrace.h>
49 #include <linux/mount.h>
50 #include <linux/audit.h>
51 #include <linux/memcontrol.h>
52 #include <linux/ftrace.h>
53 #include <linux/proc_fs.h>
54 #include <linux/profile.h>
55 #include <linux/rmap.h>
56 #include <linux/ksm.h>
57 #include <linux/acct.h>
58 #include <linux/tsacct_kern.h>
59 #include <linux/cn_proc.h>
60 #include <linux/freezer.h>
61 #include <linux/delayacct.h>
62 #include <linux/taskstats_kern.h>
63 #include <linux/random.h>
64 #include <linux/tty.h>
65 #include <linux/blkdev.h>
66 #include <linux/fs_struct.h>
67 #include <linux/magic.h>
68 #include <linux/perf_event.h>
69 #include <linux/posix-timers.h>
70 #include <linux/user-return-notifier.h>
71 #include <linux/oom.h>
72 #include <linux/khugepaged.h>
73 #include <linux/signalfd.h>
74 #include <linux/uprobes.h>
75 #include <linux/aio.h>
76 #include <linux/compiler.h>
77 #include <linux/sysctl.h>
78 #include <linux/kcov.h>
79 #include <linux/kprobes.h>
81 #include <asm/pgtable.h>
82 #include <asm/pgalloc.h>
83 #include <asm/uaccess.h>
84 #include <asm/mmu_context.h>
85 #include <asm/cacheflush.h>
86 #include <asm/tlbflush.h>
88 #include <trace/events/sched.h>
90 #define CREATE_TRACE_POINTS
91 #include <trace/events/task.h>
94 * Minimum number of threads to boot the kernel
96 #define MIN_THREADS 20
99 * Maximum number of threads
101 #define MAX_THREADS FUTEX_TID_MASK
104 * Counters protected by write_lock_irq(&tasklist_lock)
106 unsigned long total_forks; /* Handle normal Linux uptimes. */
107 int nr_threads; /* The idle threads do not count. */
109 int max_threads; /* tunable limit on nr_threads */
111 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
113 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
115 #ifdef CONFIG_PROVE_RCU
116 int lockdep_tasklist_lock_is_held(void)
118 return lockdep_is_held(&tasklist_lock);
120 EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
121 #endif /* #ifdef CONFIG_PROVE_RCU */
123 int nr_processes(void)
128 for_each_possible_cpu(cpu)
129 total += per_cpu(process_counts, cpu);
134 void __weak arch_release_task_struct(struct task_struct *tsk)
138 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
139 static struct kmem_cache *task_struct_cachep;
141 static inline struct task_struct *alloc_task_struct_node(int node)
143 return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
146 static inline void free_task_struct(struct task_struct *tsk)
148 kmem_cache_free(task_struct_cachep, tsk);
152 void __weak arch_release_thread_stack(unsigned long *stack)
156 #ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
159 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
160 * kmemcache based allocator.
162 # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
164 #ifdef CONFIG_VMAP_STACK
166 * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
167 * flush. Try to minimize the number of calls by caching stacks.
169 #define NR_CACHED_STACKS 2
170 static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
173 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
175 #ifdef CONFIG_VMAP_STACK
180 for (i = 0; i < NR_CACHED_STACKS; i++) {
181 struct vm_struct *s = this_cpu_read(cached_stacks[i]);
185 this_cpu_write(cached_stacks[i], NULL);
187 tsk->stack_vm_area = s;
193 stack = __vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
194 VMALLOC_START, VMALLOC_END,
195 THREADINFO_GFP | __GFP_HIGHMEM,
197 0, node, __builtin_return_address(0));
200 * We can't call find_vm_area() in interrupt context, and
201 * free_thread_stack() can be called in interrupt context,
202 * so cache the vm_struct.
205 tsk->stack_vm_area = find_vm_area(stack);
208 struct page *page = alloc_pages_node(node, THREADINFO_GFP,
211 return page ? page_address(page) : NULL;
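/*
 * Illustrative arithmetic (example numbers, not from this file): with
 * 4 KiB pages and THREAD_SIZE == 16384, THREAD_SIZE_ORDER is 2, so this
 * path hands back four physically contiguous pages, matching the
 * __free_pages(..., THREAD_SIZE_ORDER) call in free_thread_stack() below.
 */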
215 static inline void free_thread_stack(struct task_struct *tsk)
217 #ifdef CONFIG_VMAP_STACK
218 if (task_stack_vm_area(tsk)) {
222 local_irq_save(flags);
223 for (i = 0; i < NR_CACHED_STACKS; i++) {
224 if (this_cpu_read(cached_stacks[i]))
227 this_cpu_write(cached_stacks[i], tsk->stack_vm_area);
228 local_irq_restore(flags);
231 local_irq_restore(flags);
238 __free_pages(virt_to_page(tsk->stack), THREAD_SIZE_ORDER);
241 static struct kmem_cache *thread_stack_cache;
243 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
246 return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
249 static void free_thread_stack(struct task_struct *tsk)
251 kmem_cache_free(thread_stack_cache, tsk->stack);
254 void thread_stack_cache_init(void)
256 thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
257 THREAD_SIZE, 0, NULL);
258 BUG_ON(thread_stack_cache == NULL);
263 /* SLAB cache for signal_struct structures (tsk->signal) */
264 static struct kmem_cache *signal_cachep;
266 /* SLAB cache for sighand_struct structures (tsk->sighand) */
267 struct kmem_cache *sighand_cachep;
269 /* SLAB cache for files_struct structures (tsk->files) */
270 struct kmem_cache *files_cachep;
272 /* SLAB cache for fs_struct structures (tsk->fs) */
273 struct kmem_cache *fs_cachep;
275 /* SLAB cache for vm_area_struct structures */
276 struct kmem_cache *vm_area_cachep;
278 /* SLAB cache for mm_struct structures (tsk->mm) */
279 static struct kmem_cache *mm_cachep;
281 static void account_kernel_stack(struct task_struct *tsk, int account)
283 void *stack = task_stack_page(tsk);
284 struct vm_struct *vm = task_stack_vm_area(tsk);
286 BUILD_BUG_ON(IS_ENABLED(CONFIG_VMAP_STACK) && PAGE_SIZE % 1024 != 0);
291 BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
293 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
294 mod_zone_page_state(page_zone(vm->pages[i]),
296 PAGE_SIZE / 1024 * account);
299 /* All stack pages belong to the same memcg. */
300 memcg_kmem_update_page_stat(vm->pages[0], MEMCG_KERNEL_STACK_KB,
301 account * (THREAD_SIZE / 1024));
304 * All stack pages are in the same zone and belong to the
307 struct page *first_page = virt_to_page(stack);
309 mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
310 THREAD_SIZE / 1024 * account);
312 memcg_kmem_update_page_stat(first_page, MEMCG_KERNEL_STACK_KB,
313 account * (THREAD_SIZE / 1024));
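/*
 * Worked example (illustrative): with THREAD_SIZE == 16384, each call
 * above adds or subtracts 16 (KB) per task from NR_KERNEL_STACK_KB and
 * from the MEMCG_KERNEL_STACK_KB counter, depending on the sign of the
 * account argument (+1 on fork, -1 on release).
 */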
317 static void release_task_stack(struct task_struct *tsk)
319 if (WARN_ON(tsk->state != TASK_DEAD))
320 return; /* Better to leak the stack than to free prematurely */
322 account_kernel_stack(tsk, -1);
323 arch_release_thread_stack(tsk->stack);
324 free_thread_stack(tsk);
326 #ifdef CONFIG_VMAP_STACK
327 tsk->stack_vm_area = NULL;
331 #ifdef CONFIG_THREAD_INFO_IN_TASK
332 void put_task_stack(struct task_struct *tsk)
334 if (atomic_dec_and_test(&tsk->stack_refcount))
335 release_task_stack(tsk);
339 void free_task(struct task_struct *tsk)
341 #ifndef CONFIG_THREAD_INFO_IN_TASK
343 * The task is finally done with both the stack and thread_info,
346 release_task_stack(tsk);
349 * If the task had a separate stack allocation, it should be gone
352 WARN_ON_ONCE(atomic_read(&tsk->stack_refcount) != 0);
354 rt_mutex_debug_task_free(tsk);
355 ftrace_graph_exit_task(tsk);
356 put_seccomp_filter(tsk);
357 arch_release_task_struct(tsk);
358 free_task_struct(tsk);
360 EXPORT_SYMBOL(free_task);
362 static inline void free_signal_struct(struct signal_struct *sig)
364 taskstats_tgid_free(sig);
365 sched_autogroup_exit(sig);
367 * __mmdrop is not safe to call from softirq context on x86 due to
368 * pgd_dtor so postpone it to the async context
371 mmdrop_async(sig->oom_mm);
372 kmem_cache_free(signal_cachep, sig);
375 static inline void put_signal_struct(struct signal_struct *sig)
377 if (atomic_dec_and_test(&sig->sigcnt))
378 free_signal_struct(sig);
380 #ifdef CONFIG_PREEMPT_RT_BASE
383 void __put_task_struct(struct task_struct *tsk)
385 WARN_ON(!tsk->exit_state);
386 WARN_ON(atomic_read(&tsk->usage));
387 WARN_ON(tsk == current);
390 * Remove function-return probe instances associated with this
391 * task and put them back on the free list.
393 kprobe_flush_task(tsk);
395 /* Task is done with its stack. */
400 security_task_free(tsk);
402 delayacct_tsk_free(tsk);
403 put_signal_struct(tsk->signal);
405 if (!profile_handoff_task(tsk))
408 #ifndef CONFIG_PREEMPT_RT_BASE
409 EXPORT_SYMBOL_GPL(__put_task_struct);
411 void __put_task_struct_cb(struct rcu_head *rhp)
413 struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
415 __put_task_struct(tsk);
418 EXPORT_SYMBOL_GPL(__put_task_struct_cb);
421 void __init __weak arch_task_cache_init(void) { }
426 static void set_max_threads(unsigned int max_threads_suggested)
431 * The number of threads shall be limited such that the thread
432 * structures may only consume a small part of the available memory.
434 if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
435 threads = MAX_THREADS;
437 threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
438 (u64) THREAD_SIZE * 8UL);
440 if (threads > max_threads_suggested)
441 threads = max_threads_suggested;
443 max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
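/*
 * Worked example (illustrative numbers, not from this file): with 4 GiB
 * of RAM (totalram_pages * PAGE_SIZE == 2^32) and THREAD_SIZE == 16384,
 * the division above gives
 *
 *	threads = 2^32 / (16384 * 8) = 32768,
 *
 * which is then capped by max_threads_suggested and clamped to the
 * [MIN_THREADS, MAX_THREADS] range.
 */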
446 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
447 /* Initialized by the architecture: */
448 int arch_task_struct_size __read_mostly;
451 void __init fork_init(void)
454 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
455 #ifndef ARCH_MIN_TASKALIGN
456 #define ARCH_MIN_TASKALIGN L1_CACHE_BYTES
458 /* create a slab on which task_structs can be allocated */
459 task_struct_cachep = kmem_cache_create("task_struct",
460 arch_task_struct_size, ARCH_MIN_TASKALIGN,
461 SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
464 /* do the arch specific task caches init */
465 arch_task_cache_init();
467 set_max_threads(MAX_THREADS);
469 init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
470 init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
471 init_task.signal->rlim[RLIMIT_SIGPENDING] =
472 init_task.signal->rlim[RLIMIT_NPROC];
474 for (i = 0; i < UCOUNT_COUNTS; i++) {
475 init_user_ns.ucount_max[i] = max_threads/2;
479 int __weak arch_dup_task_struct(struct task_struct *dst,
480 struct task_struct *src)
486 void set_task_stack_end_magic(struct task_struct *tsk)
488 unsigned long *stackend;
490 stackend = end_of_stack(tsk);
491 *stackend = STACK_END_MAGIC; /* for overflow detection */
494 static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
496 struct task_struct *tsk;
497 unsigned long *stack;
498 struct vm_struct *stack_vm_area;
501 if (node == NUMA_NO_NODE)
502 node = tsk_fork_get_node(orig);
503 tsk = alloc_task_struct_node(node);
507 stack = alloc_thread_stack_node(tsk, node);
511 stack_vm_area = task_stack_vm_area(tsk);
513 err = arch_dup_task_struct(tsk, orig);
516 * arch_dup_task_struct() clobbers the stack-related fields. Make
517 * sure they're properly initialized before using any stack-related
521 #ifdef CONFIG_VMAP_STACK
522 tsk->stack_vm_area = stack_vm_area;
524 #ifdef CONFIG_THREAD_INFO_IN_TASK
525 atomic_set(&tsk->stack_refcount, 1);
531 #ifdef CONFIG_SECCOMP
533 * We must handle setting up seccomp filters once we're under
534 * the sighand lock in case orig has changed between now and
535 * then. Until then, filter must be NULL to avoid messing up
536 * the usage counts on the error path calling free_task.
538 tsk->seccomp.filter = NULL;
541 setup_thread_stack(tsk, orig);
542 clear_user_return_notifier(tsk);
543 clear_tsk_need_resched(tsk);
544 set_task_stack_end_magic(tsk);
546 #ifdef CONFIG_CC_STACKPROTECTOR
547 tsk->stack_canary = get_random_int();
551 * One for us, one for whoever does the "release_task()" (usually
554 atomic_set(&tsk->usage, 2);
555 #ifdef CONFIG_BLK_DEV_IO_TRACE
558 tsk->splice_pipe = NULL;
559 tsk->task_frag.page = NULL;
560 tsk->wake_q.next = NULL;
562 account_kernel_stack(tsk, 1);
569 free_thread_stack(tsk);
571 free_task_struct(tsk);
576 static __latent_entropy int dup_mmap(struct mm_struct *mm,
577 struct mm_struct *oldmm)
579 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
580 struct rb_node **rb_link, *rb_parent;
582 unsigned long charge;
584 uprobe_start_dup_mmap();
585 if (down_write_killable(&oldmm->mmap_sem)) {
587 goto fail_uprobe_end;
589 flush_cache_dup_mm(oldmm);
590 uprobe_dup_mmap(oldmm, mm);
592 * Not linked in yet - no deadlock potential:
594 down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
596 /* No ordering required: file already has been exposed. */
597 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
599 mm->total_vm = oldmm->total_vm;
600 mm->data_vm = oldmm->data_vm;
601 mm->exec_vm = oldmm->exec_vm;
602 mm->stack_vm = oldmm->stack_vm;
604 rb_link = &mm->mm_rb.rb_node;
607 retval = ksm_fork(mm, oldmm);
610 retval = khugepaged_fork(mm, oldmm);
615 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
618 if (mpnt->vm_flags & VM_DONTCOPY) {
619 vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
623 if (mpnt->vm_flags & VM_ACCOUNT) {
624 unsigned long len = vma_pages(mpnt);
626 if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
630 tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
634 INIT_LIST_HEAD(&tmp->anon_vma_chain);
635 retval = vma_dup_policy(mpnt, tmp);
637 goto fail_nomem_policy;
639 if (anon_vma_fork(tmp, mpnt))
640 goto fail_nomem_anon_vma_fork;
642 ~(VM_LOCKED|VM_LOCKONFAULT|VM_UFFD_MISSING|VM_UFFD_WP);
643 tmp->vm_next = tmp->vm_prev = NULL;
644 tmp->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
647 struct inode *inode = file_inode(file);
648 struct address_space *mapping = file->f_mapping;
651 if (tmp->vm_flags & VM_DENYWRITE)
652 atomic_dec(&inode->i_writecount);
653 i_mmap_lock_write(mapping);
654 if (tmp->vm_flags & VM_SHARED)
655 atomic_inc(&mapping->i_mmap_writable);
656 flush_dcache_mmap_lock(mapping);
657 /* insert tmp into the share list, just after mpnt */
658 vma_interval_tree_insert_after(tmp, mpnt,
660 flush_dcache_mmap_unlock(mapping);
661 i_mmap_unlock_write(mapping);
665 * Clear hugetlb-related page reserves for children. This only
666 * affects MAP_PRIVATE mappings. Faults generated by the child
667 * are not guaranteed to succeed, even if read-only
669 if (is_vm_hugetlb_page(tmp))
670 reset_vma_resv_huge_pages(tmp);
673 * Link in the new vma and copy the page table entries.
676 pprev = &tmp->vm_next;
680 __vma_link_rb(mm, tmp, rb_link, rb_parent);
681 rb_link = &tmp->vm_rb.rb_right;
682 rb_parent = &tmp->vm_rb;
685 retval = copy_page_range(mm, oldmm, mpnt);
687 if (tmp->vm_ops && tmp->vm_ops->open)
688 tmp->vm_ops->open(tmp);
693 /* a new mm has just been created */
694 arch_dup_mmap(oldmm, mm);
697 up_write(&mm->mmap_sem);
699 up_write(&oldmm->mmap_sem);
701 uprobe_end_dup_mmap();
703 fail_nomem_anon_vma_fork:
704 mpol_put(vma_policy(tmp));
706 kmem_cache_free(vm_area_cachep, tmp);
709 vm_unacct_memory(charge);
713 static inline int mm_alloc_pgd(struct mm_struct *mm)
715 mm->pgd = pgd_alloc(mm);
716 if (unlikely(!mm->pgd))
721 static inline void mm_free_pgd(struct mm_struct *mm)
723 pgd_free(mm, mm->pgd);
726 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
728 down_write(&oldmm->mmap_sem);
729 RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
730 up_write(&oldmm->mmap_sem);
733 #define mm_alloc_pgd(mm) (0)
734 #define mm_free_pgd(mm)
735 #endif /* CONFIG_MMU */
737 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
739 #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
740 #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))
742 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
744 static int __init coredump_filter_setup(char *s)
746 default_dump_filter =
747 (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
748 MMF_DUMP_FILTER_MASK;
752 __setup("coredump_filter=", coredump_filter_setup);
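/*
 * Example (illustrative): booting with "coredump_filter=0x23" makes
 * simple_strtoul() parse the value, which is then shifted by
 * MMF_DUMP_FILTER_SHIFT and masked with MMF_DUMP_FILTER_MASK, so every
 * mm created afterwards starts with that dump filter instead of
 * MMF_DUMP_FILTER_DEFAULT.
 */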
754 #include <linux/init_task.h>
756 static void mm_init_aio(struct mm_struct *mm)
759 spin_lock_init(&mm->ioctx_lock);
760 mm->ioctx_table = NULL;
764 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
771 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
775 mm->vmacache_seqnum = 0;
776 atomic_set(&mm->mm_users, 1);
777 atomic_set(&mm->mm_count, 1);
778 init_rwsem(&mm->mmap_sem);
779 INIT_LIST_HEAD(&mm->mmlist);
780 mm->core_state = NULL;
781 atomic_long_set(&mm->nr_ptes, 0);
786 memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
787 spin_lock_init(&mm->page_table_lock);
790 mm_init_owner(mm, p);
791 mmu_notifier_mm_init(mm);
792 clear_tlb_flush_pending(mm);
793 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
794 mm->pmd_huge_pte = NULL;
798 mm->flags = current->mm->flags & MMF_INIT_MASK;
799 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
801 mm->flags = default_dump_filter;
805 if (mm_alloc_pgd(mm))
808 if (init_new_context(p, mm))
820 static void check_mm(struct mm_struct *mm)
824 for (i = 0; i < NR_MM_COUNTERS; i++) {
825 long x = atomic_long_read(&mm->rss_stat.count[i]);
828 printk(KERN_ALERT "BUG: Bad rss-counter state "
829 "mm:%p idx:%d val:%ld\n", mm, i, x);
832 if (atomic_long_read(&mm->nr_ptes))
833 pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n",
834 atomic_long_read(&mm->nr_ptes));
836 pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
839 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
840 VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
845 * Allocate and initialize an mm_struct.
847 struct mm_struct *mm_alloc(void)
849 struct mm_struct *mm;
855 memset(mm, 0, sizeof(*mm));
856 return mm_init(mm, current);
860 * Called when the last reference to the mm
861 * is dropped: either by a lazy thread or by
862 * mmput. Free the page directory and the mm.
864 void __mmdrop(struct mm_struct *mm)
866 BUG_ON(mm == &init_mm);
869 mmu_notifier_mm_destroy(mm);
873 EXPORT_SYMBOL_GPL(__mmdrop);
875 #ifdef CONFIG_PREEMPT_RT_BASE
877 * RCU callback for delayed mm drop. Not strictly rcu, but we don't
878 * want another facility to make this work.
880 void __mmdrop_delayed(struct rcu_head *rhp)
882 struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
888 static inline void __mmput(struct mm_struct *mm)
890 VM_BUG_ON(atomic_read(&mm->mm_users));
892 uprobe_clear_state(mm);
895 khugepaged_exit(mm); /* must run before exit_mmap */
897 mm_put_huge_zero_page(mm);
898 set_mm_exe_file(mm, NULL);
899 if (!list_empty(&mm->mmlist)) {
900 spin_lock(&mmlist_lock);
901 list_del(&mm->mmlist);
902 spin_unlock(&mmlist_lock);
905 module_put(mm->binfmt->module);
906 set_bit(MMF_OOM_SKIP, &mm->flags);
911 * Decrement the use count and release all resources for an mm.
913 void mmput(struct mm_struct *mm)
917 if (atomic_dec_and_test(&mm->mm_users))
920 EXPORT_SYMBOL_GPL(mmput);
923 static void mmput_async_fn(struct work_struct *work)
925 struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
929 void mmput_async(struct mm_struct *mm)
931 if (atomic_dec_and_test(&mm->mm_users)) {
932 INIT_WORK(&mm->async_put_work, mmput_async_fn);
933 schedule_work(&mm->async_put_work);
939 * set_mm_exe_file - change a reference to the mm's executable file
941 * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
943 * Main users are mmput() and sys_execve(). Callers prevent concurrent
944 * invocations: in mmput() nobody alive left, in execve task is single
945 * threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set the
946 * mm->exe_file, but does so without using set_mm_exe_file() in order
947 * to avoid the need for any locks.
949 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
951 struct file *old_exe_file;
954 * It is safe to dereference the exe_file without RCU as
955 * this function is only called if nobody else can access
956 * this mm -- see comment above for justification.
958 old_exe_file = rcu_dereference_raw(mm->exe_file);
961 get_file(new_exe_file);
962 rcu_assign_pointer(mm->exe_file, new_exe_file);
968 * get_mm_exe_file - acquire a reference to the mm's executable file
970 * Returns %NULL if mm has no associated executable file.
971 * User must release file via fput().
973 struct file *get_mm_exe_file(struct mm_struct *mm)
975 struct file *exe_file;
978 exe_file = rcu_dereference(mm->exe_file);
979 if (exe_file && !get_file_rcu(exe_file))
984 EXPORT_SYMBOL(get_mm_exe_file);
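/*
 * Usage sketch (illustrative only): callers must balance the reference
 * with fput() once they are done with the file.
 *
 *	struct file *exe_file = get_mm_exe_file(mm);
 *
 *	if (exe_file) {
 *		... inspect exe_file ...
 *		fput(exe_file);
 *	}
 */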
987 * get_task_exe_file - acquire a reference to the task's executable file
989 * Returns %NULL if task's mm (if any) has no associated executable file or
990 * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
991 * User must release file via fput().
993 struct file *get_task_exe_file(struct task_struct *task)
995 struct file *exe_file = NULL;
996 struct mm_struct *mm;
1001 if (!(task->flags & PF_KTHREAD))
1002 exe_file = get_mm_exe_file(mm);
1007 EXPORT_SYMBOL(get_task_exe_file);
1010 * get_task_mm - acquire a reference to the task's mm
1012 * Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
1013 * this kernel workthread has only transiently adopted a user mm with
1014 * use_mm, e.g. to do its AIO). Otherwise returns a reference to the mm
1015 * after bumping up its use count. User must release the mm via mmput()
1016 * after use. Typically used by /proc and ptrace.
1018 struct mm_struct *get_task_mm(struct task_struct *task)
1020 struct mm_struct *mm;
1025 if (task->flags & PF_KTHREAD)
1028 atomic_inc(&mm->mm_users);
1033 EXPORT_SYMBOL_GPL(get_task_mm);
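/*
 * Usage sketch (illustrative only): the typical /proc-style pattern is
 * to take a reference, use the mm, and drop it again with mmput().
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		down_read(&mm->mmap_sem);
 *		... walk mm->mmap ...
 *		up_read(&mm->mmap_sem);
 *		mmput(mm);
 *	}
 */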
1035 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
1037 struct mm_struct *mm;
1040 err = mutex_lock_killable(&task->signal->cred_guard_mutex);
1042 return ERR_PTR(err);
1044 mm = get_task_mm(task);
1045 if (mm && mm != current->mm &&
1046 !ptrace_may_access(task, mode)) {
1048 mm = ERR_PTR(-EACCES);
1050 mutex_unlock(&task->signal->cred_guard_mutex);
1055 static void complete_vfork_done(struct task_struct *tsk)
1057 struct completion *vfork;
1060 vfork = tsk->vfork_done;
1061 if (likely(vfork)) {
1062 tsk->vfork_done = NULL;
1068 static int wait_for_vfork_done(struct task_struct *child,
1069 struct completion *vfork)
1073 freezer_do_not_count();
1074 killed = wait_for_completion_killable(vfork);
1079 child->vfork_done = NULL;
1083 put_task_struct(child);
1087 /* Please note the differences between mmput and mm_release.
1088 * mmput is called whenever we stop holding onto a mm_struct,
1089 * whether it ended in error or success.
1091 * mm_release is called after a mm_struct has been removed
1092 * from the current process.
1094 * This difference is important for error handling, when we
1095 * only half set up a mm_struct for a new process and need to restore
1096 * the old one. Because we mmput the new mm_struct before
1097 * restoring the old one. . .
1098 * Eric Biederman 10 January 1998
1100 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
1102 /* Get rid of any futexes when releasing the mm */
1104 if (unlikely(tsk->robust_list)) {
1105 exit_robust_list(tsk);
1106 tsk->robust_list = NULL;
1108 #ifdef CONFIG_COMPAT
1109 if (unlikely(tsk->compat_robust_list)) {
1110 compat_exit_robust_list(tsk);
1111 tsk->compat_robust_list = NULL;
1114 if (unlikely(!list_empty(&tsk->pi_state_list)))
1115 exit_pi_state_list(tsk);
1118 uprobe_free_utask(tsk);
1120 /* Get rid of any cached register state */
1121 deactivate_mm(tsk, mm);
1124 * Signal userspace if we're not exiting with a core dump
1125 * because we want to leave the value intact for debugging
1128 if (tsk->clear_child_tid) {
1129 if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
1130 atomic_read(&mm->mm_users) > 1) {
1132 * We don't check the error code - if userspace has
1133 * not set up a proper pointer then tough luck.
1135 put_user(0, tsk->clear_child_tid);
1136 sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
1139 tsk->clear_child_tid = NULL;
1143 * All done, finally we can wake up parent and return this mm to him.
1144 * Also kthread_stop() uses this completion for synchronization.
1146 if (tsk->vfork_done)
1147 complete_vfork_done(tsk);
1151 * Allocate a new mm structure and copy contents from the
1152 * mm structure of the passed in task structure.
1154 static struct mm_struct *dup_mm(struct task_struct *tsk)
1156 struct mm_struct *mm, *oldmm = current->mm;
1163 memcpy(mm, oldmm, sizeof(*mm));
1165 if (!mm_init(mm, tsk))
1168 err = dup_mmap(mm, oldmm);
1172 mm->hiwater_rss = get_mm_rss(mm);
1173 mm->hiwater_vm = mm->total_vm;
1175 if (mm->binfmt && !try_module_get(mm->binfmt->module))
1181 /* don't put binfmt in mmput, we haven't got module yet */
1189 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
1191 struct mm_struct *mm, *oldmm;
1194 tsk->min_flt = tsk->maj_flt = 0;
1195 tsk->nvcsw = tsk->nivcsw = 0;
1196 #ifdef CONFIG_DETECT_HUNG_TASK
1197 tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
1201 tsk->active_mm = NULL;
1204 * Are we cloning a kernel thread?
1206 * We need to steal an active VM for that.
1208 oldmm = current->mm;
1212 /* initialize the new vmacache entries */
1213 vmacache_flush(tsk);
1215 if (clone_flags & CLONE_VM) {
1216 atomic_inc(&oldmm->mm_users);
1228 tsk->active_mm = mm;
1235 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
1237 struct fs_struct *fs = current->fs;
1238 if (clone_flags & CLONE_FS) {
1239 /* tsk->fs is already what we want */
1240 spin_lock(&fs->lock);
1242 spin_unlock(&fs->lock);
1246 spin_unlock(&fs->lock);
1249 tsk->fs = copy_fs_struct(fs);
1255 static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
1257 struct files_struct *oldf, *newf;
1261 * A background process may not have any files ...
1263 oldf = current->files;
1267 if (clone_flags & CLONE_FILES) {
1268 atomic_inc(&oldf->count);
1272 newf = dup_fd(oldf, &error);
1282 static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
1285 struct io_context *ioc = current->io_context;
1286 struct io_context *new_ioc;
1291 * Share io context with parent, if CLONE_IO is set
1293 if (clone_flags & CLONE_IO) {
1295 tsk->io_context = ioc;
1296 } else if (ioprio_valid(ioc->ioprio)) {
1297 new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
1298 if (unlikely(!new_ioc))
1301 new_ioc->ioprio = ioc->ioprio;
1302 put_io_context(new_ioc);
1308 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
1310 struct sighand_struct *sig;
1312 if (clone_flags & CLONE_SIGHAND) {
1313 atomic_inc(&current->sighand->count);
1316 sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1317 rcu_assign_pointer(tsk->sighand, sig);
1321 atomic_set(&sig->count, 1);
1322 memcpy(sig->action, current->sighand->action, sizeof(sig->action));
1326 void __cleanup_sighand(struct sighand_struct *sighand)
1328 if (atomic_dec_and_test(&sighand->count)) {
1329 signalfd_cleanup(sighand);
1331 * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
1332 * without an RCU grace period, see __lock_task_sighand().
1334 kmem_cache_free(sighand_cachep, sighand);
1339 * Initialize POSIX timer handling for a thread group.
1341 static void posix_cpu_timers_init_group(struct signal_struct *sig)
1343 unsigned long cpu_limit;
1345 cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1346 if (cpu_limit != RLIM_INFINITY) {
1347 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
1348 sig->cputimer.running = true;
1351 /* The timer lists. */
1352 INIT_LIST_HEAD(&sig->cpu_timers[0]);
1353 INIT_LIST_HEAD(&sig->cpu_timers[1]);
1354 INIT_LIST_HEAD(&sig->cpu_timers[2]);
1357 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1359 struct signal_struct *sig;
1361 if (clone_flags & CLONE_THREAD)
1364 sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
1369 sig->nr_threads = 1;
1370 atomic_set(&sig->live, 1);
1371 atomic_set(&sig->sigcnt, 1);
1373 /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
1374 sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
1375 tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
1377 init_waitqueue_head(&sig->wait_chldexit);
1378 sig->curr_target = tsk;
1379 init_sigpending(&sig->shared_pending);
1380 INIT_LIST_HEAD(&sig->posix_timers);
1381 seqlock_init(&sig->stats_lock);
1382 prev_cputime_init(&sig->prev_cputime);
1384 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1385 sig->real_timer.function = it_real_fn;
1387 task_lock(current->group_leader);
1388 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1389 task_unlock(current->group_leader);
1391 posix_cpu_timers_init_group(sig);
1393 tty_audit_fork(sig);
1394 sched_autogroup_fork(sig);
1396 sig->oom_score_adj = current->signal->oom_score_adj;
1397 sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1399 sig->has_child_subreaper = current->signal->has_child_subreaper ||
1400 current->signal->is_child_subreaper;
1402 mutex_init(&sig->cred_guard_mutex);
1407 static void copy_seccomp(struct task_struct *p)
1409 #ifdef CONFIG_SECCOMP
1411 * Must be called with sighand->lock held, which is common to
1412 * all threads in the group. Holding cred_guard_mutex is not
1413 * needed because this new task is not yet running and cannot
1416 assert_spin_locked(&current->sighand->siglock);
1418 /* Ref-count the new filter user, and assign it. */
1419 get_seccomp_filter(current);
1420 p->seccomp = current->seccomp;
1423 * Explicitly enable no_new_privs here in case it got set
1424 * between the task_struct being duplicated and holding the
1425 * sighand lock. The seccomp state and nnp must be in sync.
1427 if (task_no_new_privs(current))
1428 task_set_no_new_privs(p);
1431 * If the parent gained a seccomp mode after copying thread
1432 * flags, but before we took the sighand lock, we have
1433 * to manually enable the seccomp thread flag here.
1435 if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
1436 set_tsk_thread_flag(p, TIF_SECCOMP);
1440 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1442 current->clear_child_tid = tidptr;
1444 return task_pid_vnr(current);
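/*
 * Userspace sketch (not kernel code, illustrative only): glibc issues
 * this call during thread setup, but it can also be made directly:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int tid;
 *	syscall(SYS_set_tid_address, &tid);
 *
 * On exit the kernel clears the registered word and futex-wakes any
 * waiter, as done in mm_release() above.
 */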
1447 static void rt_mutex_init_task(struct task_struct *p)
1449 raw_spin_lock_init(&p->pi_lock);
1450 #ifdef CONFIG_RT_MUTEXES
1451 p->pi_waiters = RB_ROOT;
1452 p->pi_waiters_leftmost = NULL;
1453 p->pi_blocked_on = NULL;
1458 * Initialize POSIX timer handling for a single task.
1460 static void posix_cpu_timers_init(struct task_struct *tsk)
1462 #ifdef CONFIG_PREEMPT_RT_BASE
1463 tsk->posix_timer_list = NULL;
1465 tsk->cputime_expires.prof_exp = 0;
1466 tsk->cputime_expires.virt_exp = 0;
1467 tsk->cputime_expires.sched_exp = 0;
1468 INIT_LIST_HEAD(&tsk->cpu_timers[0]);
1469 INIT_LIST_HEAD(&tsk->cpu_timers[1]);
1470 INIT_LIST_HEAD(&tsk->cpu_timers[2]);
1474 init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
1476 task->pids[type].pid = pid;
1480 * This creates a new process as a copy of the old one,
1481 * but does not actually start it yet.
1483 * It copies the registers, and all the appropriate
1484 * parts of the process environment (as per the clone
1485 * flags). The actual kick-off is left to the caller.
1487 static __latent_entropy struct task_struct *copy_process(
1488 unsigned long clone_flags,
1489 unsigned long stack_start,
1490 unsigned long stack_size,
1491 int __user *child_tidptr,
1498 struct task_struct *p;
1500 if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1501 return ERR_PTR(-EINVAL);
1503 if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
1504 return ERR_PTR(-EINVAL);
1507 * Thread groups must share signals as well, and detached threads
1508 * can only be started up within the thread group.
1510 if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
1511 return ERR_PTR(-EINVAL);
1514 * Shared signal handlers imply shared VM. By way of the above,
1515 * thread groups also imply shared VM. Blocking this case allows
1516 * for various simplifications in other code.
1518 if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
1519 return ERR_PTR(-EINVAL);
1522 * Siblings of global init remain as zombies on exit since they are
1523 * not reaped by their parent (swapper). To solve this and to avoid
1524 * multi-rooted process trees, prevent global and container-inits
1525 * from creating siblings.
1527 if ((clone_flags & CLONE_PARENT) &&
1528 current->signal->flags & SIGNAL_UNKILLABLE)
1529 return ERR_PTR(-EINVAL);
1532 * If the new process will be in a different pid or user namespace
1533 * do not allow it to share a thread group with the forking task.
1535 if (clone_flags & CLONE_THREAD) {
1536 if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
1537 (task_active_pid_ns(current) !=
1538 current->nsproxy->pid_ns_for_children))
1539 return ERR_PTR(-EINVAL);
1542 retval = security_task_create(clone_flags);
1547 p = dup_task_struct(current, node);
1551 ftrace_graph_init_task(p);
1553 rt_mutex_init_task(p);
1555 #ifdef CONFIG_PROVE_LOCKING
1556 DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
1557 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
1560 if (atomic_read(&p->real_cred->user->processes) >=
1561 task_rlimit(p, RLIMIT_NPROC)) {
1562 if (p->real_cred->user != INIT_USER &&
1563 !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
1566 current->flags &= ~PF_NPROC_EXCEEDED;
1568 retval = copy_creds(p, clone_flags);
1573 * If multiple threads are within copy_process(), then this check
1574 * triggers too late. This doesn't hurt, the check is only there
1575 * to stop root fork bombs.
1578 if (nr_threads >= max_threads)
1579 goto bad_fork_cleanup_count;
1581 delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
1582 p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
1583 p->flags |= PF_FORKNOEXEC;
1584 INIT_LIST_HEAD(&p->children);
1585 INIT_LIST_HEAD(&p->sibling);
1586 rcu_copy_process(p);
1587 p->vfork_done = NULL;
1588 spin_lock_init(&p->alloc_lock);
1590 init_sigpending(&p->pending);
1591 p->sigqueue_cache = NULL;
1593 p->utime = p->stime = p->gtime = 0;
1594 p->utimescaled = p->stimescaled = 0;
1595 prev_cputime_init(&p->prev_cputime);
1597 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1598 seqcount_init(&p->vtime_seqcount);
1600 p->vtime_snap_whence = VTIME_INACTIVE;
1603 #if defined(SPLIT_RSS_COUNTING)
1604 memset(&p->rss_stat, 0, sizeof(p->rss_stat));
1607 p->default_timer_slack_ns = current->timer_slack_ns;
1609 task_io_accounting_init(&p->ioac);
1610 acct_clear_integrals(p);
1612 posix_cpu_timers_init(p);
1614 p->start_time = ktime_get_ns();
1615 p->real_start_time = ktime_get_boot_ns();
1616 p->io_context = NULL;
1617 p->audit_context = NULL;
1620 p->mempolicy = mpol_dup(p->mempolicy);
1621 if (IS_ERR(p->mempolicy)) {
1622 retval = PTR_ERR(p->mempolicy);
1623 p->mempolicy = NULL;
1624 goto bad_fork_cleanup_threadgroup_lock;
1627 #ifdef CONFIG_CPUSETS
1628 p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
1629 p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
1630 seqcount_init(&p->mems_allowed_seq);
1632 #ifdef CONFIG_TRACE_IRQFLAGS
1634 p->hardirqs_enabled = 0;
1635 p->hardirq_enable_ip = 0;
1636 p->hardirq_enable_event = 0;
1637 p->hardirq_disable_ip = _THIS_IP_;
1638 p->hardirq_disable_event = 0;
1639 p->softirqs_enabled = 1;
1640 p->softirq_enable_ip = _THIS_IP_;
1641 p->softirq_enable_event = 0;
1642 p->softirq_disable_ip = 0;
1643 p->softirq_disable_event = 0;
1644 p->hardirq_context = 0;
1645 p->softirq_context = 0;
1648 p->pagefault_disabled = 0;
1650 #ifdef CONFIG_LOCKDEP
1651 p->lockdep_depth = 0; /* no locks held yet */
1652 p->curr_chain_key = 0;
1653 p->lockdep_recursion = 0;
1656 #ifdef CONFIG_DEBUG_MUTEXES
1657 p->blocked_on = NULL; /* not blocked yet */
1659 #ifdef CONFIG_BCACHE
1660 p->sequential_io = 0;
1661 p->sequential_io_avg = 0;
1664 /* Perform scheduler related setup. Assign this task to a CPU. */
1665 retval = sched_fork(clone_flags, p);
1667 goto bad_fork_cleanup_policy;
1669 retval = perf_event_init_task(p);
1671 goto bad_fork_cleanup_policy;
1672 retval = audit_alloc(p);
1674 goto bad_fork_cleanup_perf;
1675 /* copy all the process information */
1677 retval = copy_semundo(clone_flags, p);
1679 goto bad_fork_cleanup_audit;
1680 retval = copy_files(clone_flags, p);
1682 goto bad_fork_cleanup_semundo;
1683 retval = copy_fs(clone_flags, p);
1685 goto bad_fork_cleanup_files;
1686 retval = copy_sighand(clone_flags, p);
1688 goto bad_fork_cleanup_fs;
1689 retval = copy_signal(clone_flags, p);
1691 goto bad_fork_cleanup_sighand;
1692 retval = copy_mm(clone_flags, p);
1694 goto bad_fork_cleanup_signal;
1695 retval = copy_namespaces(clone_flags, p);
1697 goto bad_fork_cleanup_mm;
1698 retval = copy_io(clone_flags, p);
1700 goto bad_fork_cleanup_namespaces;
1701 retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
1703 goto bad_fork_cleanup_io;
1705 if (pid != &init_struct_pid) {
1706 pid = alloc_pid(p->nsproxy->pid_ns_for_children);
1708 retval = PTR_ERR(pid);
1709 goto bad_fork_cleanup_thread;
1713 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1715 * Clear TID on mm_release()?
1717 p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1722 p->robust_list = NULL;
1723 #ifdef CONFIG_COMPAT
1724 p->compat_robust_list = NULL;
1726 INIT_LIST_HEAD(&p->pi_state_list);
1727 p->pi_state_cache = NULL;
1730 * sigaltstack should be cleared when sharing the same VM
1732 if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
1736 * Syscall tracing and stepping should be turned off in the
1737 * child regardless of CLONE_PTRACE.
1739 user_disable_single_step(p);
1740 clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1741 #ifdef TIF_SYSCALL_EMU
1742 clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
1744 clear_all_latency_tracing(p);
1746 /* ok, now we should be set up.. */
1747 p->pid = pid_nr(pid);
1748 if (clone_flags & CLONE_THREAD) {
1749 p->exit_signal = -1;
1750 p->group_leader = current->group_leader;
1751 p->tgid = current->tgid;
1753 if (clone_flags & CLONE_PARENT)
1754 p->exit_signal = current->group_leader->exit_signal;
1756 p->exit_signal = (clone_flags & CSIGNAL);
1757 p->group_leader = p;
1762 p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
1763 p->dirty_paused_when = 0;
1765 p->pdeath_signal = 0;
1766 INIT_LIST_HEAD(&p->thread_group);
1767 p->task_works = NULL;
1769 threadgroup_change_begin(current);
1771 * Ensure that the cgroup subsystem policies allow the new process to be
1772 * forked. It should be noted that the new process's css_set can be changed
1773 * between here and cgroup_post_fork() if an organisation operation is in
1776 retval = cgroup_can_fork(p);
1778 goto bad_fork_free_pid;
1781 * Make it visible to the rest of the system, but don't wake it up yet.
1782 * Need tasklist lock for parent etc handling!
1784 write_lock_irq(&tasklist_lock);
1786 /* CLONE_PARENT re-uses the old parent */
1787 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
1788 p->real_parent = current->real_parent;
1789 p->parent_exec_id = current->parent_exec_id;
1791 p->real_parent = current;
1792 p->parent_exec_id = current->self_exec_id;
1795 spin_lock(&current->sighand->siglock);
1798 * Copy seccomp details explicitly here, in case they were changed
1799 * before holding sighand lock.
1804 * Process group and session signals need to be delivered to just the
1805 * parent before the fork or both the parent and the child after the
1806 * fork. Restart if a signal comes in before we add the new process to
1807 * its process group.
1808 * A fatal signal pending means that current will exit, so the new
1809 * thread can't slip out of an OOM kill (or normal SIGKILL).
1811 recalc_sigpending();
1812 if (signal_pending(current)) {
1813 spin_unlock(&current->sighand->siglock);
1814 write_unlock_irq(&tasklist_lock);
1815 retval = -ERESTARTNOINTR;
1816 goto bad_fork_cancel_cgroup;
1819 if (likely(p->pid)) {
1820 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
1822 init_task_pid(p, PIDTYPE_PID, pid);
1823 if (thread_group_leader(p)) {
1824 init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
1825 init_task_pid(p, PIDTYPE_SID, task_session(current));
1827 if (is_child_reaper(pid)) {
1828 ns_of_pid(pid)->child_reaper = p;
1829 p->signal->flags |= SIGNAL_UNKILLABLE;
1832 p->signal->leader_pid = pid;
1833 p->signal->tty = tty_kref_get(current->signal->tty);
1834 list_add_tail(&p->sibling, &p->real_parent->children);
1835 list_add_tail_rcu(&p->tasks, &init_task.tasks);
1836 attach_pid(p, PIDTYPE_PGID);
1837 attach_pid(p, PIDTYPE_SID);
1838 __this_cpu_inc(process_counts);
1840 current->signal->nr_threads++;
1841 atomic_inc(&current->signal->live);
1842 atomic_inc(&current->signal->sigcnt);
1843 list_add_tail_rcu(&p->thread_group,
1844 &p->group_leader->thread_group);
1845 list_add_tail_rcu(&p->thread_node,
1846 &p->signal->thread_head);
1848 attach_pid(p, PIDTYPE_PID);
1853 spin_unlock(&current->sighand->siglock);
1854 syscall_tracepoint_update(p);
1855 write_unlock_irq(&tasklist_lock);
1857 proc_fork_connector(p);
1858 cgroup_post_fork(p);
1859 threadgroup_change_end(current);
1862 trace_task_newtask(p, clone_flags);
1863 uprobe_copy_process(p, clone_flags);
1867 bad_fork_cancel_cgroup:
1868 cgroup_cancel_fork(p);
1870 threadgroup_change_end(current);
1871 if (pid != &init_struct_pid)
1873 bad_fork_cleanup_thread:
1875 bad_fork_cleanup_io:
1878 bad_fork_cleanup_namespaces:
1879 exit_task_namespaces(p);
1880 bad_fork_cleanup_mm:
1883 bad_fork_cleanup_signal:
1884 if (!(clone_flags & CLONE_THREAD))
1885 free_signal_struct(p->signal);
1886 bad_fork_cleanup_sighand:
1887 __cleanup_sighand(p->sighand);
1888 bad_fork_cleanup_fs:
1889 exit_fs(p); /* blocking */
1890 bad_fork_cleanup_files:
1891 exit_files(p); /* blocking */
1892 bad_fork_cleanup_semundo:
1894 bad_fork_cleanup_audit:
1896 bad_fork_cleanup_perf:
1897 perf_event_free_task(p);
1898 bad_fork_cleanup_policy:
1900 mpol_put(p->mempolicy);
1901 bad_fork_cleanup_threadgroup_lock:
1903 delayacct_tsk_free(p);
1904 bad_fork_cleanup_count:
1905 atomic_dec(&p->cred->user->processes);
1908 p->state = TASK_DEAD;
1912 return ERR_PTR(retval);
1915 static inline void init_idle_pids(struct pid_link *links)
1919 for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
1920 INIT_HLIST_NODE(&links[type].node); /* not really needed */
1921 links[type].pid = &init_struct_pid;
1925 struct task_struct *fork_idle(int cpu)
1927 struct task_struct *task;
1928 task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
1930 if (!IS_ERR(task)) {
1931 init_idle_pids(task->pids);
1932 init_idle(task, cpu);
1939 * Ok, this is the main fork-routine.
1941 * It copies the process, and if successful kick-starts
1942 * it and waits for it to finish using the VM if required.
1944 long _do_fork(unsigned long clone_flags,
1945 unsigned long stack_start,
1946 unsigned long stack_size,
1947 int __user *parent_tidptr,
1948 int __user *child_tidptr,
1951 struct task_struct *p;
1956 * Determine whether and which event to report to ptracer. When
1957 * called from kernel_thread or CLONE_UNTRACED is explicitly
1958 * requested, no event is reported; otherwise, report if the event
1959 * for the type of forking is enabled.
1961 if (!(clone_flags & CLONE_UNTRACED)) {
1962 if (clone_flags & CLONE_VFORK)
1963 trace = PTRACE_EVENT_VFORK;
1964 else if ((clone_flags & CSIGNAL) != SIGCHLD)
1965 trace = PTRACE_EVENT_CLONE;
1967 trace = PTRACE_EVENT_FORK;
1969 if (likely(!ptrace_event_enabled(current, trace)))
1973 p = copy_process(clone_flags, stack_start, stack_size,
1974 child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
1975 add_latent_entropy();
1977 * Do this prior to waking up the new thread - the thread pointer
1978 * might become invalid after that point, if the thread exits quickly.
1981 struct completion vfork;
1984 trace_sched_process_fork(current, p);
1986 pid = get_task_pid(p, PIDTYPE_PID);
1989 if (clone_flags & CLONE_PARENT_SETTID)
1990 put_user(nr, parent_tidptr);
1992 if (clone_flags & CLONE_VFORK) {
1993 p->vfork_done = &vfork;
1994 init_completion(&vfork);
1998 wake_up_new_task(p);
2000 /* forking complete and child started to run, tell ptracer */
2001 if (unlikely(trace))
2002 ptrace_event_pid(trace, pid);
2004 if (clone_flags & CLONE_VFORK) {
2005 if (!wait_for_vfork_done(p, &vfork))
2006 ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
2016 #ifndef CONFIG_HAVE_COPY_THREAD_TLS
2017 /* For compatibility with architectures that call do_fork directly rather than
2018 * using the syscall entry points below. */
2019 long do_fork(unsigned long clone_flags,
2020 unsigned long stack_start,
2021 unsigned long stack_size,
2022 int __user *parent_tidptr,
2023 int __user *child_tidptr)
2025 return _do_fork(clone_flags, stack_start, stack_size,
2026 parent_tidptr, child_tidptr, 0);
2031 * Create a kernel thread.
2033 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
2035 return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
2036 (unsigned long)arg, NULL, NULL, 0);
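/*
 * Usage sketch (illustrative only; my_thread_fn is a hypothetical
 * callback): most in-kernel users go through the kthread_create()/
 * kthread_run() wrappers rather than calling kernel_thread() directly.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		return 0;
 *	}
 *
 *	pid_t pid = kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 */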
2039 #ifdef __ARCH_WANT_SYS_FORK
2040 SYSCALL_DEFINE0(fork)
2043 return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0);
2045 /* can not support in nommu mode */
2051 #ifdef __ARCH_WANT_SYS_VFORK
2052 SYSCALL_DEFINE0(vfork)
2054 return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
2059 #ifdef __ARCH_WANT_SYS_CLONE
2060 #ifdef CONFIG_CLONE_BACKWARDS
2061 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2062 int __user *, parent_tidptr,
2064 int __user *, child_tidptr)
2065 #elif defined(CONFIG_CLONE_BACKWARDS2)
2066 SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
2067 int __user *, parent_tidptr,
2068 int __user *, child_tidptr,
2070 #elif defined(CONFIG_CLONE_BACKWARDS3)
2071 SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
2073 int __user *, parent_tidptr,
2074 int __user *, child_tidptr,
2077 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
2078 int __user *, parent_tidptr,
2079 int __user *, child_tidptr,
2083 return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls);
2087 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
2088 #define ARCH_MIN_MMSTRUCT_ALIGN 0
2091 static void sighand_ctor(void *data)
2093 struct sighand_struct *sighand = data;
2095 spin_lock_init(&sighand->siglock);
2096 init_waitqueue_head(&sighand->signalfd_wqh);
2099 void __init proc_caches_init(void)
2101 sighand_cachep = kmem_cache_create("sighand_cache",
2102 sizeof(struct sighand_struct), 0,
2103 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
2104 SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
2105 signal_cachep = kmem_cache_create("signal_cache",
2106 sizeof(struct signal_struct), 0,
2107 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
2109 files_cachep = kmem_cache_create("files_cache",
2110 sizeof(struct files_struct), 0,
2111 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
2113 fs_cachep = kmem_cache_create("fs_cache",
2114 sizeof(struct fs_struct), 0,
2115 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
2118 * FIXME! The "sizeof(struct mm_struct)" currently includes the
2119 * whole struct cpumask for the OFFSTACK case. We could change
2120 * this to *only* allocate as much of it as required by the
2121 * maximum number of CPUs we can ever have. The cpumask_allocation
2122 * is at the end of the structure, exactly for that reason.
2124 mm_cachep = kmem_cache_create("mm_struct",
2125 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
2126 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
2128 vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
2130 nsproxy_cache_init();
2134 * Check constraints on flags passed to the unshare system call.
2136 static int check_unshare_flags(unsigned long unshare_flags)
2138 if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
2139 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
2140 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
2141 CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP))
2144 * Not implemented, but pretend it works if there is nothing
2145 * to unshare. Note that unsharing the address space or the
2146 * signal handlers also needs to unshare the signal queues (aka
2149 if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
2150 if (!thread_group_empty(current))
2153 if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
2154 if (atomic_read(¤t->sighand->count) > 1)
2157 if (unshare_flags & CLONE_VM) {
2158 if (!current_is_single_threaded())
2166 * Unshare the filesystem structure if it is being shared
2168 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
2170 struct fs_struct *fs = current->fs;
2172 if (!(unshare_flags & CLONE_FS) || !fs)
2175 /* don't need lock here; in the worst case we'll do useless copy */
2179 *new_fsp = copy_fs_struct(fs);
2187 * Unshare file descriptor table if it is being shared
2189 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
2191 struct files_struct *fd = current->files;
2194 if ((unshare_flags & CLONE_FILES) &&
2195 (fd && atomic_read(&fd->count) > 1)) {
2196 *new_fdp = dup_fd(fd, &error);
2205 * unshare allows a process to 'unshare' part of the process
2206 * context which was originally shared using clone. copy_*
2207 * functions used by do_fork() cannot be used here directly
2208 * because they modify an inactive task_struct that is being
2209 * constructed. Here we are modifying the current, active,
2212 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
2214 struct fs_struct *fs, *new_fs = NULL;
2215 struct files_struct *fd, *new_fd = NULL;
2216 struct cred *new_cred = NULL;
2217 struct nsproxy *new_nsproxy = NULL;
2222 * If unsharing a user namespace, we must also unshare the thread group
2223 * and the filesystem root and working directories.
2225 if (unshare_flags & CLONE_NEWUSER)
2226 unshare_flags |= CLONE_THREAD | CLONE_FS;
2228 * If unsharing the VM, we must also unshare signal handlers.
2230 if (unshare_flags & CLONE_VM)
2231 unshare_flags |= CLONE_SIGHAND;
2233 * If unsharing signal handlers, we must also unshare the signal queues.
2235 if (unshare_flags & CLONE_SIGHAND)
2236 unshare_flags |= CLONE_THREAD;
2238 * If unsharing a namespace, we must also unshare filesystem information.
2240 if (unshare_flags & CLONE_NEWNS)
2241 unshare_flags |= CLONE_FS;
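/*
 * Worked example (illustrative): unshare(CLONE_NEWUSER) ends up here as
 * CLONE_NEWUSER | CLONE_THREAD | CLONE_FS after the implications above,
 * and check_unshare_flags() will then reject it with -EINVAL for a
 * multi-threaded caller because of the CLONE_THREAD check.
 */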
2243 err = check_unshare_flags(unshare_flags);
2245 goto bad_unshare_out;
2247 * CLONE_NEWIPC must also detach from the undolist: after switching
2248 * to a new ipc namespace, the semaphore arrays from the old
2249 * namespace are unreachable.
2251 if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
2253 err = unshare_fs(unshare_flags, &new_fs);
2255 goto bad_unshare_out;
2256 err = unshare_fd(unshare_flags, &new_fd);
2258 goto bad_unshare_cleanup_fs;
2259 err = unshare_userns(unshare_flags, &new_cred);
2261 goto bad_unshare_cleanup_fd;
2262 err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
2265 goto bad_unshare_cleanup_cred;
2267 if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
2270 * CLONE_SYSVSEM is equivalent to sys_exit().
2274 if (unshare_flags & CLONE_NEWIPC) {
2275 /* Orphan segments in old ns (see sem above). */
2277 shm_init_task(current);
2281 switch_task_namespaces(current, new_nsproxy);
2287 spin_lock(&fs->lock);
2288 current->fs = new_fs;
2293 spin_unlock(&fs->lock);
2297 fd = current->files;
2298 current->files = new_fd;
2302 task_unlock(current);
2305 /* Install the new user namespace */
2306 commit_creds(new_cred);
2311 bad_unshare_cleanup_cred:
2314 bad_unshare_cleanup_fd:
2316 put_files_struct(new_fd);
2318 bad_unshare_cleanup_fs:
2320 free_fs_struct(new_fs);
2327 * Helper to unshare the files of the current task.
2328 * We don't want to expose copy_files internals to
2329 * the exec layer of the kernel.
2332 int unshare_files(struct files_struct **displaced)
2334 struct task_struct *task = current;
2335 struct files_struct *copy = NULL;
2338 error = unshare_fd(CLONE_FILES, &copy);
2339 if (error || !copy) {
2343 *displaced = task->files;
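/*
 * Usage sketch (illustrative, mirroring the exec path): the caller keeps
 * the displaced table until the old descriptors are no longer needed and
 * then drops it with put_files_struct().
 *
 *	struct files_struct *displaced;
 *	int retval = unshare_files(&displaced);
 *
 *	if (retval)
 *		return retval;
 *	...
 *	if (displaced)
 *		put_files_struct(displaced);
 */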
2350 int sysctl_max_threads(struct ctl_table *table, int write,
2351 void __user *buffer, size_t *lenp, loff_t *ppos)
2355 int threads = max_threads;
2356 int min = MIN_THREADS;
2357 int max = MAX_THREADS;
2364 ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2368 set_max_threads(threads);