1 /*
2  *  linux/kernel/fork.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 /*
8  *  'fork.c' contains the help-routines for the 'fork' system call
9  * (see also entry.S and others).
10  * Fork is rather simple, once you get the hang of it, but the memory
11  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
12  */
13
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/unistd.h>
17 #include <linux/module.h>
18 #include <linux/vmalloc.h>
19 #include <linux/completion.h>
20 #include <linux/personality.h>
21 #include <linux/mempolicy.h>
22 #include <linux/sem.h>
23 #include <linux/file.h>
24 #include <linux/fdtable.h>
25 #include <linux/iocontext.h>
26 #include <linux/key.h>
27 #include <linux/binfmts.h>
28 #include <linux/mman.h>
29 #include <linux/mmu_notifier.h>
30 #include <linux/fs.h>
31 #include <linux/mm.h>
32 #include <linux/vmacache.h>
33 #include <linux/nsproxy.h>
34 #include <linux/capability.h>
35 #include <linux/cpu.h>
36 #include <linux/cgroup.h>
37 #include <linux/security.h>
38 #include <linux/hugetlb.h>
39 #include <linux/seccomp.h>
40 #include <linux/swap.h>
41 #include <linux/syscalls.h>
42 #include <linux/jiffies.h>
43 #include <linux/futex.h>
44 #include <linux/compat.h>
45 #include <linux/kthread.h>
46 #include <linux/task_io_accounting_ops.h>
47 #include <linux/rcupdate.h>
48 #include <linux/ptrace.h>
49 #include <linux/mount.h>
50 #include <linux/audit.h>
51 #include <linux/memcontrol.h>
52 #include <linux/ftrace.h>
53 #include <linux/proc_fs.h>
54 #include <linux/profile.h>
55 #include <linux/rmap.h>
56 #include <linux/ksm.h>
57 #include <linux/acct.h>
58 #include <linux/tsacct_kern.h>
59 #include <linux/cn_proc.h>
60 #include <linux/freezer.h>
61 #include <linux/kaiser.h>
62 #include <linux/delayacct.h>
63 #include <linux/taskstats_kern.h>
64 #include <linux/random.h>
65 #include <linux/tty.h>
66 #include <linux/blkdev.h>
67 #include <linux/fs_struct.h>
68 #include <linux/magic.h>
69 #include <linux/perf_event.h>
70 #include <linux/posix-timers.h>
71 #include <linux/user-return-notifier.h>
72 #include <linux/oom.h>
73 #include <linux/khugepaged.h>
74 #include <linux/signalfd.h>
75 #include <linux/uprobes.h>
76 #include <linux/aio.h>
77 #include <linux/compiler.h>
78 #include <linux/sysctl.h>
79
80 #include <asm/pgtable.h>
81 #include <asm/pgalloc.h>
82 #include <asm/uaccess.h>
83 #include <asm/mmu_context.h>
84 #include <asm/cacheflush.h>
85 #include <asm/tlbflush.h>
86
87 #include <trace/events/sched.h>
88
89 #define CREATE_TRACE_POINTS
90 #include <trace/events/task.h>
91
92 /*
93  * Minimum number of threads to boot the kernel
94  */
95 #define MIN_THREADS 20
96
97 /*
98  * Maximum number of threads
99  */
100 #define MAX_THREADS FUTEX_TID_MASK
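/*
 * Illustrative note (added, not in the original source): FUTEX_TID_MASK
 * is 0x3fffffff because a robust-futex word stores the owner's TID in
 * its low 30 bits, next to the FUTEX_WAITERS (0x80000000) and
 * FUTEX_OWNER_DIED (0x40000000) flag bits.  Capping nr_threads at this
 * value guarantees that every TID fits into that field.
 */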
101
102 /*
103  * Protected counters by write_lock_irq(&tasklist_lock)
104  */
105 unsigned long total_forks;      /* Handle normal Linux uptimes. */
106 int nr_threads;                 /* The idle threads do not count. */
107
108 int max_threads;                /* tunable limit on nr_threads */
109
110 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
111
112 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
113
114 #ifdef CONFIG_PROVE_RCU
115 int lockdep_tasklist_lock_is_held(void)
116 {
117         return lockdep_is_held(&tasklist_lock);
118 }
119 EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
120 #endif /* #ifdef CONFIG_PROVE_RCU */
121
122 int nr_processes(void)
123 {
124         int cpu;
125         int total = 0;
126
127         for_each_possible_cpu(cpu)
128                 total += per_cpu(process_counts, cpu);
129
130         return total;
131 }
132
133 void __weak arch_release_task_struct(struct task_struct *tsk)
134 {
135 }
136
137 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
138 static struct kmem_cache *task_struct_cachep;
139
140 static inline struct task_struct *alloc_task_struct_node(int node)
141 {
142         return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
143 }
144
145 static inline void free_task_struct(struct task_struct *tsk)
146 {
147         kmem_cache_free(task_struct_cachep, tsk);
148 }
149 #endif
150
151 void __weak arch_release_thread_info(struct thread_info *ti)
152 {
153 }
154
155 #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
156
157 /*
158  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
159  * kmemcache based allocator.
160  */
161 # if THREAD_SIZE >= PAGE_SIZE
162 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
163                                                   int node)
164 {
165         struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
166                                                   THREAD_SIZE_ORDER);
167
168         return page ? page_address(page) : NULL;
169 }
170
171 static inline void free_thread_info(struct thread_info *ti)
172 {
173         kaiser_unmap_thread_stack(ti);
174         free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
175 }
176 # else
177 static struct kmem_cache *thread_info_cache;
178
179 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
180                                                   int node)
181 {
182         return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
183 }
184
185 static void free_thread_info(struct thread_info *ti)
186 {
187         kmem_cache_free(thread_info_cache, ti);
188 }
189
190 void thread_info_cache_init(void)
191 {
192         thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
193                                               THREAD_SIZE, 0, NULL);
194         BUG_ON(thread_info_cache == NULL);
195 }
196 # endif
197 #endif
198
199 /* SLAB cache for signal_struct structures (tsk->signal) */
200 static struct kmem_cache *signal_cachep;
201
202 /* SLAB cache for sighand_struct structures (tsk->sighand) */
203 struct kmem_cache *sighand_cachep;
204
205 /* SLAB cache for files_struct structures (tsk->files) */
206 struct kmem_cache *files_cachep;
207
208 /* SLAB cache for fs_struct structures (tsk->fs) */
209 struct kmem_cache *fs_cachep;
210
211 /* SLAB cache for vm_area_struct structures */
212 struct kmem_cache *vm_area_cachep;
213
214 /* SLAB cache for mm_struct structures (tsk->mm) */
215 static struct kmem_cache *mm_cachep;
216
217 static void account_kernel_stack(struct thread_info *ti, int account)
218 {
219         struct zone *zone = page_zone(virt_to_page(ti));
220
221         mod_zone_page_state(zone, NR_KERNEL_STACK, account);
222 }
223
224 void free_task(struct task_struct *tsk)
225 {
226         account_kernel_stack(tsk->stack, -1);
227         arch_release_thread_info(tsk->stack);
228         free_thread_info(tsk->stack);
229         rt_mutex_debug_task_free(tsk);
230         ftrace_graph_exit_task(tsk);
231         put_seccomp_filter(tsk);
232         arch_release_task_struct(tsk);
233         free_task_struct(tsk);
234 }
235 EXPORT_SYMBOL(free_task);
236
237 static inline void free_signal_struct(struct signal_struct *sig)
238 {
239         taskstats_tgid_free(sig);
240         sched_autogroup_exit(sig);
241         kmem_cache_free(signal_cachep, sig);
242 }
243
244 static inline void put_signal_struct(struct signal_struct *sig)
245 {
246         if (atomic_dec_and_test(&sig->sigcnt))
247                 free_signal_struct(sig);
248 }
249
250 void __put_task_struct(struct task_struct *tsk)
251 {
252         WARN_ON(!tsk->exit_state);
253         WARN_ON(atomic_read(&tsk->usage));
254         WARN_ON(tsk == current);
255
256         cgroup_free(tsk);
257         task_numa_free(tsk, true);
258         security_task_free(tsk);
259         exit_creds(tsk);
260         delayacct_tsk_free(tsk);
261         put_signal_struct(tsk->signal);
262
263         if (!profile_handoff_task(tsk))
264                 free_task(tsk);
265 }
266 EXPORT_SYMBOL_GPL(__put_task_struct);
267
268 void __init __weak arch_task_cache_init(void) { }
269
270 /*
271  * set_max_threads
272  */
273 static void set_max_threads(unsigned int max_threads_suggested)
274 {
275         u64 threads;
276
277         /*
278          * The number of threads shall be limited such that the thread
279          * structures may only consume a small part of the available memory.
280          */
281         if (fls64(totalram_pages) + fls64(PAGE_SIZE) > 64)
282                 threads = MAX_THREADS;
283         else
284                 threads = div64_u64((u64) totalram_pages * (u64) PAGE_SIZE,
285                                     (u64) THREAD_SIZE * 8UL);
286
287         if (threads > max_threads_suggested)
288                 threads = max_threads_suggested;
289
290         max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
291 }
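/*
 * Worked example (added for illustration): with 4 GiB of RAM and a
 * 16 KiB THREAD_SIZE, the computation above gives
 *
 *     threads = 2^32 / (2^14 * 8) = 2^32 / 2^17 = 32768
 *
 * i.e. thread structures may occupy at most 1/8th of memory.  The
 * fls64() test bails out to MAX_THREADS when the multiplication
 * totalram_pages * PAGE_SIZE would overflow 64 bits, and the final
 * clamp keeps the result within [MIN_THREADS, MAX_THREADS].
 */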
292
293 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
294 /* Initialized by the architecture: */
295 int arch_task_struct_size __read_mostly;
296 #endif
297
298 void __init fork_init(void)
299 {
300 #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
301 #ifndef ARCH_MIN_TASKALIGN
302 #define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
303 #endif
304         /* create a slab on which task_structs can be allocated */
305         task_struct_cachep =
306                 kmem_cache_create("task_struct", arch_task_struct_size,
307                         ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
308 #endif
309
310         /* do the arch specific task caches init */
311         arch_task_cache_init();
312
313         set_max_threads(MAX_THREADS);
314
315         init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
316         init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
317         init_task.signal->rlim[RLIMIT_SIGPENDING] =
318                 init_task.signal->rlim[RLIMIT_NPROC];
319 }
320
321 int __weak arch_dup_task_struct(struct task_struct *dst,
322                                                struct task_struct *src)
323 {
324         *dst = *src;
325         return 0;
326 }
327
328 void set_task_stack_end_magic(struct task_struct *tsk)
329 {
330         unsigned long *stackend;
331
332         stackend = end_of_stack(tsk);
333         *stackend = STACK_END_MAGIC;    /* for overflow detection */
334 }
335
336 static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
337 {
338         struct task_struct *tsk;
339         struct thread_info *ti;
340         int err;
341
342         if (node == NUMA_NO_NODE)
343                 node = tsk_fork_get_node(orig);
344         tsk = alloc_task_struct_node(node);
345         if (!tsk)
346                 return NULL;
347
348         ti = alloc_thread_info_node(tsk, node);
349         if (!ti)
350                 goto free_tsk;
351
352         err = arch_dup_task_struct(tsk, orig);
353         if (err)
354                 goto free_ti;
355
356         tsk->stack = ti;
357
358         err = kaiser_map_thread_stack(tsk->stack);
359         if (err)
360                 goto free_ti;
361 #ifdef CONFIG_SECCOMP
362         /*
363          * We must handle setting up seccomp filters once we're under
364          * the sighand lock in case orig has changed between now and
365          * then. Until then, filter must be NULL to avoid messing up
366          * the usage counts on the error path calling free_task.
367          */
368         tsk->seccomp.filter = NULL;
369 #endif
370
371         setup_thread_stack(tsk, orig);
372         clear_user_return_notifier(tsk);
373         clear_tsk_need_resched(tsk);
374         set_task_stack_end_magic(tsk);
375
376 #ifdef CONFIG_CC_STACKPROTECTOR
377         tsk->stack_canary = get_random_long();
378 #endif
379
380         /*
381          * One for us, one for whoever does the "release_task()" (usually
382          * parent)
383          */
384         atomic_set(&tsk->usage, 2);
385 #ifdef CONFIG_BLK_DEV_IO_TRACE
386         tsk->btrace_seq = 0;
387 #endif
388         tsk->splice_pipe = NULL;
389         tsk->task_frag.page = NULL;
390         tsk->wake_q.next = NULL;
391
392         account_kernel_stack(ti, 1);
393
394         return tsk;
395
396 free_ti:
397         free_thread_info(ti);
398 free_tsk:
399         free_task_struct(tsk);
400         return NULL;
401 }
402
403 #ifdef CONFIG_MMU
404 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
405 {
406         struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
407         struct rb_node **rb_link, *rb_parent;
408         int retval;
409         unsigned long charge;
410
411         uprobe_start_dup_mmap();
412         down_write(&oldmm->mmap_sem);
413         flush_cache_dup_mm(oldmm);
414         uprobe_dup_mmap(oldmm, mm);
415         /*
416          * Not linked in yet - no deadlock potential:
417          */
418         down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
419
420         /* No ordering required: file already has been exposed. */
421         RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
422
423         mm->total_vm = oldmm->total_vm;
424         mm->shared_vm = oldmm->shared_vm;
425         mm->exec_vm = oldmm->exec_vm;
426         mm->stack_vm = oldmm->stack_vm;
427
428         rb_link = &mm->mm_rb.rb_node;
429         rb_parent = NULL;
430         pprev = &mm->mmap;
431         retval = ksm_fork(mm, oldmm);
432         if (retval)
433                 goto out;
434         retval = khugepaged_fork(mm, oldmm);
435         if (retval)
436                 goto out;
437
438         prev = NULL;
439         for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
440                 struct file *file;
441
442                 if (mpnt->vm_flags & VM_DONTCOPY) {
443                         vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
444                                                         -vma_pages(mpnt));
445                         continue;
446                 }
447                 charge = 0;
448                 if (mpnt->vm_flags & VM_ACCOUNT) {
449                         unsigned long len = vma_pages(mpnt);
450
451                         if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
452                                 goto fail_nomem;
453                         charge = len;
454                 }
455                 tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
456                 if (!tmp)
457                         goto fail_nomem;
458                 *tmp = *mpnt;
459                 INIT_LIST_HEAD(&tmp->anon_vma_chain);
460                 retval = vma_dup_policy(mpnt, tmp);
461                 if (retval)
462                         goto fail_nomem_policy;
463                 tmp->vm_mm = mm;
464                 if (anon_vma_fork(tmp, mpnt))
465                         goto fail_nomem_anon_vma_fork;
466                 tmp->vm_flags &=
467                         ~(VM_LOCKED|VM_LOCKONFAULT|VM_UFFD_MISSING|VM_UFFD_WP);
468                 tmp->vm_next = tmp->vm_prev = NULL;
469                 tmp->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
470                 file = tmp->vm_file;
471                 if (file) {
472                         struct inode *inode = file_inode(file);
473                         struct address_space *mapping = file->f_mapping;
474
475                         get_file(file);
476                         if (tmp->vm_flags & VM_DENYWRITE)
477                                 atomic_dec(&inode->i_writecount);
478                         i_mmap_lock_write(mapping);
479                         if (tmp->vm_flags & VM_SHARED)
480                                 atomic_inc(&mapping->i_mmap_writable);
481                         flush_dcache_mmap_lock(mapping);
482                         /* insert tmp into the share list, just after mpnt */
483                         vma_interval_tree_insert_after(tmp, mpnt,
484                                         &mapping->i_mmap);
485                         flush_dcache_mmap_unlock(mapping);
486                         i_mmap_unlock_write(mapping);
487                 }
488
489                 /*
490                  * Clear hugetlb-related page reserves for children. This only
491                  * affects MAP_PRIVATE mappings. Faults generated by the child
492                  * are not guaranteed to succeed, even if read-only
493                  */
494                 if (is_vm_hugetlb_page(tmp))
495                         reset_vma_resv_huge_pages(tmp);
496
497                 /*
498                  * Link in the new vma and copy the page table entries.
499                  */
500                 *pprev = tmp;
501                 pprev = &tmp->vm_next;
502                 tmp->vm_prev = prev;
503                 prev = tmp;
504
505                 __vma_link_rb(mm, tmp, rb_link, rb_parent);
506                 rb_link = &tmp->vm_rb.rb_right;
507                 rb_parent = &tmp->vm_rb;
508
509                 mm->map_count++;
510                 retval = copy_page_range(mm, oldmm, mpnt);
511
512                 if (tmp->vm_ops && tmp->vm_ops->open)
513                         tmp->vm_ops->open(tmp);
514
515                 if (retval)
516                         goto out;
517         }
518         /* a new mm has just been created */
519         arch_dup_mmap(oldmm, mm);
520         retval = 0;
521 out:
522         up_write(&mm->mmap_sem);
523         flush_tlb_mm(oldmm);
524         up_write(&oldmm->mmap_sem);
525         uprobe_end_dup_mmap();
526         return retval;
527 fail_nomem_anon_vma_fork:
528         mpol_put(vma_policy(tmp));
529 fail_nomem_policy:
530         kmem_cache_free(vm_area_cachep, tmp);
531 fail_nomem:
532         retval = -ENOMEM;
533         vm_unacct_memory(charge);
534         goto out;
535 }
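/*
 * Note (added commentary): dup_mmap() above duplicates only the VMA
 * tree and its metadata.  The page tables are copied separately by
 * copy_page_range(), which write-protects shared anonymous pages in
 * both parent and child, so the first write on either side takes a
 * copy-on-write fault instead of every page being copied eagerly at
 * fork time.
 */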
536
537 static inline int mm_alloc_pgd(struct mm_struct *mm)
538 {
539         mm->pgd = pgd_alloc(mm);
540         if (unlikely(!mm->pgd))
541                 return -ENOMEM;
542         return 0;
543 }
544
545 static inline void mm_free_pgd(struct mm_struct *mm)
546 {
547         pgd_free(mm, mm->pgd);
548 }
549 #else
550 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
551 {
552         down_write(&oldmm->mmap_sem);
553         RCU_INIT_POINTER(mm->exe_file, get_mm_exe_file(oldmm));
554         up_write(&oldmm->mmap_sem);
555         return 0;
556 }
557 #define mm_alloc_pgd(mm)        (0)
558 #define mm_free_pgd(mm)
559 #endif /* CONFIG_MMU */
560
561 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
562
563 #define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
564 #define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))
565
566 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
567
568 static int __init coredump_filter_setup(char *s)
569 {
570         default_dump_filter =
571                 (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
572                 MMF_DUMP_FILTER_MASK;
573         return 1;
574 }
575
576 __setup("coredump_filter=", coredump_filter_setup);
577
578 #include <linux/init_task.h>
579
580 static void mm_init_aio(struct mm_struct *mm)
581 {
582 #ifdef CONFIG_AIO
583         spin_lock_init(&mm->ioctx_lock);
584         mm->ioctx_table = NULL;
585 #endif
586 }
587
588 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
589 {
590 #ifdef CONFIG_MEMCG
591         mm->owner = p;
592 #endif
593 }
594
595 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
596         struct user_namespace *user_ns)
597 {
598         mm->mmap = NULL;
599         mm->mm_rb = RB_ROOT;
600         mm->vmacache_seqnum = 0;
601         atomic_set(&mm->mm_users, 1);
602         atomic_set(&mm->mm_count, 1);
603         init_rwsem(&mm->mmap_sem);
604         INIT_LIST_HEAD(&mm->mmlist);
605         mm->core_state = NULL;
606         atomic_long_set(&mm->nr_ptes, 0);
607         mm_nr_pmds_init(mm);
608         mm->map_count = 0;
609         mm->locked_vm = 0;
610         mm->pinned_vm = 0;
611         memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
612         spin_lock_init(&mm->page_table_lock);
613         mm_init_cpumask(mm);
614         mm_init_aio(mm);
615         mm_init_owner(mm, p);
616         mmu_notifier_mm_init(mm);
617         clear_tlb_flush_pending(mm);
618 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
619         mm->pmd_huge_pte = NULL;
620 #endif
621
622         if (current->mm) {
623                 mm->flags = current->mm->flags & MMF_INIT_MASK;
624                 mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
625         } else {
626                 mm->flags = default_dump_filter;
627                 mm->def_flags = 0;
628         }
629
630         if (mm_alloc_pgd(mm))
631                 goto fail_nopgd;
632
633         if (init_new_context(p, mm))
634                 goto fail_nocontext;
635
636         mm->user_ns = get_user_ns(user_ns);
637         return mm;
638
639 fail_nocontext:
640         mm_free_pgd(mm);
641 fail_nopgd:
642         free_mm(mm);
643         return NULL;
644 }
645
646 static void check_mm(struct mm_struct *mm)
647 {
648         int i;
649
650         for (i = 0; i < NR_MM_COUNTERS; i++) {
651                 long x = atomic_long_read(&mm->rss_stat.count[i]);
652
653                 if (unlikely(x))
654                         printk(KERN_ALERT "BUG: Bad rss-counter state "
655                                           "mm:%p idx:%d val:%ld\n", mm, i, x);
656         }
657
658         if (atomic_long_read(&mm->nr_ptes))
659                 pr_alert("BUG: non-zero nr_ptes on freeing mm: %ld\n",
660                                 atomic_long_read(&mm->nr_ptes));
661         if (mm_nr_pmds(mm))
662                 pr_alert("BUG: non-zero nr_pmds on freeing mm: %ld\n",
663                                 mm_nr_pmds(mm));
664
665 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
666         VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
667 #endif
668 }
669
670 /*
671  * Allocate and initialize an mm_struct.
672  */
673 struct mm_struct *mm_alloc(void)
674 {
675         struct mm_struct *mm;
676
677         mm = allocate_mm();
678         if (!mm)
679                 return NULL;
680
681         memset(mm, 0, sizeof(*mm));
682         return mm_init(mm, current, current_user_ns());
683 }
684
685 /*
686  * Called when the last reference to the mm
687  * is dropped: either by a lazy thread or by
688  * mmput. Free the page directory and the mm.
689  */
690 void __mmdrop(struct mm_struct *mm)
691 {
692         BUG_ON(mm == &init_mm);
693         mm_free_pgd(mm);
694         destroy_context(mm);
695         mmu_notifier_mm_destroy(mm);
696         check_mm(mm);
697         put_user_ns(mm->user_ns);
698         free_mm(mm);
699 }
700 EXPORT_SYMBOL_GPL(__mmdrop);
701
702 /*
703  * Decrement the use count and release all resources for an mm.
704  */
705 void mmput(struct mm_struct *mm)
706 {
707         might_sleep();
708
709         if (atomic_dec_and_test(&mm->mm_users)) {
710                 uprobe_clear_state(mm);
711                 exit_aio(mm);
712                 ksm_exit(mm);
713                 khugepaged_exit(mm); /* must run before exit_mmap */
714                 exit_mmap(mm);
715                 set_mm_exe_file(mm, NULL);
716                 if (!list_empty(&mm->mmlist)) {
717                         spin_lock(&mmlist_lock);
718                         list_del(&mm->mmlist);
719                         spin_unlock(&mmlist_lock);
720                 }
721                 if (mm->binfmt)
722                         module_put(mm->binfmt->module);
723                 mmdrop(mm);
724         }
725 }
726 EXPORT_SYMBOL_GPL(mmput);
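/*
 * Added commentary on the two reference counts involved above:
 * mm_users counts users of the address space (threads sharing the mm,
 * get_task_mm() callers); when it drops to zero, mmput() tears down
 * the mappings.  mm_count counts references to the mm_struct itself,
 * e.g. from kernel threads running in lazy-TLB mode; only when it too
 * drops to zero does mmdrop() invoke __mmdrop() to free the page
 * directory and the structure.
 */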
727
728 /**
729  * set_mm_exe_file - change a reference to the mm's executable file
730  *
731  * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
732  *
733  * Main users are mmput() and sys_execve(). Callers prevent concurrent
734  * invocations: in mmput() nobody alive is left, in execve the task is
735  * single-threaded. sys_prctl(PR_SET_MM_MAP/EXE_FILE) also needs to set
736  * the mm->exe_file, but does so without using set_mm_exe_file() in
737  * order to avoid the need for any locks.
738  */
739 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
740 {
741         struct file *old_exe_file;
742
743         /*
744          * It is safe to dereference the exe_file without RCU as
745          * this function is only called if nobody else can access
746          * this mm -- see comment above for justification.
747          */
748         old_exe_file = rcu_dereference_raw(mm->exe_file);
749
750         if (new_exe_file)
751                 get_file(new_exe_file);
752         rcu_assign_pointer(mm->exe_file, new_exe_file);
753         if (old_exe_file)
754                 fput(old_exe_file);
755 }
756
757 /**
758  * get_mm_exe_file - acquire a reference to the mm's executable file
759  *
760  * Returns %NULL if mm has no associated executable file.
761  * User must release file via fput().
762  */
763 struct file *get_mm_exe_file(struct mm_struct *mm)
764 {
765         struct file *exe_file;
766
767         rcu_read_lock();
768         exe_file = rcu_dereference(mm->exe_file);
769         if (exe_file && !get_file_rcu(exe_file))
770                 exe_file = NULL;
771         rcu_read_unlock();
772         return exe_file;
773 }
774 EXPORT_SYMBOL(get_mm_exe_file);
775
776 /**
777  * get_task_exe_file - acquire a reference to the task's executable file
778  *
779  * Returns %NULL if task's mm (if any) has no associated executable file or
780  * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
781  * User must release file via fput().
782  */
783 struct file *get_task_exe_file(struct task_struct *task)
784 {
785         struct file *exe_file = NULL;
786         struct mm_struct *mm;
787
788         task_lock(task);
789         mm = task->mm;
790         if (mm) {
791                 if (!(task->flags & PF_KTHREAD))
792                         exe_file = get_mm_exe_file(mm);
793         }
794         task_unlock(task);
795         return exe_file;
796 }
797 EXPORT_SYMBOL(get_task_exe_file);
798
799 /**
800  * get_task_mm - acquire a reference to the task's mm
801  *
802  * Returns %NULL if the task has no mm, or if PF_KTHREAD is set
803  * (meaning this kernel workthread has transiently adopted a user mm
804  * with use_mm, e.g. to do its AIO); otherwise returns a reference to
805  * the mm after bumping up the use count.  User must release the mm
806  * via mmput() after use.  Typically used by /proc and ptrace.
807  */
808 struct mm_struct *get_task_mm(struct task_struct *task)
809 {
810         struct mm_struct *mm;
811
812         task_lock(task);
813         mm = task->mm;
814         if (mm) {
815                 if (task->flags & PF_KTHREAD)
816                         mm = NULL;
817                 else
818                         atomic_inc(&mm->mm_users);
819         }
820         task_unlock(task);
821         return mm;
822 }
823 EXPORT_SYMBOL_GPL(get_task_mm);
824
825 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
826 {
827         struct mm_struct *mm;
828         int err;
829
830         err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
831         if (err)
832                 return ERR_PTR(err);
833
834         mm = get_task_mm(task);
835         if (mm && mm != current->mm &&
836                         !ptrace_may_access(task, mode)) {
837                 mmput(mm);
838                 mm = ERR_PTR(-EACCES);
839         }
840         mutex_unlock(&task->signal->cred_guard_mutex);
841
842         return mm;
843 }
844
845 static void complete_vfork_done(struct task_struct *tsk)
846 {
847         struct completion *vfork;
848
849         task_lock(tsk);
850         vfork = tsk->vfork_done;
851         if (likely(vfork)) {
852                 tsk->vfork_done = NULL;
853                 complete(vfork);
854         }
855         task_unlock(tsk);
856 }
857
858 static int wait_for_vfork_done(struct task_struct *child,
859                                 struct completion *vfork)
860 {
861         int killed;
862
863         freezer_do_not_count();
864         killed = wait_for_completion_killable(vfork);
865         freezer_count();
866
867         if (killed) {
868                 task_lock(child);
869                 child->vfork_done = NULL;
870                 task_unlock(child);
871         }
872
873         put_task_struct(child);
874         return killed;
875 }
876
877 /* Please note the differences between mmput and mm_release.
878  * mmput is called whenever we stop holding onto a mm_struct,
879  * error, success, whatever.
880  *
881  * mm_release is called after a mm_struct has been removed
882  * from the current process.
883  *
884  * This difference is important for error handling, when we
885  * only half set up a mm_struct for a new process and need to restore
886  * the old one.  Because we mmput the new mm_struct before
887  * restoring the old one. . .
888  * Eric Biederman 10 January 1998
889  */
890 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
891 {
892         /* Get rid of any futexes when releasing the mm */
893 #ifdef CONFIG_FUTEX
894         if (unlikely(tsk->robust_list)) {
895                 exit_robust_list(tsk);
896                 tsk->robust_list = NULL;
897         }
898 #ifdef CONFIG_COMPAT
899         if (unlikely(tsk->compat_robust_list)) {
900                 compat_exit_robust_list(tsk);
901                 tsk->compat_robust_list = NULL;
902         }
903 #endif
904         if (unlikely(!list_empty(&tsk->pi_state_list)))
905                 exit_pi_state_list(tsk);
906 #endif
907
908         uprobe_free_utask(tsk);
909
910         /* Get rid of any cached register state */
911         deactivate_mm(tsk, mm);
912
913         /*
914          * Clear the child tid and wake its futex waiters unless we
915          * are exiting with a core dump; in that case we leave the
916          * value intact for debugging purposes.
917          */
918         if (tsk->clear_child_tid) {
919                 if (!(tsk->signal->flags & SIGNAL_GROUP_COREDUMP) &&
920                     atomic_read(&mm->mm_users) > 1) {
921                         /*
922                          * We don't check the error code - if userspace has
923                          * not set up a proper pointer then tough luck.
924                          */
925                         put_user(0, tsk->clear_child_tid);
926                         sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
927                                         1, NULL, NULL, 0);
928                 }
929                 tsk->clear_child_tid = NULL;
930         }
931
932         /*
933          * All done, finally we can wake up parent and return this mm to him.
934          * Also kthread_stop() uses this completion for synchronization.
935          */
936         if (tsk->vfork_done)
937                 complete_vfork_done(tsk);
938 }
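/*
 * Usage sketch (illustrative, from the userspace side): NPTL
 * implements pthread_join() roughly as
 *
 *     pid_t tid = *ptid;
 *     if (tid != 0)
 *             futex(ptid, FUTEX_WAIT, tid, NULL, NULL, 0);
 *
 * where ptid was registered through CLONE_CHILD_CLEARTID.  The
 * clear_child_tid handling above is what stores 0 into that word and
 * issues the matching FUTEX_WAKE once the exiting thread releases its
 * mm.
 */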
939
940 /*
941  * Allocate a new mm structure and copy contents from the
942  * mm structure of the passed in task structure.
943  */
944 static struct mm_struct *dup_mm(struct task_struct *tsk)
945 {
946         struct mm_struct *mm, *oldmm = current->mm;
947         int err;
948
949         mm = allocate_mm();
950         if (!mm)
951                 goto fail_nomem;
952
953         memcpy(mm, oldmm, sizeof(*mm));
954
955         if (!mm_init(mm, tsk, mm->user_ns))
956                 goto fail_nomem;
957
958         err = dup_mmap(mm, oldmm);
959         if (err)
960                 goto free_pt;
961
962         mm->hiwater_rss = get_mm_rss(mm);
963         mm->hiwater_vm = mm->total_vm;
964
965         if (mm->binfmt && !try_module_get(mm->binfmt->module))
966                 goto free_pt;
967
968         return mm;
969
970 free_pt:
971         /* don't put binfmt in mmput, we haven't got module yet */
972         mm->binfmt = NULL;
973         mmput(mm);
974
975 fail_nomem:
976         return NULL;
977 }
978
979 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
980 {
981         struct mm_struct *mm, *oldmm;
982         int retval;
983
984         tsk->min_flt = tsk->maj_flt = 0;
985         tsk->nvcsw = tsk->nivcsw = 0;
986 #ifdef CONFIG_DETECT_HUNG_TASK
987         tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
988 #endif
989
990         tsk->mm = NULL;
991         tsk->active_mm = NULL;
992
993         /*
994          * Are we cloning a kernel thread?
995          *
996          * We need to steal an active VM for that.
997          */
998         oldmm = current->mm;
999         if (!oldmm)
1000                 return 0;
1001
1002         /* initialize the new vmacache entries */
1003         vmacache_flush(tsk);
1004
1005         if (clone_flags & CLONE_VM) {
1006                 atomic_inc(&oldmm->mm_users);
1007                 mm = oldmm;
1008                 goto good_mm;
1009         }
1010
1011         retval = -ENOMEM;
1012         mm = dup_mm(tsk);
1013         if (!mm)
1014                 goto fail_nomem;
1015
1016 good_mm:
1017         tsk->mm = mm;
1018         tsk->active_mm = mm;
1019         return 0;
1020
1021 fail_nomem:
1022         return retval;
1023 }
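/*
 * Added commentary: the two outcomes above match the two classic
 * callers.  fork() passes no CLONE_VM, so the child receives a full
 * dup_mm() copy; pthread_create() passes CLONE_VM (among other
 * flags), so parent and child share a single mm with mm_users bumped.
 */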
1024
1025 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
1026 {
1027         struct fs_struct *fs = current->fs;
1028         if (clone_flags & CLONE_FS) {
1029                 /* tsk->fs is already what we want */
1030                 spin_lock(&fs->lock);
1031                 if (fs->in_exec) {
1032                         spin_unlock(&fs->lock);
1033                         return -EAGAIN;
1034                 }
1035                 fs->users++;
1036                 spin_unlock(&fs->lock);
1037                 return 0;
1038         }
1039         tsk->fs = copy_fs_struct(fs);
1040         if (!tsk->fs)
1041                 return -ENOMEM;
1042         return 0;
1043 }
1044
1045 static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
1046 {
1047         struct files_struct *oldf, *newf;
1048         int error = 0;
1049
1050         /*
1051          * A background process may not have any files ...
1052          */
1053         oldf = current->files;
1054         if (!oldf)
1055                 goto out;
1056
1057         if (clone_flags & CLONE_FILES) {
1058                 atomic_inc(&oldf->count);
1059                 goto out;
1060         }
1061
1062         newf = dup_fd(oldf, &error);
1063         if (!newf)
1064                 goto out;
1065
1066         tsk->files = newf;
1067         error = 0;
1068 out:
1069         return error;
1070 }
1071
1072 static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
1073 {
1074 #ifdef CONFIG_BLOCK
1075         struct io_context *ioc = current->io_context;
1076         struct io_context *new_ioc;
1077
1078         if (!ioc)
1079                 return 0;
1080         /*
1081          * Share io context with parent, if CLONE_IO is set
1082          */
1083         if (clone_flags & CLONE_IO) {
1084                 ioc_task_link(ioc);
1085                 tsk->io_context = ioc;
1086         } else if (ioprio_valid(ioc->ioprio)) {
1087                 new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
1088                 if (unlikely(!new_ioc))
1089                         return -ENOMEM;
1090
1091                 new_ioc->ioprio = ioc->ioprio;
1092                 put_io_context(new_ioc);
1093         }
1094 #endif
1095         return 0;
1096 }
1097
1098 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
1099 {
1100         struct sighand_struct *sig;
1101
1102         if (clone_flags & CLONE_SIGHAND) {
1103                 atomic_inc(&current->sighand->count);
1104                 return 0;
1105         }
1106         sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1107         rcu_assign_pointer(tsk->sighand, sig);
1108         if (!sig)
1109                 return -ENOMEM;
1110
1111         atomic_set(&sig->count, 1);
1112         spin_lock_irq(&current->sighand->siglock);
1113         memcpy(sig->action, current->sighand->action, sizeof(sig->action));
1114         spin_unlock_irq(&current->sighand->siglock);
1115         return 0;
1116 }
1117
1118 void __cleanup_sighand(struct sighand_struct *sighand)
1119 {
1120         if (atomic_dec_and_test(&sighand->count)) {
1121                 signalfd_cleanup(sighand);
1122                 /*
1123                  * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it
1124                  * without an RCU grace period, see __lock_task_sighand().
1125                  */
1126                 kmem_cache_free(sighand_cachep, sighand);
1127         }
1128 }
1129
1130 /*
1131  * Initialize POSIX timer handling for a thread group.
1132  */
1133 static void posix_cpu_timers_init_group(struct signal_struct *sig)
1134 {
1135         unsigned long cpu_limit;
1136
1137         cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1138         if (cpu_limit != RLIM_INFINITY) {
1139                 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
1140                 sig->cputimer.running = true;
1141         }
1142
1143         /* The timer lists. */
1144         INIT_LIST_HEAD(&sig->cpu_timers[0]);
1145         INIT_LIST_HEAD(&sig->cpu_timers[1]);
1146         INIT_LIST_HEAD(&sig->cpu_timers[2]);
1147 }
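/*
 * Note (added): the three cpu_timers lists are indexed by clock type,
 * cpu_timers[0..2] holding CPUCLOCK_PROF, CPUCLOCK_VIRT and
 * CPUCLOCK_SCHED timers respectively.  posix_cpu_timers_init() below
 * uses the same layout for the per-task lists.
 */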
1148
1149 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1150 {
1151         struct signal_struct *sig;
1152
1153         if (clone_flags & CLONE_THREAD)
1154                 return 0;
1155
1156         sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
1157         tsk->signal = sig;
1158         if (!sig)
1159                 return -ENOMEM;
1160
1161         sig->nr_threads = 1;
1162         atomic_set(&sig->live, 1);
1163         atomic_set(&sig->sigcnt, 1);
1164
1165         /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
1166         sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
1167         tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
1168
1169         init_waitqueue_head(&sig->wait_chldexit);
1170         sig->curr_target = tsk;
1171         init_sigpending(&sig->shared_pending);
1172         INIT_LIST_HEAD(&sig->posix_timers);
1173         seqlock_init(&sig->stats_lock);
1174         prev_cputime_init(&sig->prev_cputime);
1175
1176         hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1177         sig->real_timer.function = it_real_fn;
1178
1179         task_lock(current->group_leader);
1180         memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1181         task_unlock(current->group_leader);
1182
1183         posix_cpu_timers_init_group(sig);
1184
1185         tty_audit_fork(sig);
1186         sched_autogroup_fork(sig);
1187
1188         sig->oom_score_adj = current->signal->oom_score_adj;
1189         sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1190
1191         sig->has_child_subreaper = current->signal->has_child_subreaper ||
1192                                    current->signal->is_child_subreaper;
1193
1194         mutex_init(&sig->cred_guard_mutex);
1195
1196         return 0;
1197 }
1198
1199 static void copy_seccomp(struct task_struct *p)
1200 {
1201 #ifdef CONFIG_SECCOMP
1202         /*
1203          * Must be called with sighand->lock held, which is common to
1204          * all threads in the group. Holding cred_guard_mutex is not
1205          * needed because this new task is not yet running and cannot
1206          * be racing exec.
1207          */
1208         assert_spin_locked(&current->sighand->siglock);
1209
1210         /* Ref-count the new filter user, and assign it. */
1211         get_seccomp_filter(current);
1212         p->seccomp = current->seccomp;
1213
1214         /*
1215          * Explicitly enable no_new_privs here in case it got set
1216          * between the task_struct being duplicated and holding the
1217          * sighand lock. The seccomp state and nnp must be in sync.
1218          */
1219         if (task_no_new_privs(current))
1220                 task_set_no_new_privs(p);
1221
1222         /*
1223          * If the parent gained a seccomp mode after the thread flags
1224          * were copied but before we took the sighand lock, we have
1225          * to manually enable the seccomp thread flag here.
1226          */
1227         if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
1228                 set_tsk_thread_flag(p, TIF_SECCOMP);
1229 #endif
1230 }
1231
1232 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1233 {
1234         current->clear_child_tid = tidptr;
1235
1236         return task_pid_vnr(current);
1237 }
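/*
 * Usage note (illustrative): C libraries typically invoke this once at
 * thread-library startup, e.g. syscall(SYS_set_tid_address, &tid), so
 * that the kernel clears and futex-wakes that word on exit; see the
 * clear_child_tid handling in mm_release() above.
 */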
1238
1239 static void rt_mutex_init_task(struct task_struct *p)
1240 {
1241         raw_spin_lock_init(&p->pi_lock);
1242 #ifdef CONFIG_RT_MUTEXES
1243         p->pi_waiters = RB_ROOT;
1244         p->pi_waiters_leftmost = NULL;
1245         p->pi_blocked_on = NULL;
1246 #endif
1247 }
1248
1249 /*
1250  * Initialize POSIX timer handling for a single task.
1251  */
1252 static void posix_cpu_timers_init(struct task_struct *tsk)
1253 {
1254         tsk->cputime_expires.prof_exp = 0;
1255         tsk->cputime_expires.virt_exp = 0;
1256         tsk->cputime_expires.sched_exp = 0;
1257         INIT_LIST_HEAD(&tsk->cpu_timers[0]);
1258         INIT_LIST_HEAD(&tsk->cpu_timers[1]);
1259         INIT_LIST_HEAD(&tsk->cpu_timers[2]);
1260 }
1261
1262 static inline void
1263 init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
1264 {
1265          task->pids[type].pid = pid;
1266 }
1267
1268 /*
1269  * This creates a new process as a copy of the old one,
1270  * but does not actually start it yet.
1271  *
1272  * It copies the registers, and all the appropriate
1273  * parts of the process environment (as per the clone
1274  * flags). The actual kick-off is left to the caller.
1275  */
1276 static struct task_struct *copy_process(unsigned long clone_flags,
1277                                         unsigned long stack_start,
1278                                         unsigned long stack_size,
1279                                         int __user *child_tidptr,
1280                                         struct pid *pid,
1281                                         int trace,
1282                                         unsigned long tls,
1283                                         int node)
1284 {
1285         int retval;
1286         struct task_struct *p;
1287         void *cgrp_ss_priv[CGROUP_CANFORK_COUNT] = {};
1288
1289         if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1290                 return ERR_PTR(-EINVAL);
1291
1292         if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
1293                 return ERR_PTR(-EINVAL);
1294
1295         /*
1296          * Thread groups must share signals as well, and detached threads
1297          * can only be started up within the thread group.
1298          */
1299         if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
1300                 return ERR_PTR(-EINVAL);
1301
1302         /*
1303          * Shared signal handlers imply shared VM. By way of the above,
1304          * thread groups also imply shared VM. Blocking this case allows
1305          * for various simplifications in other code.
1306          */
1307         if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
1308                 return ERR_PTR(-EINVAL);
1309
1310         /*
1311          * Siblings of global init remain as zombies on exit since they are
1312          * not reaped by their parent (swapper). To solve this and to avoid
1313          * multi-rooted process trees, prevent global and container-inits
1314          * from creating siblings.
1315          */
1316         if ((clone_flags & CLONE_PARENT) &&
1317                                 current->signal->flags & SIGNAL_UNKILLABLE)
1318                 return ERR_PTR(-EINVAL);
1319
1320         /*
1321          * If the new process will be in a different pid or user namespace
1322          * do not allow it to share a thread group with the forking task.
1323          */
1324         if (clone_flags & CLONE_THREAD) {
1325                 if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
1326                     (task_active_pid_ns(current) !=
1327                                 current->nsproxy->pid_ns_for_children))
1328                         return ERR_PTR(-EINVAL);
1329         }
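        /*
         * Example (added for illustration): clone(CLONE_THREAD) without
         * CLONE_SIGHAND, or CLONE_SIGHAND without CLONE_VM, fails one
         * of the checks above with -EINVAL; creating a thread requires
         * passing CLONE_VM | CLONE_SIGHAND | CLONE_THREAD together.
         */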
1330
1331         retval = security_task_create(clone_flags);
1332         if (retval)
1333                 goto fork_out;
1334
1335         retval = -ENOMEM;
1336         p = dup_task_struct(current, node);
1337         if (!p)
1338                 goto fork_out;
1339
1340         /*
1341          * This _must_ happen before we call free_task(), i.e. before we jump
1342          * to any of the bad_fork_* labels. This is to avoid freeing
1343          * p->set_child_tid which is (ab)used as a kthread's data pointer for
1344          * kernel threads (PF_KTHREAD).
1345          */
1346         p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1347         /*
1348          * Clear TID on mm_release()?
1349          */
1350         p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1351
1352         ftrace_graph_init_task(p);
1353
1354         rt_mutex_init_task(p);
1355
1356 #ifdef CONFIG_PROVE_LOCKING
1357         DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
1358         DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
1359 #endif
1360         retval = -EAGAIN;
1361         if (atomic_read(&p->real_cred->user->processes) >=
1362                         task_rlimit(p, RLIMIT_NPROC)) {
1363                 if (p->real_cred->user != INIT_USER &&
1364                     !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
1365                         goto bad_fork_free;
1366         }
1367         current->flags &= ~PF_NPROC_EXCEEDED;
1368
1369         retval = copy_creds(p, clone_flags);
1370         if (retval < 0)
1371                 goto bad_fork_free;
1372
1373         /*
1374          * If multiple threads are within copy_process(), then this check
1375          * triggers too late. This doesn't hurt; the check is only there
1376          * to stop root fork bombs.
1377          */
1378         retval = -EAGAIN;
1379         if (nr_threads >= max_threads)
1380                 goto bad_fork_cleanup_count;
1381
1382         delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
1383         p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
1384         p->flags |= PF_FORKNOEXEC;
1385         INIT_LIST_HEAD(&p->children);
1386         INIT_LIST_HEAD(&p->sibling);
1387         rcu_copy_process(p);
1388         p->vfork_done = NULL;
1389         spin_lock_init(&p->alloc_lock);
1390
1391         init_sigpending(&p->pending);
1392
1393         p->utime = p->stime = p->gtime = 0;
1394         p->utimescaled = p->stimescaled = 0;
1395         prev_cputime_init(&p->prev_cputime);
1396
1397 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1398         seqlock_init(&p->vtime_seqlock);
1399         p->vtime_snap = 0;
1400         p->vtime_snap_whence = VTIME_SLEEPING;
1401 #endif
1402
1403 #if defined(SPLIT_RSS_COUNTING)
1404         memset(&p->rss_stat, 0, sizeof(p->rss_stat));
1405 #endif
1406
1407         p->default_timer_slack_ns = current->timer_slack_ns;
1408
1409         task_io_accounting_init(&p->ioac);
1410         acct_clear_integrals(p);
1411
1412         posix_cpu_timers_init(p);
1413
1414         p->io_context = NULL;
1415         p->audit_context = NULL;
1416         cgroup_fork(p);
1417 #ifdef CONFIG_NUMA
1418         p->mempolicy = mpol_dup(p->mempolicy);
1419         if (IS_ERR(p->mempolicy)) {
1420                 retval = PTR_ERR(p->mempolicy);
1421                 p->mempolicy = NULL;
1422                 goto bad_fork_cleanup_threadgroup_lock;
1423         }
1424 #endif
1425 #ifdef CONFIG_CPUSETS
1426         p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
1427         p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
1428         seqcount_init(&p->mems_allowed_seq);
1429 #endif
1430 #ifdef CONFIG_TRACE_IRQFLAGS
1431         p->irq_events = 0;
1432         p->hardirqs_enabled = 0;
1433         p->hardirq_enable_ip = 0;
1434         p->hardirq_enable_event = 0;
1435         p->hardirq_disable_ip = _THIS_IP_;
1436         p->hardirq_disable_event = 0;
1437         p->softirqs_enabled = 1;
1438         p->softirq_enable_ip = _THIS_IP_;
1439         p->softirq_enable_event = 0;
1440         p->softirq_disable_ip = 0;
1441         p->softirq_disable_event = 0;
1442         p->hardirq_context = 0;
1443         p->softirq_context = 0;
1444 #endif
1445
1446         p->pagefault_disabled = 0;
1447
1448 #ifdef CONFIG_LOCKDEP
1449         p->lockdep_depth = 0; /* no locks held yet */
1450         p->curr_chain_key = 0;
1451         p->lockdep_recursion = 0;
1452 #endif
1453
1454 #ifdef CONFIG_DEBUG_MUTEXES
1455         p->blocked_on = NULL; /* not blocked yet */
1456 #endif
1457 #ifdef CONFIG_BCACHE
1458         p->sequential_io        = 0;
1459         p->sequential_io_avg    = 0;
1460 #endif
1461
1462         /* Perform scheduler related setup. Assign this task to a CPU. */
1463         retval = sched_fork(clone_flags, p);
1464         if (retval)
1465                 goto bad_fork_cleanup_policy;
1466
1467         retval = perf_event_init_task(p);
1468         if (retval)
1469                 goto bad_fork_cleanup_policy;
1470         retval = audit_alloc(p);
1471         if (retval)
1472                 goto bad_fork_cleanup_perf;
1473         /* copy all the process information */
1474         shm_init_task(p);
1475         retval = copy_semundo(clone_flags, p);
1476         if (retval)
1477                 goto bad_fork_cleanup_audit;
1478         retval = copy_files(clone_flags, p);
1479         if (retval)
1480                 goto bad_fork_cleanup_semundo;
1481         retval = copy_fs(clone_flags, p);
1482         if (retval)
1483                 goto bad_fork_cleanup_files;
1484         retval = copy_sighand(clone_flags, p);
1485         if (retval)
1486                 goto bad_fork_cleanup_fs;
1487         retval = copy_signal(clone_flags, p);
1488         if (retval)
1489                 goto bad_fork_cleanup_sighand;
1490         retval = copy_mm(clone_flags, p);
1491         if (retval)
1492                 goto bad_fork_cleanup_signal;
1493         retval = copy_namespaces(clone_flags, p);
1494         if (retval)
1495                 goto bad_fork_cleanup_mm;
1496         retval = copy_io(clone_flags, p);
1497         if (retval)
1498                 goto bad_fork_cleanup_namespaces;
1499         retval = copy_thread_tls(clone_flags, stack_start, stack_size, p, tls);
1500         if (retval)
1501                 goto bad_fork_cleanup_io;
1502
1503         if (pid != &init_struct_pid) {
1504                 pid = alloc_pid(p->nsproxy->pid_ns_for_children);
1505                 if (IS_ERR(pid)) {
1506                         retval = PTR_ERR(pid);
1507                         goto bad_fork_cleanup_io;
1508                 }
1509         }
1510
1511 #ifdef CONFIG_BLOCK
1512         p->plug = NULL;
1513 #endif
1514 #ifdef CONFIG_FUTEX
1515         p->robust_list = NULL;
1516 #ifdef CONFIG_COMPAT
1517         p->compat_robust_list = NULL;
1518 #endif
1519         INIT_LIST_HEAD(&p->pi_state_list);
1520         p->pi_state_cache = NULL;
1521 #endif
1522         /*
1523          * sigaltstack should be cleared when sharing the same VM
1524          */
1525         if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
1526                 p->sas_ss_sp = p->sas_ss_size = 0;
1527
1528         /*
1529          * Syscall tracing and stepping should be turned off in the
1530          * child regardless of CLONE_PTRACE.
1531          */
1532         user_disable_single_step(p);
1533         clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1534 #ifdef TIF_SYSCALL_EMU
1535         clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
1536 #endif
1537         clear_all_latency_tracing(p);
1538
1539         /* ok, now we should be set up.. */
1540         p->pid = pid_nr(pid);
1541         if (clone_flags & CLONE_THREAD) {
1542                 p->exit_signal = -1;
1543                 p->group_leader = current->group_leader;
1544                 p->tgid = current->tgid;
1545         } else {
1546                 if (clone_flags & CLONE_PARENT)
1547                         p->exit_signal = current->group_leader->exit_signal;
1548                 else
1549                         p->exit_signal = (clone_flags & CSIGNAL);
1550                 p->group_leader = p;
1551                 p->tgid = p->pid;
1552         }
1553
1554         p->nr_dirtied = 0;
1555         p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
1556         p->dirty_paused_when = 0;
1557
1558         p->pdeath_signal = 0;
1559         INIT_LIST_HEAD(&p->thread_group);
1560         p->task_works = NULL;
1561
1562         threadgroup_change_begin(current);
1563         /*
1564          * Ensure that the cgroup subsystem policies allow the new process to be
1565          * forked. It should be noted that the new process's css_set can be changed
1566          * between here and cgroup_post_fork() if an organisation operation is in
1567          * progress.
1568          */
1569         retval = cgroup_can_fork(p, cgrp_ss_priv);
1570         if (retval)
1571                 goto bad_fork_free_pid;
1572
1573         /*
1574          * From this point on we must avoid any synchronous user-space
1575          * communication until we take the tasklist-lock. In particular, we do
1576          * not want user-space to be able to predict the process start-time by
1577          * stalling fork(2) after we recorded the start_time but before it is
1578          * visible to the system.
1579          */
1580
1581         p->start_time = ktime_get_ns();
1582         p->real_start_time = ktime_get_boot_ns();
1583
1584         /*
1585          * Make it visible to the rest of the system, but don't wake it up yet.
1586          * Need tasklist lock for parent etc handling!
1587          */
1588         write_lock_irq(&tasklist_lock);
1589
1590         /* CLONE_PARENT re-uses the old parent */
1591         if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
1592                 p->real_parent = current->real_parent;
1593                 p->parent_exec_id = current->parent_exec_id;
1594         } else {
1595                 p->real_parent = current;
1596                 p->parent_exec_id = current->self_exec_id;
1597         }
1598
1599         spin_lock(&current->sighand->siglock);
1600
1601         /*
1602          * Copy seccomp details explicitly here, in case they were changed
1603          * before holding sighand lock.
1604          */
1605         copy_seccomp(p);
1606
1607         /*
1608          * Process group and session signals need to be delivered to just the
1609          * parent before the fork or both the parent and the child after the
1610          * fork. Restart if a signal comes in before we add the new process to
1611          * its process group.
1612          * A fatal signal pending means that current will exit, so the new
1613          * thread can't slip out of an OOM kill (or normal SIGKILL).
1614          */
1615         recalc_sigpending();
1616         if (signal_pending(current)) {
1617                 retval = -ERESTARTNOINTR;
1618                 goto bad_fork_cancel_cgroup;
1619         }
1620         if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
1621                 retval = -ENOMEM;
1622                 goto bad_fork_cancel_cgroup;
1623         }
1624
1625         if (likely(p->pid)) {
1626                 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
1627
1628                 init_task_pid(p, PIDTYPE_PID, pid);
1629                 if (thread_group_leader(p)) {
1630                         init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
1631                         init_task_pid(p, PIDTYPE_SID, task_session(current));
1632
1633                         if (is_child_reaper(pid)) {
1634                                 ns_of_pid(pid)->child_reaper = p;
1635                                 p->signal->flags |= SIGNAL_UNKILLABLE;
1636                         }
1637
1638                         p->signal->leader_pid = pid;
1639                         p->signal->tty = tty_kref_get(current->signal->tty);
1640                         list_add_tail(&p->sibling, &p->real_parent->children);
1641                         list_add_tail_rcu(&p->tasks, &init_task.tasks);
1642                         attach_pid(p, PIDTYPE_PGID);
1643                         attach_pid(p, PIDTYPE_SID);
1644                         __this_cpu_inc(process_counts);
1645                 } else {
1646                         current->signal->nr_threads++;
1647                         atomic_inc(&current->signal->live);
1648                         atomic_inc(&current->signal->sigcnt);
1649                         list_add_tail_rcu(&p->thread_group,
1650                                           &p->group_leader->thread_group);
1651                         list_add_tail_rcu(&p->thread_node,
1652                                           &p->signal->thread_head);
1653                 }
1654                 attach_pid(p, PIDTYPE_PID);
1655                 nr_threads++;
1656         }
1657
1658         total_forks++;
1659         spin_unlock(&current->sighand->siglock);
1660         syscall_tracepoint_update(p);
1661         write_unlock_irq(&tasklist_lock);
1662
1663         proc_fork_connector(p);
1664         cgroup_post_fork(p, cgrp_ss_priv);
1665         threadgroup_change_end(current);
1666         perf_event_fork(p);
1667
1668         trace_task_newtask(p, clone_flags);
1669         uprobe_copy_process(p, clone_flags);
1670
1671         return p;
1672
1673 bad_fork_cancel_cgroup:
1674         spin_unlock(&current->sighand->siglock);
1675         write_unlock_irq(&tasklist_lock);
1676         cgroup_cancel_fork(p, cgrp_ss_priv);
1677 bad_fork_free_pid:
1678         threadgroup_change_end(current);
1679         if (pid != &init_struct_pid)
1680                 free_pid(pid);
1681 bad_fork_cleanup_io:
1682         if (p->io_context)
1683                 exit_io_context(p);
1684 bad_fork_cleanup_namespaces:
1685         exit_task_namespaces(p);
1686 bad_fork_cleanup_mm:
1687         if (p->mm)
1688                 mmput(p->mm);
1689 bad_fork_cleanup_signal:
1690         if (!(clone_flags & CLONE_THREAD))
1691                 free_signal_struct(p->signal);
1692 bad_fork_cleanup_sighand:
1693         __cleanup_sighand(p->sighand);
1694 bad_fork_cleanup_fs:
1695         exit_fs(p); /* blocking */
1696 bad_fork_cleanup_files:
1697         exit_files(p); /* blocking */
1698 bad_fork_cleanup_semundo:
1699         exit_sem(p);
1700 bad_fork_cleanup_audit:
1701         audit_free(p);
1702 bad_fork_cleanup_perf:
1703         perf_event_free_task(p);
1704 bad_fork_cleanup_policy:
1705 #ifdef CONFIG_NUMA
1706         mpol_put(p->mempolicy);
1707 bad_fork_cleanup_threadgroup_lock:
1708 #endif
1709         delayacct_tsk_free(p);
1710 bad_fork_cleanup_count:
1711         atomic_dec(&p->cred->user->processes);
1712         exit_creds(p);
1713 bad_fork_free:
1714         free_task(p);
1715 fork_out:
1716         return ERR_PTR(retval);
1717 }
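/*
 * Illustrative sketch (not part of this file): the bad_fork_* labels above
 * follow the kernel's goto-based unwind idiom -- each failure jumps to the
 * label that releases everything acquired so far, in reverse order of
 * acquisition.  A minimal, hypothetical example of the same pattern
 * (struct widget and the alloc_a/alloc_b/free_a helpers are made up):
 */
#if 0
static int setup_widget(struct widget *w)
{
	int err;

	err = alloc_a(w);		/* first resource */
	if (err)
		goto out;
	err = alloc_b(w);		/* second resource */
	if (err)
		goto free_a;		/* undo only what has succeeded */
	return 0;

free_a:
	free_a(w);
out:
	return err;
}
#endif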
1718
1719 static inline void init_idle_pids(struct pid_link *links)
1720 {
1721         enum pid_type type;
1722
1723         for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
1724                 INIT_HLIST_NODE(&links[type].node); /* not really needed */
1725                 links[type].pid = &init_struct_pid;
1726         }
1727 }
1728
1729 struct task_struct *fork_idle(int cpu)
1730 {
1731         struct task_struct *task;
1732         task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
1733                             cpu_to_node(cpu));
1734         if (!IS_ERR(task)) {
1735                 init_idle_pids(task->pids);
1736                 init_idle(task, cpu);
1737         }
1738
1739         return task;
1740 }
1741
1742 /*
1743  *  Ok, this is the main fork-routine.
1744  *
1745  * It copies the process, and if successful kick-starts
1746  * it and waits for it to finish using the VM if required.
1747  */
1748 long _do_fork(unsigned long clone_flags,
1749               unsigned long stack_start,
1750               unsigned long stack_size,
1751               int __user *parent_tidptr,
1752               int __user *child_tidptr,
1753               unsigned long tls)
1754 {
1755         struct task_struct *p;
1756         int trace = 0;
1757         long nr;
1758
1759         /*
1760          * Determine whether and which event to report to ptracer.  When
1761          * called from kernel_thread or CLONE_UNTRACED is explicitly
1762          * requested, no event is reported; otherwise, report if the event
1763          * for the type of forking is enabled.
1764          */
1765         if (!(clone_flags & CLONE_UNTRACED)) {
1766                 if (clone_flags & CLONE_VFORK)
1767                         trace = PTRACE_EVENT_VFORK;
1768                 else if ((clone_flags & CSIGNAL) != SIGCHLD)
1769                         trace = PTRACE_EVENT_CLONE;
1770                 else
1771                         trace = PTRACE_EVENT_FORK;
1772
1773                 if (likely(!ptrace_event_enabled(current, trace)))
1774                         trace = 0;
1775         }
1776
1777         p = copy_process(clone_flags, stack_start, stack_size,
1778                          child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
1779         /*
1780          * Do this prior to waking up the new thread - the thread pointer
1781          * might become invalid after that point, if the thread exits quickly.
1782          */
1783         if (!IS_ERR(p)) {
1784                 struct completion vfork;
1785                 struct pid *pid;
1786
1787                 trace_sched_process_fork(current, p);
1788
1789                 pid = get_task_pid(p, PIDTYPE_PID);
1790                 nr = pid_vnr(pid);
1791
1792                 if (clone_flags & CLONE_PARENT_SETTID)
1793                         put_user(nr, parent_tidptr);
1794
1795                 if (clone_flags & CLONE_VFORK) {
1796                         p->vfork_done = &vfork;
1797                         init_completion(&vfork);
1798                         get_task_struct(p);
1799                 }
1800
1801                 wake_up_new_task(p);
1802
1803                 /* forking complete and child started to run, tell ptracer */
1804                 if (unlikely(trace))
1805                         ptrace_event_pid(trace, pid);
1806
1807                 if (clone_flags & CLONE_VFORK) {
1808                         if (!wait_for_vfork_done(p, &vfork))
1809                                 ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
1810                 }
1811
1812                 put_pid(pid);
1813         } else {
1814                 nr = PTR_ERR(p);
1815         }
1816         return nr;
1817 }
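/*
 * Illustrative sketch (not part of this file): the CLONE_VFORK handling
 * above is what makes vfork(2) block the parent in wait_for_vfork_done()
 * until the child calls execve() or _exit().  A minimal, hypothetical
 * user-space caller:
 */
#if 0
#include <unistd.h>
#include <sys/wait.h>

static int spawn_ls(void)
{
	pid_t pid = vfork();	/* parent sleeps until the child execs/exits */

	if (pid == 0) {
		execl("/bin/ls", "ls", (char *)NULL);
		_exit(127);	/* only _exit() is safe after a failed exec */
	}
	return pid < 0 ? -1 : waitpid(pid, NULL, 0);
}
#endif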
1818
1819 #ifndef CONFIG_HAVE_COPY_THREAD_TLS
1820 /* For compatibility with architectures that call do_fork directly rather than
1821  * using the syscall entry points below. */
1822 long do_fork(unsigned long clone_flags,
1823               unsigned long stack_start,
1824               unsigned long stack_size,
1825               int __user *parent_tidptr,
1826               int __user *child_tidptr)
1827 {
1828         return _do_fork(clone_flags, stack_start, stack_size,
1829                         parent_tidptr, child_tidptr, 0);
1830 }
1831 #endif
1832
1833 /*
1834  * Create a kernel thread.
1835  */
1836 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
1837 {
1838         return _do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
1839                 (unsigned long)arg, NULL, NULL, 0);
1840 }
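/*
 * Illustrative sketch (not part of this file): most in-kernel callers do
 * not use kernel_thread() directly but the kthread API layered on top of
 * it.  A minimal, hypothetical worker thread:
 */
#if 0
#include <linux/kthread.h>

static int my_worker(void *data)
{
	while (!kthread_should_stop())
		schedule();	/* a real worker would do work and sleep properly */
	return 0;
}

static struct task_struct *start_worker(void)
{
	return kthread_run(my_worker, NULL, "my_worker");	/* create + wake up */
}
#endif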
1841
1842 #ifdef __ARCH_WANT_SYS_FORK
1843 SYSCALL_DEFINE0(fork)
1844 {
1845 #ifdef CONFIG_MMU
1846         return _do_fork(SIGCHLD, 0, 0, NULL, NULL, 0);
1847 #else
1848         /* cannot be supported in nommu mode */
1849         return -EINVAL;
1850 #endif
1851 }
1852 #endif
1853
1854 #ifdef __ARCH_WANT_SYS_VFORK
1855 SYSCALL_DEFINE0(vfork)
1856 {
1857         return _do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
1858                         0, NULL, NULL, 0);
1859 }
1860 #endif
1861
1862 #ifdef __ARCH_WANT_SYS_CLONE
1863 #ifdef CONFIG_CLONE_BACKWARDS
1864 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
1865                  int __user *, parent_tidptr,
1866                  unsigned long, tls,
1867                  int __user *, child_tidptr)
1868 #elif defined(CONFIG_CLONE_BACKWARDS2)
1869 SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
1870                  int __user *, parent_tidptr,
1871                  int __user *, child_tidptr,
1872                  unsigned long, tls)
1873 #elif defined(CONFIG_CLONE_BACKWARDS3)
1874 SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
1875                 int, stack_size,
1876                 int __user *, parent_tidptr,
1877                 int __user *, child_tidptr,
1878                 unsigned long, tls)
1879 #else
1880 SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
1881                  int __user *, parent_tidptr,
1882                  int __user *, child_tidptr,
1883                  unsigned long, tls)
1884 #endif
1885 {
1886         return _do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr, tls);
1887 }
1888 #endif
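/*
 * Illustrative sketch (not part of this file): the CONFIG_CLONE_BACKWARDS*
 * variants above only reorder arguments for legacy ABIs; the semantics are
 * identical.  A raw user-space call using the default layout might look
 * like this (hypothetical; the argument order shown is the x86-64 one):
 */
#if 0
#define _GNU_SOURCE
#include <unistd.h>
#include <signal.h>
#include <sys/syscall.h>

static long raw_fork(void)
{
	/* flags, child stack (0 = share parent's layout), parent_tidptr, child_tidptr, tls */
	return syscall(SYS_clone, SIGCHLD, 0, NULL, NULL, 0);
}
#endif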
1889
1890 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
1891 #define ARCH_MIN_MMSTRUCT_ALIGN 0
1892 #endif
1893
1894 static void sighand_ctor(void *data)
1895 {
1896         struct sighand_struct *sighand = data;
1897
1898         spin_lock_init(&sighand->siglock);
1899         init_waitqueue_head(&sighand->signalfd_wqh);
1900 }
1901
1902 void __init proc_caches_init(void)
1903 {
1904         sighand_cachep = kmem_cache_create("sighand_cache",
1905                         sizeof(struct sighand_struct), 0,
1906                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
1907                         SLAB_NOTRACK, sighand_ctor);
1908         signal_cachep = kmem_cache_create("signal_cache",
1909                         sizeof(struct signal_struct), 0,
1910                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1911         files_cachep = kmem_cache_create("files_cache",
1912                         sizeof(struct files_struct), 0,
1913                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1914         fs_cachep = kmem_cache_create("fs_cache",
1915                         sizeof(struct fs_struct), 0,
1916                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1917         /*
1918          * FIXME! The "sizeof(struct mm_struct)" currently includes the
1919          * whole struct cpumask for the OFFSTACK case. We could change
1920          * this to *only* allocate as much of it as required by the
1921          * maximum number of CPUs we can ever have.  The cpumask_allocation
1922          * is at the end of the structure, exactly for that reason.
1923          */
1924         mm_cachep = kmem_cache_create("mm_struct",
1925                         sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
1926                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1927         vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
1928         mmap_init();
1929         nsproxy_cache_init();
1930 }
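/*
 * Illustrative sketch (not part of this file): objects are later carved
 * out of the caches created above with kmem_cache_alloc() and returned
 * with kmem_cache_free().  A minimal, hypothetical cache user (struct foo
 * is made up):
 */
#if 0
static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static struct foo *foo_alloc(void)
{
	return kmem_cache_alloc(foo_cachep, GFP_KERNEL);
}

static void foo_free(struct foo *f)
{
	kmem_cache_free(foo_cachep, f);
}
#endif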
1931
1932 /*
1933  * Check constraints on flags passed to the unshare system call.
1934  */
1935 static int check_unshare_flags(unsigned long unshare_flags)
1936 {
1937         if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1938                                 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1939                                 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
1940                                 CLONE_NEWUSER|CLONE_NEWPID))
1941                 return -EINVAL;
1942         /*
1943          * Not implemented, but pretend it works if there is nothing
1944          * to unshare.  Note that unsharing the address space or the
1945          * signal handlers also requires unsharing the signal queues (aka
1946          * CLONE_THREAD).
1947          */
1948         if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
1949                 if (!thread_group_empty(current))
1950                         return -EINVAL;
1951         }
1952         if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
1953                 if (atomic_read(&current->sighand->count) > 1)
1954                         return -EINVAL;
1955         }
1956         if (unshare_flags & CLONE_VM) {
1957                 if (!current_is_single_threaded())
1958                         return -EINVAL;
1959         }
1960
1961         return 0;
1962 }
1963
1964 /*
1965  * Unshare the filesystem structure if it is being shared
1966  */
1967 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
1968 {
1969         struct fs_struct *fs = current->fs;
1970
1971         if (!(unshare_flags & CLONE_FS) || !fs)
1972                 return 0;
1973
1974         /* no lock needed here; in the worst case we'll do a useless copy */
1975         if (fs->users == 1)
1976                 return 0;
1977
1978         *new_fsp = copy_fs_struct(fs);
1979         if (!*new_fsp)
1980                 return -ENOMEM;
1981
1982         return 0;
1983 }
1984
1985 /*
1986  * Unshare file descriptor table if it is being shared
1987  */
1988 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
1989 {
1990         struct files_struct *fd = current->files;
1991         int error = 0;
1992
1993         if ((unshare_flags & CLONE_FILES) &&
1994             (fd && atomic_read(&fd->count) > 1)) {
1995                 *new_fdp = dup_fd(fd, &error);
1996                 if (!*new_fdp)
1997                         return error;
1998         }
1999
2000         return 0;
2001 }
2002
2003 /*
2004  * unshare allows a process to 'unshare' part of the process
2005  * context which was originally shared using clone.  copy_*
2006  * functions used by do_fork() cannot be used here directly
2007  * because they modify an inactive task_struct that is being
2008  * constructed. Here we are modifying the current, active,
2009          * constructed. Here we are modifying the current, active
2010  */
2011 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
2012 {
2013         struct fs_struct *fs, *new_fs = NULL;
2014         struct files_struct *fd, *new_fd = NULL;
2015         struct cred *new_cred = NULL;
2016         struct nsproxy *new_nsproxy = NULL;
2017         int do_sysvsem = 0;
2018         int err;
2019
2020         /*
2021          * If unsharing a user namespace, we must also unshare the thread
2022          * group and the filesystem root and working directories.
2023          */
2024         if (unshare_flags & CLONE_NEWUSER)
2025                 unshare_flags |= CLONE_THREAD | CLONE_FS;
2026         /*
2027          * If unsharing the vm, we must also unshare the signal handlers.
2028          */
2029         if (unshare_flags & CLONE_VM)
2030                 unshare_flags |= CLONE_SIGHAND;
2031         /*
2032          * If unsharing signal handlers, we must also unshare the signal queues.
2033          */
2034         if (unshare_flags & CLONE_SIGHAND)
2035                 unshare_flags |= CLONE_THREAD;
2036         /*
2037          * If unsharing a namespace, we must also unshare the filesystem information.
2038          */
2039         if (unshare_flags & CLONE_NEWNS)
2040                 unshare_flags |= CLONE_FS;
2041
2042         err = check_unshare_flags(unshare_flags);
2043         if (err)
2044                 goto bad_unshare_out;
2045         /*
2046          * CLONE_NEWIPC must also detach from the undolist: after switching
2047          * to a new ipc namespace, the semaphore arrays from the old
2048          * namespace are unreachable.
2049          */
2050         if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
2051                 do_sysvsem = 1;
2052         err = unshare_fs(unshare_flags, &new_fs);
2053         if (err)
2054                 goto bad_unshare_out;
2055         err = unshare_fd(unshare_flags, &new_fd);
2056         if (err)
2057                 goto bad_unshare_cleanup_fs;
2058         err = unshare_userns(unshare_flags, &new_cred);
2059         if (err)
2060                 goto bad_unshare_cleanup_fd;
2061         err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
2062                                          new_cred, new_fs);
2063         if (err)
2064                 goto bad_unshare_cleanup_cred;
2065
2066         if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
2067                 if (do_sysvsem) {
2068                         /*
2069                          * CLONE_SYSVSEM is equivalent to sys_exit().
2070                          */
2071                         exit_sem(current);
2072                 }
2073                 if (unshare_flags & CLONE_NEWIPC) {
2074                         /* Orphan segments in old ns (see sem above). */
2075                         exit_shm(current);
2076                         shm_init_task(current);
2077                 }
2078
2079                 if (new_nsproxy)
2080                         switch_task_namespaces(current, new_nsproxy);
2081
2082                 task_lock(current);
2083
2084                 if (new_fs) {
2085                         fs = current->fs;
2086                         spin_lock(&fs->lock);
2087                         current->fs = new_fs;
2088                         if (--fs->users)
2089                                 new_fs = NULL;
2090                         else
2091                                 new_fs = fs;
2092                         spin_unlock(&fs->lock);
2093                 }
2094
2095                 if (new_fd) {
2096                         fd = current->files;
2097                         current->files = new_fd;
2098                         new_fd = fd;
2099                 }
2100
2101                 task_unlock(current);
2102
2103                 if (new_cred) {
2104                         /* Install the new user namespace */
2105                         commit_creds(new_cred);
2106                         new_cred = NULL;
2107                 }
2108         }
2109
2110 bad_unshare_cleanup_cred:
2111         if (new_cred)
2112                 put_cred(new_cred);
2113 bad_unshare_cleanup_fd:
2114         if (new_fd)
2115                 put_files_struct(new_fd);
2116
2117 bad_unshare_cleanup_fs:
2118         if (new_fs)
2119                 free_fs_struct(new_fs);
2120
2121 bad_unshare_out:
2122         return err;
2123 }
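/*
 * Illustrative sketch (not part of this file): a typical user-space caller
 * of the syscall above, moving into a private mount namespace (requires
 * CAP_SYS_ADMIN; hypothetical example):
 */
#if 0
#define _GNU_SOURCE
#include <sched.h>
#include <sys/mount.h>

static int private_mounts(void)
{
	if (unshare(CLONE_NEWNS) < 0)	/* detach into a new mount namespace */
		return -1;
	/* keep mount events from propagating back to the old namespace */
	return mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL);
}
#endif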
2124
2125 /*
2126  *      Helper to unshare the files of the current task.
2127  *      We don't want to expose copy_files internals to
2128  *      the exec layer of the kernel.
2129  */
2130
2131 int unshare_files(struct files_struct **displaced)
2132 {
2133         struct task_struct *task = current;
2134         struct files_struct *copy = NULL;
2135         int error;
2136
2137         error = unshare_fd(CLONE_FILES, &copy);
2138         if (error || !copy) {
2139                 *displaced = NULL;
2140                 return error;
2141         }
2142         *displaced = task->files;
2143         task_lock(task);
2144         task->files = copy;
2145         task_unlock(task);
2146         return 0;
2147 }
2148
2149 int sysctl_max_threads(struct ctl_table *table, int write,
2150                        void __user *buffer, size_t *lenp, loff_t *ppos)
2151 {
2152         struct ctl_table t;
2153         int ret;
2154         int threads = max_threads;
2155         int min = 1;
2156         int max = MAX_THREADS;
2157
2158         t = *table;
2159         t.data = &threads;
2160         t.extra1 = &min;
2161         t.extra2 = &max;
2162
2163         ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
2164         if (ret || !write)
2165                 return ret;
2166
2167         max_threads = threads;
2168
2169         return 0;
2170 }
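/*
 * Illustrative sketch (not part of this file): the handler above backs the
 * kernel.threads-max sysctl, so the limit (clamped to [1, MAX_THREADS] by
 * proc_dointvec_minmax) can be adjusted at run time via /proc
 * (hypothetical user-space snippet):
 */
#if 0
#include <stdio.h>

static int set_threads_max(long n)
{
	FILE *f = fopen("/proc/sys/kernel/threads-max", "w");

	if (!f)
		return -1;
	fprintf(f, "%ld\n", n);
	return fclose(f);
}
#endif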