/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale.  Use mmap instead,
 * current->executable is only used by the procfs.  This allows a dispatch
 * table to check for several different types of binary formats.  We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

int suid_dumpable = 0;

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

void __register_binfmt(struct linux_binfmt *fmt, int insert)
{
        BUG_ON(!fmt);
        if (WARN_ON(!fmt->load_binary))
                return;
        write_lock(&binfmt_lock);
        insert ? list_add(&fmt->lh, &formats) :
                 list_add_tail(&fmt->lh, &formats);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt *fmt)
{
        write_lock(&binfmt_lock);
        list_del(&fmt->lh);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt *fmt)
{
        module_put(fmt->module);
}
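
/*
 * For illustration (a sketch, not part of this file): a binary-format
 * handler is typically registered from its module init through the
 * register_binfmt()/insert_binfmt() wrappers in <linux/binfmts.h>, which
 * call __register_binfmt() with insert == 0 or 1.  Assuming a
 * hypothetical handler "foo_format":
 *
 *      static struct linux_binfmt foo_format = {
 *              .module      = THIS_MODULE,
 *              .load_binary = load_foo_binary,
 *      };
 *
 *      static int __init init_foo_binfmt(void)
 *      {
 *              register_binfmt(&foo_format);
 *              return 0;
 *      }
 *
 *      static void __exit exit_foo_binfmt(void)
 *      {
 *              unregister_binfmt(&foo_format);
 *      }
 */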

bool path_noexec(const struct path *path)
{
        return (path->mnt->mnt_flags & MNT_NOEXEC) ||
               (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}

#ifdef CONFIG_USELIB
/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that we take the load address from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
        struct linux_binfmt *fmt;
        struct file *file;
        struct filename *tmp = getname(library);
        int error = PTR_ERR(tmp);
        static const struct open_flags uselib_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
        };

        if (IS_ERR(tmp))
                goto out;

        file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
        putname(tmp);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto out;

        error = -EINVAL;
        if (!S_ISREG(file_inode(file)->i_mode))
                goto exit;

        error = -EACCES;
        if (path_noexec(&file->f_path))
                goto exit;

        fsnotify_open(file);

        error = -ENOEXEC;

        read_lock(&binfmt_lock);
        list_for_each_entry(fmt, &formats, lh) {
                if (!fmt->load_shlib)
                        continue;
                if (!try_module_get(fmt->module))
                        continue;
                read_unlock(&binfmt_lock);
                error = fmt->load_shlib(file);
                read_lock(&binfmt_lock);
                put_binfmt(fmt);
                if (error != -ENOEXEC)
                        break;
        }
        read_unlock(&binfmt_lock);
exit:
        fput(file);
out:
        return error;
}
#endif /* #ifdef CONFIG_USELIB */

#ifdef CONFIG_MMU
/*
 * The nascent bprm->mm is not visible until exec_mmap(), but it can
 * use a lot of memory, so account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
        struct mm_struct *mm = current->mm;
        long diff = (long)(pages - bprm->vma_pages);

        if (!mm || !diff)
                return;

        bprm->vma_pages = pages;
        add_mm_counter(mm, MM_ANONPAGES, diff);
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;
        int ret;

#ifdef CONFIG_STACK_GROWSUP
        if (write) {
                ret = expand_downwards(bprm->vma, pos);
                if (ret < 0)
                        return NULL;
        }
#endif
        ret = get_user_pages(current, bprm->mm, pos,
                        1, write, 1, &page, NULL);
        if (ret <= 0)
                return NULL;

        if (write) {
                unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
                unsigned long ptr_size, limit;

                /*
                 * Since the stack will hold pointers to the strings, we
                 * must account for them as well.
                 *
                 * The size calculation is the entire vma while each arg page is
                 * built, so each time we get here it's calculating how far it
                 * is currently (rather than each call being just the newly
                 * added size from the arg page).  As a result, we need to
                 * always add the entire size of the pointers, so that on the
                 * last call to get_arg_page() we'll actually have the entire
                 * correct size.
                 */
                ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
                if (ptr_size > ULONG_MAX - size)
                        goto fail;
                size += ptr_size;

                acct_arg_size(bprm, size / PAGE_SIZE);

                /*
                 * We've historically supported up to 32 pages (ARG_MAX)
                 * of argument strings even with small stacks.
                 */
                if (size <= ARG_MAX)
                        return page;

                /*
                 * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
                 * (whichever is smaller) for the argv+env strings.
                 * This ensures that:
                 *  - the remaining binfmt code will not run out of stack space,
                 *  - the program will have a reasonable amount of stack left
                 *    to work from.
                 */
                limit = _STK_LIM / 4 * 3;
                limit = min(limit, rlimit(RLIMIT_STACK) / 4);
                if (size > limit)
                        goto fail;
        }

        return page;

fail:
        put_page(page);
        return NULL;
}
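
/*
 * Worked example of the limit above (an illustration with assumed
 * values): with the common _STK_LIM of 8 MiB and RLIMIT_STACK also at
 * 8 MiB, the argv+env strings plus pointers may use at most
 * min(8 MiB * 3/4, 8 MiB / 4) = min(6 MiB, 2 MiB) = 2 MiB, leaving the
 * rest of the stack for the binfmt code and the program itself.  Only
 * when RLIMIT_STACK exceeds 24 MiB does the 3/4 * _STK_LIM = 6 MiB cap
 * take over.
 */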

static void put_arg_page(struct page *page)
{
        put_page(page);
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
        flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = bprm->mm;

        bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma)
                return -ENOMEM;

        down_write(&mm->mmap_sem);
        vma->vm_mm = mm;

        /*
         * Place the stack at the largest stack address the architecture
         * supports. Later, we'll move this to an appropriate place. We don't
         * use STACK_TOP because that can depend on attributes which aren't
         * configured yet.
         */
        BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
        vma->vm_end = STACK_TOP_MAX;
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        INIT_LIST_HEAD(&vma->anon_vma_chain);

        err = insert_vm_struct(mm, vma);
        if (err)
                goto err;

        mm->stack_vm = mm->total_vm = 1;
        arch_bprm_mm_init(mm, vma);
        up_write(&mm->mmap_sem);
        bprm->p = vma->vm_end - sizeof(void *);
        return 0;
err:
        up_write(&mm->mmap_sem);
        bprm->vma = NULL;
        kmem_cache_free(vm_area_cachep, vma);
        return err;
}
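
/*
 * Illustration (assumed, typical values): on x86-64 STACK_TOP_MAX is
 * 0x7ffffffff000, so the temporary stack vma spans
 * [0x7fffffffe000, 0x7ffffffff000) and bprm->p starts at
 * 0x7fffffffeff8 (STACK_TOP_MAX - sizeof(void *)), growing downwards
 * as copy_strings() pushes the argv/env strings.
 */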

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= MAX_ARG_STRLEN;
}

#else

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;

        page = bprm->page[pos / PAGE_SIZE];
        if (!page && write) {
                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
                if (!page)
                        return NULL;
                bprm->page[pos / PAGE_SIZE] = page;
        }

        return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
        if (bprm->page[i]) {
                __free_page(bprm->page[i]);
                bprm->page[i] = NULL;
        }
}

static void free_arg_pages(struct linux_binprm *bprm)
{
        int i;

        for (i = 0; i < MAX_ARG_PAGES; i++)
                free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
        return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct.  We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values.  We'll update
 * them later in setup_arg_pages().
 */
static int bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct mm_struct *mm = NULL;

        bprm->mm = mm = mm_alloc();
        err = -ENOMEM;
        if (!mm)
                goto err;

        err = __bprm_mm_init(bprm);
        if (err)
                goto err;

        return 0;

err:
        if (mm) {
                bprm->mm = NULL;
                mmdrop(mm);
        }

        return err;
}

struct user_arg_ptr {
#ifdef CONFIG_COMPAT
        bool is_compat;
#endif
        union {
                const char __user *const __user *native;
#ifdef CONFIG_COMPAT
                const compat_uptr_t __user *compat;
#endif
        } ptr;
};

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
        const char __user *native;

#ifdef CONFIG_COMPAT
        if (unlikely(argv.is_compat)) {
                compat_uptr_t compat;

                if (get_user(compat, argv.ptr.compat + nr))
                        return ERR_PTR(-EFAULT);

                return compat_ptr(compat);
        }
#endif

        if (get_user(native, argv.ptr.native + nr))
                return ERR_PTR(-EFAULT);

        return native;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
        int i = 0;

        if (argv.ptr.native != NULL) {
                for (;;) {
                        const char __user *p = get_user_arg_ptr(argv, i);

                        if (!p)
                                break;

                        if (IS_ERR(p))
                                return -EFAULT;

                        if (i >= max)
                                return -E2BIG;
                        ++i;

                        if (fatal_signal_pending(current))
                                return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
        return i;
}

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
                        struct linux_binprm *bprm)
{
        struct page *kmapped_page = NULL;
        char *kaddr = NULL;
        unsigned long kpos = 0;
        int ret;

        while (argc-- > 0) {
                const char __user *str;
                int len;
                unsigned long pos;

                ret = -EFAULT;
                str = get_user_arg_ptr(argv, argc);
                if (IS_ERR(str))
                        goto out;

                len = strnlen_user(str, MAX_ARG_STRLEN);
                if (!len)
                        goto out;

                ret = -E2BIG;
                if (!valid_arg_len(bprm, len))
                        goto out;

                /* We're going to work our way backwards. */
                pos = bprm->p;
                str += len;
                bprm->p -= len;

                while (len > 0) {
                        int offset, bytes_to_copy;

                        if (fatal_signal_pending(current)) {
                                ret = -ERESTARTNOHAND;
                                goto out;
                        }
                        cond_resched();

                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;

                        bytes_to_copy = offset;
                        if (bytes_to_copy > len)
                                bytes_to_copy = len;

                        offset -= bytes_to_copy;
                        pos -= bytes_to_copy;
                        str -= bytes_to_copy;
                        len -= bytes_to_copy;

                        if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
                                struct page *page;

                                page = get_arg_page(bprm, pos, 1);
                                if (!page) {
                                        ret = -E2BIG;
                                        goto out;
                                }

                                if (kmapped_page) {
                                        flush_kernel_dcache_page(kmapped_page);
                                        kunmap(kmapped_page);
                                        put_arg_page(kmapped_page);
                                }
                                kmapped_page = page;
                                kaddr = kmap(kmapped_page);
                                kpos = pos & PAGE_MASK;
                                flush_arg_page(bprm, kpos, kmapped_page);
                        }
                        if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
                                ret = -EFAULT;
                                goto out;
                        }
                }
        }
        ret = 0;
out:
        if (kmapped_page) {
                flush_kernel_dcache_page(kmapped_page);
                kunmap(kmapped_page);
                put_arg_page(kmapped_page);
        }
        return ret;
}
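
/*
 * Worked example of the backwards copy above (assumed numbers): take
 * PAGE_SIZE == 4096, a 10-byte string (terminating '\0' included) and
 * bprm->p with a page offset of 5.  The first pass computes offset = 5,
 * so bytes_to_copy = 5: the last five bytes of the string land at
 * offsets 0..4 of that page.  The second pass wraps (offset becomes
 * PAGE_SIZE) and puts the first five bytes at offsets 4091..4095 of the
 * page below.  kmapped_page/kpos cache the current kmap() so
 * consecutive chunks that hit the same page map it only once.
 */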

/*
 * Like copy_strings(), but get argv and its values from kernel memory.
 */
int copy_strings_kernel(int argc, const char *const *__argv,
                        struct linux_binprm *bprm)
{
        int r;
        mm_segment_t oldfs = get_fs();
        struct user_arg_ptr argv = {
                .ptr.native = (const char __user *const __user *)__argv,
        };

        set_fs(KERNEL_DS);
        r = copy_strings(argc, argv, bprm);
        set_fs(oldfs);

        return r;
}
EXPORT_SYMBOL(copy_strings_kernel);

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location.  The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long old_start = vma->vm_start;
        unsigned long old_end = vma->vm_end;
        unsigned long length = old_end - old_start;
        unsigned long new_start = old_start - shift;
        unsigned long new_end = old_end - shift;
        struct mmu_gather tlb;

        BUG_ON(new_start > new_end);

        /*
         * ensure there are no vmas between where we want to go
         * and where we are
         */
        if (vma != find_vma(mm, new_start))
                return -EFAULT;

        /*
         * cover the whole range: [new_start, old_end)
         */
        if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
                return -ENOMEM;

        /*
         * move the page tables downwards; on failure we rely on
         * process cleanup to remove whatever mess we made.
         */
        if (length != move_page_tables(vma, old_start,
                                       vma, new_start, length, false))
                return -ENOMEM;

        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, old_start, old_end);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap, clear from new_end.
                 */
                free_pgd_range(&tlb, new_end, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to not touch
                 * the address space in [new_end, old_start): some architectures
                 * have constraints on va-space that make this illegal (IA64);
                 * for the others it's just a little faster.
                 */
                free_pgd_range(&tlb, old_start, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        }
        tlb_finish_mmu(&tlb, old_start, old_end);

        /*
         * Shrink the vma to just the new range.  Always succeeds.
         */
        vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

        return 0;
}
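
/*
 * Worked example (assumed numbers): shifting the one-page vma
 * [0x7fffffffe000, 0x7ffffffff000) down by shift == 0x1000000 yields
 * [0x7ffffeffe000, 0x7ffffefff000).  vma_adjust() first grows the vma
 * to cover [0x7ffffeffe000, 0x7ffffffff000), move_page_tables()
 * relocates the PTEs, and since new_end <= old_start here (no overlap)
 * the old range's page tables are freed from old_start before the vma
 * is shrunk to the new range.
 */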

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
                    unsigned long stack_top,
                    int executable_stack)
{
        unsigned long ret;
        unsigned long stack_shift;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = bprm->vma;
        struct vm_area_struct *prev = NULL;
        unsigned long vm_flags;
        unsigned long stack_base;
        unsigned long stack_size;
        unsigned long stack_expand;
        unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
        /* Limit stack size */
        stack_base = rlimit_max(RLIMIT_STACK);
        if (stack_base > STACK_SIZE_MAX)
                stack_base = STACK_SIZE_MAX;

        /* Add space for stack randomization. */
        stack_base += (STACK_RND_MASK << PAGE_SHIFT);

        /* Make sure we didn't let the argument array grow too large. */
        if (vma->vm_end - vma->vm_start > stack_base)
                return -ENOMEM;

        stack_base = PAGE_ALIGN(stack_top - stack_base);

        stack_shift = vma->vm_start - stack_base;
        mm->arg_start = bprm->p - stack_shift;
        bprm->p = vma->vm_end - stack_shift;
#else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);

        if (unlikely(stack_top < mmap_min_addr) ||
            unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
                return -ENOMEM;

        stack_shift = vma->vm_end - stack_top;

        bprm->p -= stack_shift;
        mm->arg_start = bprm->p;
#endif

        if (bprm->loader)
                bprm->loader -= stack_shift;
        bprm->exec -= stack_shift;

        down_write(&mm->mmap_sem);
        vm_flags = VM_STACK_FLAGS;

        /*
         * Adjust stack execute permissions; explicitly enable for
         * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
         * (arch default) otherwise.
         */
        if (unlikely(executable_stack == EXSTACK_ENABLE_X))
                vm_flags |= VM_EXEC;
        else if (executable_stack == EXSTACK_DISABLE_X)
                vm_flags &= ~VM_EXEC;
        vm_flags |= mm->def_flags;
        vm_flags |= VM_STACK_INCOMPLETE_SETUP;

        ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
                        vm_flags);
        if (ret)
                goto out_unlock;
        BUG_ON(prev != vma);

        /* Move stack pages down in memory. */
        if (stack_shift) {
                ret = shift_arg_pages(vma, stack_shift);
                if (ret)
                        goto out_unlock;
        }

        /* mprotect_fixup is overkill to remove the temporary stack flags */
        vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

        stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
        stack_size = vma->vm_end - vma->vm_start;
        /*
         * Align this down to a page boundary as expand_stack
         * will align it up.
         */
        rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_start + rlim_stack;
        else
                stack_base = vma->vm_end + stack_expand;
#else
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_end - rlim_stack;
        else
                stack_base = vma->vm_start - stack_expand;
#endif
        current->mm->start_stack = bprm->p;
        ret = expand_stack(vma, stack_base);
        if (ret)
                ret = -EFAULT;

out_unlock:
        up_write(&mm->mmap_sem);
        return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
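
/*
 * For illustration (a sketch of a typical caller, not part of this
 * file): a binfmt's load_binary() finalizes the stack roughly the way
 * load_elf_binary() does, e.g.
 *
 *      retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
 *                               executable_stack);
 *      if (retval < 0)
 *              goto out_free_dentry;   // hypothetical error label
 *
 * where executable_stack comes from the binary's PT_GNU_STACK header
 * (or the arch default EXSTACK_DEFAULT).
 */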

#endif /* CONFIG_MMU */

static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
        struct file *file;
        int err;
        struct open_flags open_exec_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_EXEC | MAY_OPEN,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
        };

        if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
                return ERR_PTR(-EINVAL);
        if (flags & AT_SYMLINK_NOFOLLOW)
                open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
        if (flags & AT_EMPTY_PATH)
                open_exec_flags.lookup_flags |= LOOKUP_EMPTY;

        file = do_filp_open(fd, name, &open_exec_flags);
        if (IS_ERR(file))
                goto out;

        err = -EACCES;
        if (!S_ISREG(file_inode(file)->i_mode))
                goto exit;

        if (path_noexec(&file->f_path))
                goto exit;

        err = deny_write_access(file);
        if (err)
                goto exit;

        if (name->name[0] != '\0')
                fsnotify_open(file);

out:
        return file;

exit:
        fput(file);
        return ERR_PTR(err);
}

struct file *open_exec(const char *name)
{
        struct filename *filename = getname_kernel(name);
        struct file *f = ERR_CAST(filename);

        if (!IS_ERR(filename)) {
                f = do_open_execat(AT_FDCWD, filename, 0);
                putname(filename);
        }
        return f;
}
EXPORT_SYMBOL(open_exec);

int kernel_read(struct file *file, loff_t offset,
                char *addr, unsigned long count)
{
        mm_segment_t old_fs;
        loff_t pos = offset;
        int result;

        old_fs = get_fs();
        set_fs(get_ds());
        /* The cast to a user pointer is valid due to the set_fs() */
        result = vfs_read(file, (void __user *)addr, count, &pos);
        set_fs(old_fs);
        return result;
}

EXPORT_SYMBOL(kernel_read);

ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
        ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
        if (res > 0)
                flush_icache_range(addr, addr + len);
        return res;
}
EXPORT_SYMBOL(read_code);

static int exec_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk;
        struct mm_struct *old_mm, *active_mm;

        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
        old_mm = current->mm;
        mm_release(tsk, old_mm);

        if (old_mm) {
                sync_mm_rss(old_mm);
                /*
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
                 * through with the exec.  We must hold mmap_sem around
                 * checking core_state and changing tsk->mm.
                 */
                down_read(&old_mm->mmap_sem);
                if (unlikely(old_mm->core_state)) {
                        up_read(&old_mm->mmap_sem);
                        return -EINTR;
                }
        }
        task_lock(tsk);
        active_mm = tsk->active_mm;
        tsk->mm = mm;
        tsk->active_mm = mm;
        activate_mm(active_mm, mm);
        tsk->mm->vmacache_seqnum = 0;
        vmacache_flush(tsk);
        task_unlock(tsk);
        if (old_mm) {
                up_read(&old_mm->mmap_sem);
                BUG_ON(active_mm != old_mm);
                setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
                mm_update_next_owner(old_mm);
                mmput(old_mm);
                return 0;
        }
        mmdrop(active_mm);
        return 0;
}

/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int de_thread(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *oldsighand = tsk->sighand;
        spinlock_t *lock = &oldsighand->siglock;

        if (thread_group_empty(tsk))
                goto no_thread_group;

        /*
         * Kill all other threads in the thread group.
         */
        spin_lock_irq(lock);
        if (signal_group_exit(sig)) {
                /*
                 * Another group action in progress, just
                 * return so that the signal is processed.
                 */
                spin_unlock_irq(lock);
                return -EAGAIN;
        }

        sig->group_exit_task = tsk;
        sig->notify_count = zap_other_threads(tsk);
        if (!thread_group_leader(tsk))
                sig->notify_count--;

        while (sig->notify_count) {
                __set_current_state(TASK_KILLABLE);
                spin_unlock_irq(lock);
                schedule();
                if (unlikely(__fatal_signal_pending(tsk)))
                        goto killed;
                spin_lock_irq(lock);
        }
        spin_unlock_irq(lock);

        /*
         * At this point all other threads have exited, all we have to
         * do is to wait for the thread group leader to become inactive,
         * and to assume its PID:
         */
        if (!thread_group_leader(tsk)) {
                struct task_struct *leader = tsk->group_leader;

                for (;;) {
                        threadgroup_change_begin(tsk);
                        write_lock_irq(&tasklist_lock);
                        /*
                         * Do this under tasklist_lock to ensure that
                         * exit_notify() can't miss ->group_exit_task
                         */
                        sig->notify_count = -1;
                        if (likely(leader->exit_state))
                                break;
                        __set_current_state(TASK_KILLABLE);
                        write_unlock_irq(&tasklist_lock);
                        threadgroup_change_end(tsk);
                        schedule();
                        if (unlikely(__fatal_signal_pending(tsk)))
                                goto killed;
                }

                /*
                 * The only record we have of the real-time age of a
                 * process, regardless of execs it's done, is start_time.
                 * All the past CPU time is accumulated in signal_struct
                 * from sister threads now dead.  But in this non-leader
                 * exec, nothing survives from the original leader thread,
                 * whose birth marks the true age of this process now.
                 * When we take on its identity by switching to its PID, we
                 * also take its birthdate (always earlier than our own).
                 */
                tsk->start_time = leader->start_time;
                tsk->real_start_time = leader->real_start_time;

                BUG_ON(!same_thread_group(leader, tsk));
                BUG_ON(has_group_leader_pid(tsk));
                /*
                 * An exec() starts a new thread group with the
                 * TGID of the previous thread group. Rehash the
                 * two threads with a switched PID, and release
                 * the former thread group leader:
                 */

                /* Become a process group leader with the old leader's pid.
                 * The old leader becomes a thread of this thread group.
                 * Note: The old leader also uses this pid until release_task
                 *       is called.  Odd but simple and correct.
                 */
                tsk->pid = leader->pid;
                change_pid(tsk, PIDTYPE_PID, task_pid(leader));
                transfer_pid(leader, tsk, PIDTYPE_PGID);
                transfer_pid(leader, tsk, PIDTYPE_SID);

                list_replace_rcu(&leader->tasks, &tsk->tasks);
                list_replace_init(&leader->sibling, &tsk->sibling);

                tsk->group_leader = tsk;
                leader->group_leader = tsk;

                tsk->exit_signal = SIGCHLD;
                leader->exit_signal = -1;

                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
                leader->exit_state = EXIT_DEAD;

                /*
                 * We are going to release_task()->ptrace_unlink() silently,
                 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
                 * the tracer won't block again waiting for this thread.
                 */
                if (unlikely(leader->ptrace))
                        __wake_up_parent(leader, leader->parent);
                write_unlock_irq(&tasklist_lock);
                threadgroup_change_end(tsk);

                release_task(leader);
        }

        sig->group_exit_task = NULL;
        sig->notify_count = 0;

no_thread_group:
        /* we have changed execution domain */
        tsk->exit_signal = SIGCHLD;

        exit_itimers(sig);
        flush_itimer_signals();

        if (atomic_read(&oldsighand->count) != 1) {
                struct sighand_struct *newsighand;
                /*
                 * This ->sighand is shared with the CLONE_SIGHAND
                 * but not CLONE_THREAD task, switch to the new one.
                 */
                newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
                if (!newsighand)
                        return -ENOMEM;

                atomic_set(&newsighand->count, 1);
                memcpy(newsighand->action, oldsighand->action,
                       sizeof(newsighand->action));

                write_lock_irq(&tasklist_lock);
                spin_lock(&oldsighand->siglock);
                rcu_assign_pointer(tsk->sighand, newsighand);
                spin_unlock(&oldsighand->siglock);
                write_unlock_irq(&tasklist_lock);

                __cleanup_sighand(oldsighand);
        }

        BUG_ON(!thread_group_leader(tsk));
        return 0;

killed:
        /* protects against exit_notify() and __exit_signal() */
        read_lock(&tasklist_lock);
        sig->group_exit_task = NULL;
        sig->notify_count = 0;
        read_unlock(&tasklist_lock);
        return -EAGAIN;
}

char *get_task_comm(char *buf, struct task_struct *tsk)
{
        /* buf must be at least sizeof(tsk->comm) in size */
        task_lock(tsk);
        strncpy(buf, tsk->comm, sizeof(tsk->comm));
        task_unlock(tsk);
        return buf;
}
EXPORT_SYMBOL_GPL(get_task_comm);
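
/*
 * Usage sketch (illustrative): callers pass a buffer of at least
 * TASK_COMM_LEN bytes, e.g.
 *
 *      char comm[TASK_COMM_LEN];
 *
 *      get_task_comm(comm, task);
 *      pr_debug("running %s\n", comm);
 *
 * The task_lock() inside keeps the copy consistent against a
 * concurrent __set_task_comm().
 */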

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */

void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
        task_lock(tsk);
        trace_task_rename(tsk, buf);
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
        perf_event_comm(tsk, exec);
}

int flush_old_exec(struct linux_binprm *bprm)
{
        int retval;

        /*
         * Make sure we have a private signal table and that
         * we are unassociated from the previous thread group.
         */
        retval = de_thread(current);
        if (retval)
                goto out;

        /*
         * Must be called _before_ exec_mmap() as bprm->mm is
         * not visible until then. This also enables the update
         * to be lockless.
         */
        set_mm_exe_file(bprm->mm, bprm->file);

        /*
         * Release all of the old mmap stuff
         */
        acct_arg_size(bprm, 0);
        retval = exec_mmap(bprm->mm);
        if (retval)
                goto out;

        bprm->mm = NULL;                /* We're using it now */

        set_fs(USER_DS);
        current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
                                        PF_NOFREEZE | PF_NO_SETAFFINITY);
        flush_thread();
        current->personality &= ~bprm->per_clear;

        /*
         * We have to apply CLOEXEC before we change whether the process is
         * dumpable (in setup_new_exec) to avoid a race with a process in userspace
         * trying to access the should-be-closed file descriptors of a process
         * undergoing exec(2).
         */
        do_close_on_exec(current->files);
        return 0;

out:
        return retval;
}
EXPORT_SYMBOL(flush_old_exec);

void would_dump(struct linux_binprm *bprm, struct file *file)
{
        struct inode *inode = file_inode(file);
        if (inode_permission(inode, MAY_READ) < 0) {
                struct user_namespace *old, *user_ns;
                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

                /* Ensure mm->user_ns contains the executable */
                user_ns = old = bprm->mm->user_ns;
                while ((user_ns != &init_user_ns) &&
                       !privileged_wrt_inode_uidgid(user_ns, inode))
                        user_ns = user_ns->parent;

                if (old != user_ns) {
                        bprm->mm->user_ns = get_user_ns(user_ns);
                        put_user_ns(old);
                }
        }
}
EXPORT_SYMBOL(would_dump);

void setup_new_exec(struct linux_binprm *bprm)
{
        arch_pick_mmap_layout(current->mm);

        /* This is the point of no return */
        current->sas_ss_sp = current->sas_ss_size = 0;

        if (uid_eq(current_euid(), current_uid()) && gid_eq(current_egid(), current_gid()))
                set_dumpable(current->mm, SUID_DUMP_USER);
        else
                set_dumpable(current->mm, suid_dumpable);

        perf_event_exec();
        __set_task_comm(current, kbasename(bprm->filename), true);

        /* Set the new mm task size. We have to do that late because it may
         * depend on TIF_32BIT which is only updated in flush_thread() on
         * some architectures like powerpc.
         */
        current->mm->task_size = TASK_SIZE;

        /* install the new credentials */
        if (!uid_eq(bprm->cred->uid, current_euid()) ||
            !gid_eq(bprm->cred->gid, current_egid())) {
                current->pdeath_signal = 0;
        } else {
                if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
                        set_dumpable(current->mm, suid_dumpable);
        }

        /* An exec changes our domain. We are no longer part of the thread
           group */
        current->self_exec_id++;
        flush_signal_handlers(current, 0);
}
EXPORT_SYMBOL(setup_new_exec);

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * install_exec_creds() commits the new creds and drops the lock.
 * Or, if exec fails before that, free_bprm() should release ->cred
 * and unlock.
 */
int prepare_bprm_creds(struct linux_binprm *bprm)
{
        if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
                return -ERESTARTNOINTR;

        bprm->cred = prepare_exec_creds();
        if (likely(bprm->cred))
                return 0;

        mutex_unlock(&current->signal->cred_guard_mutex);
        return -ENOMEM;
}

static void free_bprm(struct linux_binprm *bprm)
{
        free_arg_pages(bprm);
        if (bprm->cred) {
                mutex_unlock(&current->signal->cred_guard_mutex);
                abort_creds(bprm->cred);
        }
        if (bprm->file) {
                allow_write_access(bprm->file);
                fput(bprm->file);
        }
        /* If a binfmt changed the interp, free it. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        kfree(bprm);
}

int bprm_change_interp(char *interp, struct linux_binprm *bprm)
{
        /* If a binfmt changed the interp, free it first. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        bprm->interp = kstrdup(interp, GFP_KERNEL);
        if (!bprm->interp)
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(bprm_change_interp);
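
/*
 * Illustration (a sketch of a typical caller, not part of this file):
 * binfmt_script's load_script() redirects the exec to the "#!"
 * interpreter along these lines:
 *
 *      retval = bprm_change_interp(i_name, bprm);  // i_name parsed from "#!" line
 *      if (retval < 0)
 *              return retval;
 *
 * after which prepare_binprm() and search_binary_handler() run again
 * against the interpreter.
 */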

/*
 * install the new credentials for this executable
 */
void install_exec_creds(struct linux_binprm *bprm)
{
        security_bprm_committing_creds(bprm);

        commit_creds(bprm->cred);
        bprm->cred = NULL;

        /*
         * Disable monitoring for regular users
         * when executing setuid binaries. Must
         * wait until new credentials are committed
         * by commit_creds() above.
         */
        if (get_dumpable(current->mm) != SUID_DUMP_USER)
                perf_event_exit_task(current);
        /*
         * cred_guard_mutex must be held at least to this point to prevent
         * ptrace_attach() from altering our determination of the task's
         * credentials; any time after this it may be unlocked.
         */
        security_bprm_committed_creds(bprm);
        mutex_unlock(&current->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(install_exec_creds);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH or seccomp thread-sync
 */
static void check_unsafe_exec(struct linux_binprm *bprm)
{
        struct task_struct *p = current, *t;
        unsigned n_fs;

        if (p->ptrace) {
                if (ptracer_capable(p, current_user_ns()))
                        bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
                else
                        bprm->unsafe |= LSM_UNSAFE_PTRACE;
        }

        /*
         * This isn't strictly necessary, but it makes it harder for LSMs to
         * mess up.
         */
        if (task_no_new_privs(current))
                bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

        t = p;
        n_fs = 1;
        spin_lock(&p->fs->lock);
        rcu_read_lock();
        while_each_thread(p, t) {
                if (t->fs == p->fs)
                        n_fs++;
        }
        rcu_read_unlock();

        if (p->fs->users > n_fs)
                bprm->unsafe |= LSM_UNSAFE_SHARE;
        else
                p->fs->in_exec = 1;
        spin_unlock(&p->fs->lock);
}

static void bprm_fill_uid(struct linux_binprm *bprm)
{
        struct inode *inode;
        unsigned int mode;
        kuid_t uid;
        kgid_t gid;

        /* clear any set[ug]id data left over from a previous binary */
        bprm->cred->euid = current_euid();
        bprm->cred->egid = current_egid();

        if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
                return;

        if (task_no_new_privs(current))
                return;

        inode = file_inode(bprm->file);
        mode = READ_ONCE(inode->i_mode);
        if (!(mode & (S_ISUID|S_ISGID)))
                return;

        /* Be careful if suid/sgid is set */
        mutex_lock(&inode->i_mutex);

        /* atomically reload mode/uid/gid now that the lock is held */
        mode = inode->i_mode;
        uid = inode->i_uid;
        gid = inode->i_gid;
        mutex_unlock(&inode->i_mutex);

        /* We ignore suid/sgid if there are no mappings for them in the ns */
        if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
                 !kgid_has_mapping(bprm->cred->user_ns, gid))
                return;

        if (mode & S_ISUID) {
                bprm->per_clear |= PER_CLEAR_ON_SETID;
                bprm->cred->euid = uid;
        }

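        /*
         * Aside (explanatory, hedged): S_ISGID is honoured only together
         * with S_IXGRP because setgid without group-execute has
         * traditionally marked a file for mandatory locking, not for
         * setgid-on-exec.
         */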
        if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
                bprm->per_clear |= PER_CLEAR_ON_SETID;
                bprm->cred->egid = gid;
        }
}

/*
 * Fill the binprm structure from the inode.
 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes.
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
int prepare_binprm(struct linux_binprm *bprm)
{
        int retval;

        bprm_fill_uid(bprm);

        /* fill in binprm security blob */
        retval = security_bprm_set_creds(bprm);
        if (retval)
                return retval;
        bprm->cred_prepared = 1;

        memset(bprm->buf, 0, BINPRM_BUF_SIZE);
        return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE);
}

EXPORT_SYMBOL(prepare_binprm);

/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
        int ret = 0;
        unsigned long offset;
        char *kaddr;
        struct page *page;

        if (!bprm->argc)
                return 0;

        do {
                offset = bprm->p & ~PAGE_MASK;
                page = get_arg_page(bprm, bprm->p, 0);
                if (!page) {
                        ret = -EFAULT;
                        goto out;
                }
                kaddr = kmap_atomic(page);

                for (; offset < PAGE_SIZE && kaddr[offset];
                                offset++, bprm->p++)
                        ;

                kunmap_atomic(kaddr);
                put_arg_page(page);

                if (offset == PAGE_SIZE)
                        free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1);
        } while (offset == PAGE_SIZE);

        bprm->p++;
        bprm->argc--;
        ret = 0;

out:
        return ret;
}
EXPORT_SYMBOL(remove_arg_zero);

#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
/*
 * Cycle through the list of binary format handlers until one recognizes
 * the image.
 */
int search_binary_handler(struct linux_binprm *bprm)
{
        bool need_retry = IS_ENABLED(CONFIG_MODULES);
        struct linux_binfmt *fmt;
        int retval;

        /* This allows 4 levels of binfmt rewrites before failing hard. */
        if (bprm->recursion_depth > 5)
                return -ELOOP;

        retval = security_bprm_check(bprm);
        if (retval)
                return retval;

        retval = -ENOENT;
 retry:
        read_lock(&binfmt_lock);
        list_for_each_entry(fmt, &formats, lh) {
                if (!try_module_get(fmt->module))
                        continue;
                read_unlock(&binfmt_lock);
                bprm->recursion_depth++;
                retval = fmt->load_binary(bprm);
                read_lock(&binfmt_lock);
                put_binfmt(fmt);
                bprm->recursion_depth--;
                if (retval < 0 && !bprm->mm) {
                        /* we got to flush_old_exec() and failed after it */
                        read_unlock(&binfmt_lock);
                        force_sigsegv(SIGSEGV, current);
                        return retval;
                }
                if (retval != -ENOEXEC || !bprm->file) {
                        read_unlock(&binfmt_lock);
                        return retval;
                }
        }
        read_unlock(&binfmt_lock);

        if (need_retry) {
                if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
                    printable(bprm->buf[2]) && printable(bprm->buf[3]))
                        return retval;
                if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
                        return retval;
                need_retry = false;
                goto retry;
        }

        return retval;
}
EXPORT_SYMBOL(search_binary_handler);
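
/*
 * Illustration of the "binfmt-%04x" fallback above: the module alias is
 * built from bytes 2-3 of the image.  For an ELF file ("\177ELF") on a
 * little-endian machine those bytes are 'L','F', giving
 * request_module("binfmt-464c"); a distribution can therefore alias
 * binfmt-464c to its ELF loader module.
 */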

static int exec_binprm(struct linux_binprm *bprm)
{
        pid_t old_pid, old_vpid;
        int ret;

        /* Need to fetch pid before load_binary changes it */
        old_pid = current->pid;
        rcu_read_lock();
        old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
        rcu_read_unlock();

        ret = search_binary_handler(bprm);
        if (ret >= 0) {
                audit_bprm(bprm);
                trace_sched_process_exec(current, old_pid, bprm);
                ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
                proc_exec_connector(current);
        }

        return ret;
}

/*
 * sys_execve() executes a new program.
 */
static int do_execveat_common(int fd, struct filename *filename,
                              struct user_arg_ptr argv,
                              struct user_arg_ptr envp,
                              int flags)
{
        char *pathbuf = NULL;
        struct linux_binprm *bprm;
        struct file *file;
        struct files_struct *displaced;
        int retval;

        if (IS_ERR(filename))
                return PTR_ERR(filename);

        /*
         * We move the actual failure in case of RLIMIT_NPROC excess from
         * set*uid() to execve() because too many poorly written programs
         * don't check setuid() return code.  Here we additionally recheck
         * whether NPROC limit is still exceeded.
         */
        if ((current->flags & PF_NPROC_EXCEEDED) &&
            atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
                retval = -EAGAIN;
                goto out_ret;
        }

        /* We're below the limit (still or again), so we don't want to make
         * further execve() calls fail. */
        current->flags &= ~PF_NPROC_EXCEEDED;

        retval = unshare_files(&displaced);
        if (retval)
                goto out_ret;

        retval = -ENOMEM;
        bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
        if (!bprm)
                goto out_files;

        retval = prepare_bprm_creds(bprm);
        if (retval)
                goto out_free;

        check_unsafe_exec(bprm);
        current->in_execve = 1;

        file = do_open_execat(fd, filename, flags);
        retval = PTR_ERR(file);
        if (IS_ERR(file))
                goto out_unmark;

        sched_exec();

        bprm->file = file;
        if (fd == AT_FDCWD || filename->name[0] == '/') {
                bprm->filename = filename->name;
        } else {
                if (filename->name[0] == '\0')
                        pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d", fd);
                else
                        pathbuf = kasprintf(GFP_TEMPORARY, "/dev/fd/%d/%s",
                                            fd, filename->name);
                if (!pathbuf) {
                        retval = -ENOMEM;
                        goto out_unmark;
                }
                /*
                 * Record that a name derived from an O_CLOEXEC fd will be
                 * inaccessible after exec. Relies on having exclusive access to
                 * current->files (due to unshare_files above).
                 */
                if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
                        bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
                bprm->filename = pathbuf;
        }
        bprm->interp = bprm->filename;

        retval = bprm_mm_init(bprm);
        if (retval)
                goto out_unmark;

        bprm->argc = count(argv, MAX_ARG_STRINGS);
        if ((retval = bprm->argc) < 0)
                goto out;

        bprm->envc = count(envp, MAX_ARG_STRINGS);
        if ((retval = bprm->envc) < 0)
                goto out;

        retval = prepare_binprm(bprm);
        if (retval < 0)
                goto out;

        retval = copy_strings_kernel(1, &bprm->filename, bprm);
        if (retval < 0)
                goto out;

        bprm->exec = bprm->p;
        retval = copy_strings(bprm->envc, envp, bprm);
        if (retval < 0)
                goto out;

        retval = copy_strings(bprm->argc, argv, bprm);
        if (retval < 0)
                goto out;

        would_dump(bprm, bprm->file);

        retval = exec_binprm(bprm);
        if (retval < 0)
                goto out;

        /* execve succeeded */
        current->fs->in_exec = 0;
        current->in_execve = 0;
        acct_update_integrals(current);
        task_numa_free(current);
        free_bprm(bprm);
        kfree(pathbuf);
        putname(filename);
        if (displaced)
                put_files_struct(displaced);
        return retval;

out:
        if (bprm->mm) {
                acct_arg_size(bprm, 0);
                mmput(bprm->mm);
        }

out_unmark:
        current->fs->in_exec = 0;
        current->in_execve = 0;

out_free:
        free_bprm(bprm);
        kfree(pathbuf);

out_files:
        if (displaced)
                reset_files_struct(displaced);
out_ret:
        putname(filename);
        return retval;
}

int do_execve(struct filename *filename,
        const char __user *const __user *__argv,
        const char __user *const __user *__envp)
{
        struct user_arg_ptr argv = { .ptr.native = __argv };
        struct user_arg_ptr envp = { .ptr.native = __envp };
        return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

int do_execveat(int fd, struct filename *filename,
                const char __user *const __user *__argv,
                const char __user *const __user *__envp,
                int flags)
{
        struct user_arg_ptr argv = { .ptr.native = __argv };
        struct user_arg_ptr envp = { .ptr.native = __envp };

        return do_execveat_common(fd, filename, argv, envp, flags);
}

#ifdef CONFIG_COMPAT
static int compat_do_execve(struct filename *filename,
        const compat_uptr_t __user *__argv,
        const compat_uptr_t __user *__envp)
{
        struct user_arg_ptr argv = {
                .is_compat = true,
                .ptr.compat = __argv,
        };
        struct user_arg_ptr envp = {
                .is_compat = true,
                .ptr.compat = __envp,
        };
        return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int compat_do_execveat(int fd, struct filename *filename,
                              const compat_uptr_t __user *__argv,
                              const compat_uptr_t __user *__envp,
                              int flags)
{
        struct user_arg_ptr argv = {
                .is_compat = true,
                .ptr.compat = __argv,
        };
        struct user_arg_ptr envp = {
                .is_compat = true,
                .ptr.compat = __envp,
        };
        return do_execveat_common(fd, filename, argv, envp, flags);
}
#endif

void set_binfmt(struct linux_binfmt *new)
{
        struct mm_struct *mm = current->mm;

        if (mm->binfmt)
                module_put(mm->binfmt->module);

        mm->binfmt = new;
        if (new)
                __module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);

/*
 * set_dumpable() stores the three-value SUID_DUMP_* state into mm->flags.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
        unsigned long old, new;

        if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
                return;

        do {
                old = ACCESS_ONCE(mm->flags);
                new = (old & ~MMF_DUMPABLE_MASK) | value;
        } while (cmpxchg(&mm->flags, old, new) != old);
}

SYSCALL_DEFINE3(execve,
                const char __user *, filename,
                const char __user *const __user *, argv,
                const char __user *const __user *, envp)
{
        return do_execve(getname(filename), argv, envp);
}

SYSCALL_DEFINE5(execveat,
                int, fd, const char __user *, filename,
                const char __user *const __user *, argv,
                const char __user *const __user *, envp,
                int, flags)
{
        int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

        return do_execveat(fd,
                           getname_flags(filename, lookup_flags, NULL),
                           argv, envp, flags);
}
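
/*
 * Usage sketch (illustrative, following the execveat(2) semantics):
 * with AT_EMPTY_PATH and an empty pathname the fd itself is executed,
 * which is how fexecve(3) can be implemented in userspace:
 *
 *      syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
 *
 * With a relative pathname, fd names the directory it is resolved
 * against, like the other *at() syscalls.
 */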

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
        const compat_uptr_t __user *, argv,
        const compat_uptr_t __user *, envp)
{
        return compat_do_execve(getname(filename), argv, envp);
}

COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
                       const char __user *, filename,
                       const compat_uptr_t __user *, argv,
                       const compat_uptr_t __user *, envp,
                       int, flags)
{
        int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

        return compat_do_execveat(fd,
                                  getname_flags(filename, lookup_flags, NULL),
                                  argv, envp, flags);
}
#endif