1 /*
2  *  linux/fs/exec.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 /*
8  * #!-checking implemented by tytso.
9  */
10 /*
11  * Demand-loading implemented 01.12.91 - no need to read anything but
12  * the header into memory. The inode of the executable is put into
13  * "current->executable", and page faults do the actual loading. Clean.
14  *
15  * Once more I can proudly say that linux stood up to being changed: it
16  * was less than 2 hours work to get demand-loading completely implemented.
17  *
18  * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
19  * current->executable is only used by the procfs.  This allows a dispatch
20  * table to check for several different types of binary formats.  We keep
21  * trying until we recognize the file or we run out of supported binary
22  * formats.
23  */
24
25 #include <linux/slab.h>
26 #include <linux/file.h>
27 #include <linux/fdtable.h>
28 #include <linux/mm.h>
29 #include <linux/vmacache.h>
30 #include <linux/stat.h>
31 #include <linux/fcntl.h>
32 #include <linux/swap.h>
33 #include <linux/string.h>
34 #include <linux/init.h>
35 #include <linux/sched/mm.h>
36 #include <linux/sched/coredump.h>
37 #include <linux/sched/signal.h>
38 #include <linux/sched/numa_balancing.h>
39 #include <linux/sched/task.h>
40 #include <linux/pagemap.h>
41 #include <linux/perf_event.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/key.h>
45 #include <linux/personality.h>
46 #include <linux/binfmts.h>
47 #include <linux/utsname.h>
48 #include <linux/pid_namespace.h>
49 #include <linux/module.h>
50 #include <linux/namei.h>
51 #include <linux/mount.h>
52 #include <linux/security.h>
53 #include <linux/syscalls.h>
54 #include <linux/tsacct_kern.h>
55 #include <linux/cn_proc.h>
56 #include <linux/audit.h>
57 #include <linux/tracehook.h>
58 #include <linux/kmod.h>
59 #include <linux/fsnotify.h>
60 #include <linux/fs_struct.h>
61 #include <linux/pipe_fs_i.h>
62 #include <linux/oom.h>
63 #include <linux/compat.h>
64 #include <linux/vmalloc.h>
65
66 #include <linux/uaccess.h>
67 #include <asm/mmu_context.h>
68 #include <asm/tlb.h>
69
70 #include <trace/events/task.h>
71 #include "internal.h"
72
73 #include <trace/events/sched.h>
74
75 int suid_dumpable = 0;
76
77 static LIST_HEAD(formats);
78 static DEFINE_RWLOCK(binfmt_lock);
79
80 void __register_binfmt(struct linux_binfmt * fmt, int insert)
81 {
82         BUG_ON(!fmt);
83         if (WARN_ON(!fmt->load_binary))
84                 return;
85         write_lock(&binfmt_lock);
86         insert ? list_add(&fmt->lh, &formats) :
87                  list_add_tail(&fmt->lh, &formats);
88         write_unlock(&binfmt_lock);
89 }
90
91 EXPORT_SYMBOL(__register_binfmt);
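
/*
 * Example (illustrative sketch, not part of this file): a handler is
 * normally registered from its module init code through the
 * register_binfmt()/insert_binfmt() wrappers around __register_binfmt().
 * The names example_format and load_example_binary below are made up
 * purely for illustration.
 *
 *	static struct linux_binfmt example_format = {
 *		.module      = THIS_MODULE,
 *		.load_binary = load_example_binary,
 *	};
 *
 *	static int __init init_example_binfmt(void)
 *	{
 *		register_binfmt(&example_format);
 *		return 0;
 *	}
 */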
92
93 void unregister_binfmt(struct linux_binfmt * fmt)
94 {
95         write_lock(&binfmt_lock);
96         list_del(&fmt->lh);
97         write_unlock(&binfmt_lock);
98 }
99
100 EXPORT_SYMBOL(unregister_binfmt);
101
102 static inline void put_binfmt(struct linux_binfmt * fmt)
103 {
104         module_put(fmt->module);
105 }
106
107 bool path_noexec(const struct path *path)
108 {
109         return (path->mnt->mnt_flags & MNT_NOEXEC) ||
110                (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
111 }
112
113 #ifdef CONFIG_USELIB
114 /*
115  * Note that a shared library must be both readable and executable for
116  * security reasons.
117  *
118  * Also note that the address to load at is taken from the file itself.
119  */
120 SYSCALL_DEFINE1(uselib, const char __user *, library)
121 {
122         struct linux_binfmt *fmt;
123         struct file *file;
124         struct filename *tmp = getname(library);
125         int error = PTR_ERR(tmp);
126         static const struct open_flags uselib_flags = {
127                 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
128                 .acc_mode = MAY_READ | MAY_EXEC,
129                 .intent = LOOKUP_OPEN,
130                 .lookup_flags = LOOKUP_FOLLOW,
131         };
132
133         if (IS_ERR(tmp))
134                 goto out;
135
136         file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
137         putname(tmp);
138         error = PTR_ERR(file);
139         if (IS_ERR(file))
140                 goto out;
141
142         error = -EINVAL;
143         if (!S_ISREG(file_inode(file)->i_mode))
144                 goto exit;
145
146         error = -EACCES;
147         if (path_noexec(&file->f_path))
148                 goto exit;
149
150         fsnotify_open(file);
151
152         error = -ENOEXEC;
153
154         read_lock(&binfmt_lock);
155         list_for_each_entry(fmt, &formats, lh) {
156                 if (!fmt->load_shlib)
157                         continue;
158                 if (!try_module_get(fmt->module))
159                         continue;
160                 read_unlock(&binfmt_lock);
161                 error = fmt->load_shlib(file);
162                 read_lock(&binfmt_lock);
163                 put_binfmt(fmt);
164                 if (error != -ENOEXEC)
165                         break;
166         }
167         read_unlock(&binfmt_lock);
168 exit:
169         fput(file);
170 out:
171         return error;
172 }
173 #endif /* #ifdef CONFIG_USELIB */
174
175 #ifdef CONFIG_MMU
176 /*
177  * The nascent bprm->mm is not visible until exec_mmap(), but it can
178  * use a lot of memory, so account these pages in current->mm temporarily
179  * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
180  * change the counter back via acct_arg_size(0).
181  */
182 static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
183 {
184         struct mm_struct *mm = current->mm;
185         long diff = (long)(pages - bprm->vma_pages);
186
187         if (!mm || !diff)
188                 return;
189
190         bprm->vma_pages = pages;
191         add_mm_counter(mm, MM_ANONPAGES, diff);
192 }
193
194 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
195                 int write)
196 {
197         struct page *page;
198         int ret;
199         unsigned int gup_flags = FOLL_FORCE;
200
201 #ifdef CONFIG_STACK_GROWSUP
202         if (write) {
203                 ret = expand_downwards(bprm->vma, pos);
204                 if (ret < 0)
205                         return NULL;
206         }
207 #endif
208
209         if (write)
210                 gup_flags |= FOLL_WRITE;
211
212         /*
213          * We are doing an exec().  'current' is the process
214          * doing the exec and bprm->mm is the new process's mm.
215          */
216         ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
217                         &page, NULL, NULL);
218         if (ret <= 0)
219                 return NULL;
220
221         if (write) {
222                 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
223                 unsigned long ptr_size, limit;
224
225                 /*
226                  * Since the stack will hold pointers to the strings, we
227                  * must account for them as well.
228                  *
229                  * The size calculation is the entire vma while each arg page is
230                  * built, so each time we get here it's calculating how far it
231                  * is currently (rather than each call being just the newly
232                  * added size from the arg page).  As a result, we need to
233                  * always add the entire size of the pointers, so that on the
234                  * last call to get_arg_page() we'll actually have the entire
235                  * correct size.
236                  */
237                 ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
238                 if (ptr_size > ULONG_MAX - size)
239                         goto fail;
240                 size += ptr_size;
241
242                 acct_arg_size(bprm, size / PAGE_SIZE);
243
244                 /*
245                  * We've historically supported up to 32 pages (ARG_MAX)
246                  * of argument strings even with small stacks
247                  */
248                 if (size <= ARG_MAX)
249                         return page;
250
251                 /*
252                  * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
253                  * (whichever is smaller) for the argv+env strings.
254                  * This ensures that:
255                  *  - the remaining binfmt code will not run out of stack space,
256                  *  - the program will have a reasonable amount of stack left
257                  *    to work from.
258                  */
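                /*
                 * Worked example (assuming the common _STK_LIM of 8 MiB
                 * and a default RLIMIT_STACK of 8 MiB): anything up to
                 * ARG_MAX (32 * 4k = 128 KiB) was already accepted above;
                 * beyond that the strings plus pointers may use at most
                 * min(8 MiB * 3/4, 8 MiB / 4) = min(6 MiB, 2 MiB) = 2 MiB.
                 */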
259                 limit = _STK_LIM / 4 * 3;
260                 limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
261                 if (size > limit)
262                         goto fail;
263         }
264
265         return page;
266
267 fail:
268         put_page(page);
269         return NULL;
270 }
271
272 static void put_arg_page(struct page *page)
273 {
274         put_page(page);
275 }
276
277 static void free_arg_pages(struct linux_binprm *bprm)
278 {
279 }
280
281 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
282                 struct page *page)
283 {
284         flush_cache_page(bprm->vma, pos, page_to_pfn(page));
285 }
286
287 static int __bprm_mm_init(struct linux_binprm *bprm)
288 {
289         int err;
290         struct vm_area_struct *vma = NULL;
291         struct mm_struct *mm = bprm->mm;
292
293         bprm->vma = vma = vm_area_alloc(mm);
294         if (!vma)
295                 return -ENOMEM;
296         vma_set_anonymous(vma);
297
298         if (down_write_killable(&mm->mmap_sem)) {
299                 err = -EINTR;
300                 goto err_free;
301         }
302
303         /*
304          * Place the stack at the largest stack address the architecture
305          * supports. Later, we'll move this to an appropriate place. We don't
306          * use STACK_TOP because that can depend on attributes which aren't
307          * configured yet.
308          */
309         BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
310         vma->vm_end = STACK_TOP_MAX;
311         vma->vm_start = vma->vm_end - PAGE_SIZE;
312         vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
313         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
314
315         err = insert_vm_struct(mm, vma);
316         if (err)
317                 goto err;
318
319         mm->stack_vm = mm->total_vm = 1;
320         arch_bprm_mm_init(mm, vma);
321         up_write(&mm->mmap_sem);
322         bprm->p = vma->vm_end - sizeof(void *);
323         return 0;
324 err:
325         up_write(&mm->mmap_sem);
326 err_free:
327         bprm->vma = NULL;
328         vm_area_free(vma);
329         return err;
330 }
331
332 static bool valid_arg_len(struct linux_binprm *bprm, long len)
333 {
334         return len <= MAX_ARG_STRLEN;
335 }
336
337 #else
338
339 static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
340 {
341 }
342
343 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
344                 int write)
345 {
346         struct page *page;
347
348         page = bprm->page[pos / PAGE_SIZE];
349         if (!page && write) {
350                 page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
351                 if (!page)
352                         return NULL;
353                 bprm->page[pos / PAGE_SIZE] = page;
354         }
355
356         return page;
357 }
358
359 static void put_arg_page(struct page *page)
360 {
361 }
362
363 static void free_arg_page(struct linux_binprm *bprm, int i)
364 {
365         if (bprm->page[i]) {
366                 __free_page(bprm->page[i]);
367                 bprm->page[i] = NULL;
368         }
369 }
370
371 static void free_arg_pages(struct linux_binprm *bprm)
372 {
373         int i;
374
375         for (i = 0; i < MAX_ARG_PAGES; i++)
376                 free_arg_page(bprm, i);
377 }
378
379 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
380                 struct page *page)
381 {
382 }
383
384 static int __bprm_mm_init(struct linux_binprm *bprm)
385 {
386         bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
387         return 0;
388 }
389
390 static bool valid_arg_len(struct linux_binprm *bprm, long len)
391 {
392         return len <= bprm->p;
393 }
394
395 #endif /* CONFIG_MMU */
396
397 /*
398  * Create a new mm_struct and populate it with a temporary stack
399  * vm_area_struct.  We don't have enough context at this point to set the stack
400  * flags, permissions, and offset, so we use temporary values.  We'll update
401  * them later in setup_arg_pages().
402  */
403 static int bprm_mm_init(struct linux_binprm *bprm)
404 {
405         int err;
406         struct mm_struct *mm = NULL;
407
408         bprm->mm = mm = mm_alloc();
409         err = -ENOMEM;
410         if (!mm)
411                 goto err;
412
413         /* Save current stack limit for all calculations made during exec. */
414         task_lock(current->group_leader);
415         bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
416         task_unlock(current->group_leader);
417
418         err = __bprm_mm_init(bprm);
419         if (err)
420                 goto err;
421
422         return 0;
423
424 err:
425         if (mm) {
426                 bprm->mm = NULL;
427                 mmdrop(mm);
428         }
429
430         return err;
431 }
432
433 struct user_arg_ptr {
434 #ifdef CONFIG_COMPAT
435         bool is_compat;
436 #endif
437         union {
438                 const char __user *const __user *native;
439 #ifdef CONFIG_COMPAT
440                 const compat_uptr_t __user *compat;
441 #endif
442         } ptr;
443 };
444
445 static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
446 {
447         const char __user *native;
448
449 #ifdef CONFIG_COMPAT
450         if (unlikely(argv.is_compat)) {
451                 compat_uptr_t compat;
452
453                 if (get_user(compat, argv.ptr.compat + nr))
454                         return ERR_PTR(-EFAULT);
455
456                 return compat_ptr(compat);
457         }
458 #endif
459
460         if (get_user(native, argv.ptr.native + nr))
461                 return ERR_PTR(-EFAULT);
462
463         return native;
464 }
465
466 /*
467  * count() counts the number of strings in array ARGV.
468  */
469 static int count(struct user_arg_ptr argv, int max)
470 {
471         int i = 0;
472
473         if (argv.ptr.native != NULL) {
474                 for (;;) {
475                         const char __user *p = get_user_arg_ptr(argv, i);
476
477                         if (!p)
478                                 break;
479
480                         if (IS_ERR(p))
481                                 return -EFAULT;
482
483                         if (i >= max)
484                                 return -E2BIG;
485                         ++i;
486
487                         if (fatal_signal_pending(current))
488                                 return -ERESTARTNOHAND;
489                         cond_resched();
490                 }
491         }
492         return i;
493 }
494
495 /*
496  * 'copy_strings()' copies argument/environment strings from the old
497  * process's memory to the new process's stack.  The call to get_user_pages()
498  * ensures the destination page is created and not swapped out.
499  */
500 static int copy_strings(int argc, struct user_arg_ptr argv,
501                         struct linux_binprm *bprm)
502 {
503         struct page *kmapped_page = NULL;
504         char *kaddr = NULL;
505         unsigned long kpos = 0;
506         int ret;
507
508         while (argc-- > 0) {
509                 const char __user *str;
510                 int len;
511                 unsigned long pos;
512
513                 ret = -EFAULT;
514                 str = get_user_arg_ptr(argv, argc);
515                 if (IS_ERR(str))
516                         goto out;
517
518                 len = strnlen_user(str, MAX_ARG_STRLEN);
519                 if (!len)
520                         goto out;
521
522                 ret = -E2BIG;
523                 if (!valid_arg_len(bprm, len))
524                         goto out;
525
526                 /* We're going to work our way backwards. */
527                 pos = bprm->p;
528                 str += len;
529                 bprm->p -= len;
530
531                 while (len > 0) {
532                         int offset, bytes_to_copy;
533
534                         if (fatal_signal_pending(current)) {
535                                 ret = -ERESTARTNOHAND;
536                                 goto out;
537                         }
538                         cond_resched();
539
540                         offset = pos % PAGE_SIZE;
541                         if (offset == 0)
542                                 offset = PAGE_SIZE;
543
544                         bytes_to_copy = offset;
545                         if (bytes_to_copy > len)
546                                 bytes_to_copy = len;
547
548                         offset -= bytes_to_copy;
549                         pos -= bytes_to_copy;
550                         str -= bytes_to_copy;
551                         len -= bytes_to_copy;
552
553                         if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
554                                 struct page *page;
555
556                                 page = get_arg_page(bprm, pos, 1);
557                                 if (!page) {
558                                         ret = -E2BIG;
559                                         goto out;
560                                 }
561
562                                 if (kmapped_page) {
563                                         flush_kernel_dcache_page(kmapped_page);
564                                         kunmap(kmapped_page);
565                                         put_arg_page(kmapped_page);
566                                 }
567                                 kmapped_page = page;
568                                 kaddr = kmap(kmapped_page);
569                                 kpos = pos & PAGE_MASK;
570                                 flush_arg_page(bprm, kpos, kmapped_page);
571                         }
572                         if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
573                                 ret = -EFAULT;
574                                 goto out;
575                         }
576                 }
577         }
578         ret = 0;
579 out:
580         if (kmapped_page) {
581                 flush_kernel_dcache_page(kmapped_page);
582                 kunmap(kmapped_page);
583                 put_arg_page(kmapped_page);
584         }
585         return ret;
586 }
587
588 /*
589  * Like copy_strings(), but gets argv and its values from kernel memory.
590  */
591 int copy_strings_kernel(int argc, const char *const *__argv,
592                         struct linux_binprm *bprm)
593 {
594         int r;
595         mm_segment_t oldfs = get_fs();
596         struct user_arg_ptr argv = {
597                 .ptr.native = (const char __user *const  __user *)__argv,
598         };
599
600         set_fs(KERNEL_DS);
601         r = copy_strings(argc, argv, bprm);
602         set_fs(oldfs);
603
604         return r;
605 }
606 EXPORT_SYMBOL(copy_strings_kernel);
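
/*
 * Usage sketch: the execve path later in this file pushes the
 * kernel-resident filename with
 *
 *	retval = copy_strings_kernel(1, &bprm->filename, bprm);
 *
 * while the user-supplied argv/envp arrays go through copy_strings().
 */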
607
608 #ifdef CONFIG_MMU
609
610 /*
611  * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
612  * the binfmt code determines where the new stack should reside, we shift it to
613  * its final location.  The process proceeds as follows:
614  *
615  * 1) Use shift to calculate the new vma endpoints.
616  * 2) Extend vma to cover both the old and new ranges.  This ensures the
617  *    arguments passed to subsequent functions are consistent.
618  * 3) Move vma's page tables to the new range.
619  * 4) Free up any cleared pgd range.
620  * 5) Shrink the vma to cover only the new range.
621  */
622 static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
623 {
624         struct mm_struct *mm = vma->vm_mm;
625         unsigned long old_start = vma->vm_start;
626         unsigned long old_end = vma->vm_end;
627         unsigned long length = old_end - old_start;
628         unsigned long new_start = old_start - shift;
629         unsigned long new_end = old_end - shift;
630         struct mmu_gather tlb;
631
632         BUG_ON(new_start > new_end);
633
634         /*
635          * ensure there are no vmas between where we want to go
636          * and where we are
637          */
638         if (vma != find_vma(mm, new_start))
639                 return -EFAULT;
640
641         /*
642          * cover the whole range: [new_start, old_end)
643          */
644         if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
645                 return -ENOMEM;
646
647         /*
648          * move the page tables downwards; on failure we rely on
649          * process cleanup to remove whatever mess we made.
650          */
651         if (length != move_page_tables(vma, old_start,
652                                        vma, new_start, length, false))
653                 return -ENOMEM;
654
655         lru_add_drain();
656         tlb_gather_mmu(&tlb, mm, old_start, old_end);
657         if (new_end > old_start) {
658                 /*
659                  * when the old and new regions overlap, clear from new_end.
660                  */
661                 free_pgd_range(&tlb, new_end, old_end, new_end,
662                         vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
663         } else {
664                 /*
665                  * otherwise, clean from old_start; this is done to not touch
666                  * the address space in [new_end, old_start), because some
667                  * architectures have constraints on va-space that make this
668                  * illegal (IA64) - for the others it's just a little faster.
669                  */
670                 free_pgd_range(&tlb, old_start, old_end, new_end,
671                         vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
672         }
673         tlb_finish_mmu(&tlb, old_start, old_end);
674
675         /*
676          * Shrink the vma to just the new range.  Always succeeds.
677          */
678         vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
679
680         return 0;
681 }
682
683 /*
684  * Finalizes the stack vm_area_struct. The flags and permissions are updated,
685  * the stack is optionally relocated, and some extra space is added.
686  */
687 int setup_arg_pages(struct linux_binprm *bprm,
688                     unsigned long stack_top,
689                     int executable_stack)
690 {
691         unsigned long ret;
692         unsigned long stack_shift;
693         struct mm_struct *mm = current->mm;
694         struct vm_area_struct *vma = bprm->vma;
695         struct vm_area_struct *prev = NULL;
696         unsigned long vm_flags;
697         unsigned long stack_base;
698         unsigned long stack_size;
699         unsigned long stack_expand;
700         unsigned long rlim_stack;
701
702 #ifdef CONFIG_STACK_GROWSUP
703         /* Limit stack size */
704         stack_base = bprm->rlim_stack.rlim_max;
705         if (stack_base > STACK_SIZE_MAX)
706                 stack_base = STACK_SIZE_MAX;
707
708         /* Add space for stack randomization. */
709         stack_base += (STACK_RND_MASK << PAGE_SHIFT);
710
711         /* Make sure we didn't let the argument array grow too large. */
712         if (vma->vm_end - vma->vm_start > stack_base)
713                 return -ENOMEM;
714
715         stack_base = PAGE_ALIGN(stack_top - stack_base);
716
717         stack_shift = vma->vm_start - stack_base;
718         mm->arg_start = bprm->p - stack_shift;
719         bprm->p = vma->vm_end - stack_shift;
720 #else
721         stack_top = arch_align_stack(stack_top);
722         stack_top = PAGE_ALIGN(stack_top);
723
724         if (unlikely(stack_top < mmap_min_addr) ||
725             unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
726                 return -ENOMEM;
727
728         stack_shift = vma->vm_end - stack_top;
729
730         bprm->p -= stack_shift;
731         mm->arg_start = bprm->p;
732 #endif
733
734         if (bprm->loader)
735                 bprm->loader -= stack_shift;
736         bprm->exec -= stack_shift;
737
738         if (down_write_killable(&mm->mmap_sem))
739                 return -EINTR;
740
741         vm_flags = VM_STACK_FLAGS;
742
743         /*
744          * Adjust stack execute permissions; explicitly enable for
745          * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
746          * (arch default) otherwise.
747          */
748         if (unlikely(executable_stack == EXSTACK_ENABLE_X))
749                 vm_flags |= VM_EXEC;
750         else if (executable_stack == EXSTACK_DISABLE_X)
751                 vm_flags &= ~VM_EXEC;
752         vm_flags |= mm->def_flags;
753         vm_flags |= VM_STACK_INCOMPLETE_SETUP;
754
755         ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
756                         vm_flags);
757         if (ret)
758                 goto out_unlock;
759         BUG_ON(prev != vma);
760
761         /* Move stack pages down in memory. */
762         if (stack_shift) {
763                 ret = shift_arg_pages(vma, stack_shift);
764                 if (ret)
765                         goto out_unlock;
766         }
767
768         /* mprotect_fixup is overkill to remove the temporary stack flags */
769         vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
770
771         stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
772         stack_size = vma->vm_end - vma->vm_start;
773         /*
774          * Align this down to a page boundary as expand_stack
775          * will align it up.
776          */
777         rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
778 #ifdef CONFIG_STACK_GROWSUP
779         if (stack_size + stack_expand > rlim_stack)
780                 stack_base = vma->vm_start + rlim_stack;
781         else
782                 stack_base = vma->vm_end + stack_expand;
783 #else
784         if (stack_size + stack_expand > rlim_stack)
785                 stack_base = vma->vm_end - rlim_stack;
786         else
787                 stack_base = vma->vm_start - stack_expand;
788 #endif
789         current->mm->start_stack = bprm->p;
790         ret = expand_stack(vma, stack_base);
791         if (ret)
792                 ret = -EFAULT;
793
794 out_unlock:
795         up_write(&mm->mmap_sem);
796         return ret;
797 }
798 EXPORT_SYMBOL(setup_arg_pages);
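
/*
 * Usage sketch (mirroring fs/binfmt_elf.c): a binfmt handler finalizes
 * the stack once it knows where the stack should live, e.g.
 *
 *	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
 *				 executable_stack);
 *	if (retval < 0)
 *		goto out_free_dentry;
 *
 * where executable_stack is EXSTACK_DEFAULT, EXSTACK_ENABLE_X or
 * EXSTACK_DISABLE_X, typically derived from the binary's PT_GNU_STACK
 * program header.
 */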
799
800 #else
801
802 /*
803  * Transfer the program arguments and environment from the holding pages
804  * onto the stack. The provided stack pointer is adjusted accordingly.
805  */
806 int transfer_args_to_stack(struct linux_binprm *bprm,
807                            unsigned long *sp_location)
808 {
809         unsigned long index, stop, sp;
810         int ret = 0;
811
812         stop = bprm->p >> PAGE_SHIFT;
813         sp = *sp_location;
814
815         for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
816                 unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
817                 char *src = kmap(bprm->page[index]) + offset;
818                 sp -= PAGE_SIZE - offset;
819                 if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
820                         ret = -EFAULT;
821                 kunmap(bprm->page[index]);
822                 if (ret)
823                         goto out;
824         }
825
826         *sp_location = sp;
827
828 out:
829         return ret;
830 }
831 EXPORT_SYMBOL(transfer_args_to_stack);
832
833 #endif /* CONFIG_MMU */
834
835 static struct file *do_open_execat(int fd, struct filename *name, int flags)
836 {
837         struct file *file;
838         int err;
839         struct open_flags open_exec_flags = {
840                 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
841                 .acc_mode = MAY_EXEC,
842                 .intent = LOOKUP_OPEN,
843                 .lookup_flags = LOOKUP_FOLLOW,
844         };
845
846         if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
847                 return ERR_PTR(-EINVAL);
848         if (flags & AT_SYMLINK_NOFOLLOW)
849                 open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
850         if (flags & AT_EMPTY_PATH)
851                 open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
852
853         file = do_filp_open(fd, name, &open_exec_flags);
854         if (IS_ERR(file))
855                 goto out;
856
857         err = -EACCES;
858         if (!S_ISREG(file_inode(file)->i_mode))
859                 goto exit;
860
861         if (path_noexec(&file->f_path))
862                 goto exit;
863
864         err = deny_write_access(file);
865         if (err)
866                 goto exit;
867
868         if (name->name[0] != '\0')
869                 fsnotify_open(file);
870
871 out:
872         return file;
873
874 exit:
875         fput(file);
876         return ERR_PTR(err);
877 }
878
879 struct file *open_exec(const char *name)
880 {
881         struct filename *filename = getname_kernel(name);
882         struct file *f = ERR_CAST(filename);
883
884         if (!IS_ERR(filename)) {
885                 f = do_open_execat(AT_FDCWD, filename, 0);
886                 putname(filename);
887         }
888         return f;
889 }
890 EXPORT_SYMBOL(open_exec);
891
892 int kernel_read_file(struct file *file, void **buf, loff_t *size,
893                      loff_t max_size, enum kernel_read_file_id id)
894 {
895         loff_t i_size, pos;
896         ssize_t bytes = 0;
897         int ret;
898
899         if (!S_ISREG(file_inode(file)->i_mode) || max_size < 0)
900                 return -EINVAL;
901
902         ret = deny_write_access(file);
903         if (ret)
904                 return ret;
905
906         ret = security_kernel_read_file(file, id);
907         if (ret)
908                 goto out;
909
910         i_size = i_size_read(file_inode(file));
911         if (max_size > 0 && i_size > max_size) {
912                 ret = -EFBIG;
913                 goto out;
914         }
915         if (i_size <= 0) {
916                 ret = -EINVAL;
917                 goto out;
918         }
919
920         if (id != READING_FIRMWARE_PREALLOC_BUFFER)
921                 *buf = vmalloc(i_size);
922         if (!*buf) {
923                 ret = -ENOMEM;
924                 goto out;
925         }
926
927         pos = 0;
928         while (pos < i_size) {
929                 bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
930                 if (bytes < 0) {
931                         ret = bytes;
932                         goto out;
933                 }
934
935                 if (bytes == 0)
936                         break;
937         }
938
939         if (pos != i_size) {
940                 ret = -EIO;
941                 goto out_free;
942         }
943
944         ret = security_kernel_post_read_file(file, *buf, i_size, id);
945         if (!ret)
946                 *size = pos;
947
948 out_free:
949         if (ret < 0) {
950                 if (id != READING_FIRMWARE_PREALLOC_BUFFER) {
951                         vfree(*buf);
952                         *buf = NULL;
953                 }
954         }
955
956 out:
957         allow_write_access(file);
958         return ret;
959 }
960 EXPORT_SYMBOL_GPL(kernel_read_file);
961
962 int kernel_read_file_from_path(const char *path, void **buf, loff_t *size,
963                                loff_t max_size, enum kernel_read_file_id id)
964 {
965         struct file *file;
966         int ret;
967
968         if (!path || !*path)
969                 return -EINVAL;
970
971         file = filp_open(path, O_RDONLY, 0);
972         if (IS_ERR(file))
973                 return PTR_ERR(file);
974
975         ret = kernel_read_file(file, buf, size, max_size, id);
976         fput(file);
977         return ret;
978 }
979 EXPORT_SYMBOL_GPL(kernel_read_file_from_path);
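
/*
 * Usage sketch (loosely modelled on the firmware loader's filesystem
 * fallback; the path and size cap below are illustrative only):
 *
 *	void *buf = NULL;
 *	loff_t size;
 *	int rc;
 *
 *	rc = kernel_read_file_from_path("/lib/firmware/example.bin",
 *					&buf, &size, INT_MAX,
 *					READING_FIRMWARE);
 *	if (!rc) {
 *		... use buf[0..size), then vfree(buf) ...
 *	}
 */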
980
981 int kernel_read_file_from_fd(int fd, void **buf, loff_t *size, loff_t max_size,
982                              enum kernel_read_file_id id)
983 {
984         struct fd f = fdget(fd);
985         int ret = -EBADF;
986
987         if (!f.file)
988                 goto out;
989
990         ret = kernel_read_file(f.file, buf, size, max_size, id);
991 out:
992         fdput(f);
993         return ret;
994 }
995 EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
996
997 ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
998 {
999         ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
1000         if (res > 0)
1001                 flush_icache_range(addr, addr + len);
1002         return res;
1003 }
1004 EXPORT_SYMBOL(read_code);
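
/*
 * Usage sketch (assumption: the style of the a.out/flat loaders, with
 * made-up variable names): text and data are pulled straight into an
 * already-mapped region,
 *
 *	res = read_code(bprm->file, text_addr, text_offset, text_len);
 *	if (res < 0)
 *		return res;
 */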
1005
1006 static int exec_mmap(struct mm_struct *mm)
1007 {
1008         struct task_struct *tsk;
1009         struct mm_struct *old_mm, *active_mm;
1010
1011         /* Notify parent that we're no longer interested in the old VM */
1012         tsk = current;
1013         old_mm = current->mm;
1014         mm_release(tsk, old_mm);
1015
1016         if (old_mm) {
1017                 sync_mm_rss(old_mm);
1018                 /*
1019                  * Make sure that if there is a core dump in progress
1020                  * for the old mm, we get out and die instead of going
1021                  * through with the exec.  We must hold mmap_sem around
1022                  * checking core_state and changing tsk->mm.
1023                  */
1024                 down_read(&old_mm->mmap_sem);
1025                 if (unlikely(old_mm->core_state)) {
1026                         up_read(&old_mm->mmap_sem);
1027                         return -EINTR;
1028                 }
1029         }
1030         task_lock(tsk);
1031         active_mm = tsk->active_mm;
1032         tsk->mm = mm;
1033         tsk->active_mm = mm;
1034         activate_mm(active_mm, mm);
1035         tsk->mm->vmacache_seqnum = 0;
1036         vmacache_flush(tsk);
1037         task_unlock(tsk);
1038         if (old_mm) {
1039                 up_read(&old_mm->mmap_sem);
1040                 BUG_ON(active_mm != old_mm);
1041                 setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
1042                 mm_update_next_owner(old_mm);
1043                 mmput(old_mm);
1044                 return 0;
1045         }
1046         mmdrop(active_mm);
1047         return 0;
1048 }
1049
1050 /*
1051  * This function makes sure the current process has its own signal table,
1052  * so that flush_signal_handlers can later reset the handlers without
1053  * disturbing other processes.  (Other processes might share the signal
1054  * table via the CLONE_SIGHAND option to clone().)
1055  */
1056 static int de_thread(struct task_struct *tsk)
1057 {
1058         struct signal_struct *sig = tsk->signal;
1059         struct sighand_struct *oldsighand = tsk->sighand;
1060         spinlock_t *lock = &oldsighand->siglock;
1061
1062         if (thread_group_empty(tsk))
1063                 goto no_thread_group;
1064
1065         /*
1066          * Kill all other threads in the thread group.
1067          */
1068         spin_lock_irq(lock);
1069         if (signal_group_exit(sig)) {
1070                 /*
1071                  * Another group action in progress, just
1072                  * return so that the signal is processed.
1073                  */
1074                 spin_unlock_irq(lock);
1075                 return -EAGAIN;
1076         }
1077
1078         sig->group_exit_task = tsk;
1079         sig->notify_count = zap_other_threads(tsk);
1080         if (!thread_group_leader(tsk))
1081                 sig->notify_count--;
1082
1083         while (sig->notify_count) {
1084                 __set_current_state(TASK_KILLABLE);
1085                 spin_unlock_irq(lock);
1086                 schedule();
1087                 if (unlikely(__fatal_signal_pending(tsk)))
1088                         goto killed;
1089                 spin_lock_irq(lock);
1090         }
1091         spin_unlock_irq(lock);
1092
1093         /*
1094          * At this point all other threads have exited, all we have to
1095          * do is to wait for the thread group leader to become inactive,
1096          * and to assume its PID:
1097          */
1098         if (!thread_group_leader(tsk)) {
1099                 struct task_struct *leader = tsk->group_leader;
1100
1101                 for (;;) {
1102                         cgroup_threadgroup_change_begin(tsk);
1103                         write_lock_irq(&tasklist_lock);
1104                         /*
1105                          * Do this under tasklist_lock to ensure that
1106                          * exit_notify() can't miss ->group_exit_task
1107                          */
1108                         sig->notify_count = -1;
1109                         if (likely(leader->exit_state))
1110                                 break;
1111                         __set_current_state(TASK_KILLABLE);
1112                         write_unlock_irq(&tasklist_lock);
1113                         cgroup_threadgroup_change_end(tsk);
1114                         schedule();
1115                         if (unlikely(__fatal_signal_pending(tsk)))
1116                                 goto killed;
1117                 }
1118
1119                 /*
1120                  * The only record we have of the real-time age of a
1121                  * process, regardless of execs it's done, is start_time.
1122                  * All the past CPU time is accumulated in signal_struct
1123                  * from sister threads now dead.  But in this non-leader
1124                  * exec, nothing survives from the original leader thread,
1125                  * whose birth marks the true age of this process now.
1126                  * When we take on its identity by switching to its PID, we
1127                  * also take its birthdate (always earlier than our own).
1128                  */
1129                 tsk->start_time = leader->start_time;
1130                 tsk->real_start_time = leader->real_start_time;
1131
1132                 BUG_ON(!same_thread_group(leader, tsk));
1133                 BUG_ON(has_group_leader_pid(tsk));
1134                 /*
1135                  * An exec() starts a new thread group with the
1136                  * TGID of the previous thread group. Rehash the
1137                  * two threads with a switched PID, and release
1138                  * the former thread group leader:
1139                  */
1140
1141                 /* Become a process group leader with the old leader's pid.
1142                  * The old leader becomes a thread of this thread group.
1143                  * Note: The old leader also uses this pid until release_task
1144                  *       is called.  Odd but simple and correct.
1145                  */
1146                 tsk->pid = leader->pid;
1147                 change_pid(tsk, PIDTYPE_PID, task_pid(leader));
1148                 transfer_pid(leader, tsk, PIDTYPE_PGID);
1149                 transfer_pid(leader, tsk, PIDTYPE_SID);
1150
1151                 list_replace_rcu(&leader->tasks, &tsk->tasks);
1152                 list_replace_init(&leader->sibling, &tsk->sibling);
1153
1154                 tsk->group_leader = tsk;
1155                 leader->group_leader = tsk;
1156
1157                 tsk->exit_signal = SIGCHLD;
1158                 leader->exit_signal = -1;
1159
1160                 BUG_ON(leader->exit_state != EXIT_ZOMBIE);
1161                 leader->exit_state = EXIT_DEAD;
1162
1163                 /*
1164                  * We are going to release_task()->ptrace_unlink() silently;
1165                  * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
1166                  * the tracer won't block again waiting for this thread.
1167                  */
1168                 if (unlikely(leader->ptrace))
1169                         __wake_up_parent(leader, leader->parent);
1170                 write_unlock_irq(&tasklist_lock);
1171                 cgroup_threadgroup_change_end(tsk);
1172
1173                 release_task(leader);
1174         }
1175
1176         sig->group_exit_task = NULL;
1177         sig->notify_count = 0;
1178
1179 no_thread_group:
1180         /* we have changed execution domain */
1181         tsk->exit_signal = SIGCHLD;
1182
1183 #ifdef CONFIG_POSIX_TIMERS
1184         exit_itimers(sig);
1185         flush_itimer_signals();
1186 #endif
1187
1188         if (atomic_read(&oldsighand->count) != 1) {
1189                 struct sighand_struct *newsighand;
1190                 /*
1191                  * This ->sighand is shared with the CLONE_SIGHAND
1192                  * This ->sighand is shared with a CLONE_SIGHAND
1193                  * but not CLONE_THREAD task; switch to a new one.
1194                 newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1195                 if (!newsighand)
1196                         return -ENOMEM;
1197
1198                 atomic_set(&newsighand->count, 1);
1199                 memcpy(newsighand->action, oldsighand->action,
1200                        sizeof(newsighand->action));
1201
1202                 write_lock_irq(&tasklist_lock);
1203                 spin_lock(&oldsighand->siglock);
1204                 rcu_assign_pointer(tsk->sighand, newsighand);
1205                 spin_unlock(&oldsighand->siglock);
1206                 write_unlock_irq(&tasklist_lock);
1207
1208                 __cleanup_sighand(oldsighand);
1209         }
1210
1211         BUG_ON(!thread_group_leader(tsk));
1212         return 0;
1213
1214 killed:
1215         /* protects against exit_notify() and __exit_signal() */
1216         read_lock(&tasklist_lock);
1217         sig->group_exit_task = NULL;
1218         sig->notify_count = 0;
1219         read_unlock(&tasklist_lock);
1220         return -EAGAIN;
1221 }
1222
1223 char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
1224 {
1225         task_lock(tsk);
1226         strncpy(buf, tsk->comm, buf_size);
1227         task_unlock(tsk);
1228         return buf;
1229 }
1230 EXPORT_SYMBOL_GPL(__get_task_comm);
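
/*
 * Callers normally use the get_task_comm() wrapper from <linux/sched.h>,
 * which supplies sizeof(buf) for them:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, current);
 *
 * (comm here is just an illustrative local buffer name.)
 */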
1231
1232 /*
1233  * These functions flush out all traces of the currently running executable
1234  * so that a new one can be started.
1235  */
1236
1237 void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
1238 {
1239         task_lock(tsk);
1240         trace_task_rename(tsk, buf);
1241         strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1242         task_unlock(tsk);
1243         perf_event_comm(tsk, exec);
1244 }
1245
1246 /*
1247  * Calling this is the point of no return. None of the failures will be
1248  * seen by userspace since either the process is already taking a fatal
1249  * signal (via de_thread() or coredump), or will have SEGV raised
1250  * (after exec_mmap()) by search_binary_handler() (see below).
1251  */
1252 int flush_old_exec(struct linux_binprm * bprm)
1253 {
1254         int retval;
1255
1256         /*
1257          * Make sure we have a private signal table and that
1258          * we are unassociated from the previous thread group.
1259          */
1260         retval = de_thread(current);
1261         if (retval)
1262                 goto out;
1263
1264         /*
1265          * Must be called _before_ exec_mmap() as bprm->mm is
1266          * not visible until then. This also enables the update
1267          * to be lockless.
1268          */
1269         set_mm_exe_file(bprm->mm, bprm->file);
1270
1271         /*
1272          * Release all of the old mmap stuff
1273          */
1274         acct_arg_size(bprm, 0);
1275         retval = exec_mmap(bprm->mm);
1276         if (retval)
1277                 goto out;
1278
1279         /*
1280          * After clearing bprm->mm (to mark that current is using the
1281          * prepared mm now), we have nothing left of the original
1282          * process. If anything from here on returns an error, the check
1283          * in search_binary_handler() will SEGV current.
1284          */
1285         bprm->mm = NULL;
1286
1287         set_fs(USER_DS);
1288         current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
1289                                         PF_NOFREEZE | PF_NO_SETAFFINITY);
1290         flush_thread();
1291         current->personality &= ~bprm->per_clear;
1292
1293         /*
1294          * We have to apply CLOEXEC before we change whether the process is
1295          * dumpable (in setup_new_exec) to avoid a race with a process in userspace
1296          * trying to access the should-be-closed file descriptors of a process
1297          * undergoing exec(2).
1298          */
1299         do_close_on_exec(current->files);
1300         return 0;
1301
1302 out:
1303         return retval;
1304 }
1305 EXPORT_SYMBOL(flush_old_exec);
1306
1307 void would_dump(struct linux_binprm *bprm, struct file *file)
1308 {
1309         struct inode *inode = file_inode(file);
1310         if (inode_permission(inode, MAY_READ) < 0) {
1311                 struct user_namespace *old, *user_ns;
1312                 bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1313
1314                 /* Ensure mm->user_ns contains the executable */
1315                 user_ns = old = bprm->mm->user_ns;
1316                 while ((user_ns != &init_user_ns) &&
1317                        !privileged_wrt_inode_uidgid(user_ns, inode))
1318                         user_ns = user_ns->parent;
1319
1320                 if (old != user_ns) {
1321                         bprm->mm->user_ns = get_user_ns(user_ns);
1322                         put_user_ns(old);
1323                 }
1324         }
1325 }
1326 EXPORT_SYMBOL(would_dump);
1327
1328 void setup_new_exec(struct linux_binprm * bprm)
1329 {
1330         /*
1331          * Once here, prepare_binprm() will not be called any more, so
1332          * the final state of setuid/setgid/fscaps can be merged into the
1333          * secureexec flag.
1334          */
1335         bprm->secureexec |= bprm->cap_elevated;
1336
1337         if (bprm->secureexec) {
1338                 /* Make sure parent cannot signal privileged process. */
1339                 current->pdeath_signal = 0;
1340
1341                 /*
1342                  * For secureexec, reset the stack limit to a sane default to
1343                  * avoid bad behavior from the prior rlimits. This has to
1344                  * happen before arch_pick_mmap_layout(), which examines
1345                  * RLIMIT_STACK, but after the point of no return to avoid
1346                  * needing to clean up the change on failure.
1347                  */
1348                 if (bprm->rlim_stack.rlim_cur > _STK_LIM)
1349                         bprm->rlim_stack.rlim_cur = _STK_LIM;
1350         }
1351
1352         arch_pick_mmap_layout(current->mm, &bprm->rlim_stack);
1353
1354         current->sas_ss_sp = current->sas_ss_size = 0;
1355
1356         /*
1357          * Figure out dumpability. Note that checking only current here
1358          * is wrong, but userspace depends on it. This should be testing
1359          * bprm->secureexec instead.
1360          */
1361         if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
1362             !(uid_eq(current_euid(), current_uid()) &&
1363               gid_eq(current_egid(), current_gid())))
1364                 set_dumpable(current->mm, suid_dumpable);
1365         else
1366                 set_dumpable(current->mm, SUID_DUMP_USER);
1367
1368         arch_setup_new_exec();
1369         perf_event_exec();
1370         __set_task_comm(current, kbasename(bprm->filename), true);
1371
1372         /* Set the new mm task size. We have to do that late because it may
1373          * depend on TIF_32BIT which is only updated in flush_thread() on
1374          * some architectures like powerpc.
1375          */
1376         current->mm->task_size = TASK_SIZE;
1377
1378         /* An exec changes our domain. We are no longer part of the thread
1379            group */
1380         current->self_exec_id++;
1381         flush_signal_handlers(current, 0);
1382 }
1383 EXPORT_SYMBOL(setup_new_exec);
1384
1385 /* Runs immediately before start_thread() takes over. */
1386 void finalize_exec(struct linux_binprm *bprm)
1387 {
1388         /* Store any stack rlimit changes before starting thread. */
1389         task_lock(current->group_leader);
1390         current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
1391         task_unlock(current->group_leader);
1392 }
1393 EXPORT_SYMBOL(finalize_exec);
1394
1395 /*
1396  * Prepare credentials and lock ->cred_guard_mutex.
1397  * install_exec_creds() commits the new creds and drops the lock.
1398  * Or, if exec fails before that, free_bprm() should release ->cred
1399  * and unlock.
1400  */
1401 int prepare_bprm_creds(struct linux_binprm *bprm)
1402 {
1403         if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
1404                 return -ERESTARTNOINTR;
1405
1406         bprm->cred = prepare_exec_creds();
1407         if (likely(bprm->cred))
1408                 return 0;
1409
1410         mutex_unlock(&current->signal->cred_guard_mutex);
1411         return -ENOMEM;
1412 }
1413
1414 static void free_bprm(struct linux_binprm *bprm)
1415 {
1416         free_arg_pages(bprm);
1417         if (bprm->cred) {
1418                 mutex_unlock(&current->signal->cred_guard_mutex);
1419                 abort_creds(bprm->cred);
1420         }
1421         if (bprm->file) {
1422                 allow_write_access(bprm->file);
1423                 fput(bprm->file);
1424         }
1425         /* If a binfmt changed the interp, free it. */
1426         if (bprm->interp != bprm->filename)
1427                 kfree(bprm->interp);
1428         kfree(bprm);
1429 }
1430
1431 int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
1432 {
1433         /* If a binfmt changed the interp, free it first. */
1434         if (bprm->interp != bprm->filename)
1435                 kfree(bprm->interp);
1436         bprm->interp = kstrdup(interp, GFP_KERNEL);
1437         if (!bprm->interp)
1438                 return -ENOMEM;
1439         return 0;
1440 }
1441 EXPORT_SYMBOL(bprm_change_interp);
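
/*
 * Usage sketch (following fs/binfmt_script.c): a handler that redirects
 * execution to an interpreter records the new path with
 *
 *	retval = bprm_change_interp(i_name, bprm);
 *	if (retval < 0)
 *		return retval;
 *
 * where i_name is the interpreter path parsed from the #! line.
 */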
1442
1443 /*
1444  * install the new credentials for this executable
1445  */
1446 void install_exec_creds(struct linux_binprm *bprm)
1447 {
1448         security_bprm_committing_creds(bprm);
1449
1450         commit_creds(bprm->cred);
1451         bprm->cred = NULL;
1452
1453         /*
1454          * Disable monitoring for regular users
1455          * when executing setuid binaries. Must
1456          * wait until new credentials are committed
1457          * by commit_creds() above
1458          */
1459         if (get_dumpable(current->mm) != SUID_DUMP_USER)
1460                 perf_event_exit_task(current);
1461         /*
1462          * cred_guard_mutex must be held at least to this point to prevent
1463          * ptrace_attach() from altering our determination of the task's
1464          * credentials; any time after this it may be unlocked.
1465          */
1466         security_bprm_committed_creds(bprm);
1467         mutex_unlock(&current->signal->cred_guard_mutex);
1468 }
1469 EXPORT_SYMBOL(install_exec_creds);
1470
1471 /*
1472  * determine how safe it is to execute the proposed program
1473  * - the caller must hold ->cred_guard_mutex to protect against
1474  *   PTRACE_ATTACH or seccomp thread-sync
1475  */
1476 static void check_unsafe_exec(struct linux_binprm *bprm)
1477 {
1478         struct task_struct *p = current, *t;
1479         unsigned n_fs;
1480
1481         if (p->ptrace)
1482                 bprm->unsafe |= LSM_UNSAFE_PTRACE;
1483
1484         /*
1485          * This isn't strictly necessary, but it makes it harder for LSMs to
1486          * mess up.
1487          */
1488         if (task_no_new_privs(current))
1489                 bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
1490
1491         t = p;
1492         n_fs = 1;
1493         spin_lock(&p->fs->lock);
1494         rcu_read_lock();
1495         while_each_thread(p, t) {
1496                 if (t->fs == p->fs)
1497                         n_fs++;
1498         }
1499         rcu_read_unlock();
1500
1501         if (p->fs->users > n_fs)
1502                 bprm->unsafe |= LSM_UNSAFE_SHARE;
1503         else
1504                 p->fs->in_exec = 1;
1505         spin_unlock(&p->fs->lock);
1506 }
1507
1508 static void bprm_fill_uid(struct linux_binprm *bprm)
1509 {
1510         struct inode *inode;
1511         unsigned int mode;
1512         kuid_t uid;
1513         kgid_t gid;
1514
1515         /*
1516          * Since this can be called multiple times (via prepare_binprm),
1517          * we must clear any previous work done when setting set[ug]id
1518          * bits from any earlier bprm->file uses (for example when run
1519          * first for a setuid script then again for its interpreter).
1520          */
1521         bprm->cred->euid = current_euid();
1522         bprm->cred->egid = current_egid();
1523
1524         if (!mnt_may_suid(bprm->file->f_path.mnt))
1525                 return;
1526
1527         if (task_no_new_privs(current))
1528                 return;
1529
1530         inode = bprm->file->f_path.dentry->d_inode;
1531         mode = READ_ONCE(inode->i_mode);
1532         if (!(mode & (S_ISUID|S_ISGID)))
1533                 return;
1534
1535         /* Be careful if suid/sgid is set */
1536         inode_lock(inode);
1537
1538         /* Reload mode/uid/gid atomically now that the lock is held */
1539         mode = inode->i_mode;
1540         uid = inode->i_uid;
1541         gid = inode->i_gid;
1542         inode_unlock(inode);
1543
1544         /* We ignore suid/sgid if there are no mappings for them in the ns */
1545         if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
1546                  !kgid_has_mapping(bprm->cred->user_ns, gid))
1547                 return;
1548
1549         if (mode & S_ISUID) {
1550                 bprm->per_clear |= PER_CLEAR_ON_SETID;
1551                 bprm->cred->euid = uid;
1552         }
1553
1554         if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1555                 bprm->per_clear |= PER_CLEAR_ON_SETID;
1556                 bprm->cred->egid = gid;
1557         }
1558 }
1559
1560 /*
1561  * Fill the binprm structure from the inode.
1562  * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
1563  *
1564  * This may be called multiple times for binary chains (scripts for example).
1565  */
1566 int prepare_binprm(struct linux_binprm *bprm)
1567 {
1568         int retval;
1569         loff_t pos = 0;
1570
1571         bprm_fill_uid(bprm);
1572
1573         /* fill in binprm security blob */
1574         retval = security_bprm_set_creds(bprm);
1575         if (retval)
1576                 return retval;
1577         bprm->called_set_creds = 1;
1578
1579         memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1580         return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
1581 }
1582
1583 EXPORT_SYMBOL(prepare_binprm);
1584
1585 /*
1586  * Arguments are '\0' separated strings found at the location bprm->p
1587  * points to; chop off the first by relocating bprm->p to right after
1588  * the first '\0' encountered.
1589  */
1590 int remove_arg_zero(struct linux_binprm *bprm)
1591 {
1592         int ret = 0;
1593         unsigned long offset;
1594         char *kaddr;
1595         struct page *page;
1596
1597         if (!bprm->argc)
1598                 return 0;
1599
1600         do {
1601                 offset = bprm->p & ~PAGE_MASK;
1602                 page = get_arg_page(bprm, bprm->p, 0);
1603                 if (!page) {
1604                         ret = -EFAULT;
1605                         goto out;
1606                 }
1607                 kaddr = kmap_atomic(page);
1608
1609                 for (; offset < PAGE_SIZE && kaddr[offset];
1610                                 offset++, bprm->p++)
1611                         ;
1612
1613                 kunmap_atomic(kaddr);
1614                 put_arg_page(page);
1615         } while (offset == PAGE_SIZE);
1616
1617         bprm->p++;
1618         bprm->argc--;
1619         ret = 0;
1620
1621 out:
1622         return ret;
1623 }
1624 EXPORT_SYMBOL(remove_arg_zero);
1625
1626 #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1627 /*
1628  * Cycle through the list of binary format handlers until one recognizes the image.
1629  */
1630 int search_binary_handler(struct linux_binprm *bprm)
1631 {
1632         bool need_retry = IS_ENABLED(CONFIG_MODULES);
1633         struct linux_binfmt *fmt;
1634         int retval;
1635
1636         /* Limit nested binfmt rewrites (e.g. script interpreters) before failing hard with -ELOOP. */
1637         if (bprm->recursion_depth > 5)
1638                 return -ELOOP;
1639
1640         retval = security_bprm_check(bprm);
1641         if (retval)
1642                 return retval;
1643
1644         retval = -ENOENT;
1645  retry:
1646         read_lock(&binfmt_lock);
1647         list_for_each_entry(fmt, &formats, lh) {
1648                 if (!try_module_get(fmt->module))
1649                         continue;
1650                 read_unlock(&binfmt_lock);
1651                 bprm->recursion_depth++;
1652                 retval = fmt->load_binary(bprm);
1653                 read_lock(&binfmt_lock);
1654                 put_binfmt(fmt);
1655                 bprm->recursion_depth--;
1656                 if (retval < 0 && !bprm->mm) {
1657                         /* we got to flush_old_exec() and failed after it */
1658                         read_unlock(&binfmt_lock);
1659                         force_sigsegv(SIGSEGV, current);
1660                         return retval;
1661                 }
1662                 if (retval != -ENOEXEC || !bprm->file) {
1663                         read_unlock(&binfmt_lock);
1664                         return retval;
1665                 }
1666         }
1667         read_unlock(&binfmt_lock);
1668
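        /*
         * No registered handler claimed the image.  Unless the header looks
         * like printable text, try once to load a binfmt module keyed on the
         * two-byte value at offset 2 of the header, then retry the list.
         */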
1669         if (need_retry) {
1670                 if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
1671                     printable(bprm->buf[2]) && printable(bprm->buf[3]))
1672                         return retval;
1673                 if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
1674                         return retval;
1675                 need_retry = false;
1676                 goto retry;
1677         }
1678
1679         return retval;
1680 }
1681 EXPORT_SYMBOL(search_binary_handler);
1682
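/*
 * Run the binfmt handlers for bprm and, on success, emit the audit,
 * tracepoint, ptrace and proc-connector notifications for the exec,
 * using the pid values captured before load_binary could change them.
 */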
1683 static int exec_binprm(struct linux_binprm *bprm)
1684 {
1685         pid_t old_pid, old_vpid;
1686         int ret;
1687
1688         /* Need to fetch pid before load_binary changes it */
1689         old_pid = current->pid;
1690         rcu_read_lock();
1691         old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
1692         rcu_read_unlock();
1693
1694         ret = search_binary_handler(bprm);
1695         if (ret >= 0) {
1696                 audit_bprm(bprm);
1697                 trace_sched_process_exec(current, old_pid, bprm);
1698                 ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1699                 proc_exec_connector(current);
1700         }
1701
1702         return ret;
1703 }
1704
1705 /*
1706  * Execute a new program: the common worker behind sys_execve() and sys_execveat().
1707  */
1708 static int __do_execve_file(int fd, struct filename *filename,
1709                             struct user_arg_ptr argv,
1710                             struct user_arg_ptr envp,
1711                             int flags, struct file *file)
1712 {
1713         char *pathbuf = NULL;
1714         struct linux_binprm *bprm;
1715         struct files_struct *displaced;
1716         int retval;
1717
1718         if (IS_ERR(filename))
1719                 return PTR_ERR(filename);
1720
1721         /*
1722          * We move the actual failure in case of RLIMIT_NPROC excess from
1723          * set*uid() to execve() because too many poorly written programs
1724          * don't check setuid() return code.  Here we additionally recheck
1725          * whether NPROC limit is still exceeded.
1726          */
1727         if ((current->flags & PF_NPROC_EXCEEDED) &&
1728             atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
1729                 retval = -EAGAIN;
1730                 goto out_ret;
1731         }
1732
1733         /* We're below the limit (still or again), so we don't want to make
1734          * further execve() calls fail. */
1735         current->flags &= ~PF_NPROC_EXCEEDED;
1736
1737         retval = unshare_files(&displaced);
1738         if (retval)
1739                 goto out_ret;
1740
1741         retval = -ENOMEM;
1742         bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1743         if (!bprm)
1744                 goto out_files;
1745
1746         retval = prepare_bprm_creds(bprm);
1747         if (retval)
1748                 goto out_free;
1749
1750         check_unsafe_exec(bprm);
1751         current->in_execve = 1;
1752
1753         if (!file)
1754                 file = do_open_execat(fd, filename, flags);
1755         retval = PTR_ERR(file);
1756         if (IS_ERR(file))
1757                 goto out_unmark;
1758
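        /*
         * exec is a natural point to rebalance: the task's memory and cache
         * footprint is at its smallest, so let the scheduler pick a CPU now.
         */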
1759         sched_exec();
1760
1761         bprm->file = file;
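        /*
         * Choose the name recorded as bprm->filename: the caller-supplied
         * path for absolute or AT_FDCWD-relative lookups, or a
         * /dev/fd/<fd>-based path when executing relative to (or directly
         * from) a file descriptor.
         */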
1762         if (!filename) {
1763                 bprm->filename = "none";
1764         } else if (fd == AT_FDCWD || filename->name[0] == '/') {
1765                 bprm->filename = filename->name;
1766         } else {
1767                 if (filename->name[0] == '\0')
1768                         pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
1769                 else
1770                         pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
1771                                             fd, filename->name);
1772                 if (!pathbuf) {
1773                         retval = -ENOMEM;
1774                         goto out_unmark;
1775                 }
1776                 /*
1777                  * Record that a name derived from an O_CLOEXEC fd will be
1778                  * inaccessible after exec. Relies on having exclusive access to
1779                  * current->files (due to unshare_files above).
1780                  */
1781                 if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
1782                         bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
1783                 bprm->filename = pathbuf;
1784         }
1785         bprm->interp = bprm->filename;
1786
1787         retval = bprm_mm_init(bprm);
1788         if (retval)
1789                 goto out_unmark;
1790
1791         bprm->argc = count(argv, MAX_ARG_STRINGS);
1792         if ((retval = bprm->argc) < 0)
1793                 goto out;
1794
1795         bprm->envc = count(envp, MAX_ARG_STRINGS);
1796         if ((retval = bprm->envc) < 0)
1797                 goto out;
1798
1799         retval = prepare_binprm(bprm);
1800         if (retval < 0)
1801                 goto out;
1802
1803         retval = copy_strings_kernel(1, &bprm->filename, bprm);
1804         if (retval < 0)
1805                 goto out;
1806
1807         bprm->exec = bprm->p;
1808         retval = copy_strings(bprm->envc, envp, bprm);
1809         if (retval < 0)
1810                 goto out;
1811
1812         retval = copy_strings(bprm->argc, argv, bprm);
1813         if (retval < 0)
1814                 goto out;
1815
1816         would_dump(bprm, bprm->file);
1817
1818         retval = exec_binprm(bprm);
1819         if (retval < 0)
1820                 goto out;
1821
1822         /* execve succeeded */
1823         current->fs->in_exec = 0;
1824         current->in_execve = 0;
1825         membarrier_execve(current);
1826         rseq_execve(current);
1827         acct_update_integrals(current);
1828         task_numa_free(current);
1829         free_bprm(bprm);
1830         kfree(pathbuf);
1831         if (filename)
1832                 putname(filename);
1833         if (displaced)
1834                 put_files_struct(displaced);
1835         return retval;
1836
1837 out:
1838         if (bprm->mm) {
1839                 acct_arg_size(bprm, 0);
1840                 mmput(bprm->mm);
1841         }
1842
1843 out_unmark:
1844         current->fs->in_exec = 0;
1845         current->in_execve = 0;
1846
1847 out_free:
1848         free_bprm(bprm);
1849         kfree(pathbuf);
1850
1851 out_files:
1852         if (displaced)
1853                 reset_files_struct(displaced);
1854 out_ret:
1855         if (filename)
1856                 putname(filename);
1857         return retval;
1858 }
1859
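/*
 * Common entry for the execve()/execveat() paths: resolve everything from
 * the supplied fd/filename pair, with no pre-opened file.
 */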
1860 static int do_execveat_common(int fd, struct filename *filename,
1861                               struct user_arg_ptr argv,
1862                               struct user_arg_ptr envp,
1863                               int flags)
1864 {
1865         return __do_execve_file(fd, filename, argv, envp, flags, NULL);
1866 }
1867
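/*
 * Exec an already-open file.  The argv/envp pointers are wrapped as native
 * (non-compat) argument arrays; since no filename is supplied, the
 * program's name is reported as "none".
 */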
1868 int do_execve_file(struct file *file, void *__argv, void *__envp)
1869 {
1870         struct user_arg_ptr argv = { .ptr.native = __argv };
1871         struct user_arg_ptr envp = { .ptr.native = __envp };
1872
1873         return __do_execve_file(AT_FDCWD, NULL, argv, envp, 0, file);
1874 }
1875
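/*
 * Thin wrappers that package the user-space argv/envp pointers for the
 * common execve()/execveat() path.
 */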
1876 int do_execve(struct filename *filename,
1877         const char __user *const __user *__argv,
1878         const char __user *const __user *__envp)
1879 {
1880         struct user_arg_ptr argv = { .ptr.native = __argv };
1881         struct user_arg_ptr envp = { .ptr.native = __envp };
1882         return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1883 }
1884
1885 int do_execveat(int fd, struct filename *filename,
1886                 const char __user *const __user *__argv,
1887                 const char __user *const __user *__envp,
1888                 int flags)
1889 {
1890         struct user_arg_ptr argv = { .ptr.native = __argv };
1891         struct user_arg_ptr envp = { .ptr.native = __envp };
1892
1893         return do_execveat_common(fd, filename, argv, envp, flags);
1894 }
1895
1896 #ifdef CONFIG_COMPAT
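/*
 * Compat variants: argv/envp are arrays of 32-bit user pointers, so mark
 * them is_compat for the argument-copying code to decode correctly.
 */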
1897 static int compat_do_execve(struct filename *filename,
1898         const compat_uptr_t __user *__argv,
1899         const compat_uptr_t __user *__envp)
1900 {
1901         struct user_arg_ptr argv = {
1902                 .is_compat = true,
1903                 .ptr.compat = __argv,
1904         };
1905         struct user_arg_ptr envp = {
1906                 .is_compat = true,
1907                 .ptr.compat = __envp,
1908         };
1909         return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1910 }
1911
1912 static int compat_do_execveat(int fd, struct filename *filename,
1913                               const compat_uptr_t __user *__argv,
1914                               const compat_uptr_t __user *__envp,
1915                               int flags)
1916 {
1917         struct user_arg_ptr argv = {
1918                 .is_compat = true,
1919                 .ptr.compat = __argv,
1920         };
1921         struct user_arg_ptr envp = {
1922                 .is_compat = true,
1923                 .ptr.compat = __envp,
1924         };
1925         return do_execveat_common(fd, filename, argv, envp, flags);
1926 }
1927 #endif
1928
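/*
 * Record which binary-format handler owns current->mm, taking a module
 * reference on the new handler and dropping the old one's.
 */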
1929 void set_binfmt(struct linux_binfmt *new)
1930 {
1931         struct mm_struct *mm = current->mm;
1932
1933         if (mm->binfmt)
1934                 module_put(mm->binfmt->module);
1935
1936         mm->binfmt = new;
1937         if (new)
1938                 __module_get(new->module);
1939 }
1940 EXPORT_SYMBOL(set_binfmt);
1941
1942 /*
1943  * set_dumpable stores the three-valued SUID_DUMP_* setting into mm->flags.
1944  */
1945 void set_dumpable(struct mm_struct *mm, int value)
1946 {
1947         unsigned long old, new;
1948
1949         if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
1950                 return;
1951
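        /*
         * Update only the dumpable bits; use a cmpxchg loop so concurrent
         * updates to other mm->flags bits are not lost.
         */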
1952         do {
1953                 old = READ_ONCE(mm->flags);
1954                 new = (old & ~MMF_DUMPABLE_MASK) | value;
1955         } while (cmpxchg(&mm->flags, old, new) != old);
1956 }
1957
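/*
 * execve(2): look up the filename and hand off to the common exec path.
 */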
1958 SYSCALL_DEFINE3(execve,
1959                 const char __user *, filename,
1960                 const char __user *const __user *, argv,
1961                 const char __user *const __user *, envp)
1962 {
1963         return do_execve(getname(filename), argv, envp);
1964 }
1965
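/*
 * execveat(2): AT_EMPTY_PATH permits an empty filename so that the fd
 * itself can be executed, hence LOOKUP_EMPTY for the name lookup.
 */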
1966 SYSCALL_DEFINE5(execveat,
1967                 int, fd, const char __user *, filename,
1968                 const char __user *const __user *, argv,
1969                 const char __user *const __user *, envp,
1970                 int, flags)
1971 {
1972         int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
1973
1974         return do_execveat(fd,
1975                            getname_flags(filename, lookup_flags, NULL),
1976                            argv, envp, flags);
1977 }
1978
1979 #ifdef CONFIG_COMPAT
1980 COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
1981         const compat_uptr_t __user *, argv,
1982         const compat_uptr_t __user *, envp)
1983 {
1984         return compat_do_execve(getname(filename), argv, envp);
1985 }
1986
1987 COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
1988                        const char __user *, filename,
1989                        const compat_uptr_t __user *, argv,
1990                        const compat_uptr_t __user *, envp,
1991                        int,  flags)
1992 {
1993         int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
1994
1995         return compat_do_execveat(fd,
1996                                   getname_flags(filename, lookup_flags, NULL),
1997                                   argv, envp, flags);
1998 }
1999 #endif