// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * #!-checking implemented by tytso.
 *
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale. Use mmap instead,
 * current->executable is only used by the procfs. This allows a dispatch
 * table to check for several different types of binary formats. We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */
#include <linux/kernel_read_file.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>
static int bprm_creds_from_file(struct linux_binprm *bprm);

int suid_dumpable = 0;

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);
void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
        BUG_ON(!fmt);
        if (WARN_ON(!fmt->load_binary))
                return;
        write_lock(&binfmt_lock);
        insert ? list_add(&fmt->lh, &formats) :
                 list_add_tail(&fmt->lh, &formats);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
        write_lock(&binfmt_lock);
        list_del(&fmt->lh);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);

static inline void put_binfmt(struct linux_binfmt * fmt)
{
        module_put(fmt->module);
}
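/*
 * Illustrative sketch (not part of the original file): a minimal binfmt
 * module embeds its hooks in a struct linux_binfmt and registers it at
 * init time via the register_binfmt() wrapper, which calls
 * __register_binfmt(fmt, 0). The names ex_format/ex_load_binary and the
 * "EXFM" magic are hypothetical.
 *
 *      static int ex_load_binary(struct linux_binprm *bprm)
 *      {
 *              if (memcmp(bprm->buf, "EXFM", 4) != 0)
 *                      return -ENOEXEC;        // let the next handler try
 *              // ... map segments, set up the stack, start_thread() ...
 *      }
 *
 *      static struct linux_binfmt ex_format = {
 *              .module      = THIS_MODULE,
 *              .load_binary = ex_load_binary,
 *      };
 *
 *      static int __init ex_init(void)
 *      {
 *              register_binfmt(&ex_format);
 *              return 0;
 *      }
 */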
bool path_noexec(const struct path *path)
{
        return (path->mnt->mnt_flags & MNT_NOEXEC) ||
               (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}
#ifdef CONFIG_USELIB
/*
 * Note that a shared library must be both readable and executable due to
 * security reasons.
 *
 * Also note that the address to load at is taken from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
        struct linux_binfmt *fmt;
        struct file *file;
        struct filename *tmp = getname(library);
        int error = PTR_ERR(tmp);
        static const struct open_flags uselib_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_READ | MAY_EXEC,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
        };

        if (IS_ERR(tmp))
                goto out;

        file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
        putname(tmp);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto out;

        /*
         * may_open() has already checked for this, so it should be
         * impossible to trip now. But we need to be extra cautious
         * and check again at the very end too.
         */
        error = -EACCES;
        if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
                         path_noexec(&file->f_path)))
                goto exit;

        fsnotify_open(file);

        error = -ENOEXEC;

        read_lock(&binfmt_lock);
        list_for_each_entry(fmt, &formats, lh) {
                if (!fmt->load_shlib)
                        continue;
                if (!try_module_get(fmt->module))
                        continue;
                read_unlock(&binfmt_lock);
                error = fmt->load_shlib(file);
                read_lock(&binfmt_lock);
                put_binfmt(fmt);
                if (error != -ENOEXEC)
                        break;
        }
        read_unlock(&binfmt_lock);
exit:
        fput(file);
out:
        return error;
}
#endif /* #ifdef CONFIG_USELIB */
#ifdef CONFIG_MMU

/*
 * The nascent bprm->mm is not visible until exec_mmap(), but it can
 * use a lot of memory, so account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
        struct mm_struct *mm = current->mm;
        long diff = (long)(pages - bprm->vma_pages);

        if (!mm || !diff)
                return;

        bprm->vma_pages = pages;
        add_mm_counter(mm, MM_ANONPAGES, diff);
}
static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;
        int ret;
        unsigned int gup_flags = FOLL_FORCE;

#ifdef CONFIG_STACK_GROWSUP
        if (write) {
                ret = expand_downwards(bprm->vma, pos);
                if (ret < 0)
                        return NULL;
        }
#endif

        if (write)
                gup_flags |= FOLL_WRITE;

        /*
         * We are doing an exec().  'current' is the process
         * doing the exec and bprm->mm is the new process's mm.
         */
        ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
                        &page, NULL, NULL);
        if (ret <= 0)
                return NULL;

        if (write)
                acct_arg_size(bprm, vma_pages(bprm->vma));

        return page;
}

static void put_arg_page(struct page *page)
{
        put_page(page);
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
        flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}
static int __bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = bprm->mm;

        bprm->vma = vma = vm_area_alloc(mm);
        if (!vma)
                return -ENOMEM;
        vma_set_anonymous(vma);

        if (mmap_write_lock_killable(mm)) {
                err = -EINTR;
                goto err_free;
        }

        /*
         * Place the stack at the largest stack address the architecture
         * supports. Later, we'll move this to an appropriate place. We don't
         * use STACK_TOP because that can depend on attributes which aren't
         * configured yet.
         */
        BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
        vma->vm_end = STACK_TOP_MAX;
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

        err = insert_vm_struct(mm, vma);
        if (err)
                goto err;

        mm->stack_vm = mm->total_vm = 1;
        mmap_write_unlock(mm);
        bprm->p = vma->vm_end - sizeof(void *);
        return 0;
err:
        mmap_write_unlock(mm);
err_free:
        bprm->vma = NULL;
        vm_area_free(vma);
        return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= MAX_ARG_STRLEN;
}
#else

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;

        page = bprm->page[pos / PAGE_SIZE];
        if (!page && write) {
                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
                if (!page)
                        return NULL;
                bprm->page[pos / PAGE_SIZE] = page;
        }

        return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
        if (bprm->page[i]) {
                __free_page(bprm->page[i]);
                bprm->page[i] = NULL;
        }
}

static void free_arg_pages(struct linux_binprm *bprm)
{
        int i;

        for (i = 0; i < MAX_ARG_PAGES; i++)
                free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
        return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= bprm->p;
}

#endif /* CONFIG_MMU */
/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct. We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values. We'll update
 * them later in setup_arg_pages().
 */
static int bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct mm_struct *mm = NULL;

        bprm->mm = mm = mm_alloc();
        err = -ENOMEM;
        if (!mm)
                goto err;

        /* Save current stack limit for all calculations made during exec. */
        task_lock(current->group_leader);
        bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
        task_unlock(current->group_leader);

        err = __bprm_mm_init(bprm);
        if (err)
                goto err;

        return 0;

err:
        if (mm) {
                bprm->mm = NULL;
                mmdrop(mm);
        }

        return err;
}
struct user_arg_ptr {
#ifdef CONFIG_COMPAT
        bool is_compat;
#endif
        union {
                const char __user *const __user *native;
#ifdef CONFIG_COMPAT
                const compat_uptr_t __user *compat;
#endif
        } ptr;
};

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
        const char __user *native;

#ifdef CONFIG_COMPAT
        if (unlikely(argv.is_compat)) {
                compat_uptr_t compat;

                if (get_user(compat, argv.ptr.compat + nr))
                        return ERR_PTR(-EFAULT);

                return compat_ptr(compat);
        }
#endif

        if (get_user(native, argv.ptr.native + nr))
                return ERR_PTR(-EFAULT);

        return native;
}
/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
        int i = 0;

        if (argv.ptr.native != NULL) {
                for (;;) {
                        const char __user *p = get_user_arg_ptr(argv, i);

                        if (!p)
                                break;

                        if (IS_ERR(p))
                                return -EFAULT;

                        if (i >= max)
                                return -E2BIG;
                        ++i;

                        if (fatal_signal_pending(current))
                                return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
        return i;
}

static int count_strings_kernel(const char *const *argv)
{
        int i;

        if (!argv)
                return 0;

        for (i = 0; argv[i]; ++i) {
                if (i >= MAX_ARG_STRINGS)
                        return -E2BIG;
                if (fatal_signal_pending(current))
                        return -ERESTARTNOHAND;
                cond_resched();
        }
        return i;
}
static int bprm_stack_limits(struct linux_binprm *bprm)
{
        unsigned long limit, ptr_size;

        /*
         * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
         * (whichever is smaller) for the argv+env strings.
         * This ensures that:
         *  - the remaining binfmt code will not run out of stack space,
         *  - the program will have a reasonable amount of stack left
         *    to work with.
         */
        limit = _STK_LIM / 4 * 3;
        limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
        /*
         * We've historically supported up to 32 pages (ARG_MAX)
         * of argument strings even with small stacks
         */
        limit = max_t(unsigned long, limit, ARG_MAX);
        /*
         * We must account for the size of all the argv and envp pointers to
         * the argv and envp strings, since they will also take up space in
         * the stack. They aren't stored until much later when we can't
         * signal to the parent that the child has run out of stack space.
         * Instead, calculate it here so it's possible to fail gracefully.
         */
        ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
        if (limit <= ptr_size)
                return -E2BIG;
        limit -= ptr_size;

        bprm->argmin = bprm->p - limit;
        return 0;
}
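/*
 * Worked example (illustrative, assuming 4 KiB pages and the common 8 MiB
 * RLIMIT_STACK default): limit starts at min(_STK_LIM / 4 * 3 = 6 MiB,
 * 8 MiB / 4 = 2 MiB) = 2 MiB, which already exceeds ARG_MAX (128 KiB), so
 * 2 MiB stands. On a 64-bit machine an exec with argc + envc = 100 then
 * reserves 100 * 8 = 800 bytes for the pointer arrays, leaving
 * bprm->argmin = bprm->p - (2 MiB - 800).
 */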
/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack. The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
                        struct linux_binprm *bprm)
{
        struct page *kmapped_page = NULL;
        char *kaddr = NULL;
        unsigned long kpos = 0;
        int ret;

        while (argc-- > 0) {
                const char __user *str;
                int len;
                unsigned long pos;

                ret = -EFAULT;
                str = get_user_arg_ptr(argv, argc);
                if (IS_ERR(str))
                        goto out;

                len = strnlen_user(str, MAX_ARG_STRLEN);
                if (!len)
                        goto out;

                ret = -E2BIG;
                if (!valid_arg_len(bprm, len))
                        goto out;

                /* We're going to work our way backwards. */
                pos = bprm->p;
                str += len;
                bprm->p -= len;
#ifdef CONFIG_MMU
                if (bprm->p < bprm->argmin)
                        goto out;
#endif

                while (len > 0) {
                        int offset, bytes_to_copy;

                        if (fatal_signal_pending(current)) {
                                ret = -ERESTARTNOHAND;
                                goto out;
                        }
                        cond_resched();

                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;

                        bytes_to_copy = offset;
                        if (bytes_to_copy > len)
                                bytes_to_copy = len;

                        offset -= bytes_to_copy;
                        pos -= bytes_to_copy;
                        str -= bytes_to_copy;
                        len -= bytes_to_copy;

                        if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
                                struct page *page;

                                page = get_arg_page(bprm, pos, 1);
                                if (!page) {
                                        ret = -E2BIG;
                                        goto out;
                                }

                                if (kmapped_page) {
                                        flush_kernel_dcache_page(kmapped_page);
                                        kunmap(kmapped_page);
                                        put_arg_page(kmapped_page);
                                }
                                kmapped_page = page;
                                kaddr = kmap(kmapped_page);
                                kpos = pos & PAGE_MASK;
                                flush_arg_page(bprm, kpos, kmapped_page);
                        }
                        if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
                                ret = -EFAULT;
                                goto out;
                        }
                }
        }
        ret = 0;
out:
        if (kmapped_page) {
                flush_kernel_dcache_page(kmapped_page);
                kunmap(kmapped_page);
                put_arg_page(kmapped_page);
        }
        return ret;
}
/*
 * Copy an argument/environment string from the kernel to the process's stack.
 */
int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
{
        int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
        unsigned long pos = bprm->p;

        if (len == 0)
                return -EFAULT;
        if (!valid_arg_len(bprm, len))
                return -E2BIG;

        /* We're going to work our way backwards. */
        arg += len;
        bprm->p -= len;
        if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
                return -E2BIG;

        while (len > 0) {
                unsigned int bytes_to_copy = min_t(unsigned int, len,
                                min_not_zero(offset_in_page(pos), PAGE_SIZE));
                struct page *page;
                char *kaddr;

                pos -= bytes_to_copy;
                arg -= bytes_to_copy;
                len -= bytes_to_copy;

                page = get_arg_page(bprm, pos, 1);
                if (!page)
                        return -E2BIG;
                kaddr = kmap_atomic(page);
                flush_arg_page(bprm, pos & PAGE_MASK, page);
                memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
                flush_kernel_dcache_page(page);
                kunmap_atomic(kaddr);
                put_arg_page(page);
        }

        return 0;
}
EXPORT_SYMBOL(copy_string_kernel);
static int copy_strings_kernel(int argc, const char *const *argv,
                               struct linux_binprm *bprm)
{
        while (argc-- > 0) {
                int ret = copy_string_kernel(argv[argc], bprm);
                if (ret < 0)
                        return ret;
                if (fatal_signal_pending(current))
                        return -ERESTARTNOHAND;
                cond_resched();
        }
        return 0;
}
#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX. Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location. The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges. This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 *
 * A worked example with concrete addresses follows below.
 */
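/*
 * Worked example (illustrative numbers, not from the original source): with
 * a one-page stack vma at [0x7fffffffe000, 0x7ffffffff000) and
 * shift = 0x1000000 (16 MiB), the vma is first grown to cover
 * [0x7ffeffffe000, 0x7ffffffff000), the page tables for the single page are
 * moved down by 16 MiB, the now-empty upper part of the range is freed, and
 * the vma is finally shrunk to [0x7ffeffffe000, 0x7ffeffff f000) minus the
 * shift, i.e. [0x7ffeffffe000, 0x7ffefffff000).
 */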
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long old_start = vma->vm_start;
        unsigned long old_end = vma->vm_end;
        unsigned long length = old_end - old_start;
        unsigned long new_start = old_start - shift;
        unsigned long new_end = old_end - shift;
        struct mmu_gather tlb;

        BUG_ON(new_start > new_end);

        /*
         * ensure there are no vmas between where we want to go
         * and where we are
         */
        if (vma != find_vma(mm, new_start))
                return -EFAULT;

        /*
         * cover the whole range: [new_start, old_end)
         */
        if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
                return -ENOMEM;

        /*
         * move the page tables downwards, on failure we rely on
         * process cleanup to remove whatever mess we made.
         */
        if (length != move_page_tables(vma, old_start,
                                       vma, new_start, length, false))
                return -ENOMEM;

        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, old_start, old_end);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap clear from new_end.
                 */
                free_pgd_range(&tlb, new_end, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to not touch
                 * the address space in [new_end, old_start); some architectures
                 * have constraints on va-space that make this illegal (IA64) -
                 * for the others it's just a little faster.
                 */
                free_pgd_range(&tlb, old_start, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        }
        tlb_finish_mmu(&tlb, old_start, old_end);

        /*
         * Shrink the vma to just the new range.  Always succeeds.
         */
        vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

        return 0;
}
/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
                    unsigned long stack_top,
                    int executable_stack)
{
        unsigned long ret;
        unsigned long stack_shift;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = bprm->vma;
        struct vm_area_struct *prev = NULL;
        unsigned long vm_flags;
        unsigned long stack_base;
        unsigned long stack_size;
        unsigned long stack_expand;
        unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
        /* Limit stack size */
        stack_base = bprm->rlim_stack.rlim_max;
        if (stack_base > STACK_SIZE_MAX)
                stack_base = STACK_SIZE_MAX;

        /* Add space for stack randomization. */
        stack_base += (STACK_RND_MASK << PAGE_SHIFT);

        /* Make sure we didn't let the argument array grow too large. */
        if (vma->vm_end - vma->vm_start > stack_base)
                return -ENOMEM;

        stack_base = PAGE_ALIGN(stack_top - stack_base);

        stack_shift = vma->vm_start - stack_base;
        mm->arg_start = bprm->p - stack_shift;
        bprm->p = vma->vm_end - stack_shift;
#else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);

        if (unlikely(stack_top < mmap_min_addr) ||
            unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
                return -ENOMEM;

        stack_shift = vma->vm_end - stack_top;

        bprm->p -= stack_shift;
        mm->arg_start = bprm->p;
#endif

        if (bprm->loader)
                bprm->loader -= stack_shift;
        bprm->exec -= stack_shift;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        vm_flags = VM_STACK_FLAGS;

        /*
         * Adjust stack execute permissions; explicitly enable for
         * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
         * (arch default) otherwise.
         */
        if (unlikely(executable_stack == EXSTACK_ENABLE_X))
                vm_flags |= VM_EXEC;
        else if (executable_stack == EXSTACK_DISABLE_X)
                vm_flags &= ~VM_EXEC;
        vm_flags |= mm->def_flags;
        vm_flags |= VM_STACK_INCOMPLETE_SETUP;

        ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
                        vm_flags);
        if (ret)
                goto out_unlock;
        BUG_ON(prev != vma);

        if (unlikely(vm_flags & VM_EXEC)) {
                pr_warn_once("process '%pD4' started with executable stack\n",
                             bprm->file);
        }

        /* Move stack pages down in memory. */
        if (stack_shift) {
                ret = shift_arg_pages(vma, stack_shift);
                if (ret)
                        goto out_unlock;
        }

        /* mprotect_fixup is overkill to remove the temporary stack flags */
        vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

        stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
        stack_size = vma->vm_end - vma->vm_start;
        /*
         * Align this down to a page boundary as expand_stack
         * will align it up.
         */
        rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_start + rlim_stack;
        else
                stack_base = vma->vm_end + stack_expand;
#else
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_end - rlim_stack;
        else
                stack_base = vma->vm_start - stack_expand;
#endif
        current->mm->start_stack = bprm->p;
        ret = expand_stack(vma, stack_base);
        if (ret)
                ret = -EFAULT;

out_unlock:
        mmap_write_unlock(mm);
        return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
#else

/*
 * Transfer the program arguments and environment from the holding pages
 * onto the stack. The provided stack pointer is adjusted accordingly.
 */
int transfer_args_to_stack(struct linux_binprm *bprm,
                           unsigned long *sp_location)
{
        unsigned long index, stop, sp;
        int ret = 0;

        stop = bprm->p >> PAGE_SHIFT;
        sp = *sp_location;

        for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
                unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
                char *src = kmap(bprm->page[index]) + offset;
                sp -= PAGE_SIZE - offset;
                if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
                        ret = -EFAULT;
                kunmap(bprm->page[index]);
                if (ret)
                        goto out;
        }

        *sp_location = sp;

out:
        return ret;
}
EXPORT_SYMBOL(transfer_args_to_stack);

#endif /* CONFIG_MMU */
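/*
 * Illustrative use (assumption: this mirrors the pattern a no-MMU loader
 * such as binfmt_flat follows when consuming the helper; the variable names
 * here are hypothetical):
 *
 *      unsigned long sp = stack_top;   // highest usable stack address
 *      ret = transfer_args_to_stack(bprm, &sp);
 *      if (ret < 0)
 *              return ret;
 *      // sp now points just below the copied argv/envp block
 */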
static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
        struct file *file;
        int err;
        struct open_flags open_exec_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_EXEC,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
        };

        if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
                return ERR_PTR(-EINVAL);
        if (flags & AT_SYMLINK_NOFOLLOW)
                open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
        if (flags & AT_EMPTY_PATH)
                open_exec_flags.lookup_flags |= LOOKUP_EMPTY;

        file = do_filp_open(fd, name, &open_exec_flags);
        if (IS_ERR(file))
                goto out;

        /*
         * may_open() has already checked for this, so it should be
         * impossible to trip now. But we need to be extra cautious
         * and check again at the very end too.
         */
        err = -EACCES;
        if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
                         path_noexec(&file->f_path)))
                goto exit;

        err = deny_write_access(file);
        if (err)
                goto exit;

        if (name->name[0] != '\0')
                fsnotify_open(file);

out:
        return file;

exit:
        fput(file);
        return ERR_PTR(err);
}
struct file *open_exec(const char *name)
{
        struct filename *filename = getname_kernel(name);
        struct file *f = ERR_CAST(filename);

        if (!IS_ERR(filename)) {
                f = do_open_execat(AT_FDCWD, filename, 0);
                putname(filename);
        }
        return f;
}
EXPORT_SYMBOL(open_exec);
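/*
 * Illustrative use (assumption: this is the pattern binfmt handlers such as
 * the ELF loader follow when opening an interpreter by path):
 *
 *      struct file *interpreter = open_exec("/lib64/ld-linux-x86-64.so.2");
 *      if (IS_ERR(interpreter))
 *              return PTR_ERR(interpreter);
 *      // ... read the interpreter's headers, fput() it when done ...
 */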
#if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \
    defined(CONFIG_BINFMT_ELF_FDPIC)
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
        ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
        if (res > 0)
                flush_icache_user_range(addr, addr + len);
        return res;
}
EXPORT_SYMBOL(read_code);
#endif
/*
 * Maps the mm_struct mm into the current task struct.
 * On success, this function returns with the mutex
 * exec_update_mutex locked.
 */
static int exec_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk;
        struct mm_struct *old_mm, *active_mm;
        int ret;

        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
        old_mm = current->mm;
        exec_mm_release(tsk, old_mm);
        if (old_mm)
                sync_mm_rss(old_mm);

        ret = mutex_lock_killable(&tsk->signal->exec_update_mutex);
        if (ret)
                return ret;

        if (old_mm) {
                /*
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
                 * through with the exec. We must hold mmap_lock around
                 * checking core_state and changing tsk->mm.
                 */
                mmap_read_lock(old_mm);
                if (unlikely(old_mm->core_state)) {
                        mmap_read_unlock(old_mm);
                        mutex_unlock(&tsk->signal->exec_update_mutex);
                        return -EINTR;
                }
        }

        task_lock(tsk);
        membarrier_exec_mmap(mm);

        local_irq_disable();
        active_mm = tsk->active_mm;
        tsk->active_mm = mm;
        tsk->mm = mm;
        /*
         * This prevents preemption while active_mm is being loaded and
         * it and mm are being updated, which could cause problems for
         * lazy tlb mm refcounting when these are updated by context
         * switches. Not all architectures can handle irqs off over
         * activate_mm yet.
         */
        if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
                local_irq_enable();
        activate_mm(active_mm, mm);
        if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
                local_irq_enable();
        tsk->mm->vmacache_seqnum = 0;
        vmacache_flush(tsk);
        task_unlock(tsk);
        if (old_mm) {
                mmap_read_unlock(old_mm);
                BUG_ON(active_mm != old_mm);
                setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
                mm_update_next_owner(old_mm);
                mmput(old_mm);
                return 0;
        }
        mmdrop(active_mm);
        return 0;
}
static int de_thread(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *oldsighand = tsk->sighand;
        spinlock_t *lock = &oldsighand->siglock;

        if (thread_group_empty(tsk))
                goto no_thread_group;

        /*
         * Kill all other threads in the thread group.
         */
        spin_lock_irq(lock);
        if (signal_group_exit(sig)) {
                /*
                 * Another group action in progress, just
                 * return so that the signal is processed.
                 */
                spin_unlock_irq(lock);
                return -EAGAIN;
        }

        sig->group_exit_task = tsk;
        sig->notify_count = zap_other_threads(tsk);
        if (!thread_group_leader(tsk))
                sig->notify_count--;

        while (sig->notify_count) {
                __set_current_state(TASK_KILLABLE);
                spin_unlock_irq(lock);
                schedule();
                if (__fatal_signal_pending(tsk))
                        goto killed;
                spin_lock_irq(lock);
        }
        spin_unlock_irq(lock);

        /*
         * At this point all other threads have exited, all we have to
         * do is to wait for the thread group leader to become inactive,
         * and to assume its PID:
         */
        if (!thread_group_leader(tsk)) {
                struct task_struct *leader = tsk->group_leader;

                for (;;) {
                        cgroup_threadgroup_change_begin(tsk);
                        write_lock_irq(&tasklist_lock);
                        /*
                         * Do this under tasklist_lock to ensure that
                         * exit_notify() can't miss ->group_exit_task
                         */
                        sig->notify_count = -1;
                        if (likely(leader->exit_state))
                                break;
                        __set_current_state(TASK_KILLABLE);
                        write_unlock_irq(&tasklist_lock);
                        cgroup_threadgroup_change_end(tsk);
                        schedule();
                        if (__fatal_signal_pending(tsk))
                                goto killed;
                }

                /*
                 * The only record we have of the real-time age of a
                 * process, regardless of execs it's done, is start_time.
                 * All the past CPU time is accumulated in signal_struct
                 * from sister threads now dead. But in this non-leader
                 * exec, nothing survives from the original leader thread,
                 * whose birth marks the true age of this process now.
                 * When we take on its identity by switching to its PID, we
                 * also take its birthdate (always earlier than our own).
                 */
                tsk->start_time = leader->start_time;
                tsk->start_boottime = leader->start_boottime;

                BUG_ON(!same_thread_group(leader, tsk));
                /*
                 * An exec() starts a new thread group with the
                 * TGID of the previous thread group. Rehash the
                 * two threads with a switched PID, and release
                 * the former thread group leader:
                 */

                /* Become a process group leader with the old leader's pid.
                 * The old leader becomes a thread of this thread group.
                 */
                exchange_tids(tsk, leader);
                transfer_pid(leader, tsk, PIDTYPE_TGID);
                transfer_pid(leader, tsk, PIDTYPE_PGID);
                transfer_pid(leader, tsk, PIDTYPE_SID);

                list_replace_rcu(&leader->tasks, &tsk->tasks);
                list_replace_init(&leader->sibling, &tsk->sibling);

                tsk->group_leader = tsk;
                leader->group_leader = tsk;

                tsk->exit_signal = SIGCHLD;
                leader->exit_signal = -1;

                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
                leader->exit_state = EXIT_DEAD;

                /*
                 * We are going to release_task()->ptrace_unlink() silently,
                 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
                 * the tracer won't block again waiting for this thread.
                 */
                if (unlikely(leader->ptrace))
                        __wake_up_parent(leader, leader->parent);
                write_unlock_irq(&tasklist_lock);
                cgroup_threadgroup_change_end(tsk);

                release_task(leader);
        }

        sig->group_exit_task = NULL;
        sig->notify_count = 0;

no_thread_group:
        /* we have changed execution domain */
        tsk->exit_signal = SIGCHLD;

        BUG_ON(!thread_group_leader(tsk));
        return 0;

killed:
        /* protects against exit_notify() and __exit_signal() */
        read_lock(&tasklist_lock);
        sig->group_exit_task = NULL;
        sig->notify_count = 0;
        read_unlock(&tasklist_lock);
        return -EAGAIN;
}
/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes. (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int unshare_sighand(struct task_struct *me)
{
        struct sighand_struct *oldsighand = me->sighand;

        if (refcount_read(&oldsighand->count) != 1) {
                struct sighand_struct *newsighand;
                /*
                 * This ->sighand is shared with the CLONE_SIGHAND
                 * but not CLONE_THREAD task, switch to the new one.
                 */
                newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
                if (!newsighand)
                        return -ENOMEM;

                refcount_set(&newsighand->count, 1);
                memcpy(newsighand->action, oldsighand->action,
                       sizeof(newsighand->action));

                write_lock_irq(&tasklist_lock);
                spin_lock(&oldsighand->siglock);
                rcu_assign_pointer(me->sighand, newsighand);
                spin_unlock(&oldsighand->siglock);
                write_unlock_irq(&tasklist_lock);

                __cleanup_sighand(oldsighand);
        }
        return 0;
}
char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
        task_lock(tsk);
        strncpy(buf, tsk->comm, buf_size);
        task_unlock(tsk);
        return buf;
}
EXPORT_SYMBOL_GPL(__get_task_comm);
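/*
 * Illustrative use (assumption: callers normally go through the
 * get_task_comm() wrapper in linux/sched.h, which checks the buffer size
 * at build time):
 *
 *      char comm[TASK_COMM_LEN];
 *      get_task_comm(comm, current);
 *      pr_debug("executing on behalf of %s\n", comm);
 */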
/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started
 */

void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
        task_lock(tsk);
        trace_task_rename(tsk, buf);
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
        perf_event_comm(tsk, exec);
}
/*
 * Calling this is the point of no return. None of the failures will be
 * seen by userspace since either the process is already taking a fatal
 * signal (via de_thread() or coredump), or will have SEGV raised
 * (after exec_mmap()) by search_binary_handler (see below).
 */
int begin_new_exec(struct linux_binprm * bprm)
{
        struct task_struct *me = current;
        int retval;

        /* Once we are committed compute the creds */
        retval = bprm_creds_from_file(bprm);
        if (retval)
                return retval;

        /*
         * Ensure all future errors are fatal.
         */
        bprm->point_of_no_return = true;

        /*
         * Make this the only thread in the thread group.
         */
        retval = de_thread(me);
        if (retval)
                goto out;

        /*
         * Must be called _before_ exec_mmap() as bprm->mm is
         * not visible until then. This also enables the update
         * to be lockless.
         */
        set_mm_exe_file(bprm->mm, bprm->file);

        /* If the binary is not readable then enforce mm->dumpable=0 */
        would_dump(bprm, bprm->file);
        if (bprm->have_execfd)
                would_dump(bprm, bprm->executable);

        /*
         * Release all of the old mmap stuff
         */
        acct_arg_size(bprm, 0);
        retval = exec_mmap(bprm->mm);
        if (retval)
                goto out;

        bprm->mm = NULL;

#ifdef CONFIG_POSIX_TIMERS
        exit_itimers(me->signal);
        flush_itimer_signals();
#endif

        /*
         * Make the signal table private.
         */
        retval = unshare_sighand(me);
        if (retval)
                goto out_unlock;

        /*
         * Ensure that the uaccess routines can actually operate on userspace
         * pointers:
         */
        force_uaccess_begin();

        me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
                       PF_NOFREEZE | PF_NO_SETAFFINITY);
        flush_thread();
        me->personality &= ~bprm->per_clear;

        /*
         * We have to apply CLOEXEC before we change whether the process is
         * dumpable (in setup_new_exec) to avoid a race with a process in userspace
         * trying to access the should-be-closed file descriptors of a process
         * undergoing exec(2).
         */
        do_close_on_exec(me->files);

        if (bprm->secureexec) {
                /* Make sure parent cannot signal privileged process. */
                me->pdeath_signal = 0;

                /*
                 * For secureexec, reset the stack limit to sane default to
                 * avoid bad behavior from the prior rlimits. This has to
                 * happen before arch_pick_mmap_layout(), which examines
                 * RLIMIT_STACK, but after the point of no return to avoid
                 * needing to clean up the change on failure.
                 */
                if (bprm->rlim_stack.rlim_cur > _STK_LIM)
                        bprm->rlim_stack.rlim_cur = _STK_LIM;
        }

        me->sas_ss_sp = me->sas_ss_size = 0;

        /*
         * Figure out dumpability. Note that this checking only of current
         * is wrong, but userspace depends on it. This should be testing
         * bprm->secureexec instead.
         */
        if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
            !(uid_eq(current_euid(), current_uid()) &&
              gid_eq(current_egid(), current_gid())))
                set_dumpable(current->mm, suid_dumpable);
        else
                set_dumpable(current->mm, SUID_DUMP_USER);

        perf_event_exec();
        __set_task_comm(me, kbasename(bprm->filename), true);

        /* An exec changes our domain. We are no longer part of the thread
           group */
        WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
        flush_signal_handlers(me, 0);

        /*
         * install the new credentials for this executable
         */
        security_bprm_committing_creds(bprm);

        commit_creds(bprm->cred);
        bprm->cred = NULL;

        /*
         * Disable monitoring for regular users
         * when executing setuid binaries. Must
         * wait until new credentials are committed
         * by commit_creds() above
         */
        if (get_dumpable(me->mm) != SUID_DUMP_USER)
                perf_event_exit_task(me);
        /*
         * cred_guard_mutex must be held at least to this point to prevent
         * ptrace_attach() from altering our determination of the task's
         * credentials; any time after this it may be unlocked.
         */
        security_bprm_committed_creds(bprm);

        /* Pass the opened binary to the interpreter. */
        if (bprm->have_execfd) {
                retval = get_unused_fd_flags(0);
                if (retval < 0)
                        goto out_unlock;
                fd_install(retval, bprm->executable);
                bprm->executable = NULL;
                bprm->execfd = retval;
        }
        return 0;

out_unlock:
        mutex_unlock(&me->signal->exec_update_mutex);
out:
        return retval;
}
EXPORT_SYMBOL(begin_new_exec);
void would_dump(struct linux_binprm *bprm, struct file *file)
{
        struct inode *inode = file_inode(file);
        if (inode_permission(inode, MAY_READ) < 0) {
                struct user_namespace *old, *user_ns;
                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

                /* Ensure mm->user_ns contains the executable */
                user_ns = old = bprm->mm->user_ns;
                while ((user_ns != &init_user_ns) &&
                       !privileged_wrt_inode_uidgid(user_ns, inode))
                        user_ns = user_ns->parent;

                if (old != user_ns) {
                        bprm->mm->user_ns = get_user_ns(user_ns);
                        put_user_ns(old);
                }
        }
}
EXPORT_SYMBOL(would_dump);
void setup_new_exec(struct linux_binprm * bprm)
{
        /* Setup things that can depend upon the personality */
        struct task_struct *me = current;

        arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);

        arch_setup_new_exec();

        /* Set the new mm task size. We have to do that late because it may
         * depend on TIF_32BIT which is only updated in flush_thread() on
         * some architectures like powerpc.
         */
        me->mm->task_size = TASK_SIZE;
        mutex_unlock(&me->signal->exec_update_mutex);
        mutex_unlock(&me->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(setup_new_exec);

/* Runs immediately before start_thread() takes over. */
void finalize_exec(struct linux_binprm *bprm)
{
        /* Store any stack rlimit changes before starting thread. */
        task_lock(current->group_leader);
        current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
        task_unlock(current->group_leader);
}
EXPORT_SYMBOL(finalize_exec);
/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * setup_new_exec() commits the new creds and drops the lock.
 * Or, if exec fails before, free_bprm() should release ->cred
 * and unlock.
 */
static int prepare_bprm_creds(struct linux_binprm *bprm)
{
        if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
                return -ERESTARTNOINTR;

        bprm->cred = prepare_exec_creds();
        if (likely(bprm->cred))
                return 0;

        mutex_unlock(&current->signal->cred_guard_mutex);
        return -ENOMEM;
}

static void free_bprm(struct linux_binprm *bprm)
{
        if (bprm->mm) {
                acct_arg_size(bprm, 0);
                mmput(bprm->mm);
        }
        free_arg_pages(bprm);
        if (bprm->cred) {
                mutex_unlock(&current->signal->cred_guard_mutex);
                abort_creds(bprm->cred);
        }
        if (bprm->file) {
                allow_write_access(bprm->file);
                fput(bprm->file);
        }
        if (bprm->executable)
                fput(bprm->executable);
        /* If a binfmt changed the interp, free it. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        kfree(bprm->fdpath);
        kfree(bprm);
}
static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
{
        struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
        int retval = -ENOMEM;
        if (!bprm)
                goto out;

        if (fd == AT_FDCWD || filename->name[0] == '/') {
                bprm->filename = filename->name;
        } else {
                if (filename->name[0] == '\0')
                        bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
                else
                        bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
                                                 fd, filename->name);
                if (!bprm->fdpath)
                        goto out_free;

                bprm->filename = bprm->fdpath;
        }
        bprm->interp = bprm->filename;

        retval = bprm_mm_init(bprm);
        if (retval)
                goto out_free;
        return bprm;

out_free:
        free_bprm(bprm);
out:
        return ERR_PTR(retval);
}
int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
{
        /* If a binfmt changed the interp, free it first. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        bprm->interp = kstrdup(interp, GFP_KERNEL);
        if (!bprm->interp)
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(bprm_change_interp);
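/*
 * Illustrative use (assumption: this mirrors how binfmt_script redirects
 * the exec at a "#!" line; i_name here stands for the parsed interpreter
 * path):
 *
 *      retval = bprm_change_interp(i_name, bprm);
 *      if (retval < 0)
 *              return retval;
 *      // the next search_binary_handler() pass then loads i_name
 */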
/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH or seccomp thread-sync
 */
static void check_unsafe_exec(struct linux_binprm *bprm)
{
        struct task_struct *p = current, *t;
        unsigned n_fs;

        if (p->ptrace)
                bprm->unsafe |= LSM_UNSAFE_PTRACE;

        /*
         * This isn't strictly necessary, but it makes it harder for LSMs to
         * mess up.
         */
        if (task_no_new_privs(current))
                bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

        t = p;
        n_fs = 1;
        spin_lock(&p->fs->lock);
        rcu_read_lock();
        while_each_thread(p, t) {
                if (t->fs == p->fs)
                        n_fs++;
        }
        rcu_read_unlock();

        if (p->fs->users > n_fs)
                bprm->unsafe |= LSM_UNSAFE_SHARE;
        else
                p->fs->in_exec = 1;
        spin_unlock(&p->fs->lock);
}
static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
{
        /* Handle suid and sgid on files */
        struct inode *inode;
        unsigned int mode;
        kuid_t uid;
        kgid_t gid;

        if (!mnt_may_suid(file->f_path.mnt))
                return;

        if (task_no_new_privs(current))
                return;

        inode = file->f_path.dentry->d_inode;
        mode = READ_ONCE(inode->i_mode);
        if (!(mode & (S_ISUID|S_ISGID)))
                return;

        /* Be careful if suid/sgid is set */
        inode_lock(inode);

        /* reload atomically mode/uid/gid now that lock held */
        mode = inode->i_mode;
        uid = inode->i_uid;
        gid = inode->i_gid;
        inode_unlock(inode);

        /* We ignore suid/sgid if there are no mappings for them in the ns */
        if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
            !kgid_has_mapping(bprm->cred->user_ns, gid))
                return;

        if (mode & S_ISUID) {
                bprm->per_clear |= PER_CLEAR_ON_SETID;
                bprm->cred->euid = uid;
        }

        if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
                bprm->per_clear |= PER_CLEAR_ON_SETID;
                bprm->cred->egid = gid;
        }
}
/*
 * Compute bprm->cred based upon the final binary.
 */
static int bprm_creds_from_file(struct linux_binprm *bprm)
{
        /* Compute creds based on which file? */
        struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;

        bprm_fill_uid(bprm, file);
        return security_bprm_creds_from_file(bprm, file);
}

/*
 * Fill the binprm structure from the inode.
 * Read the first BINPRM_BUF_SIZE bytes.
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
static int prepare_binprm(struct linux_binprm *bprm)
{
        loff_t pos = 0;

        memset(bprm->buf, 0, BINPRM_BUF_SIZE);
        return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
}
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
        int ret = 0;
        unsigned long offset;
        char *kaddr;
        struct page *page;

        if (!bprm->argc)
                return 0;

        do {
                offset = bprm->p & ~PAGE_MASK;
                page = get_arg_page(bprm, bprm->p, 0);
                if (!page) {
                        ret = -EFAULT;
                        goto out;
                }
                kaddr = kmap_atomic(page);

                for (; offset < PAGE_SIZE && kaddr[offset];
                                offset++, bprm->p++)
                        ;

                kunmap_atomic(kaddr);
                put_arg_page(page);
        } while (offset == PAGE_SIZE);

        bprm->p++;
        bprm->argc--;
        ret = 0;

out:
        return ret;
}
EXPORT_SYMBOL(remove_arg_zero);
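/*
 * Worked example (illustrative): for a script run as "./foo.sh bar", the
 * argument area holds "./foo.sh\0bar\0" at bprm->p. A binfmt that
 * substitutes its own argv[0] (binfmt_script, binfmt_misc) calls
 * remove_arg_zero() so that bprm->p lands on "bar" and bprm->argc drops to
 * 1, then pushes the replacement strings with copy_string_kernel().
 */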
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
/*
 * cycle through the list of binary format handlers, until one recognizes
 * the image
 */
static int search_binary_handler(struct linux_binprm *bprm)
{
        bool need_retry = IS_ENABLED(CONFIG_MODULES);
        struct linux_binfmt *fmt;
        int retval;

        retval = prepare_binprm(bprm);
        if (retval < 0)
                return retval;

        retval = security_bprm_check(bprm);
        if (retval)
                return retval;

        retval = -ENOENT;
 retry:
        read_lock(&binfmt_lock);
        list_for_each_entry(fmt, &formats, lh) {
                if (!try_module_get(fmt->module))
                        continue;
                read_unlock(&binfmt_lock);

                retval = fmt->load_binary(bprm);

                read_lock(&binfmt_lock);
                put_binfmt(fmt);
                if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
                        read_unlock(&binfmt_lock);
                        return retval;
                }
        }
        read_unlock(&binfmt_lock);

        if (need_retry) {
                if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
                    printable(bprm->buf[2]) && printable(bprm->buf[3]))
                        return retval;
                if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
                        return retval;
                need_retry = false;
                goto retry;
        }

        return retval;
}
static int exec_binprm(struct linux_binprm *bprm)
{
        pid_t old_pid, old_vpid;
        int ret, depth;

        /* Need to fetch pid before load_binary changes it */
        old_pid = current->pid;
        rcu_read_lock();
        old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
        rcu_read_unlock();

        /* This allows 4 levels of binfmt rewrites before failing hard. */
        for (depth = 0;; depth++) {
                struct file *exec;
                if (depth > 5)
                        return -ELOOP;

                ret = search_binary_handler(bprm);
                if (ret < 0)
                        return ret;
                if (!bprm->interpreter)
                        break;

                exec = bprm->file;
                bprm->file = bprm->interpreter;
                bprm->interpreter = NULL;

                allow_write_access(exec);
                if (unlikely(bprm->have_execfd)) {
                        if (bprm->executable) {
                                fput(exec);
                                return -ENOEXEC;
                        }
                        bprm->executable = exec;
                } else
                        fput(exec);
        }

        audit_bprm(bprm);
        trace_sched_process_exec(current, old_pid, bprm);
        ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
        proc_exec_connector(current);
        return 0;
}
/*
 * sys_execve() executes a new program.
 */
static int bprm_execve(struct linux_binprm *bprm,
                       int fd, struct filename *filename, int flags)
{
        struct file *file;
        struct files_struct *displaced;
        int retval;

        /*
         * Cancel any io_uring activity across execve
         */
        io_uring_task_cancel();

        retval = unshare_files(&displaced);
        if (retval)
                return retval;

        retval = prepare_bprm_creds(bprm);
        if (retval)
                goto out_files;

        check_unsafe_exec(bprm);
        current->in_execve = 1;

        file = do_open_execat(fd, filename, flags);
        retval = PTR_ERR(file);
        if (IS_ERR(file))
                goto out_unmark;

        sched_exec();

        bprm->file = file;
        /*
         * Record that a name derived from an O_CLOEXEC fd will be
         * inaccessible after exec. Relies on having exclusive access to
         * current->files (due to unshare_files above).
         */
        if (bprm->fdpath &&
            close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
                bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;

        /* Set the unchanging part of bprm->cred */
        retval = security_bprm_creds_for_exec(bprm);
        if (retval)
                goto out;

        retval = exec_binprm(bprm);
        if (retval < 0)
                goto out;

        /* execve succeeded */
        current->fs->in_exec = 0;
        current->in_execve = 0;
        rseq_execve(current);
        acct_update_integrals(current);
        task_numa_free(current, false);
        if (displaced)
                put_files_struct(displaced);
        return retval;

out:
        /*
         * If past the point of no return ensure the code never
         * returns to the userspace process. Use an existing fatal
         * signal if present otherwise terminate the process with
         * SIGSEGV.
         */
        if (bprm->point_of_no_return && !fatal_signal_pending(current))
                force_sigsegv(SIGSEGV);

out_unmark:
        current->fs->in_exec = 0;
        current->in_execve = 0;

out_files:
        if (displaced)
                reset_files_struct(displaced);

        return retval;
}
static int do_execveat_common(int fd, struct filename *filename,
                              struct user_arg_ptr argv,
                              struct user_arg_ptr envp,
                              int flags)
{
        struct linux_binprm *bprm;
        int retval;

        if (IS_ERR(filename))
                return PTR_ERR(filename);

        /*
         * We move the actual failure in case of RLIMIT_NPROC excess from
         * set*uid() to execve() because too many poorly written programs
         * don't check setuid() return code. Here we additionally recheck
         * whether NPROC limit is still exceeded.
         */
        if ((current->flags & PF_NPROC_EXCEEDED) &&
            atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
                retval = -EAGAIN;
                goto out_ret;
        }

        /* We're below the limit (still or again), so we don't want to make
         * further execve() calls fail. */
        current->flags &= ~PF_NPROC_EXCEEDED;

        bprm = alloc_bprm(fd, filename);
        if (IS_ERR(bprm)) {
                retval = PTR_ERR(bprm);
                goto out_ret;
        }

        retval = count(argv, MAX_ARG_STRINGS);
        if (retval < 0)
                goto out_free;
        bprm->argc = retval;

        retval = count(envp, MAX_ARG_STRINGS);
        if (retval < 0)
                goto out_free;
        bprm->envc = retval;

        retval = bprm_stack_limits(bprm);
        if (retval < 0)
                goto out_free;

        retval = copy_string_kernel(bprm->filename, bprm);
        if (retval < 0)
                goto out_free;
        bprm->exec = bprm->p;

        retval = copy_strings(bprm->envc, envp, bprm);
        if (retval < 0)
                goto out_free;

        retval = copy_strings(bprm->argc, argv, bprm);
        if (retval < 0)
                goto out_free;

        retval = bprm_execve(bprm, fd, filename, flags);
out_free:
        free_bprm(bprm);

out_ret:
        putname(filename);
        return retval;
}
int kernel_execve(const char *kernel_filename,
                  const char *const *argv, const char *const *envp)
{
        struct filename *filename;
        struct linux_binprm *bprm;
        int fd = AT_FDCWD;
        int retval;

        filename = getname_kernel(kernel_filename);
        if (IS_ERR(filename))
                return PTR_ERR(filename);

        bprm = alloc_bprm(fd, filename);
        if (IS_ERR(bprm)) {
                retval = PTR_ERR(bprm);
                goto out_ret;
        }

        retval = count_strings_kernel(argv);
        if (retval < 0)
                goto out_free;
        bprm->argc = retval;

        retval = count_strings_kernel(envp);
        if (retval < 0)
                goto out_free;
        bprm->envc = retval;

        retval = bprm_stack_limits(bprm);
        if (retval < 0)
                goto out_free;

        retval = copy_string_kernel(bprm->filename, bprm);
        if (retval < 0)
                goto out_free;
        bprm->exec = bprm->p;

        retval = copy_strings_kernel(bprm->envc, envp, bprm);
        if (retval < 0)
                goto out_free;

        retval = copy_strings_kernel(bprm->argc, argv, bprm);
        if (retval < 0)
                goto out_free;

        retval = bprm_execve(bprm, fd, filename, 0);
out_free:
        free_bprm(bprm);
out_ret:
        putname(filename);
        return retval;
}
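/*
 * Illustrative use (assumption: this is the shape of in-kernel callers such
 * as the usermode helper or the initial exec of /sbin/init):
 *
 *      static const char *argv[] = { "/sbin/init", NULL };
 *      static const char *envp[] = { "HOME=/", "TERM=linux", NULL };
 *      ret = kernel_execve("/sbin/init", argv, envp);
 *      // only returns on failure; on success the task runs the new image
 */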
static int do_execve(struct filename *filename,
        const char __user *const __user *__argv,
        const char __user *const __user *__envp)
{
        struct user_arg_ptr argv = { .ptr.native = __argv };
        struct user_arg_ptr envp = { .ptr.native = __envp };
        return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int do_execveat(int fd, struct filename *filename,
                const char __user *const __user *__argv,
                const char __user *const __user *__envp,
                int flags)
{
        struct user_arg_ptr argv = { .ptr.native = __argv };
        struct user_arg_ptr envp = { .ptr.native = __envp };

        return do_execveat_common(fd, filename, argv, envp, flags);
}
#ifdef CONFIG_COMPAT
static int compat_do_execve(struct filename *filename,
        const compat_uptr_t __user *__argv,
        const compat_uptr_t __user *__envp)
{
        struct user_arg_ptr argv = {
                .is_compat = true,
                .ptr.compat = __argv,
        };
        struct user_arg_ptr envp = {
                .is_compat = true,
                .ptr.compat = __envp,
        };
        return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int compat_do_execveat(int fd, struct filename *filename,
                              const compat_uptr_t __user *__argv,
                              const compat_uptr_t __user *__envp,
                              int flags)
{
        struct user_arg_ptr argv = {
                .is_compat = true,
                .ptr.compat = __argv,
        };
        struct user_arg_ptr envp = {
                .is_compat = true,
                .ptr.compat = __envp,
        };
        return do_execveat_common(fd, filename, argv, envp, flags);
}
#endif
void set_binfmt(struct linux_binfmt *new)
{
        struct mm_struct *mm = current->mm;

        if (mm->binfmt)
                module_put(mm->binfmt->module);

        mm->binfmt = new;
        if (new)
                __module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);

/*
 * set_dumpable stores three-value SUID_DUMP_* into mm->flags.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
        if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
                return;

        set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
}
SYSCALL_DEFINE3(execve,
                const char __user *, filename,
                const char __user *const __user *, argv,
                const char __user *const __user *, envp)
{
        return do_execve(getname(filename), argv, envp);
}

SYSCALL_DEFINE5(execveat,
                int, fd, const char __user *, filename,
                const char __user *const __user *, argv,
                const char __user *const __user *, envp,
                int, flags)
{
        int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

        return do_execveat(fd,
                           getname_flags(filename, lookup_flags, NULL),
                           argv, envp, flags);
}
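/*
 * Illustrative userspace counterpart (assumption: glibc's fexecve() is
 * implemented on top of this syscall on modern kernels; the fd may be an
 * O_PATH descriptor):
 *
 *      int fd = open("/usr/bin/true", O_PATH | O_CLOEXEC);
 *      char *argv[] = { "true", NULL }, *envp[] = { NULL };
 *      syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
 */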
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
        const compat_uptr_t __user *, argv,
        const compat_uptr_t __user *, envp)
{
        return compat_do_execve(getname(filename), argv, envp);
}

COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
                       const char __user *, filename,
                       const compat_uptr_t __user *, argv,
                       const compat_uptr_t __user *, envp,
                       int, flags)
{
        int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

        return compat_do_execveat(fd,
                                  getname_flags(filename, lookup_flags, NULL),
                                  argv, envp, flags);
}
#endif