1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/slab.h>
3 #include <linux/file.h>
4 #include <linux/fdtable.h>
5 #include <linux/freezer.h>
7 #include <linux/stat.h>
8 #include <linux/fcntl.h>
9 #include <linux/swap.h>
10 #include <linux/ctype.h>
11 #include <linux/string.h>
12 #include <linux/init.h>
13 #include <linux/pagemap.h>
14 #include <linux/perf_event.h>
15 #include <linux/highmem.h>
16 #include <linux/spinlock.h>
17 #include <linux/key.h>
18 #include <linux/personality.h>
19 #include <linux/binfmts.h>
20 #include <linux/coredump.h>
21 #include <linux/sched/coredump.h>
22 #include <linux/sched/signal.h>
23 #include <linux/sched/task_stack.h>
24 #include <linux/utsname.h>
25 #include <linux/pid_namespace.h>
26 #include <linux/module.h>
27 #include <linux/namei.h>
28 #include <linux/mount.h>
29 #include <linux/security.h>
30 #include <linux/syscalls.h>
31 #include <linux/tsacct_kern.h>
32 #include <linux/cn_proc.h>
33 #include <linux/audit.h>
34 #include <linux/kmod.h>
35 #include <linux/fsnotify.h>
36 #include <linux/fs_struct.h>
37 #include <linux/pipe_fs_i.h>
38 #include <linux/oom.h>
39 #include <linux/compat.h>
41 #include <linux/path.h>
42 #include <linux/timekeeping.h>
43 #include <linux/sysctl.h>
44 #include <linux/elf.h>
46 #include <linux/uaccess.h>
47 #include <asm/mmu_context.h>
51 #include <trace/events/task.h>
54 #include <trace/events/sched.h>
56 static bool dump_vma_snapshot(struct coredump_params *cprm);
57 static void free_vma_snapshot(struct coredump_params *cprm);
59 static int core_uses_pid;
60 static unsigned int core_pipe_limit;
61 static char core_pattern[CORENAME_MAX_SIZE] = "core";
62 static int core_name_size = CORENAME_MAX_SIZE;
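/*
 * These knobs are exposed through the coredump_sysctls table below as
 * kernel.core_uses_pid, kernel.core_pattern and kernel.core_pipe_limit.
 * core_pattern is either a filename template expanded by
 * format_corename() or, when it starts with '|', a user-mode helper
 * command that receives the dump on a pipe.
 */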
69 static int expand_corename(struct core_name *cn, int size)
71 char *corename = krealloc(cn->corename, size, GFP_KERNEL);
76 if (size > core_name_size) /* racy but harmless */
77 core_name_size = size;
79 cn->size = ksize(corename);
80 cn->corename = corename;
84 static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
91 free = cn->size - cn->used;
93 va_copy(arg_copy, arg);
94 need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
102 if (!expand_corename(cn, cn->size + need - free + 1))
108 static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
114 ret = cn_vprintf(cn, fmt, arg);
120 static __printf(2, 3)
121 int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
128 ret = cn_vprintf(cn, fmt, arg);
133 * Ensure that this coredump name component can't cause the
134 * resulting corefile path to consist of a ".." or ".".
136 if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
137 (cn->used - cur == 2 && cn->corename[cur] == '.'
138 && cn->corename[cur+1] == '.'))
139 cn->corename[cur] = '!';
142 * Empty names are fishy and could be used to create a "//" in a
143 * corefile name, causing the coredump to happen one directory
144 * level too high. Enforce that all components of the core
145 * pattern are at least one character long.
148 ret = cn_printf(cn, "!");
151 for (; cur < cn->used; ++cur) {
152 if (cn->corename[cur] == '/')
153 cn->corename[cur] = '!';
158 static int cn_print_exe_file(struct core_name *cn, bool name_only)
160 struct file *exe_file;
161 char *pathbuf, *path, *ptr;
164 exe_file = get_mm_exe_file(current->mm);
166 return cn_esc_printf(cn, "%s (path unknown)", current->comm);
168 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
174 path = file_path(exe_file, pathbuf, PATH_MAX);
181 ptr = strrchr(path, '/');
185 ret = cn_esc_printf(cn, "%s", path);
194 /* format_corename will inspect the pattern parameter, and output a
195 * name into corename, which must have space for at least
196 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
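 *
 * For example (illustrative only): with core_pattern set to
 * "/var/crash/core.%e.%p.%t", a crash of "myprog" with pid 1234 would
 * expand to something like "/var/crash/core.myprog.1234.1670000000",
 * via the %e (comm), %p (pid) and %t (UNIX time) specifiers handled in
 * the switch below.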
198 static int format_corename(struct core_name *cn, struct coredump_params *cprm,
199 size_t **argv, int *argc)
201 const struct cred *cred = current_cred();
202 const char *pat_ptr = core_pattern;
203 int ispipe = (*pat_ptr == '|');
204 bool was_space = false;
205 int pid_in_pattern = 0;
210 if (expand_corename(cn, core_name_size))
212 cn->corename[0] = '\0';
215 int argvs = sizeof(core_pattern) / 2;
216 (*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
219 (*argv)[(*argc)++] = 0;
225 /* Repeat as long as we have more pattern to process and more output
229 * Split on spaces before doing template expansion so that
230 * %e and %E don't get split if they have spaces in them
233 if (isspace(*pat_ptr)) {
238 } else if (was_space) {
240 err = cn_printf(cn, "%c", '\0');
243 (*argv)[(*argc)++] = cn->used;
246 if (*pat_ptr != '%') {
247 err = cn_printf(cn, "%c", *pat_ptr++);
249 switch (*++pat_ptr) {
250 /* single % at the end, drop that */
253 /* Double percent, output one percent */
255 err = cn_printf(cn, "%c", '%');
260 err = cn_printf(cn, "%d",
261 task_tgid_vnr(current));
265 err = cn_printf(cn, "%d",
266 task_tgid_nr(current));
269 err = cn_printf(cn, "%d",
270 task_pid_vnr(current));
273 err = cn_printf(cn, "%d",
274 task_pid_nr(current));
278 err = cn_printf(cn, "%u",
279 from_kuid(&init_user_ns,
284 err = cn_printf(cn, "%u",
285 from_kgid(&init_user_ns,
289 err = cn_printf(cn, "%d",
290 __get_dumpable(cprm->mm_flags));
292 /* signal that caused the coredump */
294 err = cn_printf(cn, "%d",
295 cprm->siginfo->si_signo);
297 /* UNIX time of coredump */
301 time = ktime_get_real_seconds();
302 err = cn_printf(cn, "%lld", time);
308 err = cn_esc_printf(cn, "%s",
309 utsname()->nodename);
312 /* executable, could be changed by prctl PR_SET_NAME etc */
314 err = cn_esc_printf(cn, "%s", current->comm);
316 /* file name of executable */
318 err = cn_print_exe_file(cn, true);
321 err = cn_print_exe_file(cn, false);
323 /* core limit size */
325 err = cn_printf(cn, "%lu",
326 rlimit(RLIMIT_CORE));
328 /* CPU the task ran on */
330 err = cn_printf(cn, "%d", cprm->cpu);
343 /* Backward compatibility with core_uses_pid:
345 * If core_pattern does not include a %p (as is the default)
346 * and core_uses_pid is set, then .%pid will be appended to
347 * the filename. Do not do this for piped commands. */
348 if (!ispipe && !pid_in_pattern && core_uses_pid) {
349 err = cn_printf(cn, ".%d", task_tgid_vnr(current));
356 static int zap_process(struct task_struct *start, int exit_code)
358 struct task_struct *t;
361 /* Allow SIGKILL, see prepare_signal() */
362 start->signal->flags = SIGNAL_GROUP_EXIT;
363 start->signal->group_exit_code = exit_code;
364 start->signal->group_stop_count = 0;
366 for_each_thread(start, t) {
367 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
368 if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
369 sigaddset(&t->pending.signal, SIGKILL);
370 signal_wake_up(t, 1);
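/*
 * zap_threads() marks the whole thread group as exiting for the dump
 * via zap_process() and returns the number of threads that still have
 * to reach coredump_task_exit(), or a negative error if another exit
 * or exec already owns the group.  The count seeds
 * core_state->nr_threads for coredump_wait() below.
 */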
378 static int zap_threads(struct task_struct *tsk,
379 struct core_state *core_state, int exit_code)
381 struct signal_struct *signal = tsk->signal;
384 spin_lock_irq(&tsk->sighand->siglock);
385 if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
386 signal->core_state = core_state;
387 nr = zap_process(tsk, exit_code);
388 clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
389 tsk->flags |= PF_DUMPCORE;
390 atomic_set(&core_state->nr_threads, nr);
392 spin_unlock_irq(&tsk->sighand->siglock);
396 static int coredump_wait(int exit_code, struct core_state *core_state)
398 struct task_struct *tsk = current;
399 int core_waiters = -EBUSY;
401 init_completion(&core_state->startup);
402 core_state->dumper.task = tsk;
403 core_state->dumper.next = NULL;
405 core_waiters = zap_threads(tsk, core_state, exit_code);
406 if (core_waiters > 0) {
407 struct core_thread *ptr;
409 wait_for_completion_state(&core_state->startup,
410 TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
412 * Wait for all the threads to become inactive, so that
413 * all the thread context (extended register state, like
414 * fpu etc) gets copied to the memory.
416 ptr = core_state->dumper.next;
417 while (ptr != NULL) {
418 wait_task_inactive(ptr->task, TASK_ANY);
426 static void coredump_finish(bool core_dumped)
428 struct core_thread *curr, *next;
429 struct task_struct *task;
431 spin_lock_irq(&current->sighand->siglock);
432 if (core_dumped && !__fatal_signal_pending(current))
433 current->signal->group_exit_code |= 0x80;
434 next = current->signal->core_state->dumper.next;
435 current->signal->core_state = NULL;
436 spin_unlock_irq(&current->sighand->siglock);
438 while ((curr = next) != NULL) {
442 * see coredump_task_exit(), curr->task must not see
443 * ->task == NULL before we read ->next.
447 wake_up_process(task);
451 static bool dump_interrupted(void)
454 * SIGKILL or freezing() interrupt the coredumping. Perhaps we
455 * can do try_to_freeze() and check __fatal_signal_pending(),
456 * but then we need to teach dump_write() to restart and clear
459 return fatal_signal_pending(current) || freezing(current);
462 static void wait_for_dump_helpers(struct file *file)
464 struct pipe_inode_info *pipe = file->private_data;
469 wake_up_interruptible_sync(&pipe->rd_wait);
470 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
474 * We actually want wait_event_freezable() but then we need
475 * to clear TIF_SIGPENDING and improve dump_interrupted().
477 wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);
487 * helper function to customize the process used
488 * to collect the core in userspace. Specifically
489 * it sets up a pipe and installs it as fd 0 (stdin)
490 * for the process. Returns 0 on success, or
491 * a negative error code on failure.
492 * Note that it also sets the core limit to 1. This
493 * is a special value that we use to trap recursive
496 static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
498 struct file *files[2];
499 struct coredump_params *cp = (struct coredump_params *)info->data;
500 int err = create_pipe_files(files, 0);
506 err = replace_fd(0, files[0], 0);
508 /* and disallow core files too */
509 current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};
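/*
 * The RLIMIT_CORE value of 1 set above is the sentinel that
 * do_coredump() checks (cprm.limit == 1) so that a crash of the pipe
 * helper itself does not recurse into another core dump.
 */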
514 void do_coredump(const kernel_siginfo_t *siginfo)
516 struct core_state core_state;
518 struct mm_struct *mm = current->mm;
519 struct linux_binfmt * binfmt;
520 const struct cred *old_cred;
526 /* require nonrelative corefile path and be extra careful */
527 bool need_suid_safe = false;
528 bool core_dumped = false;
529 static atomic_t core_dump_count = ATOMIC_INIT(0);
530 struct coredump_params cprm = {
532 .limit = rlimit(RLIMIT_CORE),
534 * We must use the same mm->flags while dumping core to avoid
535 * inconsistency of bit flags, since this flag is not protected
538 .mm_flags = mm->flags,
540 .cpu = raw_smp_processor_id(),
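/* CPU the task was running on, used by format_corename() */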
543 audit_core_dumps(siginfo->si_signo);
546 if (!binfmt || !binfmt->core_dump)
548 if (!__get_dumpable(cprm.mm_flags))
551 cred = prepare_creds();
555 * We cannot trust fsuid as being the "true" uid of the process
556 * nor do we know its entire history. We only know it was tainted
557 * so we dump it as root in mode 2, and only into a controlled
558 * environment (pipe handler or fully qualified path).
560 if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
561 /* Setuid core dump mode */
562 cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */
563 need_suid_safe = true;
566 retval = coredump_wait(siginfo->si_signo, &core_state);
570 old_cred = override_creds(cred);
572 ispipe = format_corename(&cn, &cprm, &argv, &argc);
578 struct subprocess_info *sub_info;
581 printk(KERN_WARNING "format_corename failed\n");
582 printk(KERN_WARNING "Aborting core\n");
586 if (cprm.limit == 1) {
587 /* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
589 * Normally core limits are irrelevant to pipes, since
590 * we're not writing to the file system, but we use
591 * cprm.limit of 1 here as a special value, this is a
592 * consistent way to catch recursive crashes.
593 * We can still crash if the core_pattern binary sets
594 * RLIMIT_CORE to something other than 1, but it runs as root,
595 * and can do lots of stupid things.
597 * Note that we use task_tgid_vnr here to grab the pid
598 * of the process group leader. That way we get the
599 * right pid if a thread in a multi-threaded
600 * core_pattern process dies.
603 "Process %d(%s) has RLIMIT_CORE set to 1\n",
604 task_tgid_vnr(current), current->comm);
605 printk(KERN_WARNING "Aborting core\n");
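/*
 * The dump goes to a pipe rather than the filesystem, so RLIMIT_CORE
 * no longer applies; lift the limit for the write path below.
 */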
608 cprm.limit = RLIM_INFINITY;
610 dump_count = atomic_inc_return(&core_dump_count);
611 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
612 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
613 task_tgid_vnr(current), current->comm);
614 printk(KERN_WARNING "Skipping core dump\n");
618 helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
621 printk(KERN_WARNING "%s failed to allocate memory\n",
625 for (argi = 0; argi < argc; argi++)
626 helper_argv[argi] = cn.corename + argv[argi];
627 helper_argv[argi] = NULL;
630 sub_info = call_usermodehelper_setup(helper_argv[0],
631 helper_argv, NULL, GFP_KERNEL,
632 umh_pipe_setup, NULL, &cprm);
634 retval = call_usermodehelper_exec(sub_info,
639 printk(KERN_INFO "Core dump to |%s pipe failed\n",
644 struct user_namespace *mnt_userns;
646 int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
647 O_LARGEFILE | O_EXCL;
649 if (cprm.limit < binfmt->min_coredump)
652 if (need_suid_safe && cn.corename[0] != '/') {
653 printk(KERN_WARNING "Pid %d(%s) can only dump core "\
654 "to fully qualified path!\n",
655 task_tgid_vnr(current), current->comm);
656 printk(KERN_WARNING "Skipping core dump\n");
661 * Unlink the file if it exists unless this is a SUID
662 * binary - in that case, we're running around with root
663 * privs and don't want to unlink another user's coredump.
665 if (!need_suid_safe) {
667 * If it doesn't exist, that's fine. If there's some
668 * other problem, we'll catch it at the filp_open().
670 do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
674 * There is a race between unlinking and creating the
675 * file, but if that causes an EEXIST here, that's
676 * fine - another process raced with us while creating
677 * the corefile, and the other process won. To userspace,
678 * what matters is that at least one of the two processes
679 * writes its coredump successfully, not which one.
681 if (need_suid_safe) {
683 * Using user namespaces, normal user tasks can change
684 * their current->fs->root to point to arbitrary
685 * directories. Since the intention of the "only dump
686 * with a fully qualified path" rule is to control where
687 * coredumps may be placed using root privileges,
688 * current->fs->root must not be used. Instead, use the
689 * root directory of init_task.
693 task_lock(&init_task);
694 get_fs_root(init_task.fs, &root);
695 task_unlock(&init_task);
696 cprm.file = file_open_root(&root, cn.corename,
700 cprm.file = filp_open(cn.corename, open_flags, 0600);
702 if (IS_ERR(cprm.file))
705 inode = file_inode(cprm.file);
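/*
 * Be paranoid about the opened target: reject files with extra hard
 * links or an unhashed dentry, and (below) anything that is not a
 * regular file.
 */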
706 if (inode->i_nlink > 1)
708 if (d_unhashed(cprm.file->f_path.dentry))
711 * AK: actually I see no reason not to allow this for named
712 * pipes etc., but keep the previous behaviour for now.
714 if (!S_ISREG(inode->i_mode))
717 * Don't dump core if the filesystem changed owner or mode
718 * of the file during file creation. This is an issue when
719 * a process dumps core while its cwd is e.g. on a vfat
722 mnt_userns = file_mnt_user_ns(cprm.file);
723 if (!uid_eq(i_uid_into_mnt(mnt_userns, inode),
725 pr_info_ratelimited("Core dump to %s aborted: cannot preserve file owner\n",
729 if ((inode->i_mode & 0677) != 0600) {
730 pr_info_ratelimited("Core dump to %s aborted: cannot preserve file permissions\n",
734 if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
736 if (do_truncate(mnt_userns, cprm.file->f_path.dentry,
741 /* get us an unshared descriptor table; almost always a no-op */
742 /* The cell spufs coredump code reads the file descriptor tables */
743 retval = unshare_files();
746 if (!dump_interrupted()) {
748 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
749 * have this set to NULL.
752 pr_info("Core dump to |%s disabled\n", cn.corename);
755 if (!dump_vma_snapshot(&cprm))
758 file_start_write(cprm.file);
759 core_dumped = binfmt->core_dump(&cprm);
761 * Ensures that file size is big enough to contain the current
762 * file position. This prevents gdb from complaining about
763 * a truncated file if the last "write" to the file was
768 dump_emit(&cprm, "", 1);
770 file_end_write(cprm.file);
771 free_vma_snapshot(&cprm);
773 if (ispipe && core_pipe_limit)
774 wait_for_dump_helpers(cprm.file);
777 filp_close(cprm.file, NULL);
780 atomic_dec(&core_dump_count);
784 coredump_finish(core_dumped);
785 revert_creds(old_cred);
793 * Core dumping helper functions. These are the only things you should
794 * do on a core-file: use only these functions to write out all the
797 static int __dump_emit(struct coredump_params *cprm, const void *addr, int nr)
799 struct file *file = cprm->file;
800 loff_t pos = file->f_pos;
802 if (cprm->written + nr > cprm->limit)
806 if (dump_interrupted())
808 n = __kernel_write(file, addr, nr, &pos);
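/*
 * __dump_skip(): advance the output by @nr bytes.  When the corefile
 * supports seeking this is a single vfs_llseek(), leaving a hole;
 * otherwise the gap is filled by writing zero pages through
 * __dump_emit().
 */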
818 static int __dump_skip(struct coredump_params *cprm, size_t nr)
820 static char zeroes[PAGE_SIZE];
821 struct file *file = cprm->file;
822 if (file->f_mode & FMODE_LSEEK) {
823 if (dump_interrupted() ||
824 vfs_llseek(file, nr, SEEK_CUR) < 0)
829 while (nr > PAGE_SIZE) {
830 if (!__dump_emit(cprm, zeroes, PAGE_SIZE))
834 return __dump_emit(cprm, zeroes, nr);
838 static int dump_emit_page(struct coredump_params *cprm, struct page *page)
840 struct bio_vec bvec = {
845 struct iov_iter iter;
846 struct file *file = cprm->file;
851 if (!__dump_skip(cprm, cprm->to_skip))
855 if (cprm->written + PAGE_SIZE > cprm->limit)
857 if (dump_interrupted())
860 iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
861 n = __kernel_write_iter(cprm->file, &iter, &pos);
865 cprm->written += PAGE_SIZE;
866 cprm->pos += PAGE_SIZE;
871 int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
874 if (!__dump_skip(cprm, cprm->to_skip))
878 return __dump_emit(cprm, addr, nr);
880 EXPORT_SYMBOL(dump_emit);
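/*
 * dump_skip_to(): record how much padding is needed to reach absolute
 * offset @pos; the padding is flushed lazily by the next emit.
 */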
882 void dump_skip_to(struct coredump_params *cprm, unsigned long pos)
884 cprm->to_skip = pos - cprm->pos;
886 EXPORT_SYMBOL(dump_skip_to);
888 void dump_skip(struct coredump_params *cprm, size_t nr)
892 EXPORT_SYMBOL(dump_skip);
894 #ifdef CONFIG_ELF_CORE
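/*
 * dump_user_range(): write @len bytes of user memory starting at
 * @start, one page at a time.  Pages that get_dump_page() reports as
 * absent are skipped, keeping the corefile sparse.
 */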
895 int dump_user_range(struct coredump_params *cprm, unsigned long start,
900 for (addr = start; addr < start + len; addr += PAGE_SIZE) {
904 * To avoid having to allocate page tables for virtual address
905 * ranges that have never been used yet, and also to make it
906 * easy to generate sparse core files, use a helper that returns
907 * NULL when encountering an empty page table entry that would
908 * otherwise have been filled with the zero page.
910 page = get_dump_page(addr);
912 int stop = !dump_emit_page(cprm, page);
917 dump_skip(cprm, PAGE_SIZE);
924 int dump_align(struct coredump_params *cprm, int align)
926 unsigned mod = (cprm->pos + cprm->to_skip) & (align - 1);
927 if (align & (align - 1))
930 cprm->to_skip += align - mod;
933 EXPORT_SYMBOL(dump_align);
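/*
 * Rough sketch (not taken from any particular binfmt) of how a
 * ->core_dump() implementation is expected to drive these helpers:
 *
 *	dump_emit(cprm, &hdr, sizeof(hdr));      // fixed headers
 *	dump_align(cprm, 4);                     // pad to note alignment
 *	dump_skip_to(cprm, dataoff);             // seek to the data area
 *	dump_user_range(cprm, start, dump_size); // memory contents
 */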
937 void validate_coredump_safety(void)
939 if (suid_dumpable == SUID_DUMP_ROOT &&
940 core_pattern[0] != '/' && core_pattern[0] != '|') {
942 "Unsafe core_pattern used with fs.suid_dumpable=2.\n"
943 "Pipe handler or fully qualified core dump path required.\n"
944 "Set kernel.core_pattern before fs.suid_dumpable.\n"
949 static int proc_dostring_coredump(struct ctl_table *table, int write,
950 void *buffer, size_t *lenp, loff_t *ppos)
952 int error = proc_dostring(table, write, buffer, lenp, ppos);
955 validate_coredump_safety();
959 static struct ctl_table coredump_sysctls[] = {
961 .procname = "core_uses_pid",
962 .data = &core_uses_pid,
963 .maxlen = sizeof(int),
965 .proc_handler = proc_dointvec,
968 .procname = "core_pattern",
969 .data = core_pattern,
970 .maxlen = CORENAME_MAX_SIZE,
972 .proc_handler = proc_dostring_coredump,
975 .procname = "core_pipe_limit",
976 .data = &core_pipe_limit,
977 .maxlen = sizeof(unsigned int),
979 .proc_handler = proc_dointvec,
984 static int __init init_fs_coredump_sysctls(void)
986 register_sysctl_init("kernel", coredump_sysctls);
989 fs_initcall(init_fs_coredump_sysctls);
990 #endif /* CONFIG_SYSCTL */
993 * The purpose of always_dump_vma() is to make sure that special kernel mappings
994 * that are useful for post-mortem analysis are included in every core dump.
995 * In that way we ensure that the core dump is fully interpretable later
996 * without matching up the same kernel and hardware config to see what PC values
997 * meant. These special mappings include - vDSO, vsyscall, and other
998 * architecture specific mappings
1000 static bool always_dump_vma(struct vm_area_struct *vma)
1002 /* Any vsyscall mappings? */
1003 if (vma == get_gate_vma(vma->vm_mm))
1007 * Assume that all vmas with a .name op should always be dumped.
1008 * If this changes, a new vm_ops field can easily be added.
1010 if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
1014 * arch_vma_name() returns non-NULL for special architecture mappings,
1015 * such as vDSO sections.
1017 if (arch_vma_name(vma))
1023 #define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1
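/*
 * Placeholder returned by vma_dump_size() for readable mappings of
 * executable files at offset 0 under the ELF_HEADERS filter: whether a
 * full page is dumped is decided later, in dump_vma_snapshot(), once
 * the mmap lock has been dropped and the mapping can be probed for the
 * ELF magic.
 */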
1026 * Decide how much of @vma's contents should be included in a core dump.
1028 static unsigned long vma_dump_size(struct vm_area_struct *vma,
1029 unsigned long mm_flags)
1031 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
1033 /* always dump the vdso and vsyscall sections */
1034 if (always_dump_vma(vma))
1037 if (vma->vm_flags & VM_DONTDUMP)
1040 /* support for DAX */
1041 if (vma_is_dax(vma)) {
1042 if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
1044 if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
1049 /* Hugetlb memory check */
1050 if (is_vm_hugetlb_page(vma)) {
1051 if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
1053 if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
1058 /* Do not dump I/O mapped devices or special mappings */
1059 if (vma->vm_flags & VM_IO)
1062 /* By default, dump shared memory if mapped from an anonymous file. */
1063 if (vma->vm_flags & VM_SHARED) {
1064 if (file_inode(vma->vm_file)->i_nlink == 0 ?
1065 FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
1070 /* Dump segments that have been written to. */
1071 if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE))
1073 if (vma->vm_file == NULL)
1076 if (FILTER(MAPPED_PRIVATE))
1080 * If this is the beginning of an executable file mapping,
1081 * dump the first page to aid in determining what was mapped here.
1083 if (FILTER(ELF_HEADERS) &&
1084 vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
1085 if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
1089 * ELF libraries aren't always executable.
1090 * We'll want to check whether the mapping starts with the ELF
1091 * magic, but not now - we're holding the mmap lock,
1092 * so copy_from_user() doesn't work here.
1093 * Use a placeholder instead, and fix it up later in
1094 * dump_vma_snapshot().
1096 return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
1104 return vma->vm_end - vma->vm_start;
1108 * Helper function for iterating across a vma list. It ensures that the caller
1109 * will visit `gate_vma' prior to terminating the search.
1111 static struct vm_area_struct *coredump_next_vma(struct ma_state *mas,
1112 struct vm_area_struct *vma,
1113 struct vm_area_struct *gate_vma)
1115 if (gate_vma && (vma == gate_vma))
1118 vma = mas_next(mas, ULONG_MAX);
1124 static void free_vma_snapshot(struct coredump_params *cprm)
1126 if (cprm->vma_meta) {
1128 for (i = 0; i < cprm->vma_count; i++) {
1129 struct file *file = cprm->vma_meta[i].file;
1133 kvfree(cprm->vma_meta);
1134 cprm->vma_meta = NULL;
1139 * Under the mmap_lock, take a snapshot of relevant information about the task's
1142 static bool dump_vma_snapshot(struct coredump_params *cprm)
1144 struct vm_area_struct *gate_vma, *vma = NULL;
1145 struct mm_struct *mm = current->mm;
1146 MA_STATE(mas, &mm->mm_mt, 0, 0);
1150 * Once the stack expansion code is fixed to not change VMA bounds
1151 * under mmap_lock in read mode, this can be changed to take the
1152 * mmap_lock in read mode.
1154 if (mmap_write_lock_killable(mm))
1157 cprm->vma_data_size = 0;
1158 gate_vma = get_gate_vma(mm);
1159 cprm->vma_count = mm->map_count + (gate_vma ? 1 : 0);
1161 cprm->vma_meta = kvmalloc_array(cprm->vma_count, sizeof(*cprm->vma_meta), GFP_KERNEL);
1162 if (!cprm->vma_meta) {
1163 mmap_write_unlock(mm);
1167 while ((vma = coredump_next_vma(&mas, vma, gate_vma)) != NULL) {
1168 struct core_vma_metadata *m = cprm->vma_meta + i;
1170 m->start = vma->vm_start;
1171 m->end = vma->vm_end;
1172 m->flags = vma->vm_flags;
1173 m->dump_size = vma_dump_size(vma, cprm->mm_flags);
1174 m->pgoff = vma->vm_pgoff;
1175 m->file = vma->vm_file;
1181 mmap_write_unlock(mm);
1183 for (i = 0; i < cprm->vma_count; i++) {
1184 struct core_vma_metadata *m = cprm->vma_meta + i;
1186 if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) {
1187 char elfmag[SELFMAG];
1189 if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) ||
1190 memcmp(elfmag, ELFMAG, SELFMAG) != 0) {
1193 m->dump_size = PAGE_SIZE;
1197 cprm->vma_data_size += m->dump_size;