#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>
int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;
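/*
 * These knobs are exposed to userspace via sysctl (see sysctl.c):
 * kernel.core_uses_pid, kernel.core_pattern and kernel.core_pipe_limit.
 * As an illustrative userspace sketch (not part of this file; the helper
 * path is hypothetical), a pipe handler can be registered with:
 *
 *	FILE *f = fopen("/proc/sys/kernel/core_pattern", "w");
 *	if (f) {
 *		fputs("|/usr/local/sbin/core-catcher %p %s", f);
 *		fclose(f);
 *	}
 *
 * The leading '|' makes format_corename() below return ispipe != 0, and
 * do_coredump() then hands the expanded string to the usermode helper
 * machinery instead of opening a file.
 */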
struct core_name {
	char *corename;
	int used, size;
};
static atomic_t call_count = ATOMIC_INIT(1);

/* The maximal length of core_pattern is also specified in sysctl.c */
static int expand_corename(struct core_name *cn)
{
	char *old_corename = cn->corename;

	cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
	cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);

	if (!cn->corename) {
		kfree(old_corename);
		return -ENOMEM;
	}

	return 0;
}
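
/*
 * Illustrative note on the sizing policy: call_count starts at 1 and is
 * global, so format_corename() sizes the initial buffer at
 * CORENAME_MAX_SIZE * call_count and every expand_corename() bumps the
 * counter for all future dumps.  With CORENAME_MAX_SIZE == 128, and
 * assuming no other dump has expanded the buffer first:
 *
 *	cn->size == 128		after format_corename() allocates
 *	cn->size == 256		after the first expand_corename()
 *	cn->size == 384		after the second
 */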
static int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	char *cur;
	int need;
	int ret;
	va_list arg;

	va_start(arg, fmt);
	need = vsnprintf(NULL, 0, fmt, arg);
	va_end(arg);

	if (likely(need < cn->size - cn->used - 1))
		goto out_printf;

	ret = expand_corename(cn);
	if (ret)
		goto expand_fail;

out_printf:
	cur = cn->corename + cn->used;
	va_start(arg, fmt);
	vsnprintf(cur, need + 1, fmt, arg);
	va_end(arg);
	cn->used += need;
	return 0;

expand_fail:
	return ret;
}
/* replace '/' in a corename component, since it would split the path */
static void cn_escape(char *str)
{
	for (; *str; str++)
		if (*str == '/')
			*str = '!';
}
static int cn_print_exe_file(struct core_name *cn)
{
	struct file *exe_file;
	char *pathbuf, *path;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file) {
		char *commstart = cn->corename + cn->used;
		ret = cn_printf(cn, "%s (path unknown)", current->comm);
		cn_escape(commstart);
		return ret;
	}

	pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	cn_escape(path);

	ret = cn_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}
/* format_corename will inspect the core_pattern string and output a
 * name into cn->corename.  The buffer is allocated here and grown on
 * demand (see cn_printf()), starting at CORENAME_MAX_SIZE bytes plus
 * room for the zero terminator.  Returns ispipe (non-zero when the
 * pattern designates a pipe handler) or a negative error.
 */
static int format_corename(struct core_name *cn, struct coredump_params *cprm)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
	cn->corename = kmalloc(cn->size, GFP_KERNEL);
	cn->used = 0;

	if (!cn->corename)
		return -ENOMEM;

	/* Repeat as long as we have more pattern to process and more output
	   space */
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (*pat_ptr == 0)
				goto out;
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {
			/* single % at the end, drop that */
			case 0:
				goto out;
			/* Double percent, output one percent */
			case '%':
				err = cn_printf(cn, "%c", '%');
				break;
			/* pid */
			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;
			/* uid (converted to the initial user namespace;
			 * cred->uid is a kuid_t and must not be passed
			 * to a varargs "%d" directly) */
			case 'u':
				err = cn_printf(cn, "%u",
					      from_kuid(&init_user_ns,
							cred->uid));
				break;
			/* gid (likewise converted from kgid_t) */
			case 'g':
				err = cn_printf(cn, "%u",
					      from_kgid(&init_user_ns,
							cred->gid));
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;
			/* signal that caused the coredump; si_signo is an
			 * int, so "%d", not "%ld" */
			case 's':
				err = cn_printf(cn, "%d",
						cprm->siginfo->si_signo);
				break;
			/* UNIX time of coredump */
			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				err = cn_printf(cn, "%lu", tv.tv_sec);
				break;
			}
			/* hostname */
			case 'h': {
				char *namestart = cn->corename + cn->used;
				down_read(&uts_sem);
				err = cn_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				cn_escape(namestart);
				break;
			}
			/* executable */
			case 'e': {
				char *commstart = cn->corename + cn->used;
				err = cn_printf(cn, "%s", current->comm);
				cn_escape(commstart);
				break;
			}
			case 'E':
				err = cn_print_exe_file(cn);
				break;
			/* core limit size */
			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

	/* Backward compatibility with core_uses_pid:
	 *
	 * If core_pattern does not include a %p (as is the default)
	 * and core_uses_pid is set, then .%pid will be appended to
	 * the filename. Do not do this for piped commands. */
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
out:
	return ispipe;
}
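
/*
 * Worked example (values are illustrative): for a crash of thread group
 * 4321 running "myapp" on host "buildbox", killed by SIGSEGV (11), the
 * specifiers handled above expand as follows:
 *
 *	core_pattern		resulting corename
 *	"core"			"core.4321" with kernel.core_uses_pid=1,
 *				plain "core" otherwise
 *	"core.%e.%p"		"core.myapp.4321"
 *	"%h-%s-%t"		"buildbox-11-1371234567"
 *	"|/usr/bin/helper %p"	ispipe != 0; do_coredump() splits the
 *				string into argv {"/usr/bin/helper", "4321"}
 */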
static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}
static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
			struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		nr = zap_process(tsk, exit_code);
		/* ignore all signals except SIGKILL, see prepare_signal() */
		tsk->signal->flags |= SIGNAL_GROUP_COREDUMP;
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm until we are finished.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	so we must see either the old or the new leader; it does not
	 *	matter which. However, it can change p->sighand, so
	 *	lock_task_sighand(p) must be used. Since p->mm != NULL and
	 *	we hold ->mmap_sem it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p, exit_code);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	down_write(&mm->mmap_sem);
	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion(&core_state->startup);
		/*
		 * Wait for all the threads to become inactive, so that
		 * all the thread context (extended register state, like
		 * fpu etc) gets copied to the memory.
		 */
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, 0);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}
static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
		/*
		 * see exit_mm(), curr->task must not see
		 * ->task == NULL before we read ->next.
		 */
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}
static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe;

	pipe = file_inode(file)->i_pipe;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;

	while ((pipe->readers > 1) && (!signal_pending(current))) {
		wake_up_interruptible_sync(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		pipe_wait(pipe);
	}

	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}
/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace. Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process. Returns 0 on success, or a
 * negative errno on failure.
 * Note that it also sets the core limit to 1. This
 * is a special value that we use to trap recursive
 * core dumps.
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);
	/* and disallow core files too */
	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}
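
/*
 * Illustrative userspace counterpart (hypothetical, not part of the
 * kernel build): a core_pattern helper spawned through the pipe path
 * above receives the core image on fd 0 and its expanded pattern
 * arguments in argv, e.g. registered as "|/usr/local/sbin/core-catcher %p":
 *
 *	#include <stdio.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		char path[64], buf[4096];
 *		size_t n;
 *		FILE *out;
 *
 *		snprintf(path, sizeof(path), "/var/cores/core.%s",
 *			 argc > 1 ? argv[1] : "unknown");
 *		out = fopen(path, "w");
 *		if (!out)
 *			return 1;
 *		while ((n = fread(buf, 1, sizeof(buf), stdin)) > 0)
 *			fwrite(buf, 1, n, out);
 *		fclose(out);
 *		return 0;
 *	}
 *
 * The RLIMIT_CORE = {1, 1} set above means a crash of the helper itself
 * hits the cprm.limit == 1 check in do_coredump(), so the recursive
 * dump is aborted.
 */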
void do_coredump(siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt *binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe;
	struct files_struct *displaced;
	bool need_nonrelative = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.regs = signal_pt_regs(),
		.limit = rlimit(RLIMIT_CORE),
		/*
		 * We must use the same mm->flags while dumping core to avoid
		 * inconsistency of bit flags, since these bits are not
		 * protected by any locks.
		 */
		.mm_flags = mm->flags,
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;
	/*
	 * We cannot trust fsuid as being the "true" uid of the process
	 * nor do we know its entire history. We only know it was tainted
	 * so we dump it as root in mode 2, and only into a controlled
	 * environment (pipe handler or fully qualified path).
	 */
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
		/* Setuid core dump mode */
		flag = O_EXCL;		/* Stop rewrite attacks */
		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
		need_nonrelative = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

	ispipe = format_corename(&cn, &cprm);

	if (ispipe) {
		int dump_count;
		char **helper_argv;
		struct subprocess_info *sub_info;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_corename;
		}

		if (cprm.limit == 1) {
			/* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
			 *
			 * Normally core limits are irrelevant to pipes, since
			 * we're not writing to the file system, but we use
			 * cprm.limit of 1 here as a special value: it is a
			 * consistent way to catch recursive crashes.
			 * We can still crash if the core_pattern binary sets
			 * RLIMIT_CORE to something other than 1, but it runs
			 * as root, and can do lots of stupid things.
			 *
			 * Note that we use task_tgid_vnr here to grab the pid
			 * of the process group leader. That way we get the
			 * right pid if a thread in a multi-threaded
			 * core_pattern process dies.
			 */
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		retval = -ENOMEM;
		sub_info = call_usermodehelper_setup(helper_argv[0],
						helper_argv, NULL, GFP_KERNEL,
						umh_pipe_setup, NULL, &cprm);
		if (sub_info)
			retval = call_usermodehelper_exec(sub_info,
							  UMH_WAIT_EXEC);

		argv_free(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_nonrelative && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		cprm.file = filp_open(cn.corename,
				 O_CREAT | O_RDWR | O_NOFOLLOW |
				 O_LARGEFILE | flag,
				 0600);
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = file_inode(cprm.file);
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;
		/*
		 * AK: actually i see no reason to not allow this for named
		 * pipes etc, but keep the previous behaviour for now.
		 */
		if (!S_ISREG(inode->i_mode))
			goto close_fail;
		/*
		 * Don't allow local users to get cute and trick others
		 * into coredumping into their pre-created files.
		 */
		if (!uid_eq(inode->i_uid, current_fsuid()))
			goto close_fail;
		if (!cprm.file->f_op || !cprm.file->f_op->write)
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}

	/* get us an unshared descriptor table; almost always a no-op */
	retval = unshare_files(&displaced);
	if (retval)
		goto close_fail;
	if (displaced)
		put_files_struct(displaced);
	retval = binfmt->core_dump(&cprm);
	if (retval)
		current->signal->group_exit_code |= 0x80;

	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(cn.corename);
fail_corename:
	coredump_finish(mm);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}
/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
int dump_write(struct file *file, const void *addr, int nr)
{
	return access_ok(VERIFY_READ, addr, nr) &&
		file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
EXPORT_SYMBOL(dump_write);

int dump_seek(struct file *file, loff_t off)
{
	int ret = 1;

	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);

		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;

			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n)) {
				ret = 0;
				break;
			}
			off -= n;
		}
		free_page((unsigned long)buf);
	}

	return ret;
}
EXPORT_SYMBOL(dump_seek);
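
/*
 * Illustrative sketch (hypothetical, not a real binary format): a
 * binfmt ->core_dump() implementation writes data with dump_write()
 * and skips holes with dump_seek().  do_coredump() treats a non-zero
 * return as success and sets the 0x80 core-dumped bit in the
 * group_exit_code.
 *
 *	static int example_core_dump(struct coredump_params *cprm)
 *	{
 *		char hdr[64] = "EXAMPLECORE";
 *
 *		if (!dump_write(cprm->file, hdr, sizeof(hdr)))
 *			return 0;
 *		if (!dump_seek(cprm->file, PAGE_SIZE - sizeof(hdr)))
 *			return 0;
 *		return 1;
 *	}
 */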