// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless the
	 * signal is SIGKILL, which can't be reported anyway but can be
	 * ignored by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);
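/*
 * Illustrative sketch, not built (hence #if 0) and not part of the
 * original file: the canonical caller pattern for recalc_sigpending().
 * Code that changes current->blocked, e.g. the sigprocmask() path,
 * updates the mask under siglock and then recomputes TIF_SIGPENDING so
 * the flag matches the new mask.  example_block_signal() is a
 * hypothetical helper, only for illustration.
 */
#if 0
static void example_block_signal(int sig)
{
	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);	/* block @sig */
	recalc_sigpending();			/* re-derive TIF_SIGPENDING */
	spin_unlock_irq(&current->sighand->siglock);
}
#endif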
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
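/*
 * Illustrative sketch, not built and not part of the original file:
 * because of SYNCHRONOUS_MASK above, a pending synchronous signal wins
 * over a lower-numbered asynchronous one.  With both SIGHUP (1) and
 * SIGSEGV (11) pending and nothing blocked, next_signal() reports
 * SIGSEGV first even though SIGHUP has the smaller signal number.
 */
#if 0
static int example_next_signal(struct sigpending *pending)
{
	sigset_t none;

	sigemptyset(&none);
	sigaddset(&pending->signal, SIGHUP);
	sigaddset(&pending->signal, SIGSEGV);
	return next_signal(pending, &none);	/* -> SIGSEGV, not SIGHUP */
}
#endif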
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
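/*
 * Illustrative sketch, not built and not part of the original file: the
 * classic kthread pattern this helper exists for.  A kthread opts in to
 * a signal with allow_signal(), reacts to it, and calls flush_signals()
 * to clear anything still pending before continuing.  example_kthread()
 * is a hypothetical thread function.
 */
#if 0
static int example_kthread(void *unused)
{
	allow_signal(SIGTERM);

	while (!kthread_should_stop()) {
		if (signal_pending(current)) {
			flush_signals(current);
			/* ... react to the request ... */
		}
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
#endif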
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
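/*
 * Illustrative sketch, not built and not part of the original file:
 * dequeue_signal() must be called with the caller's siglock held, as in
 * this hypothetical helper that pulls one pending signal whose bit is
 * not set in @mask (the set of blocked signals).
 */
#if 0
static int example_dequeue_one(sigset_t *mask, kernel_siginfo_t *info)
{
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	signr = dequeue_signal(current, mask, info);
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
#endif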
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, the STOP trap will
 * eventually be taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued.  Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
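/*
 * Usage note (editorial addition, not from the original file):
 * print_fatal_signals can be enabled either at boot with
 * "print-fatal-signals=1" on the kernel command line, or at run time
 * via the corresponding sysctl, e.g.
 *
 *	sysctl -w kernel.print-fatal-signals=1
 */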
int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
	enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than a 32bit pointer expects, so userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr, of the sigval_t addr
 * member.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);
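/*
 * Illustrative sketch, not built and not part of the original file: a
 * typical driver-side use of send_sig_info(), filling in a minimal
 * siginfo before signalling a task it holds a reference to.  The
 * SIGIO/POLL_IN choice and the example_notify_reader() helper are
 * hypothetical.
 */
#if 0
static int example_notify_reader(struct task_struct *reader)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGIO;
	info.si_code = POLL_IN;	/* data is available */
	return send_sig_info(SIGIO, &info, reader);
}
#endif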
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);
void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV.  And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}
int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}
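/*
 * Illustrative sketch, not built and not part of the original file: the
 * usual caller of force_sig_fault() is an architecture fault handler
 * reporting a bad user access.  On a non-ia64 arch the
 * ___ARCH_SI_IA64() arguments compile away.  The
 * example_report_bad_access() helper is hypothetical.
 */
#if 0
static void example_report_bad_access(unsigned long address)
{
	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
}
#endif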
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}
#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif
int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_PERF;
	info.si_addr = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	return force_sig_info(&info);
}
/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
				      force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}
/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}
/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}
/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
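/*
 * Illustrative sketch, not built and not part of the original file:
 * kernel code that only has a numeric pid can take a reference on the
 * struct pid and use kill_pid().  The example_kill_by_nr() helper is
 * hypothetical; priv=1 marks the signal as kernel-generated
 * (SEND_SIG_PRIV).
 */
#if 0
static int example_kill_by_nr(pid_t nr, int sig)
{
	struct pid *pid = find_get_pid(nr);
	int ret;

	if (!pid)
		return -ESRCH;
	ret = kill_pid(pid, sig, 1);
	put_pid(pid);
	return ret;
}
#endif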
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
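/*
 * Illustrative sketch, not built and not part of the original file: the
 * preallocated sigqueue life cycle used by POSIX timers.  Allocation
 * can fail early (reportable as EAGAIN at timer_create() time); sending
 * the preallocated entry later cannot run out of memory.  The
 * struct example_timer and its helpers are hypothetical.
 */
#if 0
struct example_timer {
	struct sigqueue *sigq;
	struct pid *target;
};

static int example_timer_create(struct example_timer *tmr)
{
	tmr->sigq = sigqueue_alloc();
	if (!tmr->sigq)
		return -EAGAIN;
	tmr->sigq->info.si_signo = SIGALRM;
	tmr->sigq->info.si_code = SI_TIMER;
	return 0;
}

static void example_timer_fire(struct example_timer *tmr)
{
	/* returns > 0 if the signal was ignored, < 0 if the task is gone */
	send_sigqueue(tmr->sigq, tmr->target, PIDTYPE_TGID);
}

static void example_timer_destroy(struct example_timer *tmr)
{
	sigqueue_free(tmr->sigq);
}
#endif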
static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}
2192 * This must be called with current->sighand->siglock held.
2194 * This should be the path for all ptrace stops.
2195 * We always set current->last_siginfo while stopped here.
2196 * That makes it a way to test a stopped process for
2197 * being ptrace-stopped vs being job-control-stopped.
2199 * If we actually decide not to stop at all because the tracer
2200 * is gone, we keep current->exit_code unless clear_code.
2202 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2203 __releases(¤t->sighand->siglock)
2204 __acquires(¤t->sighand->siglock)
2206 bool gstop_done = false;
2208 if (arch_ptrace_stop_needed(exit_code, info)) {
2210 * The arch code has something special to do before a
2211 * ptrace stop. This is allowed to block, e.g. for faults
2212 * on user stack pages. We can't keep the siglock while
2213 * calling arch_ptrace_stop, so we must release it now.
2214 * To preserve proper semantics, we must do this before
2215 * any signal bookkeeping like checking group_stop_count.
2217 spin_unlock_irq(¤t->sighand->siglock);
2218 arch_ptrace_stop(exit_code, info);
2219 spin_lock_irq(¤t->sighand->siglock);
2223 * schedule() will not sleep if there is a pending signal that
2224 * can awaken the task.
2226 set_special_state(TASK_TRACED);
2229 * We're committing to trapping. TRACED should be visible before
2230 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2231 * Also, transition to TRACED and updates to ->jobctl should be
2232 * atomic with respect to siglock and should be done after the arch
2233 * hook as siglock is released and regrabbed across it.
2238 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2240 * set_current_state() smp_wmb();
2242 * wait_task_stopped()
2243 * task_stopped_code()
2244 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2248 current->last_siginfo = info;
2249 current->exit_code = exit_code;
2252 * If @why is CLD_STOPPED, we're trapping to participate in a group
2253 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
2254 * across siglock relocks since INTERRUPT was scheduled, PENDING
2255 * could be clear now. We act as if SIGCONT is received after
2256 * TASK_TRACED is entered - ignore it.
2258 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2259 gstop_done = task_participate_group_stop(current);
2261 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2262 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2263 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2264 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2266 /* entering a trap, clear TRAPPING */
2267 task_clear_jobctl_trapping(current);
2269 spin_unlock_irq(¤t->sighand->siglock);
2270 read_lock(&tasklist_lock);
2271 if (may_ptrace_stop()) {
2273 * Notify parents of the stop.
2275 * While ptraced, there are two parents - the ptracer and
2276 * the real_parent of the group_leader. The ptracer should
2277 * know about every stop while the real parent is only
2278 * interested in the completion of group stop. The states
2279 * for the two don't interact with each other. Notify
2280 * separately unless they're gonna be duplicates.
2282 do_notify_parent_cldstop(current, true, why);
2283 if (gstop_done && ptrace_reparented(current))
2284 do_notify_parent_cldstop(current, false, why);
2287 * Don't want to allow preemption here, because
2288 * sys_ptrace() needs this task to be inactive.
2290 * XXX: implement read_unlock_no_resched().
2293 read_unlock(&tasklist_lock);
2294 cgroup_enter_frozen();
2295 preempt_enable_no_resched();
2296 freezable_schedule();
2297 cgroup_leave_frozen(true);
2300 * By the time we got the lock, our tracer went away.
2301 * Don't drop the lock yet, another tracer may come.
2303 * If @gstop_done, the ptracer went away between group stop
2304 * completion and here. During detach, it would have set
2305 * JOBCTL_STOP_PENDING on us and we'll re-enter
2306 * TASK_STOPPED in do_signal_stop() on return, so notifying
2307 * the real parent of the group stop completion is enough.
2309 if (gstop_done)
2310 do_notify_parent_cldstop(current, false, why);
2312 /* tasklist protects us from ptrace_freeze_traced() */
2313 __set_current_state(TASK_RUNNING);
2314 if (clear_code)
2315 current->exit_code = 0;
2316 read_unlock(&tasklist_lock);
2320 * We are back. Now reacquire the siglock before touching
2321 * last_siginfo, so that we are sure to have synchronized with
2322 * any signal-sending on another CPU that wants to examine it.
2324 spin_lock_irq(&current->sighand->siglock);
2325 current->last_siginfo = NULL;
2327 /* LISTENING can be set only during STOP traps, clear it */
2328 current->jobctl &= ~JOBCTL_LISTENING;
2331 * Queued signals ignored us while we were stopped for tracing.
2332 * So check for any that we should take before resuming user mode.
2333 * This sets TIF_SIGPENDING, but never clears it.
2335 recalc_sigpending_tsk(current);
2338 static void ptrace_do_notify(int signr, int exit_code, int why)
2340 kernel_siginfo_t info;
2342 clear_siginfo(&info);
2343 info.si_signo = signr;
2344 info.si_code = exit_code;
2345 info.si_pid = task_pid_vnr(current);
2346 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2348 /* Let the debugger run. */
2349 ptrace_stop(exit_code, why, 1, &info);
2352 void ptrace_notify(int exit_code)
2354 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2355 if (unlikely(current->task_works))
2356 task_work_run();
2358 spin_lock_irq(&current->sighand->siglock);
2359 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2360 spin_unlock_irq(&current->sighand->siglock);
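/*
 * Editor's note: a minimal tracer-side sketch (not part of this file)
 * showing how the SIGTRAP | (event << 8) exit_code set up above is
 * observed from userspace through the wait status.
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>

/* After waitpid(), a ptrace event stop carries the event in bits 16+. */
static void decode_stop(pid_t pid, int status)
{
	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) {
		int event = status >> 16;	/* PTRACE_EVENT_* number, or 0 */

		if (event)
			printf("%d: ptrace event %d\n", pid, event);
		else
			printf("%d: plain SIGTRAP stop\n", pid);
	}
}
#endif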
2364 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2365 * @signr: signr causing group stop if initiating
2367 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2368 * and participate in it. If already set, participate in the existing
2369 * group stop. If participated in a group stop (and thus slept), %true is
2370 * returned with siglock released.
2372 * If ptraced, this function doesn't handle stop itself. Instead,
2373 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2374 * untouched. The caller must ensure that INTERRUPT trap handling takes
2375 * place afterwards.
2377 * CONTEXT:
2378 * Must be called with @current->sighand->siglock held, which is released
2379 * on %true return.
2381 * RETURNS:
2382 * %false if group stop is already cancelled or ptrace trap is scheduled.
2383 * %true if participated in group stop.
2385 static bool do_signal_stop(int signr)
2386 __releases(&current->sighand->siglock)
2388 struct signal_struct *sig = current->signal;
2390 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2391 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2392 struct task_struct *t;
2394 /* signr will be recorded in task->jobctl for retries */
2395 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2397 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2398 unlikely(signal_group_exit(sig)))
2399 return false;
2401 * There is no group stop already in progress. We must
2402 * initiate one now.
2404 * While ptraced, a task may be resumed while group stop is
2405 * still in effect and then receive a stop signal and
2406 * initiate another group stop. This deviates from the
2407 * usual behavior as two consecutive stop signals can't
2408 * cause two group stops when !ptraced. That is why we
2409 * also check !task_is_stopped(t) below.
2411 * The condition can be distinguished by testing whether
2412 * SIGNAL_STOP_STOPPED is already set. Don't generate
2413 * group_exit_code in such case.
2415 * This is not necessary for SIGNAL_STOP_CONTINUED because
2416 * an intervening stop signal is required to cause two
2417 * continued events regardless of ptrace.
2419 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2420 sig->group_exit_code = signr;
2422 sig->group_stop_count = 0;
2424 if (task_set_jobctl_pending(current, signr | gstop))
2425 sig->group_stop_count++;
2427 t = current;
2428 while_each_thread(current, t) {
2430 * Setting state to TASK_STOPPED for a group
2431 * stop is always done with the siglock held,
2432 * so this check has no races.
2434 if (!task_is_stopped(t) &&
2435 task_set_jobctl_pending(t, signr | gstop)) {
2436 sig->group_stop_count++;
2437 if (likely(!(t->ptrace & PT_SEIZED)))
2438 signal_wake_up(t, 0);
2439 else
2440 ptrace_trap_notify(t);
2445 if (likely(!current->ptrace)) {
2446 int notify = 0;
2449 * If there are no other threads in the group, or if there
2450 * is a group stop in progress and we are the last to stop,
2451 * report to the parent.
2453 if (task_participate_group_stop(current))
2454 notify = CLD_STOPPED;
2456 set_special_state(TASK_STOPPED);
2457 spin_unlock_irq(&current->sighand->siglock);
2460 * Notify the parent of the group stop completion. Because
2461 * we're not holding either the siglock or tasklist_lock
2462 * here, ptracer may attach inbetween; however, this is for
2463 * group stop and should always be delivered to the real
2464 * parent of the group leader. The new ptracer will get
2465 * its notification when this task transitions into
2466 * TASK_TRACED.
2468 if (notify) {
2469 read_lock(&tasklist_lock);
2470 do_notify_parent_cldstop(current, false, notify);
2471 read_unlock(&tasklist_lock);
2474 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2475 cgroup_enter_frozen();
2476 freezable_schedule();
2477 return true;
2478 } else {
2480 * While ptraced, group stop is handled by STOP trap.
2481 * Schedule it and let the caller deal with it.
2483 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2484 return false;
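/*
 * Editor's note: the group-stop machinery above is what a plain
 * kill(-pgid, SIGSTOP) from userspace ends up driving. A hedged sketch,
 * assuming pgid names a process group led by a child of the caller:
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>

/* Stop a whole process group and reap the leader's stop notification. */
static int stop_group(pid_t pgid)
{
	int status;

	if (kill(-pgid, SIGSTOP) < 0)		/* negative pid = whole group */
		return -1;
	if (waitpid(pgid, &status, WUNTRACED) < 0)
		return -1;
	return WIFSTOPPED(status) ? WSTOPSIG(status) : -1;
}
#endif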
2489 * do_jobctl_trap - take care of ptrace jobctl traps
2491 * When PT_SEIZED, it's used for both group stop and explicit
2492 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2493 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2494 * the stop signal; otherwise, %SIGTRAP.
2496 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2497 * number as exit_code and no siginfo.
2500 * Must be called with @current->sighand->siglock held, which may be
2501 * released and re-acquired before returning with intervening sleep.
2503 static void do_jobctl_trap(void)
2505 struct signal_struct *signal = current->signal;
2506 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2508 if (current->ptrace & PT_SEIZED) {
2509 if (!signal->group_stop_count &&
2510 !(signal->flags & SIGNAL_STOP_STOPPED))
2511 signr = SIGTRAP;
2512 WARN_ON_ONCE(!signr);
2513 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2514 CLD_STOPPED);
2515 } else {
2516 WARN_ON_ONCE(!signr);
2517 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2518 current->exit_code = 0;
2523 * do_freezer_trap - handle the freezer jobctl trap
2525 * Puts the task into the frozen state, unless the task is about to quit;
2526 * in that case, it just drops JOBCTL_TRAP_FREEZE.
2529 * Must be called with @current->sighand->siglock held,
2530 * which is always released before returning.
2532 static void do_freezer_trap(void)
2533 __releases(&current->sighand->siglock)
2536 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2537 * let's make another loop to give it a chance to be handled.
2538 * In any case, we'll return back.
2540 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2541 JOBCTL_TRAP_FREEZE) {
2542 spin_unlock_irq(&current->sighand->siglock);
2543 return;
2547 * Now we're sure that there is no pending fatal signal and no
2548 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2549 * immediately (if there is a non-fatal signal pending), and
2550 * put the task into sleep.
2552 __set_current_state(TASK_INTERRUPTIBLE);
2553 clear_thread_flag(TIF_SIGPENDING);
2554 spin_unlock_irq(&current->sighand->siglock);
2555 cgroup_enter_frozen();
2556 freezable_schedule();
2559 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2562 * We do not check sig_kernel_stop(signr) but set this marker
2563 * unconditionally because we do not know whether debugger will
2564 * change signr. This flag has no meaning unless we are going
2565 * to stop after return from ptrace_stop(). In this case it will
2566 * be checked in do_signal_stop(), we should only stop if it was
2567 * not cleared by SIGCONT while we were sleeping. See also the
2568 * comment in dequeue_signal().
2570 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2571 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2573 /* We're back. Did the debugger cancel the sig? */
2574 signr = current->exit_code;
2575 if (signr == 0)
2576 return signr;
2578 current->exit_code = 0;
2581 * Update the siginfo structure if the signal has
2582 * changed. If the debugger wanted something
2583 * specific in the siginfo structure then it should
2584 * have updated *info via PTRACE_SETSIGINFO.
2586 if (signr != info->si_signo) {
2587 clear_siginfo(info);
2588 info->si_signo = signr;
2589 info->si_errno = 0;
2590 info->si_code = SI_USER;
2592 info->si_pid = task_pid_vnr(current->parent);
2593 info->si_uid = from_kuid_munged(current_user_ns(),
2594 task_uid(current->parent));
2598 /* If the (new) signal is now blocked, requeue it. */
2599 if (sigismember(&current->blocked, signr)) {
2600 send_signal(signr, info, current, PIDTYPE_PID);
2601 signr = 0;
2602 }
2604 return signr;
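/*
 * Editor's note: the exit_code consulted above is supplied by the tracer.
 * A minimal tracer-side sketch: the 'data' argument of PTRACE_CONT after
 * a signal-delivery stop becomes the signal that is actually delivered;
 * 0 cancels it, another signal number replaces it.
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <sys/types.h>
#include <sys/ptrace.h>

static long resume_with_signal(pid_t pid, int sig)
{
	/* sig == 0 suppresses the pending signal entirely */
	return ptrace(PTRACE_CONT, pid, (void *)0, (void *)(long)sig);
}
#endif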
2607 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2609 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2610 case SIL_FAULT:
2611 case SIL_FAULT_TRAPNO:
2612 case SIL_FAULT_MCEERR:
2613 case SIL_FAULT_BNDERR:
2614 case SIL_FAULT_PKUERR:
2615 case SIL_FAULT_PERF_EVENT:
2616 ksig->info.si_addr = arch_untagged_si_addr(
2617 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2629 bool get_signal(struct ksignal *ksig)
2631 struct sighand_struct *sighand = current->sighand;
2632 struct signal_struct *signal = current->signal;
2635 if (unlikely(current->task_works))
2636 task_work_run();
2639 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2640 * that the arch handlers don't all have to do it. If we get here
2641 * without TIF_SIGPENDING, just exit after running signal work.
2643 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2644 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2645 tracehook_notify_signal();
2646 if (!task_sigpending(current))
2647 return false;
2648 }
2650 if (unlikely(uprobe_deny_signal()))
2651 return false;
2654 * Do this once, we can't return to user-mode if freezing() == T.
2655 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2656 * thus do not need another check after return.
2658 try_to_freeze();
2660 relock:
2661 spin_lock_irq(&sighand->siglock);
2664 * Every stopped thread goes here after wakeup. Check to see if
2665 * we should notify the parent, prepare_signal(SIGCONT) encodes
2666 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2668 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2669 int why;
2671 if (signal->flags & SIGNAL_CLD_CONTINUED)
2672 why = CLD_CONTINUED;
2673 else
2674 why = CLD_STOPPED;
2676 signal->flags &= ~SIGNAL_CLD_MASK;
2678 spin_unlock_irq(&sighand->siglock);
2681 * Notify the parent that we're continuing. This event is
2682 * always per-process and doesn't make a whole lot of sense
2683 * for ptracers, who shouldn't consume the state via
2684 * wait(2) either, but, for backward compatibility, notify
2685 * the ptracer of the group leader too unless it's going to be
2686 * a duplicate.
2688 read_lock(&tasklist_lock);
2689 do_notify_parent_cldstop(current, false, why);
2691 if (ptrace_reparented(current->group_leader))
2692 do_notify_parent_cldstop(current->group_leader,
2693 true, why);
2694 read_unlock(&tasklist_lock);
2699 /* Has this task already been marked for death? */
2700 if (signal_group_exit(signal)) {
2701 ksig->info.si_signo = signr = SIGKILL;
2702 sigdelset(&current->pending.signal, SIGKILL);
2703 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2704 &sighand->action[SIGKILL - 1]);
2705 recalc_sigpending();
2706 goto fatal;
2710 struct k_sigaction *ka;
2712 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2713 do_signal_stop(0))
2714 goto relock;
2716 if (unlikely(current->jobctl &
2717 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2718 if (current->jobctl & JOBCTL_TRAP_MASK) {
2719 do_jobctl_trap();
2720 spin_unlock_irq(&sighand->siglock);
2721 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2722 do_freezer_trap();
2724 goto relock;
2725 }
2728 * If the task is leaving the frozen state, let's update
2729 * cgroup counters and reset the frozen bit.
2731 if (unlikely(cgroup_task_frozen(current))) {
2732 spin_unlock_irq(&sighand->siglock);
2733 cgroup_leave_frozen(false);
2734 goto relock;
2735 }
2738 * Signals generated by the execution of an instruction
2739 * need to be delivered before any other pending signals
2740 * so that the instruction pointer in the signal stack
2741 * frame points to the faulting instruction.
2743 signr = dequeue_synchronous_signal(&ksig->info);
2744 if (!signr)
2745 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2747 if (!signr)
2748 break; /* will return 0 */
2750 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2751 !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2752 signr = ptrace_signal(signr, &ksig->info);
2753 if (!signr)
2754 continue;
2755 }
2757 ka = &sighand->action[signr-1];
2759 /* Trace actually delivered signals. */
2760 trace_signal_deliver(signr, &ksig->info, ka);
2762 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2763 continue;
2764 if (ka->sa.sa_handler != SIG_DFL) {
2765 /* Run the handler. */
2766 ksig->ka = *ka;
2768 if (ka->sa.sa_flags & SA_ONESHOT)
2769 ka->sa.sa_handler = SIG_DFL;
2771 break; /* will return non-zero "signr" value */
2775 * Now we are doing the default action for this signal.
2777 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2778 continue;
2781 * Global init gets no signals it doesn't want.
2782 * Container-init gets no signals it doesn't want from same
2783 * container.
2785 * Note that if global/container-init sees a sig_kernel_only()
2786 * signal here, the signal must have been generated internally
2787 * or must have come from an ancestor namespace. In either
2788 * case, the signal cannot be dropped.
2790 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2791 !sig_kernel_only(signr))
2792 continue;
2794 if (sig_kernel_stop(signr)) {
2796 * The default action is to stop all threads in
2797 * the thread group. The job control signals
2798 * do nothing in an orphaned pgrp, but SIGSTOP
2799 * always works. Note that siglock needs to be
2800 * dropped during the call to is_orphaned_pgrp()
2801 * because of lock ordering with tasklist_lock.
2802 * This allows an intervening SIGCONT to be posted.
2803 * We need to check for that and bail out if necessary.
2805 if (signr != SIGSTOP) {
2806 spin_unlock_irq(&sighand->siglock);
2808 /* signals can be posted during this window */
2810 if (is_current_pgrp_orphaned())
2811 goto relock;
2813 spin_lock_irq(&sighand->siglock);
2816 if (likely(do_signal_stop(ksig->info.si_signo))) {
2817 /* It released the siglock. */
2818 goto relock;
2822 * We didn't actually stop, due to a race
2823 * with SIGCONT or something like that.
2825 continue;
2828 fatal:
2829 spin_unlock_irq(&sighand->siglock);
2830 if (unlikely(cgroup_task_frozen(current)))
2831 cgroup_leave_frozen(true);
2834 * Anything else is fatal, maybe with a core dump.
2836 current->flags |= PF_SIGNALED;
2838 if (sig_kernel_coredump(signr)) {
2839 if (print_fatal_signals)
2840 print_fatal_signal(ksig->info.si_signo);
2841 proc_coredump_connector(current);
2843 * If it was able to dump core, this kills all
2844 * other threads in the group and synchronizes with
2845 * their demise. If we lost the race with another
2846 * thread getting here, it set group_exit_code
2847 * first and our do_group_exit call below will use
2848 * that value and ignore the one we pass it.
2850 do_coredump(&ksig->info);
2854 * PF_IO_WORKER threads will catch and exit on fatal signals
2855 * themselves. They have cleanup that must be performed, so
2856 * we cannot call do_exit() on their behalf.
2858 if (current->flags & PF_IO_WORKER)
2859 goto out;
2862 * Death signals, no core dump.
2864 do_group_exit(ksig->info.si_signo);
2867 spin_unlock_irq(&sighand->siglock);
2868 out:
2869 ksig->sig = signr;
2871 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2872 hide_si_addr_tag_bits(ksig);
2874 return ksig->sig > 0;
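/*
 * Editor's note: SA_EXPOSE_TAGBITS, tested just above, is a userspace
 * opt-in. A hedged sketch, assuming a libc that exposes the flag (kernel
 * v5.11+); without it the kernel strips architecture tag bits from si_addr
 * via hide_si_addr_tag_bits():
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <signal.h>
#include <string.h>

/* Ask for the raw, still-tagged faulting address in si_addr. */
static int handle_segv_with_tags(void (*fn)(int, siginfo_t *, void *))
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = fn;
	sa.sa_flags = SA_SIGINFO | SA_EXPOSE_TAGBITS;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}
#endif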
2878 * signal_delivered - update blocked signals and notify tracing after delivery
2879 * @ksig: kernel signal struct
2880 * @stepping: nonzero if debugger single-step or block-step in use
2882 * This function should be called when a signal has successfully been
2883 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2884 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2885 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2887 static void signal_delivered(struct ksignal *ksig, int stepping)
2889 sigset_t blocked;
2891 /* A signal was successfully delivered, and the
2892 * saved sigmask was stored on the signal frame,
2893 * and will be restored by sigreturn. So we can
2894 * simply clear the restore sigmask flag. */
2895 clear_restore_sigmask();
2897 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2898 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2899 sigaddset(&blocked, ksig->sig);
2900 set_current_blocked(&blocked);
2901 if (current->sas_ss_flags & SS_AUTODISARM)
2902 sas_ss_reset(current);
2903 tracehook_signal_handler(stepping);
2906 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2908 if (failed)
2909 force_sigsegv(ksig->sig);
2910 else
2911 signal_delivered(ksig, stepping);
2915 * It could be that complete_signal() picked us to notify about the
2916 * group-wide signal. Other threads should be notified now to take
2917 * the shared signals in @which since we will not.
2919 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2921 sigset_t retarget;
2922 struct task_struct *t;
2924 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2925 if (sigisemptyset(&retarget))
2926 return;
2928 t = tsk;
2929 while_each_thread(tsk, t) {
2930 if (t->flags & PF_EXITING)
2931 continue;
2933 if (!has_pending_signals(&retarget, &t->blocked))
2934 continue;
2935 /* Remove the signals this thread can handle. */
2936 sigandsets(&retarget, &retarget, &t->blocked);
2938 if (!task_sigpending(t))
2939 signal_wake_up(t, 0);
2941 if (sigisemptyset(&retarget))
2942 break;
2946 void exit_signals(struct task_struct *tsk)
2948 int group_stop = 0;
2949 sigset_t unblocked;
2952 * @tsk is about to have PF_EXITING set - lock out users which
2953 * expect stable threadgroup.
2955 cgroup_threadgroup_change_begin(tsk);
2957 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2958 tsk->flags |= PF_EXITING;
2959 cgroup_threadgroup_change_end(tsk);
2960 return;
2963 spin_lock_irq(&tsk->sighand->siglock);
2965 * From now this task is not visible for group-wide signals,
2966 * see wants_signal(), do_signal_stop().
2968 tsk->flags |= PF_EXITING;
2970 cgroup_threadgroup_change_end(tsk);
2972 if (!task_sigpending(tsk))
2973 goto out;
2975 unblocked = tsk->blocked;
2976 signotset(&unblocked);
2977 retarget_shared_pending(tsk, &unblocked);
2979 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2980 task_participate_group_stop(tsk))
2981 group_stop = CLD_STOPPED;
2982 out:
2983 spin_unlock_irq(&tsk->sighand->siglock);
2986 * If group stop has completed, deliver the notification. This
2987 * should always go to the real parent of the group leader.
2989 if (unlikely(group_stop)) {
2990 read_lock(&tasklist_lock);
2991 do_notify_parent_cldstop(tsk, false, group_stop);
2992 read_unlock(&tasklist_lock);
2997 * System call entry points.
3001 * sys_restart_syscall - restart a system call
3003 SYSCALL_DEFINE0(restart_syscall)
3005 struct restart_block *restart = &current->restart_block;
3006 return restart->fn(restart);
3009 long do_no_restart_syscall(struct restart_block *param)
3011 return -EINTR;
3014 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3016 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3017 sigset_t newblocked;
3018 /* A set of now blocked but previously unblocked signals. */
3019 sigandnsets(&newblocked, newset, &current->blocked);
3020 retarget_shared_pending(tsk, &newblocked);
3022 tsk->blocked = *newset;
3023 recalc_sigpending();
3027 * set_current_blocked - change current->blocked mask
3028 * @newset: new mask
3030 * It is wrong to change ->blocked directly, this helper should be used
3031 * to ensure the process can't miss a shared signal we are going to block.
3033 void set_current_blocked(sigset_t *newset)
3035 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3036 __set_current_blocked(newset);
3039 void __set_current_blocked(const sigset_t *newset)
3041 struct task_struct *tsk = current;
3044 * In case the signal mask hasn't changed, there is nothing we need
3045 * to do; current->blocked shouldn't be modified by any other task.
3047 if (sigequalsets(&tsk->blocked, newset))
3048 return;
3050 spin_lock_irq(&tsk->sighand->siglock);
3051 __set_task_blocked(tsk, newset);
3052 spin_unlock_irq(&tsk->sighand->siglock);
3056 * This is also useful for kernel threads that want to temporarily
3057 * (or permanently) block certain signals.
3059 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3060 * interface happily blocks "unblockable" signals like SIGKILL
3061 * and friends.
3063 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3065 struct task_struct *tsk = current;
3066 sigset_t newset;
3068 /* Lockless, only current can change ->blocked, never from irq */
3069 if (oldset)
3070 *oldset = tsk->blocked;
3072 switch (how) {
3073 case SIG_BLOCK:
3074 sigorsets(&newset, &tsk->blocked, set);
3075 break;
3076 case SIG_UNBLOCK:
3077 sigandnsets(&newset, &tsk->blocked, set);
3078 break;
3079 case SIG_SETMASK:
3080 newset = *set;
3081 break;
3082 default:
3083 return -EINVAL;
3084 }
3086 __set_current_blocked(&newset);
3087 return 0;
3089 EXPORT_SYMBOL(sigprocmask);
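/*
 * Editor's note: the userspace mirror of the helper above. A minimal
 * sketch of the classic block/restore pattern around a critical section:
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <signal.h>

static void with_sigint_blocked(void (*critical)(void))
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigprocmask(SIG_BLOCK, &set, &old);	/* SIGKILL/SIGSTOP silently stay */
	critical();
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore the previous mask */
}
#endif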
3092 * The API helps set app-provided sigmasks.
3094 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3095 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3097 * Note that it does set_restore_sigmask() in advance, so it must always
3098 * be paired with restore_saved_sigmask_unless() before return from syscall.
3100 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3106 if (sigsetsize != sizeof(sigset_t))
3108 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3111 set_restore_sigmask();
3112 current->saved_sigmask = current->blocked;
3113 set_current_blocked(&kmask);
3118 #ifdef CONFIG_COMPAT
3119 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3126 if (sigsetsize != sizeof(compat_sigset_t))
3128 if (get_compat_sigset(&kmask, umask))
3131 set_restore_sigmask();
3132 current->saved_sigmask = current->blocked;
3133 set_current_blocked(&kmask);
3140 * sys_rt_sigprocmask - change the list of currently blocked signals
3141 * @how: whether to add, remove, or set signals
3142 * @nset: signals to add, remove, or set (if non-null)
3143 * @oset: previous value of signal mask if non-null
3144 * @sigsetsize: size of sigset_t type
3146 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3147 sigset_t __user *, oset, size_t, sigsetsize)
3149 sigset_t old_set, new_set;
3150 int error;
3152 /* XXX: Don't preclude handling different sized sigset_t's. */
3153 if (sigsetsize != sizeof(sigset_t))
3154 return -EINVAL;
3156 old_set = current->blocked;
3158 if (nset) {
3159 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3160 return -EFAULT;
3161 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3163 error = sigprocmask(how, &new_set, NULL);
3164 if (error)
3165 return error;
3166 }
3168 if (oset) {
3169 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3170 return -EFAULT;
3171 }
3173 return 0;
3176 #ifdef CONFIG_COMPAT
3177 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3178 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3180 sigset_t old_set = current->blocked;
3182 /* XXX: Don't preclude handling different sized sigset_t's. */
3183 if (sigsetsize != sizeof(sigset_t))
3184 return -EINVAL;
3186 if (nset) {
3187 sigset_t new_set;
3188 int error;
3189 if (get_compat_sigset(&new_set, nset))
3190 return -EFAULT;
3191 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3193 error = sigprocmask(how, &new_set, NULL);
3194 if (error)
3195 return error;
3196 }
3197 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3201 static void do_sigpending(sigset_t *set)
3203 spin_lock_irq(&current->sighand->siglock);
3204 sigorsets(set, &current->pending.signal,
3205 &current->signal->shared_pending.signal);
3206 spin_unlock_irq(&current->sighand->siglock);
3208 /* Outside the lock because only this thread touches it. */
3209 sigandsets(set, &current->blocked, set);
3213 * sys_rt_sigpending - examine a pending signal that has been raised
3214 * while blocked
3215 * @uset: stores pending signals
3216 * @sigsetsize: size of sigset_t type or larger
3218 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3220 sigset_t set;
3222 if (sigsetsize > sizeof(*uset))
3223 return -EINVAL;
3225 do_sigpending(&set);
3227 if (copy_to_user(uset, &set, sigsetsize))
3228 return -EFAULT;
3230 return 0;
3233 #ifdef CONFIG_COMPAT
3234 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3235 compat_size_t, sigsetsize)
3236 sigset_t set;
3239 if (sigsetsize > sizeof(*uset))
3240 return -EINVAL;
3242 do_sigpending(&set);
3244 return put_compat_sigset(uset, &set, sigsetsize);
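/*
 * Editor's note: do_sigpending() above reports only signals that are both
 * pending and blocked. A short userspace sketch of that contract:
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <signal.h>
#include <stdio.h>

static void report_pending_usr1(void)
{
	sigset_t pend;

	if (sigpending(&pend) == 0 && sigismember(&pend, SIGUSR1))
		printf("SIGUSR1 raised while blocked, still pending\n");
}
#endif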
3248 static const struct {
3249 unsigned char limit, layout;
3250 } sig_sicodes[] = {
3251 [SIGILL] = { NSIGILL, SIL_FAULT },
3252 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3253 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3254 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3255 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3256 #ifdef SIGEMT
3257 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3258 #endif
3259 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3260 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3261 [SIGSYS] = { NSIGSYS, SIL_SYS },
3264 static bool known_siginfo_layout(unsigned sig, int si_code)
3266 if (si_code == SI_KERNEL)
3267 return true;
3268 else if (si_code > SI_USER) {
3269 if (sig_specific_sicodes(sig)) {
3270 if (si_code <= sig_sicodes[sig].limit)
3271 return true;
3272 }
3273 else if (si_code <= NSIGPOLL)
3274 return true;
3275 }
3276 else if (si_code >= SI_DETHREAD)
3277 return true;
3278 else if (si_code == SI_ASYNCNL)
3279 return true;
3280 return false;
3283 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3285 enum siginfo_layout layout = SIL_KILL;
3286 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3287 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3288 (si_code <= sig_sicodes[sig].limit)) {
3289 layout = sig_sicodes[sig].layout;
3290 /* Handle the exceptions */
3291 if ((sig == SIGBUS) &&
3292 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3293 layout = SIL_FAULT_MCEERR;
3294 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3295 layout = SIL_FAULT_BNDERR;
3297 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3298 layout = SIL_FAULT_PKUERR;
3300 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3301 layout = SIL_FAULT_PERF_EVENT;
3302 else if (IS_ENABLED(CONFIG_SPARC) &&
3303 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3304 layout = SIL_FAULT_TRAPNO;
3305 else if (IS_ENABLED(CONFIG_ALPHA) &&
3307 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3308 layout = SIL_FAULT_TRAPNO;
3310 else if (si_code <= NSIGPOLL)
3311 layout = SIL_POLL;
3312 } else {
3313 if (si_code == SI_TIMER)
3314 layout = SIL_TIMER;
3315 else if (si_code == SI_SIGIO)
3316 layout = SIL_POLL;
3317 else if (si_code < 0)
3318 layout = SIL_RT;
3320 return layout;
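/*
 * Editor's note: the SIL_* layout picked above decides which siginfo
 * fields a handler may read. A hedged SA_SIGINFO sketch for the SIGSEGV
 * fault layouts (fprintf is used only for brevity here; it is not
 * async-signal-safe):
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <signal.h>
#include <stdio.h>

static void segv_handler(int sig, siginfo_t *si, void *uctx)
{
	const char *kind = si->si_code == SEGV_MAPERR ? "unmapped" :
			   si->si_code == SEGV_ACCERR ? "protection" :
			   "other";

	fprintf(stderr, "SIGSEGV (%s) at %p\n", kind, si->si_addr);
}
#endif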
3323 static inline char __user *si_expansion(const siginfo_t __user *info)
3325 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3328 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3330 char __user *expansion = si_expansion(to);
3331 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3332 return -EFAULT;
3333 if (clear_user(expansion, SI_EXPANSION_SIZE))
3334 return -EFAULT;
3335 return 0;
3338 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3339 const siginfo_t __user *from)
3341 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3342 char __user *expansion = si_expansion(from);
3343 char buf[SI_EXPANSION_SIZE];
3344 int i;
3346 * An unknown si_code might need more than
3347 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3348 * extra bytes are 0. This guarantees copy_siginfo_to_user
3349 * will return this data to userspace exactly.
3351 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3352 return -EFAULT;
3353 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3354 if (buf[i] != 0)
3355 return -E2BIG;
3356 }
3358 return 0;
3361 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3362 const siginfo_t __user *from)
3364 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3365 return -EFAULT;
3366 to->si_signo = signo;
3367 return post_copy_siginfo_from_user(to, from);
3370 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3372 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3373 return -EFAULT;
3374 return post_copy_siginfo_from_user(to, from);
3377 #ifdef CONFIG_COMPAT
3379 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3380 * @to: compat siginfo destination
3381 * @from: kernel siginfo source
3383 * Note: This function does not work properly for the SIGCHLD on x32, but
3384 * fortunately it doesn't have to. The only valid callers for this function are
3385 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3386 * The latter does not care because SIGCHLD will never cause a coredump.
3388 void copy_siginfo_to_external32(struct compat_siginfo *to,
3389 const struct kernel_siginfo *from)
3391 memset(to, 0, sizeof(*to));
3393 to->si_signo = from->si_signo;
3394 to->si_errno = from->si_errno;
3395 to->si_code = from->si_code;
3396 switch (siginfo_layout(from->si_signo, from->si_code)) {
3397 case SIL_KILL:
3398 to->si_pid = from->si_pid;
3399 to->si_uid = from->si_uid;
3400 break;
3401 case SIL_TIMER:
3402 to->si_tid = from->si_tid;
3403 to->si_overrun = from->si_overrun;
3404 to->si_int = from->si_int;
3405 break;
3406 case SIL_POLL:
3407 to->si_band = from->si_band;
3408 to->si_fd = from->si_fd;
3409 break;
3410 case SIL_FAULT:
3411 to->si_addr = ptr_to_compat(from->si_addr);
3412 break;
3413 case SIL_FAULT_TRAPNO:
3414 to->si_addr = ptr_to_compat(from->si_addr);
3415 to->si_trapno = from->si_trapno;
3416 break;
3417 case SIL_FAULT_MCEERR:
3418 to->si_addr = ptr_to_compat(from->si_addr);
3419 to->si_addr_lsb = from->si_addr_lsb;
3420 break;
3421 case SIL_FAULT_BNDERR:
3422 to->si_addr = ptr_to_compat(from->si_addr);
3423 to->si_lower = ptr_to_compat(from->si_lower);
3424 to->si_upper = ptr_to_compat(from->si_upper);
3425 break;
3426 case SIL_FAULT_PKUERR:
3427 to->si_addr = ptr_to_compat(from->si_addr);
3428 to->si_pkey = from->si_pkey;
3429 break;
3430 case SIL_FAULT_PERF_EVENT:
3431 to->si_addr = ptr_to_compat(from->si_addr);
3432 to->si_perf_data = from->si_perf_data;
3433 to->si_perf_type = from->si_perf_type;
3434 break;
3435 case SIL_CHLD:
3436 to->si_pid = from->si_pid;
3437 to->si_uid = from->si_uid;
3438 to->si_status = from->si_status;
3439 to->si_utime = from->si_utime;
3440 to->si_stime = from->si_stime;
3441 break;
3442 case SIL_RT:
3443 to->si_pid = from->si_pid;
3444 to->si_uid = from->si_uid;
3445 to->si_int = from->si_int;
3446 break;
3447 case SIL_SYS:
3448 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3449 to->si_syscall = from->si_syscall;
3450 to->si_arch = from->si_arch;
3451 break;
3452 }
3455 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3456 const struct kernel_siginfo *from)
3458 struct compat_siginfo new;
3460 copy_siginfo_to_external32(&new, from);
3461 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3462 return -EFAULT;
3464 return 0;
3466 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3467 const struct compat_siginfo *from)
3469 clear_siginfo(to);
3470 to->si_signo = from->si_signo;
3471 to->si_errno = from->si_errno;
3472 to->si_code = from->si_code;
3473 switch (siginfo_layout(from->si_signo, from->si_code)) {
3474 case SIL_KILL:
3475 to->si_pid = from->si_pid;
3476 to->si_uid = from->si_uid;
3477 break;
3478 case SIL_TIMER:
3479 to->si_tid = from->si_tid;
3480 to->si_overrun = from->si_overrun;
3481 to->si_int = from->si_int;
3482 break;
3483 case SIL_POLL:
3484 to->si_band = from->si_band;
3485 to->si_fd = from->si_fd;
3486 break;
3487 case SIL_FAULT:
3488 to->si_addr = compat_ptr(from->si_addr);
3489 break;
3490 case SIL_FAULT_TRAPNO:
3491 to->si_addr = compat_ptr(from->si_addr);
3492 to->si_trapno = from->si_trapno;
3493 break;
3494 case SIL_FAULT_MCEERR:
3495 to->si_addr = compat_ptr(from->si_addr);
3496 to->si_addr_lsb = from->si_addr_lsb;
3497 break;
3498 case SIL_FAULT_BNDERR:
3499 to->si_addr = compat_ptr(from->si_addr);
3500 to->si_lower = compat_ptr(from->si_lower);
3501 to->si_upper = compat_ptr(from->si_upper);
3502 break;
3503 case SIL_FAULT_PKUERR:
3504 to->si_addr = compat_ptr(from->si_addr);
3505 to->si_pkey = from->si_pkey;
3506 break;
3507 case SIL_FAULT_PERF_EVENT:
3508 to->si_addr = compat_ptr(from->si_addr);
3509 to->si_perf_data = from->si_perf_data;
3510 to->si_perf_type = from->si_perf_type;
3511 break;
3512 case SIL_CHLD:
3513 to->si_pid = from->si_pid;
3514 to->si_uid = from->si_uid;
3515 to->si_status = from->si_status;
3516 #ifdef CONFIG_X86_X32_ABI
3517 if (in_x32_syscall()) {
3518 to->si_utime = from->_sifields._sigchld_x32._utime;
3519 to->si_stime = from->_sifields._sigchld_x32._stime;
3520 } else
3521 #endif
3522 {
3523 to->si_utime = from->si_utime;
3524 to->si_stime = from->si_stime;
3525 }
3526 break;
3527 case SIL_RT:
3528 to->si_pid = from->si_pid;
3529 to->si_uid = from->si_uid;
3530 to->si_int = from->si_int;
3531 break;
3532 case SIL_SYS:
3533 to->si_call_addr = compat_ptr(from->si_call_addr);
3534 to->si_syscall = from->si_syscall;
3535 to->si_arch = from->si_arch;
3536 break;
3537 }
3538 return 0;
3541 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3542 const struct compat_siginfo __user *ufrom)
3544 struct compat_siginfo from;
3546 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3547 return -EFAULT;
3549 from.si_signo = signo;
3550 return post_copy_siginfo_from_user32(to, &from);
3553 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3554 const struct compat_siginfo __user *ufrom)
3556 struct compat_siginfo from;
3558 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3559 return -EFAULT;
3561 return post_copy_siginfo_from_user32(to, &from);
3563 #endif /* CONFIG_COMPAT */
3566 * do_sigtimedwait - wait for queued signals specified in @which
3567 * @which: queued signals to wait for
3568 * @info: if non-null, the signal's siginfo is returned here
3569 * @ts: upper bound on process time suspension
3571 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3572 const struct timespec64 *ts)
3574 ktime_t *to = NULL, timeout = KTIME_MAX;
3575 struct task_struct *tsk = current;
3576 sigset_t mask = *which;
3577 int sig, ret = 0;
3579 if (ts) {
3580 if (!timespec64_valid(ts))
3581 return -EINVAL;
3582 timeout = timespec64_to_ktime(*ts);
3583 to = &timeout;
3584 }
3587 * Invert the set of allowed signals to get those we want to block.
3589 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3590 signotset(&mask);
3592 spin_lock_irq(&tsk->sighand->siglock);
3593 sig = dequeue_signal(tsk, &mask, info);
3594 if (!sig && timeout) {
3596 * None ready; temporarily unblock the signals we're interested
3597 * in while we sleep, so that we'll be awakened when they
3598 * arrive. Unblocking is always fine, we can avoid
3599 * set_current_blocked().
3601 tsk->real_blocked = tsk->blocked;
3602 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3603 recalc_sigpending();
3604 spin_unlock_irq(&tsk->sighand->siglock);
3606 __set_current_state(TASK_INTERRUPTIBLE);
3607 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3608 HRTIMER_MODE_REL);
3609 spin_lock_irq(&tsk->sighand->siglock);
3610 __set_task_blocked(tsk, &tsk->real_blocked);
3611 sigemptyset(&tsk->real_blocked);
3612 sig = dequeue_signal(tsk, &mask, info);
3614 spin_unlock_irq(&tsk->sighand->siglock);
3616 if (sig)
3617 return sig;
3618 return ret ? -EINTR : -EAGAIN;
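/*
 * Editor's note: a minimal userspace counterpart of the dequeue loop
 * above; the signal must already be blocked to be picked up reliably:
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <signal.h>
#include <time.h>

static int wait_for_usr1(void)
{
	sigset_t set;
	siginfo_t si;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	return sigtimedwait(&set, &si, &ts);	/* signo, or -1 with EAGAIN */
}
#endif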
3622 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3623 * in @uthese
3624 * @uthese: queued signals to wait for
3625 * @uinfo: if non-null, the signal's siginfo is returned here
3626 * @uts: upper bound on process time suspension
3627 * @sigsetsize: size of sigset_t type
3629 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3630 siginfo_t __user *, uinfo,
3631 const struct __kernel_timespec __user *, uts,
3634 sigset_t these;
3635 struct timespec64 ts;
3636 kernel_siginfo_t info;
3637 int ret;
3639 /* XXX: Don't preclude handling different sized sigset_t's. */
3640 if (sigsetsize != sizeof(sigset_t))
3643 if (copy_from_user(&these, uthese, sizeof(these)))
3647 if (get_timespec64(&ts, uts))
3651 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3653 if (ret > 0 && uinfo) {
3654 if (copy_siginfo_to_user(uinfo, &info))
3655 ret = -EFAULT;
3656 }
3658 return ret;
3661 #ifdef CONFIG_COMPAT_32BIT_TIME
3662 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3663 siginfo_t __user *, uinfo,
3664 const struct old_timespec32 __user *, uts,
3668 struct timespec64 ts;
3669 kernel_siginfo_t info;
3672 if (sigsetsize != sizeof(sigset_t))
3675 if (copy_from_user(&these, uthese, sizeof(these)))
3679 if (get_old_timespec32(&ts, uts))
3683 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3685 if (ret > 0 && uinfo) {
3686 if (copy_siginfo_to_user(uinfo, &info))
3694 #ifdef CONFIG_COMPAT
3695 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3696 struct compat_siginfo __user *, uinfo,
3697 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3700 struct timespec64 t;
3701 kernel_siginfo_t info;
3704 if (sigsetsize != sizeof(sigset_t))
3707 if (get_compat_sigset(&s, uthese))
3711 if (get_timespec64(&t, uts))
3715 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3717 if (ret > 0 && uinfo) {
3718 if (copy_siginfo_to_user32(uinfo, &info))
3725 #ifdef CONFIG_COMPAT_32BIT_TIME
3726 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3727 struct compat_siginfo __user *, uinfo,
3728 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3731 struct timespec64 t;
3732 kernel_siginfo_t info;
3735 if (sigsetsize != sizeof(sigset_t))
3738 if (get_compat_sigset(&s, uthese))
3742 if (get_old_timespec32(&t, uts))
3746 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3748 if (ret > 0 && uinfo) {
3749 if (copy_siginfo_to_user32(uinfo, &info))
3758 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3760 clear_siginfo(info);
3761 info->si_signo = sig;
3763 info->si_code = SI_USER;
3764 info->si_pid = task_tgid_vnr(current);
3765 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3769 * sys_kill - send a signal to a process
3770 * @pid: the PID of the process
3771 * @sig: signal to be sent
3773 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3775 struct kernel_siginfo info;
3777 prepare_kill_siginfo(sig, &info);
3779 return kill_something_info(sig, &info, pid);
3783 * Verify that the signaler and signalee either are in the same pid namespace
3784 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3785 * namespace.
3787 static bool access_pidfd_pidns(struct pid *pid)
3789 struct pid_namespace *active = task_active_pid_ns(current);
3790 struct pid_namespace *p = ns_of_pid(pid);
3792 for (;;) {
3793 if (!p)
3794 return false;
3795 if (p == active)
3796 return true;
3797 p = p->parent;
3798 }
3803 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3804 siginfo_t __user *info)
3806 #ifdef CONFIG_COMPAT
3808 * Avoid hooking up compat syscalls and instead handle necessary
3809 * conversions here. Note, this is a stop-gap measure and should not be
3810 * considered a generic solution.
3812 if (in_compat_syscall())
3813 return copy_siginfo_from_user32(
3814 kinfo, (struct compat_siginfo __user *)info);
3816 return copy_siginfo_from_user(kinfo, info);
3819 static struct pid *pidfd_to_pid(const struct file *file)
3821 struct pid *pid;
3823 pid = pidfd_pid(file);
3824 if (!IS_ERR(pid))
3825 return pid;
3827 return tgid_pidfd_to_pid(file);
3831 * sys_pidfd_send_signal - Signal a process through a pidfd
3832 * @pidfd: file descriptor of the process
3833 * @sig: signal to send
3834 * @info: signal info
3835 * @flags: future flags
3837 * The syscall currently only signals via PIDTYPE_PID which covers
3838 * kill(<positive-pid>, <signal>). It does not signal threads or process
3839 * groups.
3840 * In order to extend the syscall to threads and process groups the @flags
3841 * argument should be used. In essence, the @flags argument will determine
3842 * what is signaled and not the file descriptor itself. Put in other words,
3843 * grouping is a property of the flags argument not a property of the file
3846 * Return: 0 on success, negative errno on failure
3848 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3849 siginfo_t __user *, info, unsigned int, flags)
3851 int ret;
3852 struct fd f;
3853 struct pid *pid;
3854 kernel_siginfo_t kinfo;
3856 /* Enforce that flags be set to 0 until we add an extension. */
3857 if (flags)
3858 return -EINVAL;
3860 f = fdget(pidfd);
3861 if (!f.file)
3862 return -EBADF;
3864 /* Is this a pidfd? */
3865 pid = pidfd_to_pid(f.file);
3866 if (IS_ERR(pid)) {
3867 ret = PTR_ERR(pid);
3868 goto err;
3869 }
3871 ret = -EINVAL;
3872 if (!access_pidfd_pidns(pid))
3873 goto err;
3875 if (info) {
3876 ret = copy_siginfo_from_user_any(&kinfo, info);
3877 if (unlikely(ret))
3878 goto err;
3880 ret = -EINVAL;
3881 if (unlikely(sig != kinfo.si_signo))
3882 goto err;
3884 /* Only allow sending arbitrary signals to yourself. */
3885 ret = -EPERM;
3886 if ((task_pid(current) != pid) &&
3887 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3888 goto err;
3889 } else {
3890 prepare_kill_siginfo(sig, &kinfo);
3891 }
3893 ret = kill_pid_info(sig, &kinfo, pid);
3895 err:
3896 fdput(f);
3897 return ret;
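/*
 * Editor's note: a hedged userspace sketch of the syscall above, via raw
 * syscall numbers (assumes headers that define SYS_pidfd_open, v5.3+, and
 * SYS_pidfd_send_signal, v5.1+):
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stddef.h>

static int pidfd_kill(pid_t pid, int sig)
{
	int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);

	if (pidfd < 0)
		return -1;
	/* NULL info behaves like kill(2), cf. prepare_kill_siginfo() below */
	ret = syscall(SYS_pidfd_send_signal, pidfd, sig, NULL, 0);
	close(pidfd);
	return ret;
}
#endif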
3901 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3903 struct task_struct *p;
3904 int error = -ESRCH;
3906 rcu_read_lock();
3907 p = find_task_by_vpid(pid);
3908 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3909 error = check_kill_permission(sig, info, p);
3911 * The null signal is a permissions and process existence
3912 * probe. No signal is actually delivered.
3914 if (!error && sig) {
3915 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3917 * If lock_task_sighand() failed we pretend the task
3918 * dies after receiving the signal. The window is tiny,
3919 * and the signal is private anyway.
3921 if (unlikely(error == -ESRCH))
3922 error = 0;
3925 rcu_read_unlock();
3927 return error;
3930 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3932 struct kernel_siginfo info;
3934 clear_siginfo(&info);
3935 info.si_signo = sig;
3937 info.si_code = SI_TKILL;
3938 info.si_pid = task_tgid_vnr(current);
3939 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3941 return do_send_specific(tgid, pid, sig, &info);
3945 * sys_tgkill - send signal to one specific thread
3946 * @tgid: the thread group ID of the thread
3947 * @pid: the PID of the thread
3948 * @sig: signal to be sent
3950 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3951 * exists but no longer belongs to the target process. This
3952 * method solves the problem of threads exiting and PIDs getting reused.
3954 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3956 /* This is only valid for single tasks */
3957 if (pid <= 0 || tgid <= 0)
3958 return -EINVAL;
3960 return do_tkill(tgid, pid, sig);
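/*
 * Editor's note: userspace reaches this through the raw syscall; the tgid
 * check is what makes a recycled tid in another process harmless. A sketch:
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int signal_one_thread(pid_t tgid, pid_t tid, int sig)
{
	/* fails with ESRCH if tid no longer belongs to tgid */
	return syscall(SYS_tgkill, tgid, tid, sig);
}
#endif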
3964 * sys_tkill - send signal to one specific task
3965 * @pid: the PID of the task
3966 * @sig: signal to be sent
3968 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3970 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3972 /* This is only valid for single tasks */
3973 if (pid <= 0)
3974 return -EINVAL;
3976 return do_tkill(0, pid, sig);
3979 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3981 /* Not even root can pretend to send signals from the kernel.
3982 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3984 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3985 (task_pid_vnr(current) != pid))
3986 return -EPERM;
3988 /* POSIX.1b doesn't mention process groups. */
3989 return kill_proc_info(sig, info, pid);
3993 * sys_rt_sigqueueinfo - send signal information to a process
3994 * @pid: the PID of the thread
3995 * @sig: signal to be sent
3996 * @uinfo: signal info to be sent
3998 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3999 siginfo_t __user *, uinfo)
4001 kernel_siginfo_t info;
4002 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4003 if (unlikely(ret))
4004 return ret;
4005 return do_rt_sigqueueinfo(pid, sig, &info);
4008 #ifdef CONFIG_COMPAT
4009 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4012 struct compat_siginfo __user *, uinfo)
4014 kernel_siginfo_t info;
4015 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4016 if (unlikely(ret))
4017 return ret;
4018 return do_rt_sigqueueinfo(pid, sig, &info);
4022 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4024 /* This is only valid for single tasks */
4025 if (pid <= 0 || tgid <= 0)
4026 return -EINVAL;
4028 /* Not even root can pretend to send signals from the kernel.
4029 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4031 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4032 (task_pid_vnr(current) != pid))
4033 return -EPERM;
4035 return do_send_specific(tgid, pid, sig, info);
4038 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4039 siginfo_t __user *, uinfo)
4041 kernel_siginfo_t info;
4042 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4043 if (unlikely(ret))
4044 return ret;
4045 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4048 #ifdef CONFIG_COMPAT
4049 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4053 struct compat_siginfo __user *, uinfo)
4055 kernel_siginfo_t info;
4056 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4057 if (unlikely(ret))
4058 return ret;
4059 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4064 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4066 void kernel_sigaction(int sig, __sighandler_t action)
4068 spin_lock_irq(&current->sighand->siglock);
4069 current->sighand->action[sig - 1].sa.sa_handler = action;
4070 if (action == SIG_IGN) {
4071 sigset_t mask;
4073 sigemptyset(&mask);
4074 sigaddset(&mask, sig);
4076 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4077 flush_sigqueue_mask(&mask, &current->pending);
4078 recalc_sigpending();
4079 }
4080 spin_unlock_irq(&current->sighand->siglock);
4082 EXPORT_SYMBOL(kernel_sigaction);
4084 void __weak sigaction_compat_abi(struct k_sigaction *act,
4085 struct k_sigaction *oact)
4089 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4091 struct task_struct *p = current, *t;
4092 struct k_sigaction *k;
4093 sigset_t mask;
4095 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4096 return -EINVAL;
4098 k = &p->sighand->action[sig-1];
4100 spin_lock_irq(&p->sighand->siglock);
4101 if (k->sa.sa_flags & SA_IMMUTABLE) {
4102 spin_unlock_irq(&p->sighand->siglock);
4103 return -EINVAL;
4104 }
4105 if (oact)
4106 *oact = *k;
4109 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4110 * e.g. by having an architecture use the bit in their uapi.
4112 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4115 * Clear unknown flag bits in order to allow userspace to detect missing
4116 * support for flag bits and to allow the kernel to use non-uapi bits
4117 * internally.
4119 if (act)
4120 act->sa.sa_flags &= UAPI_SA_FLAGS;
4121 if (oact)
4122 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4124 sigaction_compat_abi(act, oact);
4126 if (act) {
4127 sigdelsetmask(&act->sa.sa_mask,
4128 sigmask(SIGKILL) | sigmask(SIGSTOP));
4129 *k = *act;
4131 * POSIX 3.3.1.3:
4132 * "Setting a signal action to SIG_IGN for a signal that is
4133 * pending shall cause the pending signal to be discarded,
4134 * whether or not it is blocked."
4136 * "Setting a signal action to SIG_DFL for a signal that is
4137 * pending and whose default action is to ignore the signal
4138 * (for example, SIGCHLD), shall cause the pending signal to
4139 * be discarded, whether or not it is blocked"
4141 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4142 sigemptyset(&mask);
4143 sigaddset(&mask, sig);
4144 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4145 for_each_thread(p, t)
4146 flush_sigqueue_mask(&mask, &t->pending);
4150 spin_unlock_irq(&p->sighand->siglock);
4151 return 0;
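/*
 * Editor's note: the POSIX discard rule implemented above is visible from
 * userspace: switching to SIG_IGN flushes an already pending, even blocked,
 * instance of the signal. A minimal sketch:
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <signal.h>
#include <string.h>

static int ignore_and_discard(int sig)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = SIG_IGN;	/* also discards pending instances */
	sigemptyset(&sa.sa_mask);
	return sigaction(sig, &sa, NULL);
}
#endif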
4154 static int
4155 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4156 size_t min_ss_size)
4158 struct task_struct *t = current;
4160 if (oss) {
4161 memset(oss, 0, sizeof(stack_t));
4162 oss->ss_sp = (void __user *) t->sas_ss_sp;
4163 oss->ss_size = t->sas_ss_size;
4164 oss->ss_flags = sas_ss_flags(sp) |
4165 (current->sas_ss_flags & SS_FLAG_BITS);
4168 if (ss) {
4169 void __user *ss_sp = ss->ss_sp;
4170 size_t ss_size = ss->ss_size;
4171 unsigned ss_flags = ss->ss_flags;
4172 int ss_mode;
4174 if (unlikely(on_sig_stack(sp)))
4175 return -EPERM;
4177 ss_mode = ss_flags & ~SS_FLAG_BITS;
4178 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4179 ss_mode != 0))
4180 return -EINVAL;
4182 if (ss_mode == SS_DISABLE) {
4183 ss_size = 0;
4184 ss_sp = NULL;
4185 } else {
4186 if (unlikely(ss_size < min_ss_size))
4187 return -ENOMEM;
4188 }
4190 t->sas_ss_sp = (unsigned long) ss_sp;
4191 t->sas_ss_size = ss_size;
4192 t->sas_ss_flags = ss_flags;
4193 }
4195 return 0;
4197 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4199 stack_t new, old;
4200 int err;
4201 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4202 return -EFAULT;
4203 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4204 current_user_stack_pointer(),
4205 MINSIGSTKSZ);
4206 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4207 err = -EFAULT;
4208 return err;
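/*
 * Editor's note: a minimal userspace pairing for the syscall above; an
 * alternate stack is only used by handlers installed with SA_ONSTACK:
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <signal.h>
#include <stdlib.h>

static int setup_altstack(void)
{
	stack_t ss = {
		.ss_sp = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
		.ss_flags = 0,
	};

	return ss.ss_sp ? sigaltstack(&ss, NULL) : -1;
}
#endif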
4211 int restore_altstack(const stack_t __user *uss)
4213 stack_t new;
4214 if (copy_from_user(&new, uss, sizeof(stack_t)))
4215 return -EFAULT;
4216 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4217 MINSIGSTKSZ);
4218 /* squash all but EFAULT for now */
4219 return 0;
4222 int __save_altstack(stack_t __user *uss, unsigned long sp)
4224 struct task_struct *t = current;
4225 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4226 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4227 __put_user(t->sas_ss_size, &uss->ss_size);
4228 return err;
4231 #ifdef CONFIG_COMPAT
4232 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4233 compat_stack_t __user *uoss_ptr)
4235 stack_t uss, uoss;
4236 int ret;
4238 if (uss_ptr) {
4239 compat_stack_t uss32;
4240 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4241 return -EFAULT;
4242 uss.ss_sp = compat_ptr(uss32.ss_sp);
4243 uss.ss_flags = uss32.ss_flags;
4244 uss.ss_size = uss32.ss_size;
4246 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4247 compat_user_stack_pointer(),
4248 COMPAT_MINSIGSTKSZ);
4249 if (ret >= 0 && uoss_ptr) {
4250 compat_stack_t old;
4251 memset(&old, 0, sizeof(old));
4252 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4253 old.ss_flags = uoss.ss_flags;
4254 old.ss_size = uoss.ss_size;
4255 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4256 ret = -EFAULT;
4257 }
4258 return ret;
4261 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4262 const compat_stack_t __user *, uss_ptr,
4263 compat_stack_t __user *, uoss_ptr)
4265 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4268 int compat_restore_altstack(const compat_stack_t __user *uss)
4270 int err = do_compat_sigaltstack(uss, NULL);
4271 /* squash all but -EFAULT for now */
4272 return err == -EFAULT ? err : 0;
4275 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4277 int err;
4278 struct task_struct *t = current;
4279 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4280 &uss->ss_sp) |
4281 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4282 __put_user(t->sas_ss_size, &uss->ss_size);
4283 return err;
4287 #ifdef __ARCH_WANT_SYS_SIGPENDING
4290 * sys_sigpending - examine pending signals
4291 * @uset: where mask of pending signal is returned
4293 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4295 sigset_t set;
4297 if (sizeof(old_sigset_t) > sizeof(*uset))
4298 return -EINVAL;
4300 do_sigpending(&set);
4302 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4303 return -EFAULT;
4305 return 0;
4308 #ifdef CONFIG_COMPAT
4309 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4311 sigset_t set;
4313 do_sigpending(&set);
4315 return put_user(set.sig[0], set32);
4321 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4323 * sys_sigprocmask - examine and change blocked signals
4324 * @how: whether to add, remove, or set signals
4325 * @nset: signals to add or remove (if non-null)
4326 * @oset: previous value of signal mask if non-null
4328 * Some platforms have their own version with special arguments;
4329 * others support only sys_rt_sigprocmask.
4332 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4333 old_sigset_t __user *, oset)
4335 old_sigset_t old_set, new_set;
4336 sigset_t new_blocked;
4338 old_set = current->blocked.sig[0];
4340 if (nset) {
4341 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4342 return -EFAULT;
4344 new_blocked = current->blocked;
4346 switch (how) {
4347 case SIG_BLOCK:
4348 sigaddsetmask(&new_blocked, new_set);
4349 break;
4350 case SIG_UNBLOCK:
4351 sigdelsetmask(&new_blocked, new_set);
4352 break;
4353 case SIG_SETMASK:
4354 new_blocked.sig[0] = new_set;
4355 break;
4356 default:
4357 return -EINVAL;
4358 }
4360 set_current_blocked(&new_blocked);
4361 }
4363 if (oset) {
4364 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4365 return -EFAULT;
4366 }
4368 return 0;
4370 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4372 #ifndef CONFIG_ODD_RT_SIGACTION
4374 * sys_rt_sigaction - alter an action taken by a process
4375 * @sig: signal to be sent
4376 * @act: new sigaction
4377 * @oact: used to save the previous sigaction
4378 * @sigsetsize: size of sigset_t type
4380 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4381 const struct sigaction __user *, act,
4382 struct sigaction __user *, oact,
4385 struct k_sigaction new_sa, old_sa;
4386 int ret;
4388 /* XXX: Don't preclude handling different sized sigset_t's. */
4389 if (sigsetsize != sizeof(sigset_t))
4390 return -EINVAL;
4392 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4393 return -EFAULT;
4395 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4396 if (ret)
4397 return ret;
4399 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4400 return -EFAULT;
4402 return 0;
4404 #ifdef CONFIG_COMPAT
4405 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4406 const struct compat_sigaction __user *, act,
4407 struct compat_sigaction __user *, oact,
4408 compat_size_t, sigsetsize)
4410 struct k_sigaction new_ka, old_ka;
4411 #ifdef __ARCH_HAS_SA_RESTORER
4412 compat_uptr_t restorer;
4413 #endif
4414 int ret;
4416 /* XXX: Don't preclude handling different sized sigset_t's. */
4417 if (sigsetsize != sizeof(compat_sigset_t))
4420 if (act) {
4421 compat_uptr_t handler;
4422 ret = get_user(handler, &act->sa_handler);
4423 new_ka.sa.sa_handler = compat_ptr(handler);
4424 #ifdef __ARCH_HAS_SA_RESTORER
4425 ret |= get_user(restorer, &act->sa_restorer);
4426 new_ka.sa.sa_restorer = compat_ptr(restorer);
4428 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4429 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4430 if (ret)
4431 return -EFAULT;
4432 }
4434 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4435 if (!ret && oact) {
4436 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4437 &oact->sa_handler);
4438 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4439 sizeof(oact->sa_mask));
4440 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4441 #ifdef __ARCH_HAS_SA_RESTORER
4442 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4443 &oact->sa_restorer);
4444 #endif
4445 }
4447 return ret;
4449 #endif /* !CONFIG_ODD_RT_SIGACTION */
4451 #ifdef CONFIG_OLD_SIGACTION
4452 SYSCALL_DEFINE3(sigaction, int, sig,
4453 const struct old_sigaction __user *, act,
4454 struct old_sigaction __user *, oact)
4456 struct k_sigaction new_ka, old_ka;
4457 int ret;
4459 if (act) {
4460 old_sigset_t mask;
4461 if (!access_ok(act, sizeof(*act)) ||
4462 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4463 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4464 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4465 __get_user(mask, &act->sa_mask))
4466 return -EFAULT;
4467 #ifdef __ARCH_HAS_KA_RESTORER
4468 new_ka.ka_restorer = NULL;
4469 #endif
4470 siginitset(&new_ka.sa.sa_mask, mask);
4471 }
4473 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4475 if (!ret && oact) {
4476 if (!access_ok(oact, sizeof(*oact)) ||
4477 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4478 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4479 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4480 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4481 return -EFAULT;
4482 }
4484 return ret;
4487 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4488 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4489 const struct compat_old_sigaction __user *, act,
4490 struct compat_old_sigaction __user *, oact)
4492 struct k_sigaction new_ka, old_ka;
4493 int ret;
4494 compat_old_sigset_t mask;
4495 compat_uptr_t handler, restorer;
4497 if (act) {
4498 if (!access_ok(act, sizeof(*act)) ||
4499 __get_user(handler, &act->sa_handler) ||
4500 __get_user(restorer, &act->sa_restorer) ||
4501 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4502 __get_user(mask, &act->sa_mask))
4503 return -EFAULT;
4505 #ifdef __ARCH_HAS_KA_RESTORER
4506 new_ka.ka_restorer = NULL;
4507 #endif
4508 new_ka.sa.sa_handler = compat_ptr(handler);
4509 new_ka.sa.sa_restorer = compat_ptr(restorer);
4510 siginitset(&new_ka.sa.sa_mask, mask);
4511 }
4513 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4515 if (!ret && oact) {
4516 if (!access_ok(oact, sizeof(*oact)) ||
4517 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4518 &oact->sa_handler) ||
4519 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4520 &oact->sa_restorer) ||
4521 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4522 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4523 return -EFAULT;
4524 }
4526 return ret;
4529 #ifdef CONFIG_SGETMASK_SYSCALL
4532 * For backwards compatibility. Functionality superseded by sigprocmask.
4534 SYSCALL_DEFINE0(sgetmask)
4537 return current->blocked.sig[0];
4540 SYSCALL_DEFINE1(ssetmask, int, newmask)
4542 int old = current->blocked.sig[0];
4543 sigset_t newset;
4545 siginitset(&newset, newmask);
4546 set_current_blocked(&newset);
4548 return old;
4550 #endif /* CONFIG_SGETMASK_SYSCALL */
4552 #ifdef __ARCH_WANT_SYS_SIGNAL
4554 * For backwards compatibility. Functionality superseded by sigaction.
4556 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4558 struct k_sigaction new_sa, old_sa;
4559 int ret;
4561 new_sa.sa.sa_handler = handler;
4562 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4563 sigemptyset(&new_sa.sa.sa_mask);
4565 ret = do_sigaction(sig, &new_sa, &old_sa);
4567 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4569 #endif /* __ARCH_WANT_SYS_SIGNAL */
4571 #ifdef __ARCH_WANT_SYS_PAUSE
4573 SYSCALL_DEFINE0(pause)
4575 while (!signal_pending(current)) {
4576 __set_current_state(TASK_INTERRUPTIBLE);
4577 schedule();
4578 }
4579 return -ERESTARTNOHAND;
4584 static int sigsuspend(sigset_t *set)
4586 current->saved_sigmask = current->blocked;
4587 set_current_blocked(set);
4589 while (!signal_pending(current)) {
4590 __set_current_state(TASK_INTERRUPTIBLE);
4591 schedule();
4592 }
4593 set_restore_sigmask();
4594 return -ERESTARTNOHAND;
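/*
 * Editor's note: the kernel-side loop above is what makes the classic
 * race-free userspace wait work: test the flag only while the signal is
 * blocked, and let sigsuspend() unblock and sleep atomically. A sketch:
 */
#if 0	/* illustrative userspace code, excluded from the kernel build */
#include <signal.h>

static volatile sig_atomic_t got_child;

static void wait_for_sigchld(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGCHLD);
	sigprocmask(SIG_BLOCK, &block, &old);
	while (!got_child)
		sigsuspend(&old);	/* old mask applies while sleeping */
	sigprocmask(SIG_SETMASK, &old, NULL);
}
#endif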
4598 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4599 * until a signal is received
4600 * @unewset: new signal mask value
4601 * @sigsetsize: size of sigset_t type
4603 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4605 sigset_t newset;
4607 /* XXX: Don't preclude handling different sized sigset_t's. */
4608 if (sigsetsize != sizeof(sigset_t))
4609 return -EINVAL;
4611 if (copy_from_user(&newset, unewset, sizeof(newset)))
4612 return -EFAULT;
4613 return sigsuspend(&newset);
4616 #ifdef CONFIG_COMPAT
4617 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4619 sigset_t newset;
4621 /* XXX: Don't preclude handling different sized sigset_t's. */
4622 if (sigsetsize != sizeof(sigset_t))
4623 return -EINVAL;
4625 if (get_compat_sigset(&newset, unewset))
4626 return -EFAULT;
4627 return sigsuspend(&newset);
4631 #ifdef CONFIG_OLD_SIGSUSPEND
4632 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4634 sigset_t blocked;
4635 siginitset(&blocked, mask);
4636 return sigsuspend(&blocked);
4639 #ifdef CONFIG_OLD_SIGSUSPEND3
4640 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4642 sigset_t blocked;
4643 siginitset(&blocked, mask);
4644 return sigsuspend(&blocked);
4648 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4650 return NULL;
4653 static inline void siginfo_buildtime_checks(void)
4655 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4657 /* Verify the offsets in the two siginfos match */
4658 #define CHECK_OFFSET(field) \
4659 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4661 /* kill */
4662 CHECK_OFFSET(si_pid);
4663 CHECK_OFFSET(si_uid);
4665 /* timer */
4666 CHECK_OFFSET(si_tid);
4667 CHECK_OFFSET(si_overrun);
4668 CHECK_OFFSET(si_value);
4670 /* rt */
4671 CHECK_OFFSET(si_pid);
4672 CHECK_OFFSET(si_uid);
4673 CHECK_OFFSET(si_value);
4675 /* sigchld */
4676 CHECK_OFFSET(si_pid);
4677 CHECK_OFFSET(si_uid);
4678 CHECK_OFFSET(si_status);
4679 CHECK_OFFSET(si_utime);
4680 CHECK_OFFSET(si_stime);
4682 /* sigfault */
4683 CHECK_OFFSET(si_addr);
4684 CHECK_OFFSET(si_trapno);
4685 CHECK_OFFSET(si_addr_lsb);
4686 CHECK_OFFSET(si_lower);
4687 CHECK_OFFSET(si_upper);
4688 CHECK_OFFSET(si_pkey);
4689 CHECK_OFFSET(si_perf_data);
4690 CHECK_OFFSET(si_perf_type);
4692 /* sigpoll */
4693 CHECK_OFFSET(si_band);
4694 CHECK_OFFSET(si_fd);
4696 /* sigsys */
4697 CHECK_OFFSET(si_call_addr);
4698 CHECK_OFFSET(si_syscall);
4699 CHECK_OFFSET(si_arch);
4702 /* usb asyncio */
4703 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4704 offsetof(struct siginfo, si_addr));
4705 if (sizeof(int) == sizeof(void __user *)) {
4706 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4707 sizeof(void __user *));
4708 } else {
4709 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4710 sizeof_field(struct siginfo, si_uid)) !=
4711 sizeof(void __user *));
4712 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4713 offsetof(struct siginfo, si_uid));
4715 #ifdef CONFIG_COMPAT
4716 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4717 offsetof(struct compat_siginfo, si_addr));
4718 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4719 sizeof(compat_uptr_t));
4720 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4721 sizeof_field(struct siginfo, si_pid));
4725 void __init signals_init(void)
4727 siginfo_buildtime_checks();
4729 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4732 #ifdef CONFIG_KGDB_KDB
4733 #include <linux/kdb.h>
4735 * kdb_send_sig - Allows kdb to send signals without exposing
4736 * signal internals. This function checks if the required locks are
4737 * available before calling the main signal code, to avoid kdb
4738 * deadlocks.
4740 void kdb_send_sig(struct task_struct *t, int sig)
4742 static struct task_struct *kdb_prev_t;
4743 int new_t, ret;
4744 if (!spin_trylock(&t->sighand->siglock)) {
4745 kdb_printf("Can't do kill command now.\n"
4746 "The sigmask lock is held somewhere else in "
4747 "kernel, try again later\n");
4750 new_t = kdb_prev_t != t;
4751 kdb_prev_t = t;
4752 if (!task_is_running(t) && new_t) {
4753 spin_unlock(&t->sighand->siglock);
4754 kdb_printf("Process is not RUNNING, sending a signal from "
4755 "kdb risks deadlock\n"
4756 "on the run queue locks. "
4757 "The signal has _not_ been sent.\n"
4758 "Reissue the kill command if you want to risk "
4762 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4763 spin_unlock(&t->sighand->siglock);
4765 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4768 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4770 #endif /* CONFIG_KGDB_KDB */