/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even an ignored signal, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
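/*
 * Illustrative sketch (not kernel code): the word-wise "pending and not
 * blocked" test above is plain bitmask arithmetic. A standalone C analogue,
 * assuming a toy two-word signal set:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	struct toy_sigset { uint64_t sig[2]; };
 *
 *	static bool toy_has_pending(const struct toy_sigset *pending,
 *				    const struct toy_sigset *blocked)
 *	{
 *		uint64_t ready;
 *
 *		ready  = pending->sig[1] & ~blocked->sig[1];
 *		ready |= pending->sig[0] & ~blocked->sig[0];
 *		return ready != 0;
 *	}
 *
 * A signal that is both pending and blocked never contributes to "ready",
 * which is why a task whose pending signals are all blocked is not flagged
 * with TIF_SIGPENDING.
 */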
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * should do so.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; in that case the wakeup is a
 * harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
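/*
 * Userspace-visible effect (illustrative sketch, not kernel code): when a
 * user's pending-signal budget is exhausted, sigqueue(3) fails with EAGAIN.
 * A minimal demonstration, assuming a process allowed to lower its own
 * rlimit and no other signals already pending for the user:
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit rl = { .rlim_cur = 4, .rlim_max = 4 };
 *		sigset_t set;
 *		int i;
 *
 *		setrlimit(RLIMIT_SIGPENDING, &rl);
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGRTMIN);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// keep signals queued
 *
 *		for (i = 0; i < 8; i++)
 *			if (sigqueue(getpid(), SIGRTMIN,
 *				     (union sigval){ .sival_int = i }) < 0)
 *				perror("sigqueue");	// EAGAIN once the quota is full
 *		return 0;
 *	}
 *
 * The rate-limited pr_info() above is the kernel-side trace of the same
 * condition when print_fatal_signals is enabled.
 */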
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking. @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read the comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;

	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;

		if (task_set_jobctl_pending(task, signr | gstop))
			sig->group_stop_count++;
	}
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
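/*
 * Userspace-visible contract (illustrative, not kernel code): across
 * execve(2), caught signals are reset to SIG_DFL while SIG_IGN is
 * preserved; flush_signal_handlers() is what implements the reset. A
 * minimal sketch (the re-exec via /proc/self/exe is Linux-specific):
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void handler(int sig) { (void)sig; }
 *
 *	int main(int argc, char **argv)
 *	{
 *		if (argc == 1) {
 *			signal(SIGUSR1, handler);	// reset to SIG_DFL by exec
 *			signal(SIGUSR2, SIG_IGN);	// survives exec
 *			execl("/proc/self/exe", argv[0], "child", (char *)NULL);
 *		}
 *		// Here SIGUSR1 is back to default and SIGUSR2 is still ignored.
 *		return 0;
 *	}
 */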
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
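/*
 * Userspace counterpart (illustrative sketch, not kernel code): the
 * self-restarting behaviour above is what keeps a periodic
 * setitimer(ITIMER_REAL) ticking even though SIGALRM, being a legacy
 * (non-realtime) signal, is never queued more than once:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/time.h>
 *	#include <unistd.h>
 *
 *	static volatile sig_atomic_t ticks;
 *
 *	static void on_alarm(int sig) { (void)sig; ticks++; }
 *
 *	int main(void)
 *	{
 *		struct sigaction sa = { .sa_handler = on_alarm };
 *		struct itimerval it = {
 *			.it_interval = { .tv_sec = 0, .tv_usec = 100000 },
 *			.it_value    = { .tv_sec = 0, .tv_usec = 100000 },
 *		};
 *
 *		sigaction(SIGALRM, &sa, NULL);
 *		setitimer(ITIMER_REAL, &it, NULL);
 *		while (ticks < 10)
 *			pause();	// each expiry is re-armed in the kernel
 *		printf("got %d SIGALRMs\n", (int)ticks);
 *		return 0;
 *	}
 */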
static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
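/*
 * Userspace view of the rule above (illustrative, not kernel code): a sender
 * needs its real or effective UID to match the target's real or saved UID,
 * or CAP_KILL; otherwise kill(2) fails with EPERM. A quick probe, where
 * "victim" is some PID owned by another user (an assumption for the sketch):
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		pid_t victim = (pid_t)atoi(argv[1]);
 *
 *		if (kill(victim, 0) < 0 && errno == EPERM)	// sig 0: checks only
 *			fprintf(stderr, "no permission to signal %d\n", (int)victim);
 *		return 0;
 *	}
 *
 * Signal 0 performs the existence and permission checks without sending
 * anything, which makes it a convenient probe for exactly this code path.
 */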
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the next
 * TRAP_STOP, to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and the
 * ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, the STOP trap will
 * eventually be taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
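/*
 * Userspace-visible consequence (illustrative, not kernel code): a blocked
 * legacy signal is recorded at most once, while realtime signals queue.
 * A minimal sketch:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t si;
 *		struct timespec ts = { 0, 0 };
 *		int i, n = 0;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigaddset(&set, SIGRTMIN);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *
 *		for (i = 0; i < 3; i++) {
 *			kill(getpid(), SIGUSR1);	// coalesces: legacy_queue()
 *			sigqueue(getpid(), SIGRTMIN,
 *				 (union sigval){ .sival_int = i });
 *		}
 *		while (sigtimedwait(&set, &si, &ts) > 0)
 *			n++;
 *		printf("dequeued %d signals\n", n);	// 4: 1x SIGUSR1 + 3x SIGRTMIN
 *		return 0;
 *	}
 */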
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			enum pid_type type, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;

		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;

			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, type, from_ancestor_ns);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, PIDTYPE_PID);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want a handler that user space had explicitly
 * blocked to be invoked.
 *
 * We don't want recursive SIGSEGVs and the like, for example,
 * which is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
			    !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
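/*
 * The same pid conventions as kill(2) (illustrative, not kernel code):
 * pid > 0 targets one process, pid == 0 the caller's process group,
 * pid < -1 the process group |pid|, and pid == -1 "everything we may
 * signal". A sketch of the userspace mapping:
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	void examples(pid_t pid, pid_t pgrp)
 *	{
 *		kill(pid, SIGTERM);	// one process
 *		kill(0, SIGTERM);	// own process group
 *		kill(-pgrp, SIGTERM);	// group pgrp (== killpg(pgrp, SIGTERM))
 *		kill(-1, SIGTERM);	// broadcast, excluding init and ourselves
 *	}
 */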
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
}
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info(info.si_signo, &info, t);
}
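/*
 * Typical caller (illustrative sketch, not taken from this file): an
 * architecture page-fault handler reporting a bad user access might do
 * something like
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)fault_addr,
 *			current);
 *
 * where fault_addr stands for whatever address the trap hardware reported
 * (a hypothetical variable here); the helper fills in a consistent siginfo
 * so every arch reports faults the same way.
 */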
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(info.si_signo, &info, current);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(info.si_signo, &info, current);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(info.si_signo, &info, current);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
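/*
 * Userspace counterpart (illustrative, not kernel code): the sigqueue
 * preallocated here backs a POSIX timer's signal, so allocation failure
 * surfaces as EAGAIN from timer_create(2) rather than as a lost expiration
 * later:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct sigevent sev = {
 *			.sigev_notify = SIGEV_SIGNAL,
 *			.sigev_signo  = SIGRTMIN,
 *		};
 *		timer_t timerid;
 *
 *		if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) < 0)
 *			perror("timer_create");	// EAGAIN if preallocation failed
 *		return 0;
 *	}
 *
 * (Link with -lrt on older glibc.)
 */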
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
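/*
 * Userspace-visible contract (illustrative, not kernel code): the autoreap
 * branch above is why explicitly ignoring SIGCHLD makes children disappear
 * without becoming zombies, and why wait(2) then reports ECHILD:
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		signal(SIGCHLD, SIG_IGN);	// request auto-reaping
 *		if (fork() == 0)
 *			_exit(0);		// child: reaped by the kernel
 *		if (wait(NULL) < 0 && errno == ECHILD)
 *			puts("child was auto-reaped");
 *		return 0;
 *	}
 */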
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent, which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * is a deadlock situation and pointless because our tracer is
	 * dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL.  Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED.  But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()		    smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're going to be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
2182 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2183 * @signr: signr causing group stop if initiating
2185 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2186 * and participate in it. If already set, participate in the existing
2187 * group stop. If participated in a group stop (and thus slept), %true is
2188 * returned with siglock released.
2190 * If ptraced, this function doesn't handle stop itself. Instead,
2191 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2192 * untouched. The caller must ensure that INTERRUPT trap handling takes
2193 * places afterwards.
2196 * Must be called with @current->sighand->siglock held, which is released
2200 * %false if group stop is already cancelled or ptrace trap is scheduled.
2201 * %true if participated in group stop.
2203 static bool do_signal_stop(int signr)
2204 __releases(¤t->sighand->siglock)
2206 struct signal_struct *sig = current->signal;
2208 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2209 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2210 struct task_struct *t;
2212 /* signr will be recorded in task->jobctl for retries */
2213 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2215 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2216 unlikely(signal_group_exit(sig)))
2219 * There is no group stop already in progress. We must
2222 * While ptraced, a task may be resumed while group stop is
2223 * still in effect and then receive a stop signal and
2224 * initiate another group stop. This deviates from the
2225 * usual behavior as two consecutive stop signals can't
2226 * cause two group stops when !ptraced. That is why we
2227 * also check !task_is_stopped(t) below.
2229 * The condition can be distinguished by testing whether
2230 * SIGNAL_STOP_STOPPED is already set. Don't generate
2231 * group_exit_code in such case.
2233 * This is not necessary for SIGNAL_STOP_CONTINUED because
2234 * an intervening stop signal is required to cause two
2235 * continued events regardless of ptrace.
2237 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2238 sig->group_exit_code = signr;
2240 sig->group_stop_count = 0;
2242 if (task_set_jobctl_pending(current, signr | gstop))
2243 sig->group_stop_count++;
2246 while_each_thread(current, t) {
2248 * Setting state to TASK_STOPPED for a group
2249 * stop is always done with the siglock held,
2250 * so this check has no races.
2252 if (!task_is_stopped(t) &&
2253 task_set_jobctl_pending(t, signr | gstop)) {
2254 sig->group_stop_count++;
2255 if (likely(!(t->ptrace & PT_SEIZED)))
2256 signal_wake_up(t, 0);
2258 ptrace_trap_notify(t);
2263 if (likely(!current->ptrace)) {
2267 * If there are no other threads in the group, or if there
2268 * is a group stop in progress and we are the last to stop,
2269 * report to the parent.
2271 if (task_participate_group_stop(current))
2272 notify = CLD_STOPPED;
2274 set_special_state(TASK_STOPPED);
2275 spin_unlock_irq(&current->sighand->siglock);
2278 * Notify the parent of the group stop completion. Because
2279 * we're not holding either the siglock or tasklist_lock
2280 * here, the ptracer may attach in between; however, this is for
2281 * group stop and should always be delivered to the real
2282 * parent of the group leader. The new ptracer will get
2283 * its notification when this task transitions into TASK_TRACED.
2287 read_lock(&tasklist_lock);
2288 do_notify_parent_cldstop(current, false, notify);
2289 read_unlock(&tasklist_lock);
2292 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2293 freezable_schedule();
2297 * While ptraced, group stop is handled by STOP trap.
2298 * Schedule it and let the caller deal with it.
2300 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
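/*
 * Illustrative userspace sketch (not part of this file): the group stop
 * machinery above is what an explicit SIGSTOP/SIGCONT pair exercises.
 * Assuming a child whose PID is in `pid`:
 *
 *	kill(pid, SIGSTOP);			// whole thread group stops
 *	waitpid(pid, &status, WUNTRACED);	// WIFSTOPPED(status) is true
 *	kill(pid, SIGCONT);			// group resumes; waitpid()
 *						// with WCONTINUED reports it
 */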
2306 * do_jobctl_trap - take care of ptrace jobctl traps
2308 * When PT_SEIZED, it's used for both group stop and explicit
2309 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2310 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2311 * the stop signal; otherwise, %SIGTRAP.
2313 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2314 * number as exit_code and no siginfo.
2317 * Must be called with @current->sighand->siglock held, which may be
2318 * released and re-acquired before returning with intervening sleep.
2320 static void do_jobctl_trap(void)
2322 struct signal_struct *signal = current->signal;
2323 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2325 if (current->ptrace & PT_SEIZED) {
2326 if (!signal->group_stop_count &&
2327 !(signal->flags & SIGNAL_STOP_STOPPED))
2329 WARN_ON_ONCE(!signr);
2330 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), CLD_STOPPED);
2333 WARN_ON_ONCE(!signr);
2334 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2335 current->exit_code = 0;
2339 static int ptrace_signal(int signr, siginfo_t *info)
2342 * We do not check sig_kernel_stop(signr) but set this marker
2343 * unconditionally because we do not know whether the debugger will
2344 * change signr. This flag has no meaning unless we are going
2345 * to stop after return from ptrace_stop(). In this case it will
2346 * be checked in do_signal_stop(), we should only stop if it was
2347 * not cleared by SIGCONT while we were sleeping. See also the
2348 * comment in dequeue_signal().
2350 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2351 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2353 /* We're back. Did the debugger cancel the sig? */
2354 signr = current->exit_code;
2358 current->exit_code = 0;
2361 * Update the siginfo structure if the signal has
2362 * changed. If the debugger wanted something
2363 * specific in the siginfo structure then it should
2364 * have updated *info via PTRACE_SETSIGINFO.
2366 if (signr != info->si_signo) {
2367 clear_siginfo(info);
2368 info->si_signo = signr;
2370 info->si_code = SI_USER;
2372 info->si_pid = task_pid_vnr(current->parent);
2373 info->si_uid = from_kuid_munged(current_user_ns(),
2374 task_uid(current->parent));
2378 /* If the (new) signal is now blocked, requeue it. */
2379 if (sigismember(&current->blocked, signr)) {
2380 specific_send_sig_info(signr, info, current);
2387 bool get_signal(struct ksignal *ksig)
2389 struct sighand_struct *sighand = current->sighand;
2390 struct signal_struct *signal = current->signal;
2393 if (unlikely(current->task_works))
2396 if (unlikely(uprobe_deny_signal()))
2400 * Do this once, we can't return to user-mode if freezing() == T.
2401 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2402 * thus do not need another check after return.
2407 spin_lock_irq(&sighand->siglock);
2409 * Every stopped thread goes here after wakeup. Check to see if
2410 * we should notify the parent, prepare_signal(SIGCONT) encodes
2411 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2413 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2416 if (signal->flags & SIGNAL_CLD_CONTINUED)
2417 why = CLD_CONTINUED;
2421 signal->flags &= ~SIGNAL_CLD_MASK;
2423 spin_unlock_irq(&sighand->siglock);
2426 * Notify the parent that we're continuing. This event is
2427 * always per-process and doesn't make a whole lot of sense
2428 * for ptracers, who shouldn't consume the state via
2429 * wait(2) either, but, for backward compatibility, notify
2430 * the ptracer of the group leader too unless it would be a duplicate.
2433 read_lock(&tasklist_lock);
2434 do_notify_parent_cldstop(current, false, why);
2436 if (ptrace_reparented(current->group_leader))
2437 do_notify_parent_cldstop(current->group_leader,
2439 read_unlock(&tasklist_lock);
2444 /* Has this task already been marked for death? */
2445 if (signal_group_exit(signal)) {
2446 ksig->info.si_signo = signr = SIGKILL;
2447 sigdelset(&current->pending.signal, SIGKILL);
2448 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2449 &sighand->action[SIGKILL - 1]);
2450 recalc_sigpending();
2455 struct k_sigaction *ka;
2457 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2461 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2463 spin_unlock_irq(&sighand->siglock);
2468 * Signals generated by the execution of an instruction
2469 * need to be delivered before any other pending signals
2470 * so that the instruction pointer in the signal stack
2471 * frame points to the faulting instruction.
2473 signr = dequeue_synchronous_signal(&ksig->info);
2475 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2478 break; /* will return 0 */
2480 if (unlikely(current->ptrace) && signr != SIGKILL) {
2481 signr = ptrace_signal(signr, &ksig->info);
2486 ka = &sighand->action[signr-1];
2488 /* Trace actually delivered signals. */
2489 trace_signal_deliver(signr, &ksig->info, ka);
2491 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2493 if (ka->sa.sa_handler != SIG_DFL) {
2494 /* Run the handler. */
2497 if (ka->sa.sa_flags & SA_ONESHOT)
2498 ka->sa.sa_handler = SIG_DFL;
2500 break; /* will return non-zero "signr" value */
2504 * Now we are doing the default action for this signal.
2506 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2510 * Global init gets no signals it doesn't want.
2511 * Container-init gets no signals it doesn't want from the same container.
2514 * Note that if global/container-init sees a sig_kernel_only()
2515 * signal here, the signal must have been generated internally
2516 * or must have come from an ancestor namespace. In either
2517 * case, the signal cannot be dropped.
2519 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2520 !sig_kernel_only(signr))
2523 if (sig_kernel_stop(signr)) {
2525 * The default action is to stop all threads in
2526 * the thread group. The job control signals
2527 * do nothing in an orphaned pgrp, but SIGSTOP
2528 * always works. Note that siglock needs to be
2529 * dropped during the call to is_orphaned_pgrp()
2530 * because of lock ordering with tasklist_lock.
2531 * This allows an intervening SIGCONT to be posted.
2532 * We need to check for that and bail out if necessary.
2534 if (signr != SIGSTOP) {
2535 spin_unlock_irq(&sighand->siglock);
2537 /* signals can be posted during this window */
2539 if (is_current_pgrp_orphaned())
2542 spin_lock_irq(&sighand->siglock);
2545 if (likely(do_signal_stop(ksig->info.si_signo))) {
2546 /* It released the siglock. */
2551 * We didn't actually stop, due to a race
2552 * with SIGCONT or something like that.
2558 spin_unlock_irq(&sighand->siglock);
2561 * Anything else is fatal, maybe with a core dump.
2563 current->flags |= PF_SIGNALED;
2565 if (sig_kernel_coredump(signr)) {
2566 if (print_fatal_signals)
2567 print_fatal_signal(ksig->info.si_signo);
2568 proc_coredump_connector(current);
2570 * If it was able to dump core, this kills all
2571 * other threads in the group and synchronizes with
2572 * their demise. If we lost the race with another
2573 * thread getting here, it set group_exit_code
2574 * first and our do_group_exit call below will use
2575 * that value and ignore the one we pass it.
2577 do_coredump(&ksig->info);
2581 * Death signals, no core dump.
2583 do_group_exit(ksig->info.si_signo);
2586 spin_unlock_irq(&sighand->siglock);
2589 return ksig->sig > 0;
2593 * signal_delivered - perform bookkeeping after a signal has been delivered
2594 * @ksig: kernel signal struct
2595 * @stepping: nonzero if debugger single-step or block-step in use
2597 * This function should be called when a signal has successfully been
2598 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2599 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2600 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2602 static void signal_delivered(struct ksignal *ksig, int stepping)
2606 /* A signal was successfully delivered, and the
2607 saved sigmask was stored on the signal frame,
2608 and will be restored by sigreturn. So we can
2609 simply clear the restore sigmask flag. */
2610 clear_restore_sigmask();
2612 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2613 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2614 sigaddset(&blocked, ksig->sig);
2615 set_current_blocked(&blocked);
2616 tracehook_signal_handler(stepping);
2619 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2622 force_sigsegv(ksig->sig, current);
2624 signal_delivered(ksig, stepping);
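/*
 * Sketch of the calling convention for get_signal()/signal_setup_done()
 * (illustrative only; the real loop lives in each architecture's
 * signal-delivery path, e.g. arch/*/kernel/signal.c):
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig)) {
 *		// Build the user-mode signal frame for ksig.sig here,
 *		// then report whether frame setup failed:
 *		signal_setup_done(failed, &ksig, stepping);
 *	}
 */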
2628 * It could be that complete_signal() picked us to notify about the
2629 * group-wide signal. Other threads should be notified now to take
2630 * the shared signals in @which since we will not.
2632 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2635 struct task_struct *t;
2637 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2638 if (sigisemptyset(&retarget))
2642 while_each_thread(tsk, t) {
2643 if (t->flags & PF_EXITING)
2646 if (!has_pending_signals(&retarget, &t->blocked))
2648 /* Remove the signals this thread can handle. */
2649 sigandsets(&retarget, &retarget, &t->blocked);
2651 if (!signal_pending(t))
2652 signal_wake_up(t, 0);
2654 if (sigisemptyset(&retarget))
2659 void exit_signals(struct task_struct *tsk)
2665 * @tsk is about to have PF_EXITING set - lock out users which
2666 * expect stable threadgroup.
2668 cgroup_threadgroup_change_begin(tsk);
2670 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2671 tsk->flags |= PF_EXITING;
2672 cgroup_threadgroup_change_end(tsk);
2676 spin_lock_irq(&tsk->sighand->siglock);
2678 * From now this task is not visible for group-wide signals,
2679 * see wants_signal(), do_signal_stop().
2681 tsk->flags |= PF_EXITING;
2683 cgroup_threadgroup_change_end(tsk);
2685 if (!signal_pending(tsk))
2688 unblocked = tsk->blocked;
2689 signotset(&unblocked);
2690 retarget_shared_pending(tsk, &unblocked);
2692 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2693 task_participate_group_stop(tsk))
2694 group_stop = CLD_STOPPED;
2696 spin_unlock_irq(&tsk->sighand->siglock);
2699 * If group stop has completed, deliver the notification. This
2700 * should always go to the real parent of the group leader.
2702 if (unlikely(group_stop)) {
2703 read_lock(&tasklist_lock);
2704 do_notify_parent_cldstop(tsk, false, group_stop);
2705 read_unlock(&tasklist_lock);
2709 EXPORT_SYMBOL(recalc_sigpending);
2710 EXPORT_SYMBOL_GPL(dequeue_signal);
2711 EXPORT_SYMBOL(flush_signals);
2712 EXPORT_SYMBOL(force_sig);
2713 EXPORT_SYMBOL(send_sig);
2714 EXPORT_SYMBOL(send_sig_info);
2715 EXPORT_SYMBOL(sigprocmask);
2718 * System call entry points.
2722 * sys_restart_syscall - restart a system call
2724 SYSCALL_DEFINE0(restart_syscall)
2726 struct restart_block *restart = &current->restart_block;
2727 return restart->fn(restart);
2730 long do_no_restart_syscall(struct restart_block *param)
2735 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2737 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2738 sigset_t newblocked;
2739 /* A set of now blocked but previously unblocked signals. */
2740 sigandnsets(&newblocked, newset, &current->blocked);
2741 retarget_shared_pending(tsk, &newblocked);
2743 tsk->blocked = *newset;
2744 recalc_sigpending();
2748 * set_current_blocked - change current->blocked mask
2751 * It is wrong to change ->blocked directly, this helper should be used
2752 * to ensure the process can't miss a shared signal we are going to block.
2754 void set_current_blocked(sigset_t *newset)
2756 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2757 __set_current_blocked(newset);
2760 void __set_current_blocked(const sigset_t *newset)
2762 struct task_struct *tsk = current;
2765 * In case the signal mask hasn't changed, there is nothing we need
2766 * to do. current->blocked shouldn't be modified by any other task.
2768 if (sigequalsets(&tsk->blocked, newset))
2771 spin_lock_irq(&tsk->sighand->siglock);
2772 __set_task_blocked(tsk, newset);
2773 spin_unlock_irq(&tsk->sighand->siglock);
2777 * This is also useful for kernel threads that want to temporarily
2778 * (or permanently) block certain signals.
2780 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2781 * interface happily blocks "unblockable" signals like SIGKILL and friends.
2784 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2786 struct task_struct *tsk = current;
2789 /* Lockless, only current can change ->blocked, never from irq */
2791 *oldset = tsk->blocked;
2795 sigorsets(&newset, &tsk->blocked, set);
2798 sigandnsets(&newset, &tsk->blocked, set);
2807 __set_current_blocked(&newset);
2812 * sys_rt_sigprocmask - change the list of currently blocked signals
2813 * @how: whether to add, remove, or set signals
2814 * @nset: new set of blocked signals, or NULL to leave the mask unchanged
2815 * @oset: previous value of signal mask if non-null
2816 * @sigsetsize: size of sigset_t type
2818 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2819 sigset_t __user *, oset, size_t, sigsetsize)
2821 sigset_t old_set, new_set;
2824 /* XXX: Don't preclude handling different sized sigset_t's. */
2825 if (sigsetsize != sizeof(sigset_t))
2828 old_set = current->blocked;
2831 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2833 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2835 error = sigprocmask(how, &new_set, NULL);
2841 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
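/*
 * Illustrative userspace sketch (not part of this file): glibc's
 * sigprocmask() wrapper ends up in rt_sigprocmask above. Blocking
 * SIGINT around a critical section:
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	// ... critical section: an arriving SIGINT stays pending ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */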
2848 #ifdef CONFIG_COMPAT
2849 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2850 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2852 sigset_t old_set = current->blocked;
2854 /* XXX: Don't preclude handling different sized sigset_t's. */
2855 if (sigsetsize != sizeof(sigset_t))
2861 if (get_compat_sigset(&new_set, nset))
2863 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2865 error = sigprocmask(how, &new_set, NULL);
2869 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
2873 static void do_sigpending(sigset_t *set)
2875 spin_lock_irq(&current->sighand->siglock);
2876 sigorsets(set, &current->pending.signal,
2877 &current->signal->shared_pending.signal);
2878 spin_unlock_irq(&current->sighand->siglock);
2880 /* Outside the lock because only this thread touches it. */
2881 sigandsets(set, &current->blocked, set);
2885 * sys_rt_sigpending - examine pending signals that have been raised while blocked
2887 * @uset: stores pending signals
2888 * @sigsetsize: size of sigset_t type or larger
2890 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2894 if (sigsetsize > sizeof(*uset))
2897 do_sigpending(&set);
2899 if (copy_to_user(uset, &set, sigsetsize))
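/*
 * Illustrative userspace sketch (not part of this file): inspecting
 * signals that arrived while blocked, via the sigpending() wrapper:
 *
 *	sigset_t pending;
 *
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGINT))
 *		handle_deferred_interrupt();	// hypothetical helper
 */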
2905 #ifdef CONFIG_COMPAT
2906 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2907 compat_size_t, sigsetsize)
2911 if (sigsetsize > sizeof(*uset))
2914 do_sigpending(&set);
2916 return put_compat_sigset(uset, &set, sigsetsize);
2920 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
2922 enum siginfo_layout layout = SIL_KILL;
2923 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
2924 static const struct {
2925 unsigned char limit, layout;
2927 [SIGILL] = { NSIGILL, SIL_FAULT },
2928 [SIGFPE] = { NSIGFPE, SIL_FAULT },
2929 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
2930 [SIGBUS] = { NSIGBUS, SIL_FAULT },
2931 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
2932 #if defined(SIGEMT) && defined(NSIGEMT)
2933 [SIGEMT] = { NSIGEMT, SIL_FAULT },
2935 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
2936 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
2937 [SIGSYS] = { NSIGSYS, SIL_SYS },
2939 if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
2940 layout = filter[sig].layout;
2941 /* Handle the exceptions */
2942 if ((sig == SIGBUS) &&
2943 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
2944 layout = SIL_FAULT_MCEERR;
2945 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
2946 layout = SIL_FAULT_BNDERR;
2948 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
2949 layout = SIL_FAULT_PKUERR;
2952 else if (si_code <= NSIGPOLL)
2955 if (si_code == SI_TIMER)
2957 else if (si_code == SI_SIGIO)
2959 else if (si_code < 0)
2965 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2967 if (copy_to_user(to, from, sizeof(struct siginfo)))
2972 #ifdef CONFIG_COMPAT
2973 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
2974 const struct siginfo *from)
2975 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
2977 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
2979 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
2980 const struct siginfo *from, bool x32_ABI)
2983 struct compat_siginfo new;
2984 memset(&new, 0, sizeof(new));
2986 new.si_signo = from->si_signo;
2987 new.si_errno = from->si_errno;
2988 new.si_code = from->si_code;
2989 switch (siginfo_layout(from->si_signo, from->si_code)) {
2991 new.si_pid = from->si_pid;
2992 new.si_uid = from->si_uid;
2995 new.si_tid = from->si_tid;
2996 new.si_overrun = from->si_overrun;
2997 new.si_int = from->si_int;
3000 new.si_band = from->si_band;
3001 new.si_fd = from->si_fd;
3004 new.si_addr = ptr_to_compat(from->si_addr);
3005 #ifdef __ARCH_SI_TRAPNO
3006 new.si_trapno = from->si_trapno;
3009 case SIL_FAULT_MCEERR:
3010 new.si_addr = ptr_to_compat(from->si_addr);
3011 #ifdef __ARCH_SI_TRAPNO
3012 new.si_trapno = from->si_trapno;
3014 new.si_addr_lsb = from->si_addr_lsb;
3016 case SIL_FAULT_BNDERR:
3017 new.si_addr = ptr_to_compat(from->si_addr);
3018 #ifdef __ARCH_SI_TRAPNO
3019 new.si_trapno = from->si_trapno;
3021 new.si_lower = ptr_to_compat(from->si_lower);
3022 new.si_upper = ptr_to_compat(from->si_upper);
3024 case SIL_FAULT_PKUERR:
3025 new.si_addr = ptr_to_compat(from->si_addr);
3026 #ifdef __ARCH_SI_TRAPNO
3027 new.si_trapno = from->si_trapno;
3029 new.si_pkey = from->si_pkey;
3032 new.si_pid = from->si_pid;
3033 new.si_uid = from->si_uid;
3034 new.si_status = from->si_status;
3035 #ifdef CONFIG_X86_X32_ABI
3037 new._sifields._sigchld_x32._utime = from->si_utime;
3038 new._sifields._sigchld_x32._stime = from->si_stime;
3042 new.si_utime = from->si_utime;
3043 new.si_stime = from->si_stime;
3047 new.si_pid = from->si_pid;
3048 new.si_uid = from->si_uid;
3049 new.si_int = from->si_int;
3052 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3053 new.si_syscall = from->si_syscall;
3054 new.si_arch = from->si_arch;
3058 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3064 int copy_siginfo_from_user32(struct siginfo *to,
3065 const struct compat_siginfo __user *ufrom)
3067 struct compat_siginfo from;
3069 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3073 to->si_signo = from.si_signo;
3074 to->si_errno = from.si_errno;
3075 to->si_code = from.si_code;
3076 switch (siginfo_layout(from.si_signo, from.si_code)) {
3078 to->si_pid = from.si_pid;
3079 to->si_uid = from.si_uid;
3082 to->si_tid = from.si_tid;
3083 to->si_overrun = from.si_overrun;
3084 to->si_int = from.si_int;
3087 to->si_band = from.si_band;
3088 to->si_fd = from.si_fd;
3091 to->si_addr = compat_ptr(from.si_addr);
3092 #ifdef __ARCH_SI_TRAPNO
3093 to->si_trapno = from.si_trapno;
3096 case SIL_FAULT_MCEERR:
3097 to->si_addr = compat_ptr(from.si_addr);
3098 #ifdef __ARCH_SI_TRAPNO
3099 to->si_trapno = from.si_trapno;
3101 to->si_addr_lsb = from.si_addr_lsb;
3103 case SIL_FAULT_BNDERR:
3104 to->si_addr = compat_ptr(from.si_addr);
3105 #ifdef __ARCH_SI_TRAPNO
3106 to->si_trapno = from.si_trapno;
3108 to->si_lower = compat_ptr(from.si_lower);
3109 to->si_upper = compat_ptr(from.si_upper);
3111 case SIL_FAULT_PKUERR:
3112 to->si_addr = compat_ptr(from.si_addr);
3113 #ifdef __ARCH_SI_TRAPNO
3114 to->si_trapno = from.si_trapno;
3116 to->si_pkey = from.si_pkey;
3119 to->si_pid = from.si_pid;
3120 to->si_uid = from.si_uid;
3121 to->si_status = from.si_status;
3122 #ifdef CONFIG_X86_X32_ABI
3123 if (in_x32_syscall()) {
3124 to->si_utime = from._sifields._sigchld_x32._utime;
3125 to->si_stime = from._sifields._sigchld_x32._stime;
3129 to->si_utime = from.si_utime;
3130 to->si_stime = from.si_stime;
3134 to->si_pid = from.si_pid;
3135 to->si_uid = from.si_uid;
3136 to->si_int = from.si_int;
3139 to->si_call_addr = compat_ptr(from.si_call_addr);
3140 to->si_syscall = from.si_syscall;
3141 to->si_arch = from.si_arch;
3146 #endif /* CONFIG_COMPAT */
3149 * do_sigtimedwait - wait for queued signals specified in @which
3150 * @which: queued signals to wait for
3151 * @info: if non-null, the signal's siginfo is returned here
3152 * @ts: upper bound on process time suspension
3154 static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
3155 const struct timespec *ts)
3157 ktime_t *to = NULL, timeout = KTIME_MAX;
3158 struct task_struct *tsk = current;
3159 sigset_t mask = *which;
3163 if (!timespec_valid(ts))
3165 timeout = timespec_to_ktime(*ts);
3170 * Invert the set of allowed signals to get those we want to block.
3172 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3175 spin_lock_irq(&tsk->sighand->siglock);
3176 sig = dequeue_signal(tsk, &mask, info);
3177 if (!sig && timeout) {
3179 * None ready, temporarily unblock those we're interested in
3180 * while we sleep, so that we'll be awakened when they arrive.
3181 * Unblocking is always fine; we can avoid
3182 * set_current_blocked().
3184 tsk->real_blocked = tsk->blocked;
3185 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3186 recalc_sigpending();
3187 spin_unlock_irq(&tsk->sighand->siglock);
3189 __set_current_state(TASK_INTERRUPTIBLE);
3190 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3192 spin_lock_irq(&tsk->sighand->siglock);
3193 __set_task_blocked(tsk, &tsk->real_blocked);
3194 sigemptyset(&tsk->real_blocked);
3195 sig = dequeue_signal(tsk, &mask, info);
3197 spin_unlock_irq(&tsk->sighand->siglock);
3201 return ret ? -EINTR : -EAGAIN;
3205 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3207 * @uthese: queued signals to wait for
3208 * @uinfo: if non-null, the signal's siginfo is returned here
3209 * @uts: upper bound on process time suspension
3210 * @sigsetsize: size of sigset_t type
3212 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3213 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
3221 /* XXX: Don't preclude handling different sized sigset_t's. */
3222 if (sigsetsize != sizeof(sigset_t))
3225 if (copy_from_user(&these, uthese, sizeof(these)))
3229 if (copy_from_user(&ts, uts, sizeof(ts)))
3233 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3235 if (ret > 0 && uinfo) {
3236 if (copy_siginfo_to_user(uinfo, &info))
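/*
 * Illustrative userspace sketch (not part of this file): waiting
 * synchronously for a blocked SIGTERM with a five second timeout via
 * the sigtimedwait() wrapper:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGTERM);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// must be blocked first
 *	if (sigtimedwait(&set, &si, &ts) == SIGTERM)
 *		printf("sent by pid %d\n", si.si_pid);
 */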
3243 #ifdef CONFIG_COMPAT
3244 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
3245 struct compat_siginfo __user *, uinfo,
3246 struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
3253 if (sigsetsize != sizeof(sigset_t))
3256 if (get_compat_sigset(&s, uthese))
3260 if (compat_get_timespec(&t, uts))
3264 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3266 if (ret > 0 && uinfo) {
3267 if (copy_siginfo_to_user32(uinfo, &info))
3276 * sys_kill - send a signal to a process
3277 * @pid: the PID of the process
3278 * @sig: signal to be sent
3280 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3282 struct siginfo info;
3284 clear_siginfo(&info);
3285 info.si_signo = sig;
3287 info.si_code = SI_USER;
3288 info.si_pid = task_tgid_vnr(current);
3289 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3291 return kill_something_info(sig, &info, pid);
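/*
 * Illustrative userspace sketch (not part of this file): the pid
 * argument semantics that kill_something_info() resolves for kill(2):
 *
 *	kill(1234, SIGTERM);	// exactly one process
 *	kill(-1234, SIGTERM);	// every process in process group 1234
 *	kill(0, SIGTERM);	// every process in the caller's group
 *	kill(-1, SIGTERM);	// all processes the caller may signal
 */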
3295 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
3297 struct task_struct *p;
3301 p = find_task_by_vpid(pid);
3302 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3303 error = check_kill_permission(sig, info, p);
3305 * The null signal is a permissions and process existence
3306 * probe. No signal is actually delivered.
3308 if (!error && sig) {
3309 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3311 * If lock_task_sighand() failed we pretend the task
3312 * dies after receiving the signal. The window is tiny,
3313 * and the signal is private anyway.
3315 if (unlikely(error == -ESRCH))
3324 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3326 struct siginfo info;
3328 clear_siginfo(&info);
3329 info.si_signo = sig;
3331 info.si_code = SI_TKILL;
3332 info.si_pid = task_tgid_vnr(current);
3333 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3335 return do_send_specific(tgid, pid, sig, &info);
3339 * sys_tgkill - send signal to one specific thread
3340 * @tgid: the thread group ID of the thread
3341 * @pid: the PID of the thread
3342 * @sig: signal to be sent
3344 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3345 * exists but no longer belongs to the target process. This
3346 * method solves the problem of threads exiting and PIDs getting reused.
3348 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3350 /* This is only valid for single tasks */
3351 if (pid <= 0 || tgid <= 0)
3354 return do_tkill(tgid, pid, sig);
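/*
 * Illustrative userspace sketch (not part of this file): older glibc
 * versions ship no tgkill() wrapper, so the call is typically made via
 * syscall(2), passing the caller's own tgid and a specific thread's tid:
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */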
3358 * sys_tkill - send signal to one specific task
3359 * @pid: the PID of the task
3360 * @sig: signal to be sent
3362 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3364 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3366 /* This is only valid for single tasks */
3370 return do_tkill(0, pid, sig);
3373 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3375 /* Not even root can pretend to send signals from the kernel.
3376 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3378 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3379 (task_pid_vnr(current) != pid))
3382 info->si_signo = sig;
3384 /* POSIX.1b doesn't mention process groups. */
3385 return kill_proc_info(sig, info, pid);
3389 * sys_rt_sigqueueinfo - queue a signal and accompanying info to a process
3390 * @pid: the PID of the process
3391 * @sig: signal to be sent
3392 * @uinfo: signal info to be sent
3394 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3395 siginfo_t __user *, uinfo)
3398 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3400 return do_rt_sigqueueinfo(pid, sig, &info);
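/*
 * Illustrative userspace sketch (not part of this file): sigqueue(3) is
 * the portable front end to rt_sigqueueinfo, attaching a payload to a
 * queued (typically real-time) signal:
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGRTMIN, v);	// receiver reads info->si_value
 */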
3403 #ifdef CONFIG_COMPAT
3404 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3407 struct compat_siginfo __user *, uinfo)
3410 int ret = copy_siginfo_from_user32(&info, uinfo);
3413 return do_rt_sigqueueinfo(pid, sig, &info);
3417 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3419 /* This is only valid for single tasks */
3420 if (pid <= 0 || tgid <= 0)
3423 /* Not even root can pretend to send signals from the kernel.
3424 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3426 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3427 (task_pid_vnr(current) != pid))
3430 info->si_signo = sig;
3432 return do_send_specific(tgid, pid, sig, info);
3435 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3436 siginfo_t __user *, uinfo)
3440 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3443 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3446 #ifdef CONFIG_COMPAT
3447 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3451 struct compat_siginfo __user *, uinfo)
3455 if (copy_siginfo_from_user32(&info, uinfo))
3457 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3462 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3464 void kernel_sigaction(int sig, __sighandler_t action)
3466 spin_lock_irq(&current->sighand->siglock);
3467 current->sighand->action[sig - 1].sa.sa_handler = action;
3468 if (action == SIG_IGN) {
3472 sigaddset(&mask, sig);
3474 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3475 flush_sigqueue_mask(&mask, &current->pending);
3476 recalc_sigpending();
3478 spin_unlock_irq(&current->sighand->siglock);
3480 EXPORT_SYMBOL(kernel_sigaction);
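/*
 * In-kernel usage sketch (illustrative): a kthread that wants to react
 * to SIGTERM typically goes through the allow_signal() wrapper, which
 * boils down to kernel_sigaction():
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		// ... do work ...
 *		if (signal_pending(current))
 *			break;		// a signal is now pending
 *	}
 */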
3482 void __weak sigaction_compat_abi(struct k_sigaction *act,
3483 struct k_sigaction *oact)
3487 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3489 struct task_struct *p = current, *t;
3490 struct k_sigaction *k;
3493 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3496 k = &p->sighand->action[sig-1];
3498 spin_lock_irq(&p->sighand->siglock);
3502 sigaction_compat_abi(act, oact);
3505 sigdelsetmask(&act->sa.sa_mask,
3506 sigmask(SIGKILL) | sigmask(SIGSTOP));
3510 * "Setting a signal action to SIG_IGN for a signal that is
3511 * pending shall cause the pending signal to be discarded,
3512 * whether or not it is blocked."
3514 * "Setting a signal action to SIG_DFL for a signal that is
3515 * pending and whose default action is to ignore the signal
3516 * (for example, SIGCHLD), shall cause the pending signal to
3517 * be discarded, whether or not it is blocked"
3519 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3521 sigaddset(&mask, sig);
3522 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3523 for_each_thread(p, t)
3524 flush_sigqueue_mask(&mask, &t->pending);
3528 spin_unlock_irq(&p->sighand->siglock);
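/*
 * Illustrative userspace sketch (not part of this file): sigaction(2)
 * is the front end to do_sigaction(). Installing a SIGCHLD handler that
 * restarts interrupted syscalls and ignores stop notifications:
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = on_child;		// hypothetical handler
 *	sa.sa_flags = SA_RESTART | SA_NOCLDSTOP;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGCHLD, &sa, NULL);
 */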
3533 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
3536 struct task_struct *t = current;
3539 memset(oss, 0, sizeof(stack_t));
3540 oss->ss_sp = (void __user *) t->sas_ss_sp;
3541 oss->ss_size = t->sas_ss_size;
3542 oss->ss_flags = sas_ss_flags(sp) |
3543 (current->sas_ss_flags & SS_FLAG_BITS);
3547 void __user *ss_sp = ss->ss_sp;
3548 size_t ss_size = ss->ss_size;
3549 unsigned ss_flags = ss->ss_flags;
3552 if (unlikely(on_sig_stack(sp)))
3555 ss_mode = ss_flags & ~SS_FLAG_BITS;
3556 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3560 if (ss_mode == SS_DISABLE) {
3564 if (unlikely(ss_size < min_ss_size))
3568 t->sas_ss_sp = (unsigned long) ss_sp;
3569 t->sas_ss_size = ss_size;
3570 t->sas_ss_flags = ss_flags;
3575 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3579 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3581 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3582 current_user_stack_pointer(),
3584 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
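/*
 * Illustrative userspace sketch (not part of this file): pairing
 * sigaltstack(2) with an SA_ONSTACK handler so that a SIGSEGV caused
 * by stack overflow can still run a handler:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = { .sa_flags = SA_ONSTACK };
 *
 *	sa.sa_handler = on_segv;		// hypothetical handler
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */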
3589 int restore_altstack(const stack_t __user *uss)
3592 if (copy_from_user(&new, uss, sizeof(stack_t)))
3594 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
3596 /* squash all but EFAULT for now */
3600 int __save_altstack(stack_t __user *uss, unsigned long sp)
3602 struct task_struct *t = current;
3603 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3604 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3605 __put_user(t->sas_ss_size, &uss->ss_size);
3608 if (t->sas_ss_flags & SS_AUTODISARM)
3613 #ifdef CONFIG_COMPAT
3614 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
3615 compat_stack_t __user *uoss_ptr)
3621 compat_stack_t uss32;
3622 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3624 uss.ss_sp = compat_ptr(uss32.ss_sp);
3625 uss.ss_flags = uss32.ss_flags;
3626 uss.ss_size = uss32.ss_size;
3628 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
3629 compat_user_stack_pointer(),
3630 COMPAT_MINSIGSTKSZ);
3631 if (ret >= 0 && uoss_ptr) {
3633 memset(&old, 0, sizeof(old));
3634 old.ss_sp = ptr_to_compat(uoss.ss_sp);
3635 old.ss_flags = uoss.ss_flags;
3636 old.ss_size = uoss.ss_size;
3637 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
3643 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3644 const compat_stack_t __user *, uss_ptr,
3645 compat_stack_t __user *, uoss_ptr)
3647 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
3650 int compat_restore_altstack(const compat_stack_t __user *uss)
3652 int err = do_compat_sigaltstack(uss, NULL);
3653 /* squash all but -EFAULT for now */
3654 return err == -EFAULT ? err : 0;
3657 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3660 struct task_struct *t = current;
3661 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3663 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3664 __put_user(t->sas_ss_size, &uss->ss_size);
3667 if (t->sas_ss_flags & SS_AUTODISARM)
3673 #ifdef __ARCH_WANT_SYS_SIGPENDING
3676 * sys_sigpending - examine pending signals
3677 * @uset: where the mask of pending signals is returned
3679 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
3683 if (sizeof(old_sigset_t) > sizeof(*uset))
3686 do_sigpending(&set);
3688 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
3694 #ifdef CONFIG_COMPAT
3695 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
3699 do_sigpending(&set);
3701 return put_user(set.sig[0], set32);
3707 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3709 * sys_sigprocmask - examine and change blocked signals
3710 * @how: whether to add, remove, or set signals
3711 * @nset: signals to add or remove (if non-null)
3712 * @oset: previous value of signal mask if non-null
3714 * Some platforms have their own version with special arguments;
3715 * others support only sys_rt_sigprocmask.
3718 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3719 old_sigset_t __user *, oset)
3721 old_sigset_t old_set, new_set;
3722 sigset_t new_blocked;
3724 old_set = current->blocked.sig[0];
3727 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3730 new_blocked = current->blocked;
3734 sigaddsetmask(&new_blocked, new_set);
3737 sigdelsetmask(&new_blocked, new_set);
3740 new_blocked.sig[0] = new_set;
3746 set_current_blocked(&new_blocked);
3750 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3756 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3758 #ifndef CONFIG_ODD_RT_SIGACTION
3760 * sys_rt_sigaction - alter an action taken by a process
3761 * @sig: signal whose action is to be changed
3762 * @act: new sigaction
3763 * @oact: used to save the previous sigaction
3764 * @sigsetsize: size of sigset_t type
3766 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3767 const struct sigaction __user *, act,
3768 struct sigaction __user *, oact,
3771 struct k_sigaction new_sa, old_sa;
3774 /* XXX: Don't preclude handling different sized sigset_t's. */
3775 if (sigsetsize != sizeof(sigset_t))
3778 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3781 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3785 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3790 #ifdef CONFIG_COMPAT
3791 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3792 const struct compat_sigaction __user *, act,
3793 struct compat_sigaction __user *, oact,
3794 compat_size_t, sigsetsize)
3796 struct k_sigaction new_ka, old_ka;
3797 #ifdef __ARCH_HAS_SA_RESTORER
3798 compat_uptr_t restorer;
3802 /* XXX: Don't preclude handling different sized sigset_t's. */
3803 if (sigsetsize != sizeof(compat_sigset_t))
3807 compat_uptr_t handler;
3808 ret = get_user(handler, &act->sa_handler);
3809 new_ka.sa.sa_handler = compat_ptr(handler);
3810 #ifdef __ARCH_HAS_SA_RESTORER
3811 ret |= get_user(restorer, &act->sa_restorer);
3812 new_ka.sa.sa_restorer = compat_ptr(restorer);
3814 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
3815 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3820 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3822 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3824 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
3825 sizeof(oact->sa_mask));
3826 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3827 #ifdef __ARCH_HAS_SA_RESTORER
3828 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3829 &oact->sa_restorer);
3835 #endif /* !CONFIG_ODD_RT_SIGACTION */
3837 #ifdef CONFIG_OLD_SIGACTION
3838 SYSCALL_DEFINE3(sigaction, int, sig,
3839 const struct old_sigaction __user *, act,
3840 struct old_sigaction __user *, oact)
3842 struct k_sigaction new_ka, old_ka;
3847 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3848 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3849 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3850 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3851 __get_user(mask, &act->sa_mask))
3853 #ifdef __ARCH_HAS_KA_RESTORER
3854 new_ka.ka_restorer = NULL;
3856 siginitset(&new_ka.sa.sa_mask, mask);
3859 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3862 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3863 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3864 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3865 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3866 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3873 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3874 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3875 const struct compat_old_sigaction __user *, act,
3876 struct compat_old_sigaction __user *, oact)
3878 struct k_sigaction new_ka, old_ka;
3880 compat_old_sigset_t mask;
3881 compat_uptr_t handler, restorer;
3884 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3885 __get_user(handler, &act->sa_handler) ||
3886 __get_user(restorer, &act->sa_restorer) ||
3887 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3888 __get_user(mask, &act->sa_mask))
3891 #ifdef __ARCH_HAS_KA_RESTORER
3892 new_ka.ka_restorer = NULL;
3894 new_ka.sa.sa_handler = compat_ptr(handler);
3895 new_ka.sa.sa_restorer = compat_ptr(restorer);
3896 siginitset(&new_ka.sa.sa_mask, mask);
3899 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3902 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3903 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3904 &oact->sa_handler) ||
3905 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3906 &oact->sa_restorer) ||
3907 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3908 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3915 #ifdef CONFIG_SGETMASK_SYSCALL
3918 * For backwards compatibility. Functionality superseded by sigprocmask.
3920 SYSCALL_DEFINE0(sgetmask)
3923 return current->blocked.sig[0];
3926 SYSCALL_DEFINE1(ssetmask, int, newmask)
3928 int old = current->blocked.sig[0];
3931 siginitset(&newset, newmask);
3932 set_current_blocked(&newset);
3936 #endif /* CONFIG_SGETMASK_SYSCALL */
3938 #ifdef __ARCH_WANT_SYS_SIGNAL
3940 * For backwards compatibility. Functionality superseded by sigaction.
3942 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3944 struct k_sigaction new_sa, old_sa;
3947 new_sa.sa.sa_handler = handler;
3948 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3949 sigemptyset(&new_sa.sa.sa_mask);
3951 ret = do_sigaction(sig, &new_sa, &old_sa);
3953 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3955 #endif /* __ARCH_WANT_SYS_SIGNAL */
3957 #ifdef __ARCH_WANT_SYS_PAUSE
3959 SYSCALL_DEFINE0(pause)
3961 while (!signal_pending(current)) {
3962 __set_current_state(TASK_INTERRUPTIBLE);
3965 return -ERESTARTNOHAND;
3970 static int sigsuspend(sigset_t *set)
3972 current->saved_sigmask = current->blocked;
3973 set_current_blocked(set);
3975 while (!signal_pending(current)) {
3976 __set_current_state(TASK_INTERRUPTIBLE);
3979 set_restore_sigmask();
3980 return -ERESTARTNOHAND;
3984 * sys_rt_sigsuspend - replace the signal mask with @unewset
3985 * until a signal is received
3986 * @unewset: new signal mask value
3987 * @sigsetsize: size of sigset_t type
3989 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3993 /* XXX: Don't preclude handling different sized sigset_t's. */
3994 if (sigsetsize != sizeof(sigset_t))
3997 if (copy_from_user(&newset, unewset, sizeof(newset)))
3999 return sigsuspend(&newset);
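/*
 * Illustrative userspace sketch (not part of this file): the classic
 * race-free wait built on sigsuspend(2): block the signal, test the
 * condition, then atomically unblock and sleep:
 *
 *	sigset_t block, orig;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &orig);
 *	while (!flag)			// volatile sig_atomic_t set by handler
 *		sigsuspend(&orig);	// returns -1 with errno == EINTR
 *	sigprocmask(SIG_SETMASK, &orig, NULL);
 */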
4002 #ifdef CONFIG_COMPAT
4003 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4007 /* XXX: Don't preclude handling different sized sigset_t's. */
4008 if (sigsetsize != sizeof(sigset_t))
4011 if (get_compat_sigset(&newset, unewset))
4013 return sigsuspend(&newset);
4017 #ifdef CONFIG_OLD_SIGSUSPEND
4018 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4021 siginitset(&blocked, mask);
4022 return sigsuspend(&blocked);
4025 #ifdef CONFIG_OLD_SIGSUSPEND3
4026 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4029 siginitset(&blocked, mask);
4030 return sigsuspend(&blocked);
4034 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4039 void __init signals_init(void)
4041 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
4042 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
4043 != offsetof(struct siginfo, _sifields._pad));
4044 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4046 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4049 #ifdef CONFIG_KGDB_KDB
4050 #include <linux/kdb.h>
4052 * kdb_send_sig - Allows kdb to send signals without exposing
4053 * signal internals. This function checks if the required locks are
4054 * available before calling the main signal code, to avoid kdb deadlocks.
4057 void kdb_send_sig(struct task_struct *t, int sig)
4059 static struct task_struct *kdb_prev_t;
4061 if (!spin_trylock(&t->sighand->siglock)) {
4062 kdb_printf("Can't do kill command now.\n"
4063 "The sigmask lock is held somewhere else in "
4064 "kernel, try again later\n");
4067 new_t = kdb_prev_t != t;
4069 if (t->state != TASK_RUNNING && new_t) {
4070 spin_unlock(&t->sighand->siglock);
4071 kdb_printf("Process is not RUNNING, sending a signal from "
4072 "kdb risks deadlock\n"
4073 "on the run queue locks. "
4074 "The signal has _not_ been sent.\n"
4075 "Reissue the kill command if you want to risk "
4079 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4080 spin_unlock(&t->sighand->siglock);
4082 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4085 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4087 #endif /* CONFIG_KGDB_KDB */