/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know they should
	 * clear it do so.
	 */
	return false;
}
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
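
/*
 * Illustrative sketch (not part of this file): because the first word is
 * filtered through SYNCHRONOUS_MASK above, a pending synchronous fault
 * signal is dequeued before an ordinary one.  Assuming both are pending
 * and neither is blocked:
 *
 *	sigset_t blocked;
 *	sigemptyset(&blocked);
 *	sigaddset(&pending->signal, SIGUSR1);
 *	sigaddset(&pending->signal, SIGSEGV);
 *	WARN_ON(next_signal(pending, &blocked) != SIGSEGV);
 */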
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;

	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;

		if (task_set_jobctl_pending(task, signr | gstop)) {
			sig->group_stop_count++;
		}
	}
}
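
/*
 * Illustrative sketch (not part of this file): the fork path uses the
 * helper above so a thread created in the middle of a group stop stops
 * together with its siblings, roughly:
 *
 *	// in copy_process(), with sighand->siglock held
 *	if (clone_flags & CLONE_THREAD)
 *		task_join_group_stop(p);
 */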
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
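
/*
 * Illustrative sketch (not part of this file): the accounting above is
 * what makes sigqueue(3) fail once a user has RLIMIT_SIGPENDING queued
 * signals.  From userspace, roughly:
 *
 *	union sigval val = { .sival_int = 1 };
 *	if (sigqueue(pid, SIGRTMIN, val) < 0 && errno == EAGAIN)
 *		;	// per-user pending-signal limit reached
 */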
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
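
/*
 * Illustrative sketch (not part of this file): a kthread that uses signals
 * as a wakeup typically drains them like this after handling the event:
 *
 *	allow_signal(SIGINT);
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current))
 *			flush_signals(current);
 *		...
 *	}
 */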
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
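
/*
 * Illustrative sketch (not part of this file): this is why installed
 * handlers do not survive execve() while a SIG_IGN disposition may.  The
 * exec path resets dispositions roughly as:
 *
 *	// with force_default == 0, SIG_IGN entries are preserved,
 *	// everything else reverts to SIG_DFL
 *	flush_signal_handlers(current, 0);
 */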
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
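
/*
 * Illustrative sketch (not part of this file): callers such as the
 * signalfd read path dequeue under the siglock, e.g.:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &mask, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */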
static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
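
/*
 * Illustrative sketch (not part of this file): the common wrappers layer a
 * wakeup state on top of signal_wake_up_state(), roughly as defined in
 * <linux/sched/signal.h>:
 *
 *	static inline void signal_wake_up(struct task_struct *t, bool resume)
 *	{
 *		signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
 *	}
 *	static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
 *	{
 *		signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
 *	}
 */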
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
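
/*
 * Illustrative note (not part of this file): the SEND_SIG_* markers are
 * small constant pseudo-pointers, so the comparison above is an ordering
 * trick.  From <linux/sched/signal.h>, roughly:
 *
 *	#define SEND_SIG_NOINFO	((struct siginfo *) 0)
 *	#define SEND_SIG_PRIV	((struct siginfo *) 1)
 *	#define SEND_SIG_FORCED	((struct siginfo *) 2)
 */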
/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
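
/*
 * Illustrative sketch (not part of this file): this check is what makes
 * classic (non-realtime) signals coalesce.  Sending SIGUSR1 twice before
 * the target runs yields a single delivery, while SIGRTMIN queues each
 * instance:
 *
 *	kill(pid, SIGUSR1);
 *	kill(pid, SIGUSR1);		// merged with the first one
 *	sigqueue(pid, SIGRTMIN, val);
 *	sigqueue(pid, SIGRTMIN, val);	// both delivered
 */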
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			enum pid_type type, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, type, from_ancestor_ns);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
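
/*
 * Illustrative usage (not part of this file): fatal-signal reporting can
 * be enabled without a rebuild, either at boot or at runtime:
 *
 *	print-fatal-signals=1				(boot parameter)
 *	echo 1 > /proc/sys/kernel/print-fatal-signals	(sysctl)
 */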
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, PIDTYPE_PID);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
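
/*
 * Illustrative sketch (not part of this file): drivers use these
 * compatibility helpers to poke a task directly, e.g.:
 *
 *	send_sig(SIGHUP, task, 1);	// priv=1: queued with si_code SI_KERNEL
 *	send_sig(SIGIO, task, 0);	// priv=0: looks like a user-sent signal
 */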
void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
}
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
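
/*
 * Illustrative sketch (not part of this file): an architecture page-fault
 * handler reports an unresolvable user access with these helpers, roughly:
 *
 *	// in an arch fault handler, on an arch without __ARCH_SI_TRAPNO
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR,
 *			(void __user *)fault_addr, current);
 */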
int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(info.si_signo, &info, current);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(info.si_signo, &info, current);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(info.si_signo, &info, current);
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
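
/*
 * Illustrative sketch (not part of this file): the POSIX timer code pairs
 * these helpers so that allocation failure is reported at timer_create()
 * time rather than at expiry, roughly:
 *
 *	// timer_create(): reserve the queue slot up front
 *	new_timer->sigq = sigqueue_alloc();
 *	if (!new_timer->sigq)
 *		return -EAGAIN;
 *	...
 *	// timer expiry: delivery can no longer fail with EAGAIN
 *	send_sigqueue(timer->sigq, pid, type);
 */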
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
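
/*
 * Illustrative sketch (not part of this file): ptrace event reports encode
 * the event number in the upper bits of exit_code.  The generic
 * ptrace_event() helper reports, roughly:
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
 */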
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
2297 * do_jobctl_trap - take care of ptrace jobctl traps
2299 * When PT_SEIZED, it's used for both group stop and explicit
2300 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2301 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2302 * the stop signal; otherwise, %SIGTRAP.
2304 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2305 * number as exit_code and no siginfo.
2308 * Must be called with @current->sighand->siglock held, which may be
2309 * released and re-acquired before returning with intervening sleep.
2311 static void do_jobctl_trap(void)
2313 struct signal_struct *signal = current->signal;
2314 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2316 if (current->ptrace & PT_SEIZED) {
2317 if (!signal->group_stop_count &&
2318 !(signal->flags & SIGNAL_STOP_STOPPED))
2320 WARN_ON_ONCE(!signr);
2321 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2324 WARN_ON_ONCE(!signr);
2325 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2326 current->exit_code = 0;
2330 static int ptrace_signal(int signr, siginfo_t *info)
2333 * We do not check sig_kernel_stop(signr) but set this marker
2334 * unconditionally because we do not know whether the debugger will
2335 * change signr. This flag has no meaning unless we are going
2336 * to stop after return from ptrace_stop(). In this case it will
2337 * be checked in do_signal_stop(), we should only stop if it was
2338 * not cleared by SIGCONT while we were sleeping. See also the
2339 * comment in dequeue_signal().
2341 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2342 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2344 /* We're back. Did the debugger cancel the sig? */
2345 signr = current->exit_code;
2349 current->exit_code = 0;
2352 * Update the siginfo structure if the signal has
2353 * changed. If the debugger wanted something
2354 * specific in the siginfo structure then it should
2355 * have updated *info via PTRACE_SETSIGINFO.
2357 if (signr != info->si_signo) {
2358 clear_siginfo(info);
2359 info->si_signo = signr;
2361 info->si_code = SI_USER;
2363 info->si_pid = task_pid_vnr(current->parent);
2364 info->si_uid = from_kuid_munged(current_user_ns(),
2365 task_uid(current->parent));
2369 /* If the (new) signal is now blocked, requeue it. */
2370 if (sigismember(&current->blocked, signr)) {
2371 specific_send_sig_info(signr, info, current);
2378 bool get_signal(struct ksignal *ksig)
2380 struct sighand_struct *sighand = current->sighand;
2381 struct signal_struct *signal = current->signal;
2384 if (unlikely(current->task_works))
2387 if (unlikely(uprobe_deny_signal()))
2391 * Do this once, we can't return to user-mode if freezing() == T.
2392 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2393 * thus do not need another check after return.
2398 spin_lock_irq(&sighand->siglock);
2400 * Every stopped thread goes here after wakeup. Check to see if
2401 * we should notify the parent, prepare_signal(SIGCONT) encodes
2402 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2404 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2407 if (signal->flags & SIGNAL_CLD_CONTINUED)
2408 why = CLD_CONTINUED;
2412 signal->flags &= ~SIGNAL_CLD_MASK;
2414 spin_unlock_irq(&sighand->siglock);
2417 * Notify the parent that we're continuing. This event is
2418 * always per-process and doesn't make a whole lot of sense
2419 * for ptracers, who shouldn't consume the state via
2420 * wait(2) either, but, for backward compatibility, notify
2421 * the ptracer of the group leader too unless it's gonna be
2424 read_lock(&tasklist_lock);
2425 do_notify_parent_cldstop(current, false, why);
2427 if (ptrace_reparented(current->group_leader))
2428 do_notify_parent_cldstop(current->group_leader,
2430 read_unlock(&tasklist_lock);
2435 /* Has this task already been marked for death? */
2436 if (signal_group_exit(signal)) {
2437 ksig->info.si_signo = signr = SIGKILL;
2438 sigdelset(&current->pending.signal, SIGKILL);
2439 recalc_sigpending();
2444 struct k_sigaction *ka;
2446 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2450 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2452 spin_unlock_irq(&sighand->siglock);
2457 * Signals generated by the execution of an instruction
2458 * need to be delivered before any other pending signals
2459 * so that the instruction pointer in the signal stack
2460 * frame points to the faulting instruction.
2462 signr = dequeue_synchronous_signal(&ksig->info);
2464 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2467 break; /* will return 0 */
2469 if (unlikely(current->ptrace) && signr != SIGKILL) {
2470 signr = ptrace_signal(signr, &ksig->info);
2475 ka = &sighand->action[signr-1];
2477 /* Trace actually delivered signals. */
2478 trace_signal_deliver(signr, &ksig->info, ka);
2480 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2482 if (ka->sa.sa_handler != SIG_DFL) {
2483 /* Run the handler. */
2486 if (ka->sa.sa_flags & SA_ONESHOT)
2487 ka->sa.sa_handler = SIG_DFL;
2489 break; /* will return non-zero "signr" value */
2493 * Now we are doing the default action for this signal.
2495 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2499 * Global init gets no signals it doesn't want.
2500 * Container-init gets no signals it doesn't want from the same container.
2503 * Note that if global/container-init sees a sig_kernel_only()
2504 * signal here, the signal must have been generated internally
2505 * or must have come from an ancestor namespace. In either
2506 * case, the signal cannot be dropped.
2508 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2509 !sig_kernel_only(signr))
2512 if (sig_kernel_stop(signr)) {
2514 * The default action is to stop all threads in
2515 * the thread group. The job control signals
2516 * do nothing in an orphaned pgrp, but SIGSTOP
2517 * always works. Note that siglock needs to be
2518 * dropped during the call to is_orphaned_pgrp()
2519 * because of lock ordering with tasklist_lock.
2520 * This allows an intervening SIGCONT to be posted.
2521 * We need to check for that and bail out if necessary.
2523 if (signr != SIGSTOP) {
2524 spin_unlock_irq(&sighand->siglock);
2526 /* signals can be posted during this window */
2528 if (is_current_pgrp_orphaned())
2531 spin_lock_irq(&sighand->siglock);
2534 if (likely(do_signal_stop(ksig->info.si_signo))) {
2535 /* It released the siglock. */
2540 * We didn't actually stop, due to a race
2541 * with SIGCONT or something like that.
2547 spin_unlock_irq(&sighand->siglock);
2550 * Anything else is fatal, maybe with a core dump.
2552 current->flags |= PF_SIGNALED;
2554 if (sig_kernel_coredump(signr)) {
2555 if (print_fatal_signals)
2556 print_fatal_signal(ksig->info.si_signo);
2557 proc_coredump_connector(current);
2559 * If it was able to dump core, this kills all
2560 * other threads in the group and synchronizes with
2561 * their demise. If we lost the race with another
2562 * thread getting here, it set group_exit_code
2563 * first and our do_group_exit call below will use
2564 * that value and ignore the one we pass it.
2566 do_coredump(&ksig->info);
2570 * Death signals, no core dump.
2572 do_group_exit(ksig->info.si_signo);
2575 spin_unlock_irq(&sighand->siglock);
2578 return ksig->sig > 0;
2582 * signal_delivered - mark a signal as having been successfully delivered
2583 * @ksig: kernel signal struct
2584 * @stepping: nonzero if debugger single-step or block-step in use
2586 * This function should be called when a signal has successfully been
2587 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2588 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2589 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2591 static void signal_delivered(struct ksignal *ksig, int stepping)
2595 /* A signal was successfully delivered, and the
2596 saved sigmask was stored on the signal frame,
2597 and will be restored by sigreturn. So we can
2598 simply clear the restore sigmask flag. */
2599 clear_restore_sigmask();
2601 sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask);
2602 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2603 sigaddset(&blocked, ksig->sig);
2604 set_current_blocked(&blocked);
2605 tracehook_signal_handler(stepping);
2608 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2611 force_sigsegv(ksig->sig, current);
2613 signal_delivered(ksig, stepping);
2617 * It could be that complete_signal() picked us to notify about the
2618 * group-wide signal. Other threads should be notified now to take
2619 * the shared signals in @which since we will not.
2621 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2624 struct task_struct *t;
2626 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2627 if (sigisemptyset(&retarget))
2631 while_each_thread(tsk, t) {
2632 if (t->flags & PF_EXITING)
2635 if (!has_pending_signals(&retarget, &t->blocked))
2637 /* Remove the signals this thread can handle. */
2638 sigandsets(&retarget, &retarget, &t->blocked);
2640 if (!signal_pending(t))
2641 signal_wake_up(t, 0);
2643 if (sigisemptyset(&retarget))
2648 void exit_signals(struct task_struct *tsk)
2654 * @tsk is about to have PF_EXITING set - lock out users which
2655 * expect a stable threadgroup.
2657 cgroup_threadgroup_change_begin(tsk);
2659 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2660 tsk->flags |= PF_EXITING;
2661 cgroup_threadgroup_change_end(tsk);
2665 spin_lock_irq(&tsk->sighand->siglock);
2667 * From now this task is not visible for group-wide signals,
2668 * see wants_signal(), do_signal_stop().
2670 tsk->flags |= PF_EXITING;
2672 cgroup_threadgroup_change_end(tsk);
2674 if (!signal_pending(tsk))
2677 unblocked = tsk->blocked;
2678 signotset(&unblocked);
2679 retarget_shared_pending(tsk, &unblocked);
2681 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2682 task_participate_group_stop(tsk))
2683 group_stop = CLD_STOPPED;
2685 spin_unlock_irq(&tsk->sighand->siglock);
2688 * If group stop has completed, deliver the notification. This
2689 * should always go to the real parent of the group leader.
2691 if (unlikely(group_stop)) {
2692 read_lock(&tasklist_lock);
2693 do_notify_parent_cldstop(tsk, false, group_stop);
2694 read_unlock(&tasklist_lock);
2698 EXPORT_SYMBOL(recalc_sigpending);
2699 EXPORT_SYMBOL_GPL(dequeue_signal);
2700 EXPORT_SYMBOL(flush_signals);
2701 EXPORT_SYMBOL(force_sig);
2702 EXPORT_SYMBOL(send_sig);
2703 EXPORT_SYMBOL(send_sig_info);
2704 EXPORT_SYMBOL(sigprocmask);
2707 * System call entry points.
2711 * sys_restart_syscall - restart a system call
2713 SYSCALL_DEFINE0(restart_syscall)
2715 struct restart_block *restart = &current->restart_block;
2716 return restart->fn(restart);
2719 long do_no_restart_syscall(struct restart_block *param)
2724 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2726 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2727 sigset_t newblocked;
2728 /* A set of now blocked but previously unblocked signals. */
2729 sigandnsets(&newblocked, newset, &current->blocked);
2730 retarget_shared_pending(tsk, &newblocked);
2732 tsk->blocked = *newset;
2733 recalc_sigpending();
2737 * set_current_blocked - change current->blocked mask
2740 * It is wrong to change ->blocked directly; this helper should be used
2741 * to ensure the process can't miss a shared signal we are going to block.
2743 void set_current_blocked(sigset_t *newset)
2745 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2746 __set_current_blocked(newset);
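/*
 * In-kernel usage sketch (illustrative only, not taken verbatim from any
 * caller): build a sigset_t and hand it to set_current_blocked() instead
 * of writing to ->blocked directly, so the retargeting of shared pending
 * signals above is never skipped:
 *
 *	sigset_t blocked;
 *
 *	sigemptyset(&blocked);
 *	sigaddset(&blocked, SIGCHLD);
 *	set_current_blocked(&blocked);
 */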
2749 void __set_current_blocked(const sigset_t *newset)
2751 struct task_struct *tsk = current;
2754 * In case the signal mask hasn't changed, there is nothing we need
2755 * to do. The current->blocked shouldn't be modified by another task.
2757 if (sigequalsets(&tsk->blocked, newset))
2760 spin_lock_irq(&tsk->sighand->siglock);
2761 __set_task_blocked(tsk, newset);
2762 spin_unlock_irq(&tsk->sighand->siglock);
2766 * This is also useful for kernel threads that want to temporarily
2767 * (or permanently) block certain signals.
2769 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2770 * interface happily blocks "unblockable" signals like SIGKILL and friends.
2773 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2775 struct task_struct *tsk = current;
2778 /* Lockless, only current can change ->blocked, never from irq */
2780 *oldset = tsk->blocked;
2784 sigorsets(&newset, &tsk->blocked, set);
2787 sigandnsets(&newset, &tsk->blocked, set);
2796 __set_current_blocked(&newset);
2801 * sys_rt_sigprocmask - change the list of currently blocked signals
2802 * @how: whether to add, remove, or set signals
2803 * @nset: signals to add, remove, or set, depending on @how (if non-null)
2804 * @oset: previous value of signal mask if non-null
2805 * @sigsetsize: size of sigset_t type
2807 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2808 sigset_t __user *, oset, size_t, sigsetsize)
2810 sigset_t old_set, new_set;
2813 /* XXX: Don't preclude handling different sized sigset_t's. */
2814 if (sigsetsize != sizeof(sigset_t))
2817 old_set = current->blocked;
2820 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2822 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2824 error = sigprocmask(how, &new_set, NULL);
2830 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
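/*
 * Userspace usage sketch (illustrative only): the C library's
 * sigprocmask(2) wrapper reaches this code via rt_sigprocmask. Blocking
 * SIGINT around a critical section looks like:
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	// ... critical section ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 *
 * Note that attempts to block SIGKILL/SIGSTOP are silently stripped by
 * the sigdelsetmask() call above rather than reported as errors.
 */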
2837 #ifdef CONFIG_COMPAT
2838 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2839 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2841 sigset_t old_set = current->blocked;
2843 /* XXX: Don't preclude handling different sized sigset_t's. */
2844 if (sigsetsize != sizeof(sigset_t))
2850 if (get_compat_sigset(&new_set, nset))
2852 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2854 error = sigprocmask(how, &new_set, NULL);
2858 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
2862 static void do_sigpending(sigset_t *set)
2864 spin_lock_irq(&current->sighand->siglock);
2865 sigorsets(set, &current->pending.signal,
2866 &current->signal->shared_pending.signal);
2867 spin_unlock_irq(&current->sighand->siglock);
2869 /* Outside the lock because only this thread touches it. */
2870 sigandsets(set, &current->blocked, set);
2874 * sys_rt_sigpending - examine pending signals that have been raised while blocked
2876 * @uset: stores pending signals
2877 * @sigsetsize: size of sigset_t type or larger
2879 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2883 if (sigsetsize > sizeof(*uset))
2886 do_sigpending(&set);
2888 if (copy_to_user(uset, &set, sigsetsize))
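/*
 * Userspace usage sketch (illustrative only): sigpending(2) reports
 * signals that were raised while blocked and are still awaiting
 * delivery:
 *
 *	#include <signal.h>
 *
 *	sigset_t pending;
 *
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGINT))
 *		handle_deferred_sigint();	// hypothetical helper
 */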
2894 #ifdef CONFIG_COMPAT
2895 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2896 compat_size_t, sigsetsize)
2900 if (sigsetsize > sizeof(*uset))
2903 do_sigpending(&set);
2905 return put_compat_sigset(uset, &set, sigsetsize);
2909 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
2911 enum siginfo_layout layout = SIL_KILL;
2912 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
2913 static const struct {
2914 unsigned char limit, layout;
2916 [SIGILL] = { NSIGILL, SIL_FAULT },
2917 [SIGFPE] = { NSIGFPE, SIL_FAULT },
2918 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
2919 [SIGBUS] = { NSIGBUS, SIL_FAULT },
2920 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
2921 #if defined(SIGEMT) && defined(NSIGEMT)
2922 [SIGEMT] = { NSIGEMT, SIL_FAULT },
2924 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
2925 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
2926 [SIGSYS] = { NSIGSYS, SIL_SYS },
2928 if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit)) {
2929 layout = filter[sig].layout;
2930 /* Handle the exceptions */
2931 if ((sig == SIGBUS) &&
2932 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
2933 layout = SIL_FAULT_MCEERR;
2934 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
2935 layout = SIL_FAULT_BNDERR;
2937 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
2938 layout = SIL_FAULT_PKUERR;
2941 else if (si_code <= NSIGPOLL)
2944 if (si_code == SI_TIMER)
2946 else if (si_code == SI_SIGIO)
2948 else if (si_code < 0)
2954 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2956 if (copy_to_user(to, from , sizeof(struct siginfo)))
2961 #ifdef CONFIG_COMPAT
2962 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
2963 const struct siginfo *from)
2964 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
2966 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
2968 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
2969 const struct siginfo *from, bool x32_ABI)
2972 struct compat_siginfo new;
2973 memset(&new, 0, sizeof(new));
2975 new.si_signo = from->si_signo;
2976 new.si_errno = from->si_errno;
2977 new.si_code = from->si_code;
2978 switch(siginfo_layout(from->si_signo, from->si_code)) {
2980 new.si_pid = from->si_pid;
2981 new.si_uid = from->si_uid;
2984 new.si_tid = from->si_tid;
2985 new.si_overrun = from->si_overrun;
2986 new.si_int = from->si_int;
2989 new.si_band = from->si_band;
2990 new.si_fd = from->si_fd;
2993 new.si_addr = ptr_to_compat(from->si_addr);
2994 #ifdef __ARCH_SI_TRAPNO
2995 new.si_trapno = from->si_trapno;
2998 case SIL_FAULT_MCEERR:
2999 new.si_addr = ptr_to_compat(from->si_addr);
3000 #ifdef __ARCH_SI_TRAPNO
3001 new.si_trapno = from->si_trapno;
3003 new.si_addr_lsb = from->si_addr_lsb;
3005 case SIL_FAULT_BNDERR:
3006 new.si_addr = ptr_to_compat(from->si_addr);
3007 #ifdef __ARCH_SI_TRAPNO
3008 new.si_trapno = from->si_trapno;
3010 new.si_lower = ptr_to_compat(from->si_lower);
3011 new.si_upper = ptr_to_compat(from->si_upper);
3013 case SIL_FAULT_PKUERR:
3014 new.si_addr = ptr_to_compat(from->si_addr);
3015 #ifdef __ARCH_SI_TRAPNO
3016 new.si_trapno = from->si_trapno;
3018 new.si_pkey = from->si_pkey;
3021 new.si_pid = from->si_pid;
3022 new.si_uid = from->si_uid;
3023 new.si_status = from->si_status;
3024 #ifdef CONFIG_X86_X32_ABI
3026 new._sifields._sigchld_x32._utime = from->si_utime;
3027 new._sifields._sigchld_x32._stime = from->si_stime;
3031 new.si_utime = from->si_utime;
3032 new.si_stime = from->si_stime;
3036 new.si_pid = from->si_pid;
3037 new.si_uid = from->si_uid;
3038 new.si_int = from->si_int;
3041 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3042 new.si_syscall = from->si_syscall;
3043 new.si_arch = from->si_arch;
3047 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3053 int copy_siginfo_from_user32(struct siginfo *to,
3054 const struct compat_siginfo __user *ufrom)
3056 struct compat_siginfo from;
3058 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3062 to->si_signo = from.si_signo;
3063 to->si_errno = from.si_errno;
3064 to->si_code = from.si_code;
3065 switch(siginfo_layout(from.si_signo, from.si_code)) {
3067 to->si_pid = from.si_pid;
3068 to->si_uid = from.si_uid;
3071 to->si_tid = from.si_tid;
3072 to->si_overrun = from.si_overrun;
3073 to->si_int = from.si_int;
3076 to->si_band = from.si_band;
3077 to->si_fd = from.si_fd;
3080 to->si_addr = compat_ptr(from.si_addr);
3081 #ifdef __ARCH_SI_TRAPNO
3082 to->si_trapno = from.si_trapno;
3085 case SIL_FAULT_MCEERR:
3086 to->si_addr = compat_ptr(from.si_addr);
3087 #ifdef __ARCH_SI_TRAPNO
3088 to->si_trapno = from.si_trapno;
3090 to->si_addr_lsb = from.si_addr_lsb;
3092 case SIL_FAULT_BNDERR:
3093 to->si_addr = compat_ptr(from.si_addr);
3094 #ifdef __ARCH_SI_TRAPNO
3095 to->si_trapno = from.si_trapno;
3097 to->si_lower = compat_ptr(from.si_lower);
3098 to->si_upper = compat_ptr(from.si_upper);
3100 case SIL_FAULT_PKUERR:
3101 to->si_addr = compat_ptr(from.si_addr);
3102 #ifdef __ARCH_SI_TRAPNO
3103 to->si_trapno = from.si_trapno;
3105 to->si_pkey = from.si_pkey;
3108 to->si_pid = from.si_pid;
3109 to->si_uid = from.si_uid;
3110 to->si_status = from.si_status;
3111 #ifdef CONFIG_X86_X32_ABI
3112 if (in_x32_syscall()) {
3113 to->si_utime = from._sifields._sigchld_x32._utime;
3114 to->si_stime = from._sifields._sigchld_x32._stime;
3118 to->si_utime = from.si_utime;
3119 to->si_stime = from.si_stime;
3123 to->si_pid = from.si_pid;
3124 to->si_uid = from.si_uid;
3125 to->si_int = from.si_int;
3128 to->si_call_addr = compat_ptr(from.si_call_addr);
3129 to->si_syscall = from.si_syscall;
3130 to->si_arch = from.si_arch;
3135 #endif /* CONFIG_COMPAT */
3138 * do_sigtimedwait - wait for queued signals specified in @which
3139 * @which: queued signals to wait for
3140 * @info: if non-null, the signal's siginfo is returned here
3141 * @ts: upper bound on process time suspension
3143 static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
3144 const struct timespec *ts)
3146 ktime_t *to = NULL, timeout = KTIME_MAX;
3147 struct task_struct *tsk = current;
3148 sigset_t mask = *which;
3152 if (!timespec_valid(ts))
3154 timeout = timespec_to_ktime(*ts);
3159 * Invert the set of allowed signals to get those we want to block.
3161 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3164 spin_lock_irq(&tsk->sighand->siglock);
3165 sig = dequeue_signal(tsk, &mask, info);
3166 if (!sig && timeout) {
3168 * None ready, temporarily unblock those we're interested in
3169 * while we are sleeping so that we'll be awakened when
3170 * they arrive. Unblocking is always fine, we can avoid
3171 * set_current_blocked().
3173 tsk->real_blocked = tsk->blocked;
3174 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3175 recalc_sigpending();
3176 spin_unlock_irq(&tsk->sighand->siglock);
3178 __set_current_state(TASK_INTERRUPTIBLE);
3179 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3181 spin_lock_irq(&tsk->sighand->siglock);
3182 __set_task_blocked(tsk, &tsk->real_blocked);
3183 sigemptyset(&tsk->real_blocked);
3184 sig = dequeue_signal(tsk, &mask, info);
3186 spin_unlock_irq(&tsk->sighand->siglock);
3190 return ret ? -EINTR : -EAGAIN;
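/*
 * Userspace usage sketch (illustrative only): wait up to one second for
 * SIGUSR1 with sigtimedwait(2). The signal must already be blocked;
 * otherwise it can be delivered asynchronously instead of being picked
 * up by dequeue_signal() above:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	sigset_t set;
 *	siginfo_t info;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &info, &ts) < 0)
 *		perror("sigtimedwait");	// EAGAIN maps from -EAGAIN on timeout
 */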
3194 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3196 * @uthese: queued signals to wait for
3197 * @uinfo: if non-null, the signal's siginfo is returned here
3198 * @uts: upper bound on process time suspension
3199 * @sigsetsize: size of sigset_t type
3201 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3202 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
3210 /* XXX: Don't preclude handling different sized sigset_t's. */
3211 if (sigsetsize != sizeof(sigset_t))
3214 if (copy_from_user(&these, uthese, sizeof(these)))
3218 if (copy_from_user(&ts, uts, sizeof(ts)))
3222 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3224 if (ret > 0 && uinfo) {
3225 if (copy_siginfo_to_user(uinfo, &info))
3232 #ifdef CONFIG_COMPAT
3233 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
3234 struct compat_siginfo __user *, uinfo,
3235 struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
3242 if (sigsetsize != sizeof(sigset_t))
3245 if (get_compat_sigset(&s, uthese))
3249 if (compat_get_timespec(&t, uts))
3253 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3255 if (ret > 0 && uinfo) {
3256 if (copy_siginfo_to_user32(uinfo, &info))
3265 * sys_kill - send a signal to a process
3266 * @pid: the PID of the process
3267 * @sig: signal to be sent
3269 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3271 struct siginfo info;
3273 clear_siginfo(&info);
3274 info.si_signo = sig;
3276 info.si_code = SI_USER;
3277 info.si_pid = task_tgid_vnr(current);
3278 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3280 return kill_something_info(sig, &info, pid);
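/*
 * Userspace usage sketch (illustrative only): @pid is interpreted by
 * kill_something_info(). A positive pid targets one process, 0 the
 * caller's process group, -1 every process the caller may signal, and
 * pid < -1 the process group -pid:
 *
 *	#include <signal.h>
 *
 *	kill(1234, SIGTERM);	// one process
 *	kill(-1234, SIGTERM);	// process group 1234
 *	kill(1234, 0);		// existence/permission probe only
 */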
3284 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
3286 struct task_struct *p;
3290 p = find_task_by_vpid(pid);
3291 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3292 error = check_kill_permission(sig, info, p);
3294 * The null signal is a permissions and process existence
3295 * probe. No signal is actually delivered.
3297 if (!error && sig) {
3298 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3300 * If lock_task_sighand() failed we pretend the task
3301 * dies after receiving the signal. The window is tiny,
3302 * and the signal is private anyway.
3304 if (unlikely(error == -ESRCH))
3313 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3315 struct siginfo info;
3317 clear_siginfo(&info);
3318 info.si_signo = sig;
3320 info.si_code = SI_TKILL;
3321 info.si_pid = task_tgid_vnr(current);
3322 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3324 return do_send_specific(tgid, pid, sig, &info);
3328 * sys_tgkill - send signal to one specific thread
3329 * @tgid: the thread group ID of the thread
3330 * @pid: the PID of the thread
3331 * @sig: signal to be sent
3333 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3334 * exists but no longer belongs to the target process. This
3335 * method solves the problem of threads exiting and PIDs getting reused.
3337 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3339 /* This is only valid for single tasks */
3340 if (pid <= 0 || tgid <= 0)
3343 return do_tkill(tgid, pid, sig);
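/*
 * Userspace usage sketch (illustrative only): older C libraries ship no
 * tgkill() wrapper, so it is commonly invoked through syscall(2).
 * Pairing the thread id with its @tgid guards against PID reuse as
 * described above:
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = my_worker_tid;	// hypothetical, e.g. obtained via gettid()
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */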
3347 * sys_tkill - send signal to one specific task
3348 * @pid: the PID of the task
3349 * @sig: signal to be sent
3351 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3353 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3355 /* This is only valid for single tasks */
3359 return do_tkill(0, pid, sig);
3362 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3364 /* Not even root can pretend to send signals from the kernel.
3365 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3367 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3368 (task_pid_vnr(current) != pid))
3371 info->si_signo = sig;
3373 /* POSIX.1b doesn't mention process groups. */
3374 return kill_proc_info(sig, info, pid);
3378 * sys_rt_sigqueueinfo - send a signal plus siginfo to a process
3379 * @pid: the PID of the target process
3380 * @sig: signal to be sent
3381 * @uinfo: signal info to be sent
3383 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3384 siginfo_t __user *, uinfo)
3387 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3389 return do_rt_sigqueueinfo(pid, sig, &info);
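/*
 * Userspace usage sketch (illustrative only): sigqueue(3) is the usual
 * entry point into this path. It fills in si_code = SI_QUEUE, a negative
 * value, so it passes the impersonation check in do_rt_sigqueueinfo()
 * above, whereas forged si_code values >= 0 sent to another pid are
 * rejected:
 *
 *	#include <signal.h>
 *
 *	union sigval val = { .sival_int = 42 };
 *
 *	sigqueue(target_pid, SIGUSR1, val);	// target_pid: hypothetical
 */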
3392 #ifdef CONFIG_COMPAT
3393 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3396 struct compat_siginfo __user *, uinfo)
3399 int ret = copy_siginfo_from_user32(&info, uinfo);
3402 return do_rt_sigqueueinfo(pid, sig, &info);
3406 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3408 /* This is only valid for single tasks */
3409 if (pid <= 0 || tgid <= 0)
3412 /* Not even root can pretend to send signals from the kernel.
3413 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3415 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3416 (task_pid_vnr(current) != pid))
3419 info->si_signo = sig;
3421 return do_send_specific(tgid, pid, sig, info);
3424 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3425 siginfo_t __user *, uinfo)
3429 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3432 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3435 #ifdef CONFIG_COMPAT
3436 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3440 struct compat_siginfo __user *, uinfo)
3444 if (copy_siginfo_from_user32(&info, uinfo))
3446 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3451 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3453 void kernel_sigaction(int sig, __sighandler_t action)
3455 spin_lock_irq(&current->sighand->siglock);
3456 current->sighand->action[sig - 1].sa.sa_handler = action;
3457 if (action == SIG_IGN) {
3461 sigaddset(&mask, sig);
3463 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3464 flush_sigqueue_mask(&mask, &current->pending);
3465 recalc_sigpending();
3467 spin_unlock_irq(&current->sighand->siglock);
3469 EXPORT_SYMBOL(kernel_sigaction);
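/*
 * In-kernel usage sketch (illustrative only): kernel threads normally
 * reach kernel_sigaction() through the allow_signal()/disallow_signal()
 * helpers and then poll for delivery themselves:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current)) {
 *			// react to SIGTERM, e.g. wind down and exit the loop
 *			break;
 *		}
 *		// ... do work ...
 *	}
 */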
3471 void __weak sigaction_compat_abi(struct k_sigaction *act,
3472 struct k_sigaction *oact)
3476 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3478 struct task_struct *p = current, *t;
3479 struct k_sigaction *k;
3482 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3485 k = &p->sighand->action[sig-1];
3487 spin_lock_irq(&p->sighand->siglock);
3491 sigaction_compat_abi(act, oact);
3494 sigdelsetmask(&act->sa.sa_mask,
3495 sigmask(SIGKILL) | sigmask(SIGSTOP));
3499 * "Setting a signal action to SIG_IGN for a signal that is
3500 * pending shall cause the pending signal to be discarded,
3501 * whether or not it is blocked."
3503 * "Setting a signal action to SIG_DFL for a signal that is
3504 * pending and whose default action is to ignore the signal
3505 * (for example, SIGCHLD), shall cause the pending signal to
3506 * be discarded, whether or not it is blocked"
3508 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3510 sigaddset(&mask, sig);
3511 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3512 for_each_thread(p, t)
3513 flush_sigqueue_mask(&mask, &t->pending);
3517 spin_unlock_irq(&p->sighand->siglock);
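/*
 * Userspace usage sketch (illustrative only): sigaction(2) reaches
 * do_sigaction() via rt_sigaction. Installing a handler looks like the
 * snippet below; installing SIG_IGN instead triggers the POSIX
 * pending-signal discard semantics quoted above:
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = my_handler;	// hypothetical handler function
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;
 *	sigaction(SIGUSR1, &sa, NULL);
 */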
3522 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
3525 struct task_struct *t = current;
3528 memset(oss, 0, sizeof(stack_t));
3529 oss->ss_sp = (void __user *) t->sas_ss_sp;
3530 oss->ss_size = t->sas_ss_size;
3531 oss->ss_flags = sas_ss_flags(sp) |
3532 (current->sas_ss_flags & SS_FLAG_BITS);
3536 void __user *ss_sp = ss->ss_sp;
3537 size_t ss_size = ss->ss_size;
3538 unsigned ss_flags = ss->ss_flags;
3541 if (unlikely(on_sig_stack(sp)))
3544 ss_mode = ss_flags & ~SS_FLAG_BITS;
3545 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3549 if (ss_mode == SS_DISABLE) {
3553 if (unlikely(ss_size < min_ss_size))
3557 t->sas_ss_sp = (unsigned long) ss_sp;
3558 t->sas_ss_size = ss_size;
3559 t->sas_ss_flags = ss_flags;
3564 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3568 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3570 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3571 current_user_stack_pointer(),
3573 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
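/*
 * Userspace usage sketch (illustrative only): an alternate stack is only
 * consulted by handlers installed with SA_ONSTACK; the classic use is
 * surviving a stack-overflow SIGSEGV:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	struct sigaction sa = {
 *		.sa_handler = segv_handler,	// hypothetical handler
 *		.sa_flags = SA_ONSTACK,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	sigaction(SIGSEGV, &sa, NULL);
 */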
3578 int restore_altstack(const stack_t __user *uss)
3581 if (copy_from_user(&new, uss, sizeof(stack_t)))
3583 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
3585 /* squash all but EFAULT for now */
3589 int __save_altstack(stack_t __user *uss, unsigned long sp)
3591 struct task_struct *t = current;
3592 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3593 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3594 __put_user(t->sas_ss_size, &uss->ss_size);
3597 if (t->sas_ss_flags & SS_AUTODISARM)
3602 #ifdef CONFIG_COMPAT
3603 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
3604 compat_stack_t __user *uoss_ptr)
3610 compat_stack_t uss32;
3611 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3613 uss.ss_sp = compat_ptr(uss32.ss_sp);
3614 uss.ss_flags = uss32.ss_flags;
3615 uss.ss_size = uss32.ss_size;
3617 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
3618 compat_user_stack_pointer(),
3619 COMPAT_MINSIGSTKSZ);
3620 if (ret >= 0 && uoss_ptr) {
3622 memset(&old, 0, sizeof(old));
3623 old.ss_sp = ptr_to_compat(uoss.ss_sp);
3624 old.ss_flags = uoss.ss_flags;
3625 old.ss_size = uoss.ss_size;
3626 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
3632 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3633 const compat_stack_t __user *, uss_ptr,
3634 compat_stack_t __user *, uoss_ptr)
3636 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
3639 int compat_restore_altstack(const compat_stack_t __user *uss)
3641 int err = do_compat_sigaltstack(uss, NULL);
3642 /* squash all but -EFAULT for now */
3643 return err == -EFAULT ? err : 0;
3646 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3649 struct task_struct *t = current;
3650 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3652 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3653 __put_user(t->sas_ss_size, &uss->ss_size);
3656 if (t->sas_ss_flags & SS_AUTODISARM)
3662 #ifdef __ARCH_WANT_SYS_SIGPENDING
3665 * sys_sigpending - examine pending signals
3666 * @uset: where the mask of pending signals is returned
3668 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
3672 if (sizeof(old_sigset_t) > sizeof(*uset))
3675 do_sigpending(&set);
3677 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
3683 #ifdef CONFIG_COMPAT
3684 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
3688 do_sigpending(&set);
3690 return put_user(set.sig[0], set32);
3696 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3698 * sys_sigprocmask - examine and change blocked signals
3699 * @how: whether to add, remove, or set signals
3700 * @nset: signals to add or remove (if non-null)
3701 * @oset: previous value of signal mask if non-null
3703 * Some platforms have their own version with special arguments;
3704 * others support only sys_rt_sigprocmask.
3707 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3708 old_sigset_t __user *, oset)
3710 old_sigset_t old_set, new_set;
3711 sigset_t new_blocked;
3713 old_set = current->blocked.sig[0];
3716 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3719 new_blocked = current->blocked;
3723 sigaddsetmask(&new_blocked, new_set);
3726 sigdelsetmask(&new_blocked, new_set);
3729 new_blocked.sig[0] = new_set;
3735 set_current_blocked(&new_blocked);
3739 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3745 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3747 #ifndef CONFIG_ODD_RT_SIGACTION
3749 * sys_rt_sigaction - alter an action taken by a process
3750 * @sig: signal whose action is to be changed
3751 * @act: new sigaction
3752 * @oact: used to save the previous sigaction
3753 * @sigsetsize: size of sigset_t type
3755 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3756 const struct sigaction __user *, act,
3757 struct sigaction __user *, oact,
3760 struct k_sigaction new_sa, old_sa;
3763 /* XXX: Don't preclude handling different sized sigset_t's. */
3764 if (sigsetsize != sizeof(sigset_t))
3767 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3770 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3774 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3779 #ifdef CONFIG_COMPAT
3780 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3781 const struct compat_sigaction __user *, act,
3782 struct compat_sigaction __user *, oact,
3783 compat_size_t, sigsetsize)
3785 struct k_sigaction new_ka, old_ka;
3786 #ifdef __ARCH_HAS_SA_RESTORER
3787 compat_uptr_t restorer;
3791 /* XXX: Don't preclude handling different sized sigset_t's. */
3792 if (sigsetsize != sizeof(compat_sigset_t))
3796 compat_uptr_t handler;
3797 ret = get_user(handler, &act->sa_handler);
3798 new_ka.sa.sa_handler = compat_ptr(handler);
3799 #ifdef __ARCH_HAS_SA_RESTORER
3800 ret |= get_user(restorer, &act->sa_restorer);
3801 new_ka.sa.sa_restorer = compat_ptr(restorer);
3803 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
3804 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3809 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3811 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3813 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
3814 sizeof(oact->sa_mask));
3815 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3816 #ifdef __ARCH_HAS_SA_RESTORER
3817 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3818 &oact->sa_restorer);
3824 #endif /* !CONFIG_ODD_RT_SIGACTION */
3826 #ifdef CONFIG_OLD_SIGACTION
3827 SYSCALL_DEFINE3(sigaction, int, sig,
3828 const struct old_sigaction __user *, act,
3829 struct old_sigaction __user *, oact)
3831 struct k_sigaction new_ka, old_ka;
3836 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3837 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3838 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3839 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3840 __get_user(mask, &act->sa_mask))
3842 #ifdef __ARCH_HAS_KA_RESTORER
3843 new_ka.ka_restorer = NULL;
3845 siginitset(&new_ka.sa.sa_mask, mask);
3848 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3851 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3852 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3853 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3854 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3855 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3862 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3863 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3864 const struct compat_old_sigaction __user *, act,
3865 struct compat_old_sigaction __user *, oact)
3867 struct k_sigaction new_ka, old_ka;
3869 compat_old_sigset_t mask;
3870 compat_uptr_t handler, restorer;
3873 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3874 __get_user(handler, &act->sa_handler) ||
3875 __get_user(restorer, &act->sa_restorer) ||
3876 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3877 __get_user(mask, &act->sa_mask))
3880 #ifdef __ARCH_HAS_KA_RESTORER
3881 new_ka.ka_restorer = NULL;
3883 new_ka.sa.sa_handler = compat_ptr(handler);
3884 new_ka.sa.sa_restorer = compat_ptr(restorer);
3885 siginitset(&new_ka.sa.sa_mask, mask);
3888 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3891 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3892 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3893 &oact->sa_handler) ||
3894 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3895 &oact->sa_restorer) ||
3896 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3897 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3904 #ifdef CONFIG_SGETMASK_SYSCALL
3907 * For backwards compatibility. Functionality superseded by sigprocmask.
3909 SYSCALL_DEFINE0(sgetmask)
3912 return current->blocked.sig[0];
3915 SYSCALL_DEFINE1(ssetmask, int, newmask)
3917 int old = current->blocked.sig[0];
3920 siginitset(&newset, newmask);
3921 set_current_blocked(&newset);
3925 #endif /* CONFIG_SGETMASK_SYSCALL */
3927 #ifdef __ARCH_WANT_SYS_SIGNAL
3929 * For backwards compatibility. Functionality superseded by sigaction.
3931 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3933 struct k_sigaction new_sa, old_sa;
3936 new_sa.sa.sa_handler = handler;
3937 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3938 sigemptyset(&new_sa.sa.sa_mask);
3940 ret = do_sigaction(sig, &new_sa, &old_sa);
3942 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3944 #endif /* __ARCH_WANT_SYS_SIGNAL */
3946 #ifdef __ARCH_WANT_SYS_PAUSE
3948 SYSCALL_DEFINE0(pause)
3950 while (!signal_pending(current)) {
3951 __set_current_state(TASK_INTERRUPTIBLE);
3954 return -ERESTARTNOHAND;
3959 static int sigsuspend(sigset_t *set)
3961 current->saved_sigmask = current->blocked;
3962 set_current_blocked(set);
3964 while (!signal_pending(current)) {
3965 __set_current_state(TASK_INTERRUPTIBLE);
3968 set_restore_sigmask();
3969 return -ERESTARTNOHAND;
3973 * sys_rt_sigsuspend - replace the signal mask with the @unewset
3974 * value until a signal is received
3975 * @unewset: new signal mask value
3976 * @sigsetsize: size of sigset_t type
3978 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3982 /* XXX: Don't preclude handling different sized sigset_t's. */
3983 if (sigsetsize != sizeof(sigset_t))
3986 if (copy_from_user(&newset, unewset, sizeof(newset)))
3988 return sigsuspend(&newset);
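/*
 * Userspace usage sketch (illustrative only): the classic race-free
 * wait. The signal stays blocked while the flag is tested, and
 * sigsuspend(2) atomically installs a mask that unblocks it for the
 * duration of the sleep, which is exactly the saved_sigmask dance
 * performed by sigsuspend() above:
 *
 *	#include <signal.h>
 *
 *	sigset_t block, waitmask;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	sigdelset(&waitmask, SIGUSR1);
 *	while (!got_usr1)		// hypothetical flag set by the handler
 *		sigsuspend(&waitmask);
 */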
3991 #ifdef CONFIG_COMPAT
3992 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3996 /* XXX: Don't preclude handling different sized sigset_t's. */
3997 if (sigsetsize != sizeof(sigset_t))
4000 if (get_compat_sigset(&newset, unewset))
4002 return sigsuspend(&newset);
4006 #ifdef CONFIG_OLD_SIGSUSPEND
4007 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4010 siginitset(&blocked, mask);
4011 return sigsuspend(&blocked);
4014 #ifdef CONFIG_OLD_SIGSUSPEND3
4015 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4018 siginitset(&blocked, mask);
4019 return sigsuspend(&blocked);
4023 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4028 void __init signals_init(void)
4030 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
4031 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
4032 != offsetof(struct siginfo, _sifields._pad));
4033 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4035 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4038 #ifdef CONFIG_KGDB_KDB
4039 #include <linux/kdb.h>
4041 * kdb_send_sig - Allows kdb to send signals without exposing
4042 * signal internals. This function checks if the required locks are
4043 * available before calling the main signal code, to avoid kdb deadlocks.
4046 void kdb_send_sig(struct task_struct *t, int sig)
4048 static struct task_struct *kdb_prev_t;
4050 if (!spin_trylock(&t->sighand->siglock)) {
4051 kdb_printf("Can't do kill command now.\n"
4052 "The sigmask lock is held somewhere else in "
4053 "kernel, try again later\n");
4056 new_t = kdb_prev_t != t;
4058 if (t->state != TASK_RUNNING && new_t) {
4059 spin_unlock(&t->sighand->siglock);
4060 kdb_printf("Process is not RUNNING, sending a signal from "
4061 "kdb risks deadlock\n"
4062 "on the run queue locks. "
4063 "The signal has _not_ been sent.\n"
4064 "Reissue the kill command if you want to risk "
4068 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4069 spin_unlock(&t->sighand->siglock);
4071 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4074 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4076 #endif /* CONFIG_KGDB_KDB */