/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *	      Changes to use preallocated sigqueue structures
 *	      to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */
static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
	return t->sighand->action[sig - 1].sa.sa_handler;

static int sig_handler_ignored(void __user *handler, int sig)
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
static int sig_task_ignored(struct task_struct *t, int sig, bool force)
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return 1;

	return sig_handler_ignored(handler, sig);

static int sig_ignored(struct task_struct *t, int sig, bool force)
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return 0;

	return sig_task_ignored(t, sig, force);
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static int recalc_sigpending_tsk(struct task_struct *t)
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know they should
	 * clear it do so.
	 */
	return 0;
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);

void recalc_sigpending(void)
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
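
/*
 * Illustrative (editor's sketch, not part of this file): the typical
 * caller pattern. Anything that changes current->blocked under siglock
 * is expected to re-evaluate TIF_SIGPENDING afterwards, e.g.:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	sigdelsetmask(&current->blocked, sigmask(SIGUSR1)); // hypothetical unblock
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */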
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;
	}

	return sig;
static inline void print_dropped_signal(int sig)
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING. If a stop signo is being set, the existing signo is
 * cleared. If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer. Note that we don't need any further
 * locking; @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl. @mask must be a subset of
 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, the other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop. Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;

static void __sigqueue_free(struct sigqueue *q)
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
void flush_sigqueue(struct sigpending *queue)
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
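
/*
 * Illustrative (editor's sketch): a kthread that opted in to a signal
 * via allow_signal() typically drains anything pending itself, e.g.:
 *
 *	allow_signal(SIGKILL);
 *	...
 *	if (signal_pending(current))
 *		flush_signals(current);
 */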
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);

void flush_itimer_signals(void)
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
#endif
void ignore_signals(struct task_struct *t)
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);

/*
 * Flush all handlers for a task.
 */
void
flush_signal_handlers(struct task_struct *t, int force_default)
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];

	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}

int unhandled_signal(struct task_struct *tsk, int sig)
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return 1;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal. Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue. This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space. So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
	}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    siginfo_t *info, bool *resched_timer)
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}
	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal. Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL). So those cases clear this
		 * shared flag after we've set it. Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled. That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks. Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;

static inline int is_si_special(const struct siginfo *info)
	return info <= SEND_SIG_FORCED;

static inline bool si_fromuser(const struct siginfo *info)
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
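
/*
 * Editor's note (assumption, based on the definitions that accompany
 * these helpers in the scheduler/signal headers): the SEND_SIG_* markers
 * are small integer constants cast to siginfo pointers, roughly
 *
 *	#define SEND_SIG_NOINFO	((struct siginfo *) 0)
 *	#define SEND_SIG_PRIV	((struct siginfo *) 1)
 *	#define SEND_SIG_FORCED	((struct siginfo *) 2)
 *
 * which is why is_si_special() can test them with a plain <= comparison.
 */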
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid) ||
	    uid_eq(cred->uid, tcred->suid) ||
	    uid_eq(cred->uid, tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken. If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event. If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
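
/*
 * Illustrative (editor's sketch, userspace view of the above): the
 * continue happens at generation time, so a stopped child resumes even
 * if it blocks or ignores SIGCONT:
 *
 *	kill(child, SIGSTOP);	// child enters group stop
 *	kill(child, SIGCONT);	// child resumes immediately; only the
 *				// delivery of SIGCONT itself follows the
 *				// usual blocking/ignoring rules
 */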
/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
	if (sigismember(&p->blocked, sig))
		return 0;

	if (p->flags & PF_EXITING)
		return 0;

	if (sig == SIGKILL)
		return 1;

	if (task_is_stopped_or_traced(p))
		return 0;

	return task_curr(p) || !signal_pending(p);
static void complete_signal(int sig, struct task_struct *p, int group)
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread. If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;

static inline int legacy_queue(struct sigpending *signals, int sig)
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
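
/*
 * Illustrative (editor's note): this is the classic coalescing rule.
 * While SIGUSR1 is blocked, two kill()s leave a single pending instance,
 * whereas real-time signals queue one instance per send:
 *
 *	kill(pid, SIGUSR1); kill(pid, SIGUSR1);	// delivered once
 *	sigqueue(pid, SIGRTMIN, val);  (twice)	// delivered twice
 */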
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
	return;
#endif
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			 int group, int from_ancestor_ns)
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism. It is implementation
	 * defined whether kill() does so. We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort. We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information. We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
		       int group)
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
static void print_fatal_signal(int signr)
	struct pt_regs *regs = signal_pt_regs();

	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;

		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
#endif

static int __init setup_print_fatal_signals(char *str)
	get_option(&str, &print_fatal_signals);

	return 1;

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
	return send_signal(sig, info, p, 1);

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
	return send_signal(sig, info, t, 0);

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
		     bool group)
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
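
/*
 * Illustrative (editor's sketch): the canonical caller pattern, as used
 * by do_send_sig_info() above. lock_task_sighand() fails only once the
 * task has passed __exit_signal():
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(p, &flags)) {
 *		... p->sighand is stable here ...
 *		unlock_task_sighand(p, &flags);
 *	}
 */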
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}

static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
	const struct cred *pcred = __task_cred(target);

	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
		return 0;
	return 1;
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred, u32 secid)
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (lock_task_sighand(p, &flags)) {
		ret = __send_signal(sig, info, p, 1, 0);
		unlock_task_sighand(p, &flags);
	}
out_unlock:
	rcu_read_unlock();
	return ret;

EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong. Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
			    !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
/*
 * These are for backward compatibility with the rest of the kernel source.
 */
int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
	return send_sig_info(sig, __si_special(priv), p);

void
force_sig(int sig, struct task_struct *p)
	force_sig_info(sig, SEND_SIG_PRIV, p);
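
/*
 * Illustrative (editor's note): send_sig() respects blocking/ignoring,
 * while force_sig() is for synchronous, can't-continue situations, e.g.:
 *
 *	send_sig(SIGHUP, task, 1);	// priv=1: kernel-originated (SEND_SIG_PRIV)
 *	force_sig(SIGSEGV, current);	// un-ignorable fault delivery
 */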
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
int
force_sigsegv(int sig, struct task_struct *p)
	if (sig == SIGSEGV) {
		unsigned long flags;

		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;

int kill_pgrp(struct pid *pid, int sig, int priv)
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;

EXPORT_SYMBOL(kill_pgrp);
int kill_pid(struct pid *pid, int sig, int priv)
	return kill_pid_info(sig, __si_special(priv), pid);

EXPORT_SYMBOL(kill_pid);
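
/*
 * Illustrative (editor's sketch): a kernel-side caller would typically
 * pin a struct pid reference and signal through it, e.g.:
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *
 *	kill_pid(pid, SIGTERM, 1);	// priv=1: SEND_SIG_PRIV
 *	put_pid(pid);
 */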
/*
 * These functions support sending signals using preallocated sigqueue
 * structures. This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions". In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create. If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
void sigqueue_free(struct sigqueue *q)
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
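
/*
 * Illustrative (editor's sketch of the POSIX-timer usage described
 * above): allocate at timer_create() time so that expiry never has to
 * allocate, then release at timer_delete() time:
 *
 *	struct sigqueue *q = sigqueue_alloc();	// timer_create(): may fail
 *
 *	if (!q)
 *		return -EAGAIN;			// reportable to the application
 *	...
 *	q->info.si_code = SI_TIMER;		// at expiry
 *	send_sigqueue(q, target, 0);		// no allocation on this path
 *	...
 *	sigqueue_free(q);			// timer_delete()
 */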
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed. If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
static inline int may_ptrace_stop(void)
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 *
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop. This is allowed to block, e.g. for faults
		 * on user stack pages. We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock. That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping. TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now. We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader. The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop. The states
		 * for the two don't interact with each other. Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here. During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back. Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
static void ptrace_do_notify(int signr, int exit_code, int why)
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run. */
	ptrace_stop(exit_code, why, 1, &info);

void ptrace_notify(int exit_code)
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
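
/*
 * Illustrative (editor's note): callers pack a PTRACE_EVENT_* code into
 * the high byte of exit_code, which is what the BUG_ON above checks;
 * ptrace_event() in <linux/ptrace.h> does essentially:
 *
 *	ptrace_notify((event << 8) | SIGTRAP);
 */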
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it. If already set, participate in the existing
 * group stop. If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself. Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched. The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress. We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop. This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced. That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set. Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion. Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader. The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
static int ptrace_signal(int signr, siginfo_t *info)
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back. Did the debugger cancel the sig? */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed. If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
int get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
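
/*
 * Editor's note: a minimal sketch (not kernel source) of how per-arch
 * entry code typically drives get_signal().  The helper names below
 * (do_signal, handle_signal) are illustrative; the real versions live in
 * each architecture's signal.c and also deal with syscall restarting:
 *
 *	static void do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			handle_signal(&ksig, regs);	// set up user frame
 *			return;
 *		}
 *		// No handler to run: put the saved sigmask back, if any.
 *		restore_saved_sigmask();
 *	}
 */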
/**
 * signal_delivered - called after a signal has been delivered
 * @ksig:		kernel signal struct
 * @stepping:		nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}

void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig, current);
	else
		signal_delivered(ksig, stepping);
}
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
/**
 *  sys_rt_sigprocmask - change the list of currently blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: the new set of signals to block/unblock/set, or NULL
 *  @oset: previous value of signal mask if non-null
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
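
/*
 * Editor's sketch (user space, not kernel code): sys_rt_sigprocmask() is
 * what the C library's sigprocmask() wrapper ultimately calls.  A typical
 * block-then-wait sequence:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t si;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		if (sigprocmask(SIG_BLOCK, &set, NULL) == -1)
 *			return 1;
 *		raise(SIGUSR1);			// stays pending: it is blocked
 *		sigwaitinfo(&set, &si);		// dequeue it synchronously
 *		printf("got signal %d\n", si.si_signo);
 *		return 0;
 *	}
 *
 * Note that the kernel silently strips SIGKILL and SIGSTOP from the new
 * mask (the sigdelsetmask() above), so they can never be blocked.
 */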
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		compat_sigset_t new32;
		sigset_t new_set;
		int error;
		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
			return -EFAULT;

		sigset_from_compat(&new_set, &new32);
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	if (oset) {
		compat_sigset_t old32;
		sigset_to_compat(&old32, &old_set);
		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
#else
	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
				  (sigset_t __user *)oset, sigsetsize);
#endif
}
#endif
static int do_sigpending(void *set, unsigned long sigsetsize)
{
	if (sigsetsize > sizeof(sigset_t))
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
	return 0;
}
/**
 *  sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 *  @uset: stores pending signals
 *  @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err && copy_to_user(uset, &set, sigsetsize))
		err = -EFAULT;
	return err;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err) {
		compat_sigset_t set32;
		sigset_to_compat(&set32, &set);
		/* we can get here only if sigsetsize <= sizeof(set) */
		if (copy_to_user(uset, &set32, sigsetsize))
			err = -EFAULT;
	}
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
#endif
}
#endif
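
/*
 * Editor's sketch (user space, illustrative): sigpending(2) lands in
 * do_sigpending() above and reports signals that are both pending and
 * blocked for the calling thread:
 *
 *	sigset_t set, pend;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);
 *	sigpending(&pend);
 *	// sigismember(&pend, SIGUSR1) now returns 1
 */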
enum siginfo_layout siginfo_layout(int sig, int si_code)
{
	enum siginfo_layout layout = SIL_KILL;
	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
		static const struct {
			unsigned char limit, layout;
		} filter[] = {
			[SIGILL]  = { NSIGILL,  SIL_FAULT },
			[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
			[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
			[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
			[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#if defined(SIGEMT) && defined(NSIGEMT)
			[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
#endif
			[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
			[SIGPOLL] = { NSIGPOLL, SIL_POLL },
#ifdef __ARCH_SIGSYS
			[SIGSYS]  = { NSIGSYS,  SIL_SYS },
#endif
		};
		if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit))
			layout = filter[sig].layout;
		else if (si_code <= NSIGPOLL)
			layout = SIL_POLL;
	} else {
		if (si_code == SI_TIMER)
			layout = SIL_TIMER;
		else if (si_code == SI_SIGIO)
			layout = SIL_POLL;
		else if (si_code < 0)
			layout = SIL_RT;
		/* Tests to support buggy kernel ABIs */
#ifdef TRAP_FIXME
		if ((sig == SIGTRAP) && (si_code == TRAP_FIXME))
			layout = SIL_FAULT;
#endif
#ifdef FPE_FIXME
		if ((sig == SIGFPE) && (si_code == FPE_FIXME))
			layout = SIL_FAULT;
#endif
	}
	return layout;
}
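
/*
 * Editor's note, worked examples for the classification above:
 *
 *	siginfo_layout(SIGCHLD, CLD_EXITED)  == SIL_CHLD  (si_code in 1..NSIGCHLD)
 *	siginfo_layout(SIGSEGV, SEGV_MAPERR) == SIL_FAULT
 *	siginfo_layout(SIGUSR1, SI_QUEUE)    == SIL_RT    (si_code < 0)
 *	siginfo_layout(SIGTERM, SI_USER)     == SIL_KILL  (the default)
 *
 * The layout chosen here decides which union members
 * copy_siginfo_to_user() transfers below.
 */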
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user(from->si_code, &to->si_code);
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case SIL_TIMER:
		/* Unreached SI_TIMER is negative */
		break;
	case SIL_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case SIL_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
			err |= __put_user(from->si_lower, &to->si_lower);
			err |= __put_user(from->si_upper, &to->si_upper);
		}
#endif
#ifdef SEGV_PKUERR
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
			err |= __put_user(from->si_pkey, &to->si_pkey);
#endif
		break;
	case SIL_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case SIL_RT:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case SIL_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	}
	return err;
}

#endif
/**
 *  do_sigtimedwait - wait for queued signals specified in @which
 *  @which: queued signals to wait for
 *  @info: if non-null, the signal's siginfo is returned here
 *  @ts: upper bound on process time suspension
 */
static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
		    const struct timespec *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * in while we are sleeping so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}
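
/*
 * Editor's sketch (user space, illustrative): the -EAGAIN/-EINTR returns
 * above surface through sigtimedwait(2) as errno values:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGALRM);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// must block it first
 *	if (sigtimedwait(&set, &si, &ts) == -1 && errno == EAGAIN)
 *		fprintf(stderr, "timed out\n");
 */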
/**
 *  sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 *  @uthese: queued signals to wait for
 *  @uinfo: if non-null, the signal's siginfo is returned here
 *  @uts: upper bound on process time suspension
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
{
	compat_sigset_t s32;
	sigset_t s;
	struct timespec t;
	siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&s32, uthese, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&s, &s32);

	if (uts) {
		if (compat_get_timespec(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
/**
 *  sys_kill - send a signal to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return kill_something_info(sig, &info, pid);
}
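
/*
 * Editor's sketch (user space, illustrative): the SI_USER siginfo built
 * above is what an SA_SIGINFO handler observes for kill(2):
 *
 *	static void handler(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		// si->si_code == SI_USER; si_pid/si_uid name the sender
 *	}
 *
 *	struct sigaction sa = { .sa_sigaction = handler,
 *				.sa_flags = SA_SIGINFO };
 *	sigaction(SIGUSR1, &sa, NULL);
 *	kill(getpid(), SIGUSR1);
 */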
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info = {};

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process. This method solves
 *  the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
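
/*
 * Editor's note: tgkill(2) historically had no libc wrapper, so it is
 * commonly invoked through syscall(2); likewise gettid() on older glibc.
 * Illustrative only:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid  = syscall(SYS_gettid);
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 *
 * Prefer tgkill() over the obsolete tkill(): the extra @tgid check above
 * guards against the thread ID being recycled by another process.
 */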
static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, info, pid);
}

/**
 *  sys_rt_sigqueueinfo - queue a signal and accompanying data to a process
 *  @pid: the PID of the target process
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;
	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;
	return do_rt_sigqueueinfo(pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
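
/*
 * Editor's sketch (user space, illustrative): glibc's sigqueue(3) fills
 * in a siginfo with si_code = SI_QUEUE (negative), which is why the
 * EPERM check in do_rt_sigqueueinfo() lets ordinary callers through:
 *
 *	union sigval v = { .sival_int = 42 };
 *
 *	if (sigqueue(target_pid, SIGUSR1, v) == -1)	// target_pid: assumed
 *		perror("sigqueue");
 *	// an SA_SIGINFO handler in the target reads si->si_value.sival_int
 */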
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info = {};

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
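
/*
 * Editor's sketch: kthreads normally reach this through the allow_signal()
 * and disallow_signal() helpers in <linux/signal.h>.  A rough usage
 * pattern (illustrative; my_thread_fn is hypothetical):
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		allow_signal(SIGTERM);
 *		while (!kthread_should_stop()) {
 *			schedule_timeout_interruptible(HZ);
 *			if (signal_pending(current))
 *				flush_signals(current);	// or break out
 *		}
 *		return 0;
 *	}
 */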
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
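
/*
 * Editor's sketch (user space, illustrative): the POSIX 3.3.1.3 discard
 * rule implemented above is observable from user space:
 *
 *	sigset_t set, pend;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);			// now pending and blocked
 *	signal(SIGUSR1, SIG_IGN);	// pending instance is discarded
 *	sigpending(&pend);		// SIGUSR1 is no longer reported
 */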
static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < MINSIGSTKSZ))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}

SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	stack_t new, old;
	int err;
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer());
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}
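
/*
 * Editor's sketch (user space, illustrative): handling stack-overflow
 * SIGSEGV requires an alternate stack, which is exactly what the ss_size
 * and SS_DISABLE checks above police:
 *
 *	stack_t ss;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);	// < MINSIGSTKSZ would get ENOMEM
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { .sa_handler = segv_handler,	// assumed
 *				.sa_flags = SA_ONSTACK };
 *	sigaction(SIGSEGV, &sa, NULL);
 */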
int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer());
	if (ret >= 0 && uoss_ptr)  {
		compat_stack_t old;
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = compat_sys_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 *  sys_sigpending - examine pending signals
 *  @set: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	int err = do_sigpending(&set, sizeof(set.sig[0]));
	if (!err)
		err = put_user(set.sig[0], set32);
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)set32, sizeof(*set32));
#endif
}
#endif

#endif
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal whose action is to be changed
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */
#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif
#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
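
/*
 * Editor's sketch (user space, illustrative): the classic race-free wait
 * built on this primitive.  Block the signal, test the predicate, then
 * atomically restore the old mask and sleep; child_exited is a
 * hypothetical volatile sig_atomic_t flag set by a SIGCHLD handler:
 *
 *	sigset_t block, prev;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &prev);
 *	while (!child_exited)
 *		sigsuspend(&prev);	// unblock + sleep, atomically
 *	sigprocmask(SIG_SETMASK, &prev, NULL);
 */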
/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		     != offsetof(struct siginfo, _sifields._pad));

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */