1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/kernel/signal.c
5 * Copyright (C) 1991, 1992 Linus Torvalds
7 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
9 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
10 * Changes to use preallocated sigqueue structures
11 * to allow signals to be sent reliably.
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
25 #include <linux/proc_fs.h>
26 #include <linux/tty.h>
27 #include <linux/binfmts.h>
28 #include <linux/coredump.h>
29 #include <linux/security.h>
30 #include <linux/syscalls.h>
31 #include <linux/ptrace.h>
32 #include <linux/signal.h>
33 #include <linux/signalfd.h>
34 #include <linux/ratelimit.h>
35 #include <linux/task_work.h>
36 #include <linux/capability.h>
37 #include <linux/freezer.h>
38 #include <linux/pid_namespace.h>
39 #include <linux/nsproxy.h>
40 #include <linux/user_namespace.h>
41 #include <linux/uprobes.h>
42 #include <linux/compat.h>
43 #include <linux/cn_proc.h>
44 #include <linux/compiler.h>
45 #include <linux/posix-timers.h>
46 #include <linux/cgroup.h>
47 #include <linux/audit.h>
49 #define CREATE_TRACE_POINTS
50 #include <trace/events/signal.h>
52 #include <asm/param.h>
53 #include <linux/uaccess.h>
54 #include <asm/unistd.h>
55 #include <asm/siginfo.h>
56 #include <asm/cacheflush.h>
57 #include <asm/syscall.h> /* for syscall_get_* */
60 * SLAB caches for signal bits.
63 static struct kmem_cache *sigqueue_cachep;
65 int print_fatal_signals __read_mostly;
67 static void __user *sig_handler(struct task_struct *t, int sig)
69 return t->sighand->action[sig - 1].sa.sa_handler;
72 static inline bool sig_handler_ignored(void __user *handler, int sig)
74 /* Is it explicitly or implicitly ignored? */
75 return handler == SIG_IGN ||
76 (handler == SIG_DFL && sig_kernel_ignore(sig));
79 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
83 handler = sig_handler(t, sig);
85 /* SIGKILL and SIGSTOP may not be sent to the global init */
86 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
89 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
90 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
93 /* Only allow kernel generated signals to this kthread */
94 if (unlikely((t->flags & PF_KTHREAD) &&
95 (handler == SIG_KTHREAD_KERNEL) && !force))
98 return sig_handler_ignored(handler, sig);
101 static bool sig_ignored(struct task_struct *t, int sig, bool force)
104 * Blocked signals are never ignored, since the
105 * signal handler may change by the time it is unblocked.
108 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
112 * Tracers may want to know about even ignored signals, unless it
113 * is SIGKILL, which can't be reported anyway but can be ignored
114 * by a SIGNAL_UNKILLABLE task.
116 if (t->ptrace && sig != SIGKILL)
119 return sig_task_ignored(t, sig, force);
123 * Re-calculate pending state from the set of locally pending
124 * signals, globally pending signals, and blocked signals.
126 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
131 switch (_NSIG_WORDS) {
133 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
134 ready |= signal->sig[i] &~ blocked->sig[i];
137 case 4: ready = signal->sig[3] &~ blocked->sig[3];
138 ready |= signal->sig[2] &~ blocked->sig[2];
139 ready |= signal->sig[1] &~ blocked->sig[1];
140 ready |= signal->sig[0] &~ blocked->sig[0];
143 case 2: ready = signal->sig[1] &~ blocked->sig[1];
144 ready |= signal->sig[0] &~ blocked->sig[0];
147 case 1: ready = signal->sig[0] &~ blocked->sig[0];
152 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
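/*
 * Illustrative sketch, not a code path in this file: for the common
 * _NSIG_WORDS == 1 configuration the PENDING() test above reduces to a
 * single AND-NOT over the first word, e.g.
 *
 *	ready = t->pending.signal.sig[0] & ~t->blocked.sig[0];
 *
 * so a signal only counts as pending while it is both raised and unblocked.
 */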
154 static bool recalc_sigpending_tsk(struct task_struct *t)
156 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
157 PENDING(&t->pending, &t->blocked) ||
158 PENDING(&t->signal->shared_pending, &t->blocked) ||
159 cgroup_task_frozen(t)) {
160 set_tsk_thread_flag(t, TIF_SIGPENDING);
165 * We must never clear the flag in another thread, or in current
166 * when it's possible the current syscall is returning -ERESTART*.
167 * So we don't clear it here; only callers that know it is safe clear it.
173 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
174 * This is superfluous when called on current, the wakeup is a harmless no-op.
176 void recalc_sigpending_and_wake(struct task_struct *t)
178 if (recalc_sigpending_tsk(t))
179 signal_wake_up(t, 0);
182 void recalc_sigpending(void)
184 if (!recalc_sigpending_tsk(current) && !freezing(current))
185 clear_thread_flag(TIF_SIGPENDING);
188 EXPORT_SYMBOL(recalc_sigpending);
190 void calculate_sigpending(void)
192 /* Have any signals or users of TIF_SIGPENDING been delayed until after fork? */
195 spin_lock_irq(&current->sighand->siglock);
196 set_tsk_thread_flag(current, TIF_SIGPENDING);
198 spin_unlock_irq(&current->sighand->siglock);
201 /* Given the mask, find the first available signal that should be serviced. */
203 #define SYNCHRONOUS_MASK \
204 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
205 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
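/*
 * Illustrative example (an assumption, not a code path here): if SIGUSR1
 * and SIGSEGV are both pending and unblocked, the SYNCHRONOUS_MASK
 * filtering above makes next_signal() pick SIGSEGV first, so synchronous
 * fault signals are never starved by ordinary asynchronous signals in the
 * same word.
 */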
207 int next_signal(struct sigpending *pending, sigset_t *mask)
209 unsigned long i, *s, *m, x;
212 s = pending->signal.sig;
216 * Handle the first word specially: it contains the
217 * synchronous signals that need to be dequeued first.
221 if (x & SYNCHRONOUS_MASK)
222 x &= SYNCHRONOUS_MASK;
227 switch (_NSIG_WORDS) {
229 for (i = 1; i < _NSIG_WORDS; ++i) {
233 sig = ffz(~x) + i*_NSIG_BPW + 1;
242 sig = ffz(~x) + _NSIG_BPW + 1;
253 static inline void print_dropped_signal(int sig)
255 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
257 if (!print_fatal_signals)
260 if (!__ratelimit(&ratelimit_state))
263 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
264 current->comm, current->pid, sig);
268 * task_set_jobctl_pending - set jobctl pending bits
270 * @mask: pending bits to set
272 * Set @mask in @task->jobctl. @mask must be a subset of
273 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
274 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
275 * cleared. If @task is already being killed or exiting, this function becomes a no-op.
279 * Must be called with @task->sighand->siglock held.
282 * %true if @mask is set, %false if made noop because @task was dying.
284 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
286 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
287 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
288 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
290 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
293 if (mask & JOBCTL_STOP_SIGMASK)
294 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
296 task->jobctl |= mask;
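/*
 * Usage sketch, an assumption based on do_signal_stop() later in this
 * file: a caller initiating a group stop encodes the stop signal number
 * in the low JOBCTL_STOP_SIGMASK bits and sets the pending/consume bits
 * in a single call:
 *
 *	task_set_jobctl_pending(t, signr | JOBCTL_STOP_PENDING |
 *				   JOBCTL_STOP_CONSUME);
 */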
301 * task_clear_jobctl_trapping - clear jobctl trapping bit
304 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
305 * Clear it and wake up the ptracer. Note that we don't need any further
306 * locking. @task->siglock guarantees that @task->parent points to the ptracer.
310 * Must be called with @task->sighand->siglock held.
312 void task_clear_jobctl_trapping(struct task_struct *task)
314 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
315 task->jobctl &= ~JOBCTL_TRAPPING;
316 smp_mb(); /* advised by wake_up_bit() */
317 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
322 * task_clear_jobctl_pending - clear jobctl pending bits
324 * @mask: pending bits to clear
326 * Clear @mask from @task->jobctl. @mask must be subset of
327 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
328 * STOP bits are cleared together.
330 * If clearing of @mask leaves no stop or trap pending, this function calls
331 * task_clear_jobctl_trapping().
334 * Must be called with @task->sighand->siglock held.
336 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
338 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
340 if (mask & JOBCTL_STOP_PENDING)
341 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
343 task->jobctl &= ~mask;
345 if (!(task->jobctl & JOBCTL_PENDING_MASK))
346 task_clear_jobctl_trapping(task);
350 * task_participate_group_stop - participate in a group stop
351 * @task: task participating in a group stop
353 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
354 * Group stop states are cleared and the group stop count is consumed if
355 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
356 * stop, the appropriate `SIGNAL_*` flags are set.
359 * Must be called with @task->sighand->siglock held.
362 * %true if group stop completion should be notified to the parent, %false
365 static bool task_participate_group_stop(struct task_struct *task)
367 struct signal_struct *sig = task->signal;
368 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
370 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
372 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
377 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
378 sig->group_stop_count--;
381 * Tell the caller to notify completion iff we are entering into a
382 * fresh group stop. Read comment in do_signal_stop() for details.
384 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
385 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
391 void task_join_group_stop(struct task_struct *task)
393 unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
394 struct signal_struct *sig = current->signal;
396 if (sig->group_stop_count) {
397 sig->group_stop_count++;
398 mask |= JOBCTL_STOP_CONSUME;
399 } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
402 /* Have the new thread join an on-going signal group stop */
403 task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
407 * allocate a new signal queue record
408 * - this may be called without locks if and only if t == current, otherwise an
409 * appropriate lock must be held to stop the target task from exiting
411 static struct sigqueue *
412 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
413 int override_rlimit, const unsigned int sigqueue_flags)
415 struct sigqueue *q = NULL;
416 struct ucounts *ucounts = NULL;
420 * Protect access to @t credentials. This can go away when all
421 * callers hold rcu read lock.
423 * NOTE! A pending signal will hold on to the user refcount,
424 * and we get/put the refcount only when the sigpending count
425 * changes from/to zero.
428 ucounts = task_ucounts(t);
429 sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
434 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
435 q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
437 print_dropped_signal(sig);
440 if (unlikely(q == NULL)) {
441 dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
443 INIT_LIST_HEAD(&q->list);
444 q->flags = sigqueue_flags;
445 q->ucounts = ucounts;
450 static void __sigqueue_free(struct sigqueue *q)
452 if (q->flags & SIGQUEUE_PREALLOC)
455 dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
458 kmem_cache_free(sigqueue_cachep, q);
461 void flush_sigqueue(struct sigpending *queue)
465 sigemptyset(&queue->signal);
466 while (!list_empty(&queue->list)) {
467 q = list_entry(queue->list.next, struct sigqueue, list);
468 list_del_init(&q->list);
474 * Flush all pending signals for this kthread.
476 void flush_signals(struct task_struct *t)
480 spin_lock_irqsave(&t->sighand->siglock, flags);
481 clear_tsk_thread_flag(t, TIF_SIGPENDING);
482 flush_sigqueue(&t->pending);
483 flush_sigqueue(&t->signal->shared_pending);
484 spin_unlock_irqrestore(&t->sighand->siglock, flags);
486 EXPORT_SYMBOL(flush_signals);
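/*
 * Usage sketch, assuming the usual kthread pattern (this is not a call
 * site in this file): a kernel thread that opted in to a signal with
 * allow_signal() can drop anything still queued before carrying on:
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			flush_signals(current);
 *	}
 */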
488 #ifdef CONFIG_POSIX_TIMERS
489 static void __flush_itimer_signals(struct sigpending *pending)
491 sigset_t signal, retain;
492 struct sigqueue *q, *n;
494 signal = pending->signal;
495 sigemptyset(&retain);
497 list_for_each_entry_safe(q, n, &pending->list, list) {
498 int sig = q->info.si_signo;
500 if (likely(q->info.si_code != SI_TIMER)) {
501 sigaddset(&retain, sig);
503 sigdelset(&signal, sig);
504 list_del_init(&q->list);
509 sigorsets(&pending->signal, &signal, &retain);
512 void flush_itimer_signals(void)
514 struct task_struct *tsk = current;
517 spin_lock_irqsave(&tsk->sighand->siglock, flags);
518 __flush_itimer_signals(&tsk->pending);
519 __flush_itimer_signals(&tsk->signal->shared_pending);
520 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
524 void ignore_signals(struct task_struct *t)
528 for (i = 0; i < _NSIG; ++i)
529 t->sighand->action[i].sa.sa_handler = SIG_IGN;
535 * Flush all handlers for a task.
539 flush_signal_handlers(struct task_struct *t, int force_default)
542 struct k_sigaction *ka = &t->sighand->action[0];
543 for (i = _NSIG ; i != 0 ; i--) {
544 if (force_default || ka->sa.sa_handler != SIG_IGN)
545 ka->sa.sa_handler = SIG_DFL;
547 #ifdef __ARCH_HAS_SA_RESTORER
548 ka->sa.sa_restorer = NULL;
550 sigemptyset(&ka->sa.sa_mask);
555 bool unhandled_signal(struct task_struct *tsk, int sig)
557 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
558 if (is_global_init(tsk))
561 if (handler != SIG_IGN && handler != SIG_DFL)
564 /* If dying, we handle all new signals by ignoring them */
565 if (fatal_signal_pending(tsk))
568 /* if ptraced, let the tracer determine */
572 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
575 struct sigqueue *q, *first = NULL;
578 * Collect the siginfo appropriate to this signal. Check if
579 * there is another siginfo for the same signal.
581 list_for_each_entry(q, &list->list, list) {
582 if (q->info.si_signo == sig) {
589 sigdelset(&list->signal, sig);
593 list_del_init(&first->list);
594 copy_siginfo(info, &first->info);
597 (first->flags & SIGQUEUE_PREALLOC) &&
598 (info->si_code == SI_TIMER) &&
599 (info->si_sys_private);
601 __sigqueue_free(first);
604 * Ok, it wasn't in the queue. This must be
605 * a fast-pathed signal or we must have been
606 * out of queue space. So zero out the info.
609 info->si_signo = sig;
611 info->si_code = SI_USER;
617 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
618 kernel_siginfo_t *info, bool *resched_timer)
620 int sig = next_signal(pending, mask);
623 collect_signal(sig, pending, info, resched_timer);
628 * Dequeue a signal and return the element to the caller, which is
629 * expected to free it.
631 * All callers have to hold the siglock.
633 int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
634 kernel_siginfo_t *info, enum pid_type *type)
636 bool resched_timer = false;
639 /* We only dequeue private signals from ourselves, we don't let
640 * signalfd steal them
643 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
645 *type = PIDTYPE_TGID;
646 signr = __dequeue_signal(&tsk->signal->shared_pending,
647 mask, info, &resched_timer);
648 #ifdef CONFIG_POSIX_TIMERS
652 * itimers are process shared and we restart periodic
653 * itimers in the signal delivery path to prevent DoS
654 * attacks in the high resolution timer case. This is
655 * compliant with the old way of self-restarting
656 * itimers, as the SIGALRM is a legacy signal and only
657 * queued once. Changing the restart behaviour to
658 * restart the timer in the signal dequeue path is
659 * reducing the timer noise on heavy loaded !highres systems too.
662 if (unlikely(signr == SIGALRM)) {
663 struct hrtimer *tmr = &tsk->signal->real_timer;
665 if (!hrtimer_is_queued(tmr) &&
666 tsk->signal->it_real_incr != 0) {
667 hrtimer_forward(tmr, tmr->base->get_time(),
668 tsk->signal->it_real_incr);
669 hrtimer_restart(tmr);
679 if (unlikely(sig_kernel_stop(signr))) {
681 * Set a marker that we have dequeued a stop signal. Our
682 * caller might release the siglock and then the pending
683 * stop signal it is about to process is no longer in the
684 * pending bitmasks, but must still be cleared by a SIGCONT
685 * (and overruled by a SIGKILL). So those cases clear this
686 * shared flag after we've set it. Note that this flag may
687 * remain set after the signal we return is ignored or
688 * handled. That doesn't matter because its only purpose
689 * is to alert stop-signal processing code when another
690 * processor has come along and cleared the flag.
692 current->jobctl |= JOBCTL_STOP_DEQUEUED;
694 #ifdef CONFIG_POSIX_TIMERS
697 * Release the siglock to ensure proper locking order
698 * of timer locks outside of siglocks. Note, we leave
699 * irqs disabled here, since the posix-timers code is
700 * about to disable them again anyway.
702 spin_unlock(&tsk->sighand->siglock);
703 posixtimer_rearm(info);
704 spin_lock(&tsk->sighand->siglock);
706 /* Don't expose the si_sys_private value to userspace */
707 info->si_sys_private = 0;
712 EXPORT_SYMBOL_GPL(dequeue_signal);
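/*
 * Usage sketch, an assumption modelled on callers such as get_signal():
 * the caller takes ->siglock, dequeues, and only then acts on the result:
 *
 *	enum pid_type type;
 *	kernel_siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info, &type);
 *	spin_unlock_irq(&current->sighand->siglock);
 *	if (signr)
 *		... act on info ...
 */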
714 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
716 struct task_struct *tsk = current;
717 struct sigpending *pending = &tsk->pending;
718 struct sigqueue *q, *sync = NULL;
721 * Might a synchronous signal be in the queue?
723 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
727 * Return the first synchronous signal in the queue.
729 list_for_each_entry(q, &pending->list, list) {
730 /* Synchronous signals have a positive si_code */
731 if ((q->info.si_code > SI_USER) &&
732 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
740 * Check if there is another siginfo for the same signal.
742 list_for_each_entry_continue(q, &pending->list, list) {
743 if (q->info.si_signo == sync->info.si_signo)
747 sigdelset(&pending->signal, sync->info.si_signo);
750 list_del_init(&sync->list);
751 copy_siginfo(info, &sync->info);
752 __sigqueue_free(sync);
753 return info->si_signo;
757 * Tell a process that it has a new active signal..
759 * NOTE! we rely on the previous spin_lock to
760 * lock interrupts for us! We can only be called with
761 * "siglock" held, and the local interrupt must
762 * have been disabled when that got acquired!
764 * No need to set need_resched since signal event passing
765 * goes through ->blocked
767 void signal_wake_up_state(struct task_struct *t, unsigned int state)
769 lockdep_assert_held(&t->sighand->siglock);
771 set_tsk_thread_flag(t, TIF_SIGPENDING);
774 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
775 * case. We don't check t->state here because there is a race with it
776 * executing on another processor and just now entering stopped state.
777 * By using wake_up_state, we ensure the process will wake up and
778 * handle its death signal.
780 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
785 * Remove signals in mask from the pending set and queue.
788 * All callers must be holding the siglock.
790 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
792 struct sigqueue *q, *n;
795 sigandsets(&m, mask, &s->signal);
796 if (sigisemptyset(&m))
799 sigandnsets(&s->signal, &s->signal, mask);
800 list_for_each_entry_safe(q, n, &s->list, list) {
801 if (sigismember(mask, q->info.si_signo)) {
802 list_del_init(&q->list);
808 static inline int is_si_special(const struct kernel_siginfo *info)
810 return info <= SEND_SIG_PRIV;
813 static inline bool si_fromuser(const struct kernel_siginfo *info)
815 return info == SEND_SIG_NOINFO ||
816 (!is_si_special(info) && SI_FROMUSER(info));
820 * called with RCU read lock from check_kill_permission()
822 static bool kill_ok_by_cred(struct task_struct *t)
824 const struct cred *cred = current_cred();
825 const struct cred *tcred = __task_cred(t);
827 return uid_eq(cred->euid, tcred->suid) ||
828 uid_eq(cred->euid, tcred->uid) ||
829 uid_eq(cred->uid, tcred->suid) ||
830 uid_eq(cred->uid, tcred->uid) ||
831 ns_capable(tcred->user_ns, CAP_KILL);
835 * Bad permissions for sending the signal
836 * - the caller must hold the RCU read lock
838 static int check_kill_permission(int sig, struct kernel_siginfo *info,
839 struct task_struct *t)
844 if (!valid_signal(sig))
847 if (!si_fromuser(info))
850 error = audit_signal_info(sig, t); /* Let audit system see the signal */
854 if (!same_thread_group(current, t) &&
855 !kill_ok_by_cred(t)) {
858 sid = task_session(t);
860 * We don't return the error if sid == NULL. The
861 * task was unhashed, the caller must notice this.
863 if (!sid || sid == task_session(current))
871 return security_task_kill(t, info, sig, NULL);
875 * ptrace_trap_notify - schedule trap to notify ptracer
876 * @t: tracee wanting to notify tracer
878 * This function schedules sticky ptrace trap which is cleared on the next
879 * TRAP_STOP to notify ptracer of an event. @t must have been seized by the ptracer.
882 * If @t is running, STOP trap will be taken. If trapped for STOP and
883 * ptracer is listening for events, tracee is woken up so that it can
884 * re-trap for the new event. If trapped otherwise, STOP trap will be
885 * eventually taken without returning to userland after the existing traps
886 * are finished by PTRACE_CONT.
889 * Must be called with @t->sighand->siglock held.
891 static void ptrace_trap_notify(struct task_struct *t)
893 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
894 lockdep_assert_held(&t->sighand->siglock);
896 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
897 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
901 * Handle magic process-wide effects of stop/continue signals. Unlike
902 * the signal actions, these happen immediately at signal-generation
903 * time regardless of blocking, ignoring, or handling. This does the
904 * actual continuing for SIGCONT, but not the actual stopping for stop
905 * signals. The process stop is done as a signal action for SIG_DFL.
907 * Returns true if the signal should be actually delivered, otherwise
908 * it should be dropped.
910 static bool prepare_signal(int sig, struct task_struct *p, bool force)
912 struct signal_struct *signal = p->signal;
913 struct task_struct *t;
916 if (signal->flags & SIGNAL_GROUP_EXIT) {
917 if (signal->core_state)
918 return sig == SIGKILL;
920 * The process is in the middle of dying, drop the signal.
923 } else if (sig_kernel_stop(sig)) {
925 * This is a stop signal. Remove SIGCONT from all queues.
927 siginitset(&flush, sigmask(SIGCONT));
928 flush_sigqueue_mask(&flush, &signal->shared_pending);
929 for_each_thread(p, t)
930 flush_sigqueue_mask(&flush, &t->pending);
931 } else if (sig == SIGCONT) {
934 * Remove all stop signals from all queues, wake all threads.
936 siginitset(&flush, SIG_KERNEL_STOP_MASK);
937 flush_sigqueue_mask(&flush, &signal->shared_pending);
938 for_each_thread(p, t) {
939 flush_sigqueue_mask(&flush, &t->pending);
940 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
941 if (likely(!(t->ptrace & PT_SEIZED))) {
942 t->jobctl &= ~JOBCTL_STOPPED;
943 wake_up_state(t, __TASK_STOPPED);
945 ptrace_trap_notify(t);
949 * Notify the parent with CLD_CONTINUED if we were stopped.
951 * If we were in the middle of a group stop, we pretend it
952 * was already finished, and then continued. Since SIGCHLD
953 * doesn't queue we report only CLD_STOPPED, as if the next
954 * CLD_CONTINUED was dropped.
957 if (signal->flags & SIGNAL_STOP_STOPPED)
958 why |= SIGNAL_CLD_CONTINUED;
959 else if (signal->group_stop_count)
960 why |= SIGNAL_CLD_STOPPED;
964 * The first thread which returns from do_signal_stop()
965 * will take ->siglock, notice SIGNAL_CLD_MASK, and
966 * notify its parent. See get_signal().
968 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
969 signal->group_stop_count = 0;
970 signal->group_exit_code = 0;
974 return !sig_ignored(p, sig, force);
978 * Test if P wants to take SIG. After we've checked all threads with this,
979 * it's equivalent to finding no threads not blocking SIG. Any threads not
980 * blocking SIG were ruled out because they are not running and already
981 * have pending signals. Such threads will dequeue from the shared queue
982 * as soon as they're available, so putting the signal on the shared queue
983 * will be equivalent to sending it to one such thread.
985 static inline bool wants_signal(int sig, struct task_struct *p)
987 if (sigismember(&p->blocked, sig))
990 if (p->flags & PF_EXITING)
996 if (task_is_stopped_or_traced(p))
999 return task_curr(p) || !task_sigpending(p);
1002 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
1004 struct signal_struct *signal = p->signal;
1005 struct task_struct *t;
1008 * Now find a thread we can wake up to take the signal off the queue.
1010 * If the main thread wants the signal, it gets first crack.
1011 * Probably the least surprising to the average bear.
1013 if (wants_signal(sig, p))
1015 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1017 * There is just one thread and it does not need to be woken.
1018 * It will dequeue unblocked signals before it runs again.
1023 * Otherwise try to find a suitable thread.
1025 t = signal->curr_target;
1026 while (!wants_signal(sig, t)) {
1028 if (t == signal->curr_target)
1030 * No thread needs to be woken.
1031 * Any eligible threads will see
1032 * the signal in the queue soon.
1036 signal->curr_target = t;
1040 * Found a killable thread. If the signal will be fatal,
1041 * then start taking the whole group down immediately.
1043 if (sig_fatal(p, sig) &&
1044 (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1045 !sigismember(&t->real_blocked, sig) &&
1046 (sig == SIGKILL || !p->ptrace)) {
1048 * This signal will be fatal to the whole group.
1050 if (!sig_kernel_coredump(sig)) {
1052 * Start a group exit and wake everybody up.
1053 * This way we don't have other threads
1054 * running and doing things after a slower
1055 * thread has the fatal signal pending.
1057 signal->flags = SIGNAL_GROUP_EXIT;
1058 signal->group_exit_code = sig;
1059 signal->group_stop_count = 0;
1062 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1063 sigaddset(&t->pending.signal, SIGKILL);
1064 signal_wake_up(t, 1);
1065 } while_each_thread(p, t);
1071 * The signal is already in the shared-pending queue.
1072 * Tell the chosen thread to wake up and dequeue it.
1074 signal_wake_up(t, sig == SIGKILL);
1078 static inline bool legacy_queue(struct sigpending *signals, int sig)
1080 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
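/*
 * Illustrative consequence (not a code path here): legacy_queue() is why
 * two back-to-back kill(pid, SIGUSR1) calls may be observed by the target
 * as a single SIGUSR1, while real-time signals (>= SIGRTMIN) are queued
 * once per send, each with its own siginfo.
 */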
1083 static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1084 struct task_struct *t, enum pid_type type, bool force)
1086 struct sigpending *pending;
1088 int override_rlimit;
1089 int ret = 0, result;
1091 lockdep_assert_held(&t->sighand->siglock);
1093 result = TRACE_SIGNAL_IGNORED;
1094 if (!prepare_signal(sig, t, force))
1097 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1099 * Short-circuit ignored signals and support queuing
1100 * exactly one non-rt signal, so that we can get more
1101 * detailed information about the cause of the signal.
1103 result = TRACE_SIGNAL_ALREADY_PENDING;
1104 if (legacy_queue(pending, sig))
1107 result = TRACE_SIGNAL_DELIVERED;
1109 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1111 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1115 * Real-time signals must be queued if sent by sigqueue, or
1116 * some other real-time mechanism. It is implementation
1117 * defined whether kill() does so. We attempt to do so, on
1118 * the principle of least surprise, but since kill is not
1119 * allowed to fail with EAGAIN when low on memory we just
1120 * make sure at least one signal gets delivered and don't
1121 * pass on the info struct.
1124 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1126 override_rlimit = 0;
1128 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1131 list_add_tail(&q->list, &pending->list);
1132 switch ((unsigned long) info) {
1133 case (unsigned long) SEND_SIG_NOINFO:
1134 clear_siginfo(&q->info);
1135 q->info.si_signo = sig;
1136 q->info.si_errno = 0;
1137 q->info.si_code = SI_USER;
1138 q->info.si_pid = task_tgid_nr_ns(current,
1139 task_active_pid_ns(t));
1142 from_kuid_munged(task_cred_xxx(t, user_ns),
1146 case (unsigned long) SEND_SIG_PRIV:
1147 clear_siginfo(&q->info);
1148 q->info.si_signo = sig;
1149 q->info.si_errno = 0;
1150 q->info.si_code = SI_KERNEL;
1155 copy_siginfo(&q->info, info);
1158 } else if (!is_si_special(info) &&
1159 sig >= SIGRTMIN && info->si_code != SI_USER) {
1161 * Queue overflow, abort. We may abort if the
1162 * signal was rt and sent by user using something
1163 * other than kill().
1165 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1170 * This is a silent loss of information. We still
1171 * send the signal, but the *info bits are lost.
1173 result = TRACE_SIGNAL_LOSE_INFO;
1177 signalfd_notify(t, sig);
1178 sigaddset(&pending->signal, sig);
1180 /* Let multiprocess signals appear after on-going forks */
1181 if (type > PIDTYPE_TGID) {
1182 struct multiprocess_signals *delayed;
1183 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1184 sigset_t *signal = &delayed->signal;
1185 /* Can't queue both a stop and a continue signal */
1187 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1188 else if (sig_kernel_stop(sig))
1189 sigdelset(signal, SIGCONT);
1190 sigaddset(signal, sig);
1194 complete_signal(sig, t, type);
1196 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1200 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1203 switch (siginfo_layout(info->si_signo, info->si_code)) {
1212 case SIL_FAULT_TRAPNO:
1213 case SIL_FAULT_MCEERR:
1214 case SIL_FAULT_BNDERR:
1215 case SIL_FAULT_PKUERR:
1216 case SIL_FAULT_PERF_EVENT:
1224 int send_signal_locked(int sig, struct kernel_siginfo *info,
1225 struct task_struct *t, enum pid_type type)
1227 /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1230 if (info == SEND_SIG_NOINFO) {
1231 /* Force if sent from an ancestor pid namespace */
1232 force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1233 } else if (info == SEND_SIG_PRIV) {
1234 /* Don't ignore kernel generated signals */
1236 } else if (has_si_pid_and_uid(info)) {
1237 /* SIGKILL and SIGSTOP are special or have ids */
1238 struct user_namespace *t_user_ns;
1241 t_user_ns = task_cred_xxx(t, user_ns);
1242 if (current_user_ns() != t_user_ns) {
1243 kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1244 info->si_uid = from_kuid_munged(t_user_ns, uid);
1248 /* A kernel generated signal? */
1249 force = (info->si_code == SI_KERNEL);
1251 /* From an ancestor pid namespace? */
1252 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1257 return __send_signal_locked(sig, info, t, type, force);
1260 static void print_fatal_signal(int signr)
1262 struct pt_regs *regs = signal_pt_regs();
1263 pr_info("potentially unexpected fatal signal %d.\n", signr);
1265 #if defined(__i386__) && !defined(__arch_um__)
1266 pr_info("code at %08lx: ", regs->ip);
1269 for (i = 0; i < 16; i++) {
1272 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1274 pr_cont("%02x ", insn);
1284 static int __init setup_print_fatal_signals(char *str)
1286 get_option (&str, &print_fatal_signals);
1291 __setup("print-fatal-signals=", setup_print_fatal_signals);
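/*
 * Usage note (an assumption about the usual knobs, not defined here):
 * fatal-signal reporting is enabled either with the boot parameter
 * "print-fatal-signals=1" handled above or, at run time, via the
 * kernel.print-fatal-signals sysctl.
 */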
1293 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1296 unsigned long flags;
1299 if (lock_task_sighand(p, &flags)) {
1300 ret = send_signal_locked(sig, info, p, type);
1301 unlock_task_sighand(p, &flags);
1308 HANDLER_CURRENT, /* If reachable use the current handler */
1309 HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1310 HANDLER_EXIT, /* Only visible as the process exit code */
1314 * Force a signal that the process can't ignore: if necessary
1315 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1317 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1318 * since we do not want to have a signal handler that was blocked
1319 * be invoked when user space had explicitly blocked it.
1321 * We don't want to have recursive SIGSEGV's etc, for example,
1322 * that is why we also clear SIGNAL_UNKILLABLE.
1325 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1326 enum sig_handler handler)
1328 unsigned long int flags;
1329 int ret, blocked, ignored;
1330 struct k_sigaction *action;
1331 int sig = info->si_signo;
1333 spin_lock_irqsave(&t->sighand->siglock, flags);
1334 action = &t->sighand->action[sig-1];
1335 ignored = action->sa.sa_handler == SIG_IGN;
1336 blocked = sigismember(&t->blocked, sig);
1337 if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1338 action->sa.sa_handler = SIG_DFL;
1339 if (handler == HANDLER_EXIT)
1340 action->sa.sa_flags |= SA_IMMUTABLE;
1342 sigdelset(&t->blocked, sig);
1343 recalc_sigpending_and_wake(t);
1347 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1348 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1350 if (action->sa.sa_handler == SIG_DFL &&
1351 (!t->ptrace || (handler == HANDLER_EXIT)))
1352 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1353 ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1354 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1359 int force_sig_info(struct kernel_siginfo *info)
1361 return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1365 * Nuke all other threads in the group.
1367 int zap_other_threads(struct task_struct *p)
1369 struct task_struct *t = p;
1372 p->signal->group_stop_count = 0;
1374 while_each_thread(p, t) {
1375 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1378 /* Don't bother with already dead threads */
1381 sigaddset(&t->pending.signal, SIGKILL);
1382 signal_wake_up(t, 1);
1388 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1389 unsigned long *flags)
1391 struct sighand_struct *sighand;
1395 sighand = rcu_dereference(tsk->sighand);
1396 if (unlikely(sighand == NULL))
1400 * This sighand can be already freed and even reused, but
1401 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1402 * initializes ->siglock: this slab can't go away, it has
1403 * the same object type, ->siglock can't be reinitialized.
1405 * We need to ensure that tsk->sighand is still the same
1406 * after we take the lock, we can race with de_thread() or
1407 * __exit_signal(). In the latter case the next iteration
1408 * must see ->sighand == NULL.
1410 spin_lock_irqsave(&sighand->siglock, *flags);
1411 if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1413 spin_unlock_irqrestore(&sighand->siglock, *flags);
1420 #ifdef CONFIG_LOCKDEP
1421 void lockdep_assert_task_sighand_held(struct task_struct *task)
1423 struct sighand_struct *sighand;
1426 sighand = rcu_dereference(task->sighand);
1428 lockdep_assert_held(&sighand->siglock);
1436 * send signal info to all the members of a group
1438 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1439 struct task_struct *p, enum pid_type type)
1444 ret = check_kill_permission(sig, info, p);
1448 ret = do_send_sig_info(sig, info, p, type);
1454 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1455 * control characters do (^C, ^Z etc)
1456 * - the caller must hold at least a readlock on tasklist_lock
1458 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1460 struct task_struct *p = NULL;
1461 int retval, success;
1465 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1466 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1469 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1470 return success ? 0 : retval;
1473 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1476 struct task_struct *p;
1480 p = pid_task(pid, PIDTYPE_PID);
1482 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1484 if (likely(!p || error != -ESRCH))
1488 * The task was unhashed in between, try again. If it
1489 * is dead, pid_task() will return NULL, if we race with
1490 * de_thread() it will find the new leader.
1495 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1499 error = kill_pid_info(sig, info, find_vpid(pid));
1504 static inline bool kill_as_cred_perm(const struct cred *cred,
1505 struct task_struct *target)
1507 const struct cred *pcred = __task_cred(target);
1509 return uid_eq(cred->euid, pcred->suid) ||
1510 uid_eq(cred->euid, pcred->uid) ||
1511 uid_eq(cred->uid, pcred->suid) ||
1512 uid_eq(cred->uid, pcred->uid);
1516 * The usb asyncio usage of siginfo is wrong. The glibc support
1517 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1518 * AKA after the generic fields:
1519 * kernel_pid_t si_pid;
1520 * kernel_uid32_t si_uid;
1521 * sigval_t si_value;
1523 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1524 * after the generic fields is:
1525 * void __user *si_addr;
1527 * This is a practical problem when there is a 64bit big endian kernel
1528 * and a 32bit userspace. The 32bit address will be encoded in the low
1529 * 32bits of the pointer, and those low 32bits will be stored at a higher
1530 * address than a 32bit pointer would occupy. So userspace will not
1531 * see the address it was expecting for its completions.
1533 * There is nothing in the encoding that can allow
1534 * copy_siginfo_to_user32 to detect this confusion of formats, so
1535 * handle this by requiring the caller of kill_pid_usb_asyncio to
1536 * notice when this situation takes place and to store the 32bit
1537 * pointer in sival_int, instead of sival_addr, of the sigval_t addr parameter.
1540 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1541 struct pid *pid, const struct cred *cred)
1543 struct kernel_siginfo info;
1544 struct task_struct *p;
1545 unsigned long flags;
1548 if (!valid_signal(sig))
1551 clear_siginfo(&info);
1552 info.si_signo = sig;
1553 info.si_errno = errno;
1554 info.si_code = SI_ASYNCIO;
1555 *((sigval_t *)&info.si_pid) = addr;
1558 p = pid_task(pid, PIDTYPE_PID);
1563 if (!kill_as_cred_perm(cred, p)) {
1567 ret = security_task_kill(p, &info, sig, cred);
1572 if (lock_task_sighand(p, &flags)) {
1573 ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1574 unlock_task_sighand(p, &flags);
1582 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
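/*
 * Caller-side sketch following the rule spelled out above (an assumption,
 * not a call site in this file; "uptr" and "uptr32" are hypothetical):
 *
 *	sigval_t addr;
 *
 *	if (in_compat_syscall())
 *		addr.sival_int = uptr32;
 *	else
 *		addr.sival_ptr = uptr;
 *	kill_pid_usb_asyncio(sig, err, addr, pid, cred);
 */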
1585 * kill_something_info() interprets pid in interesting ways just like kill(2).
1587 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1588 * is probably wrong. Should make it like BSD or SYSV.
1591 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1596 return kill_proc_info(sig, info, pid);
1598 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1602 read_lock(&tasklist_lock);
1604 ret = __kill_pgrp_info(sig, info,
1605 pid ? find_vpid(-pid) : task_pgrp(current));
1607 int retval = 0, count = 0;
1608 struct task_struct * p;
1610 for_each_process(p) {
1611 if (task_pid_vnr(p) > 1 &&
1612 !same_thread_group(p, current)) {
1613 int err = group_send_sig_info(sig, info, p,
1620 ret = count ? retval : -ESRCH;
1622 read_unlock(&tasklist_lock);
1628 * These are for backward compatibility with the rest of the kernel source.
1631 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1634 * Make sure legacy kernel users don't send in bad values
1635 * (normal paths check this in check_kill_permission).
1637 if (!valid_signal(sig))
1640 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1642 EXPORT_SYMBOL(send_sig_info);
1644 #define __si_special(priv) \
1645 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1648 send_sig(int sig, struct task_struct *p, int priv)
1650 return send_sig_info(sig, __si_special(priv), p);
1652 EXPORT_SYMBOL(send_sig);
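/*
 * Usage sketch (an assumption, a typical driver-style call with a
 * hypothetical task pointer "t"): deliver SIGIO to a task we hold a
 * reference to, passing priv != 0 so __si_special() picks SEND_SIG_PRIV
 * (kernel-generated) rather than SEND_SIG_NOINFO:
 *
 *	send_sig(SIGIO, t, 1);
 */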
1654 void force_sig(int sig)
1656 struct kernel_siginfo info;
1658 clear_siginfo(&info);
1659 info.si_signo = sig;
1661 info.si_code = SI_KERNEL;
1664 force_sig_info(&info);
1666 EXPORT_SYMBOL(force_sig);
1668 void force_fatal_sig(int sig)
1670 struct kernel_siginfo info;
1672 clear_siginfo(&info);
1673 info.si_signo = sig;
1675 info.si_code = SI_KERNEL;
1678 force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1681 void force_exit_sig(int sig)
1683 struct kernel_siginfo info;
1685 clear_siginfo(&info);
1686 info.si_signo = sig;
1688 info.si_code = SI_KERNEL;
1691 force_sig_info_to_task(&info, current, HANDLER_EXIT);
1695 * When things go south during signal handling, we
1696 * will force a SIGSEGV. And if the signal that caused
1697 * the problem was already a SIGSEGV, we'll want to
1698 * make sure we don't even try to deliver the signal..
1700 void force_sigsegv(int sig)
1703 force_fatal_sig(SIGSEGV);
1708 int force_sig_fault_to_task(int sig, int code, void __user *addr
1709 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1710 , struct task_struct *t)
1712 struct kernel_siginfo info;
1714 clear_siginfo(&info);
1715 info.si_signo = sig;
1717 info.si_code = code;
1718 info.si_addr = addr;
1721 info.si_flags = flags;
1724 return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1727 int force_sig_fault(int sig, int code, void __user *addr
1728 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1730 return force_sig_fault_to_task(sig, code, addr
1731 ___ARCH_SI_IA64(imm, flags, isr), current);
1734 int send_sig_fault(int sig, int code, void __user *addr
1735 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1736 , struct task_struct *t)
1738 struct kernel_siginfo info;
1740 clear_siginfo(&info);
1741 info.si_signo = sig;
1743 info.si_code = code;
1744 info.si_addr = addr;
1747 info.si_flags = flags;
1750 return send_sig_info(info.si_signo, &info, t);
1753 int force_sig_mceerr(int code, void __user *addr, short lsb)
1755 struct kernel_siginfo info;
1757 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1758 clear_siginfo(&info);
1759 info.si_signo = SIGBUS;
1761 info.si_code = code;
1762 info.si_addr = addr;
1763 info.si_addr_lsb = lsb;
1764 return force_sig_info(&info);
1767 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1769 struct kernel_siginfo info;
1771 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1772 clear_siginfo(&info);
1773 info.si_signo = SIGBUS;
1775 info.si_code = code;
1776 info.si_addr = addr;
1777 info.si_addr_lsb = lsb;
1778 return send_sig_info(info.si_signo, &info, t);
1780 EXPORT_SYMBOL(send_sig_mceerr);
1782 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1784 struct kernel_siginfo info;
1786 clear_siginfo(&info);
1787 info.si_signo = SIGSEGV;
1789 info.si_code = SEGV_BNDERR;
1790 info.si_addr = addr;
1791 info.si_lower = lower;
1792 info.si_upper = upper;
1793 return force_sig_info(&info);
1797 int force_sig_pkuerr(void __user *addr, u32 pkey)
1799 struct kernel_siginfo info;
1801 clear_siginfo(&info);
1802 info.si_signo = SIGSEGV;
1804 info.si_code = SEGV_PKUERR;
1805 info.si_addr = addr;
1806 info.si_pkey = pkey;
1807 return force_sig_info(&info);
1811 int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1813 struct kernel_siginfo info;
1815 clear_siginfo(&info);
1816 info.si_signo = SIGTRAP;
1818 info.si_code = TRAP_PERF;
1819 info.si_addr = addr;
1820 info.si_perf_data = sig_data;
1821 info.si_perf_type = type;
1824 * Signals generated by perf events should not terminate the whole
1825 * process if SIGTRAP is blocked, however, delivering the signal
1826 * asynchronously is better than not delivering at all. But tell user
1827 * space if the signal was asynchronous, so it can clearly be
1828 * distinguished from normal synchronous ones.
1830 info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1831 TRAP_PERF_FLAG_ASYNC :
1834 return send_sig_info(info.si_signo, &info, current);
1838 * force_sig_seccomp - signals the task to allow in-process syscall emulation
1839 * @syscall: syscall number to send to userland
1840 * @reason: filter-supplied reason code to send to userland (via si_errno)
1841 * @force_coredump: true to trigger a coredump
1843 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1845 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1847 struct kernel_siginfo info;
1849 clear_siginfo(&info);
1850 info.si_signo = SIGSYS;
1851 info.si_code = SYS_SECCOMP;
1852 info.si_call_addr = (void __user *)KSTK_EIP(current);
1853 info.si_errno = reason;
1854 info.si_arch = syscall_get_arch(current);
1855 info.si_syscall = syscall;
1856 return force_sig_info_to_task(&info, current,
1857 force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
1860 /* For the crazy architectures that include trap information in
1861 * the errno field, instead of an actual errno value.
1863 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1865 struct kernel_siginfo info;
1867 clear_siginfo(&info);
1868 info.si_signo = SIGTRAP;
1869 info.si_errno = errno;
1870 info.si_code = TRAP_HWBKPT;
1871 info.si_addr = addr;
1872 return force_sig_info(&info);
1875 /* For the rare architectures that include trap information using si_trapno. */
1878 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1880 struct kernel_siginfo info;
1882 clear_siginfo(&info);
1883 info.si_signo = sig;
1885 info.si_code = code;
1886 info.si_addr = addr;
1887 info.si_trapno = trapno;
1888 return force_sig_info(&info);
1891 /* For the rare architectures that include trap information using si_trapno. */
1894 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1895 struct task_struct *t)
1897 struct kernel_siginfo info;
1899 clear_siginfo(&info);
1900 info.si_signo = sig;
1902 info.si_code = code;
1903 info.si_addr = addr;
1904 info.si_trapno = trapno;
1905 return send_sig_info(info.si_signo, &info, t);
1908 int kill_pgrp(struct pid *pid, int sig, int priv)
1912 read_lock(&tasklist_lock);
1913 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1914 read_unlock(&tasklist_lock);
1918 EXPORT_SYMBOL(kill_pgrp);
1920 int kill_pid(struct pid *pid, int sig, int priv)
1922 return kill_pid_info(sig, __si_special(priv), pid);
1924 EXPORT_SYMBOL(kill_pid);
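/*
 * Usage sketch, an assumption mirroring tty-style callers ("tty" is a
 * hypothetical struct tty_struct pointer): signal a whole foreground
 * process group, e.g. on ^C, while holding a struct pid reference:
 *
 *	struct pid *pgrp = tty_get_pgrp(tty);
 *
 *	if (pgrp) {
 *		kill_pgrp(pgrp, SIGINT, 1);
 *		put_pid(pgrp);
 *	}
 */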
1927 * These functions support sending signals using preallocated sigqueue
1928 * structures. This is needed "because realtime applications cannot
1929 * afford to lose notifications of asynchronous events, like timer
1930 * expirations or I/O completions". In the case of POSIX Timers
1931 * we allocate the sigqueue structure from the timer_create. If this
1932 * allocation fails we are able to report the failure to the application
1933 * with an EAGAIN error.
1935 struct sigqueue *sigqueue_alloc(void)
1937 return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1940 void sigqueue_free(struct sigqueue *q)
1942 unsigned long flags;
1943 spinlock_t *lock = &current->sighand->siglock;
1945 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1947 * We must hold ->siglock while testing q->list
1948 * to serialize with collect_signal() or with
1949 * __exit_signal()->flush_sigqueue().
1951 spin_lock_irqsave(lock, flags);
1952 q->flags &= ~SIGQUEUE_PREALLOC;
1954 * If it is queued it will be freed when dequeued,
1955 * like the "regular" sigqueue.
1957 if (!list_empty(&q->list))
1959 spin_unlock_irqrestore(lock, flags);
1965 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1967 int sig = q->info.si_signo;
1968 struct sigpending *pending;
1969 struct task_struct *t;
1970 unsigned long flags;
1973 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1977 t = pid_task(pid, type);
1978 if (!t || !likely(lock_task_sighand(t, &flags)))
1981 ret = 1; /* the signal is ignored */
1982 result = TRACE_SIGNAL_IGNORED;
1983 if (!prepare_signal(sig, t, false))
1987 if (unlikely(!list_empty(&q->list))) {
1989 * If an SI_TIMER entry is already queued, just increment
1990 * the overrun count.
1992 BUG_ON(q->info.si_code != SI_TIMER);
1993 q->info.si_overrun++;
1994 result = TRACE_SIGNAL_ALREADY_PENDING;
1997 q->info.si_overrun = 0;
1999 signalfd_notify(t, sig);
2000 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
2001 list_add_tail(&q->list, &pending->list);
2002 sigaddset(&pending->signal, sig);
2003 complete_signal(sig, t, type);
2004 result = TRACE_SIGNAL_DELIVERED;
2006 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2007 unlock_task_sighand(t, &flags);
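/*
 * Lifecycle sketch, an assumption modelled on the POSIX timer usage
 * described above (calls abbreviated): the sigqueue is allocated once at
 * timer creation, re-sent on every expiry, and only freed when the timer
 * is deleted:
 *
 *	q = sigqueue_alloc();			// timer_create()
 *	...
 *	send_sigqueue(q, pid, PIDTYPE_TGID);	// each expiry; bumps
 *						// si_overrun if still queued
 *	...
 *	sigqueue_free(q);			// timer_delete()
 */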
2013 static void do_notify_pidfd(struct task_struct *task)
2017 WARN_ON(task->exit_state == 0);
2018 pid = task_pid(task);
2019 wake_up_all(&pid->wait_pidfd);
2023 * Let a parent know about the death of a child.
2024 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2026 * Returns true if our parent ignored us and so we've switched to self-reaping.
2029 bool do_notify_parent(struct task_struct *tsk, int sig)
2031 struct kernel_siginfo info;
2032 unsigned long flags;
2033 struct sighand_struct *psig;
2034 bool autoreap = false;
2037 WARN_ON_ONCE(sig == -1);
2039 /* do_notify_parent_cldstop should have been called instead. */
2040 WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2042 WARN_ON_ONCE(!tsk->ptrace &&
2043 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2045 /* Wake up all pidfd waiters */
2046 do_notify_pidfd(tsk);
2048 if (sig != SIGCHLD) {
2050 * This is only possible if parent == real_parent.
2051 * Check if it has changed security domain.
2053 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2057 clear_siginfo(&info);
2058 info.si_signo = sig;
2061 * We are under tasklist_lock here so our parent is tied to
2062 * us and cannot change.
2064 * task_active_pid_ns will always return the same pid namespace
2065 * until a task passes through release_task.
2067 * write_lock() currently calls preempt_disable() which is the
2068 * same as rcu_read_lock(), but according to Oleg it is not
2069 * correct to rely on this.
2072 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2073 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2077 task_cputime(tsk, &utime, &stime);
2078 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2079 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2081 info.si_status = tsk->exit_code & 0x7f;
2082 if (tsk->exit_code & 0x80)
2083 info.si_code = CLD_DUMPED;
2084 else if (tsk->exit_code & 0x7f)
2085 info.si_code = CLD_KILLED;
2087 info.si_code = CLD_EXITED;
2088 info.si_status = tsk->exit_code >> 8;
2091 psig = tsk->parent->sighand;
2092 spin_lock_irqsave(&psig->siglock, flags);
2093 if (!tsk->ptrace && sig == SIGCHLD &&
2094 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2095 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2097 * We are exiting and our parent doesn't care. POSIX.1
2098 * defines special semantics for setting SIGCHLD to SIG_IGN
2099 * or setting the SA_NOCLDWAIT flag: we should be reaped
2100 * automatically and not left for our parent's wait4 call.
2101 * Rather than having the parent do it as a magic kind of
2102 * signal handler, we just set this to tell do_exit that we
2103 * can be cleaned up without becoming a zombie. Note that
2104 * we still call __wake_up_parent in this case, because a
2105 * blocked sys_wait4 might now return -ECHILD.
2107 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2108 * is implementation-defined: we do (if you don't want
2109 * it, just use SIG_IGN instead).
2112 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2116 * Send with __send_signal_locked as si_pid and si_uid are in the
2117 * parent's namespaces.
2119 if (valid_signal(sig) && sig)
2120 __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2121 __wake_up_parent(tsk, tsk->parent);
2122 spin_unlock_irqrestore(&psig->siglock, flags);
2128 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2129 * @tsk: task reporting the state change
2130 * @for_ptracer: the notification is for ptracer
2131 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2133 * Notify @tsk's parent that the stopped/continued state has changed. If
2134 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2135 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2138 * Must be called with tasklist_lock at least read locked.
2140 static void do_notify_parent_cldstop(struct task_struct *tsk,
2141 bool for_ptracer, int why)
2143 struct kernel_siginfo info;
2144 unsigned long flags;
2145 struct task_struct *parent;
2146 struct sighand_struct *sighand;
2150 parent = tsk->parent;
2152 tsk = tsk->group_leader;
2153 parent = tsk->real_parent;
2156 clear_siginfo(&info);
2157 info.si_signo = SIGCHLD;
2160 * see comment in do_notify_parent() about the following 4 lines
2163 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2164 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2167 task_cputime(tsk, &utime, &stime);
2168 info.si_utime = nsec_to_clock_t(utime);
2169 info.si_stime = nsec_to_clock_t(stime);
2174 info.si_status = SIGCONT;
2177 info.si_status = tsk->signal->group_exit_code & 0x7f;
2180 info.si_status = tsk->exit_code & 0x7f;
2186 sighand = parent->sighand;
2187 spin_lock_irqsave(&sighand->siglock, flags);
2188 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2189 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2190 send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2192 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2194 __wake_up_parent(tsk, parent);
2195 spin_unlock_irqrestore(&sighand->siglock, flags);
2199 * This must be called with current->sighand->siglock held.
2201 * This should be the path for all ptrace stops.
2202 * We always set current->last_siginfo while stopped here.
2203 * That makes it a way to test a stopped process for
2204 * being ptrace-stopped vs being job-control-stopped.
2206 * Returns the signal the ptracer requested the code resume
2207 * with. If the code did not stop because the tracer is gone,
2208 * the stop signal remains unchanged.
2210 static int ptrace_stop(int exit_code, int why, unsigned long message,
2211 kernel_siginfo_t *info)
2212 __releases(&current->sighand->siglock)
2213 __acquires(&current->sighand->siglock)
2215 bool gstop_done = false;
2217 if (arch_ptrace_stop_needed()) {
2219 * The arch code has something special to do before a
2220 * ptrace stop. This is allowed to block, e.g. for faults
2221 * on user stack pages. We can't keep the siglock while
2222 * calling arch_ptrace_stop, so we must release it now.
2223 * To preserve proper semantics, we must do this before
2224 * any signal bookkeeping like checking group_stop_count.
2226 spin_unlock_irq(&current->sighand->siglock);
2228 spin_lock_irq(&current->sighand->siglock);
2232 * After this point ptrace_signal_wake_up or signal_wake_up
2233 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2234 * signal comes in. Handle previous ptrace_unlinks and fatal
2235 * signals here to prevent ptrace_stop sleeping in schedule.
2237 if (!current->ptrace || __fatal_signal_pending(current))
2240 set_special_state(TASK_TRACED);
2241 current->jobctl |= JOBCTL_TRACED;
2244 * We're committing to trapping. TRACED should be visible before
2245 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2246 * Also, transition to TRACED and updates to ->jobctl should be
2247 * atomic with respect to siglock and should be done after the arch
2248 * hook as siglock is released and regrabbed across it.
*     TRACER				    TRACEE
*
*     ptrace_attach()
* [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
*     do_wait()
*       set_current_state()		    smp_wmb();
*       ptrace_do_wait()
*         wait_task_stopped()
*           task_stopped_code()
* [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
2263 current->ptrace_message = message;
2264 current->last_siginfo = info;
2265 current->exit_code = exit_code;
2268 * If @why is CLD_STOPPED, we're trapping to participate in a group
2269 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2270 * across siglock relocks since INTERRUPT was scheduled, PENDING
2271 * could be clear now. We act as if SIGCONT is received after
2272 * TASK_TRACED is entered - ignore it.
2274 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2275 gstop_done = task_participate_group_stop(current);
2277 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2278 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2279 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2280 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2282 /* entering a trap, clear TRAPPING */
2283 task_clear_jobctl_trapping(current);
2285 spin_unlock_irq(&current->sighand->siglock);
2286 read_lock(&tasklist_lock);
2288 * Notify parents of the stop.
2290 * While ptraced, there are two parents - the ptracer and
2291 * the real_parent of the group_leader. The ptracer should
2292 * know about every stop while the real parent is only
2293 * interested in the completion of group stop. The states
2294 * for the two don't interact with each other. Notify
2295 * separately unless they're gonna be duplicates.
2297 if (current->ptrace)
2298 do_notify_parent_cldstop(current, true, why);
2299 if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2300 do_notify_parent_cldstop(current, false, why);
2303 * Don't want to allow preemption here, because
2304 * sys_ptrace() needs this task to be inactive.
2306 * XXX: implement read_unlock_no_resched().
2309 read_unlock(&tasklist_lock);
2310 cgroup_enter_frozen();
2311 preempt_enable_no_resched();
2313 cgroup_leave_frozen(true);
2316 * We are back. Now reacquire the siglock before touching
2317 * last_siginfo, so that we are sure to have synchronized with
2318 * any signal-sending on another CPU that wants to examine it.
2320 spin_lock_irq(&current->sighand->siglock);
2321 exit_code = current->exit_code;
2322 current->last_siginfo = NULL;
2323 current->ptrace_message = 0;
2324 current->exit_code = 0;
2326 /* LISTENING can be set only during STOP traps, clear it */
2327 current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2330 * Queued signals ignored us while we were stopped for tracing.
2331 * So check for any that we should take before resuming user mode.
2332 * This sets TIF_SIGPENDING, but never clears it.
2334 recalc_sigpending_tsk(current);
2338 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2340 kernel_siginfo_t info;
2342 clear_siginfo(&info);
2343 info.si_signo = signr;
2344 info.si_code = exit_code;
2345 info.si_pid = task_pid_vnr(current);
2346 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2348 /* Let the debugger run. */
2349 return ptrace_stop(exit_code, why, message, &info);
2352 int ptrace_notify(int exit_code, unsigned long message)
2356 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2357 if (unlikely(task_work_pending(current)))
2360 spin_lock_irq(&current->sighand->siglock);
2361 signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2362 spin_unlock_irq(&current->sighand->siglock);
2367 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2368 * @signr: signr causing group stop if initiating
2370 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2371 * and participate in it. If already set, participate in the existing
2372 * group stop. If participated in a group stop (and thus slept), %true is
2373 * returned with siglock released.
2375 * If ptraced, this function doesn't handle stop itself. Instead,
2376 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2377 * untouched. The caller must ensure that INTERRUPT trap handling takes
2378 * place afterwards.
2381 * Must be called with @current->sighand->siglock held, which is released
2385 * %false if group stop is already cancelled or ptrace trap is scheduled.
2386 * %true if participated in group stop.
2388 static bool do_signal_stop(int signr)
2389 __releases(&current->sighand->siglock)
2391 struct signal_struct *sig = current->signal;
2393 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2394 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2395 struct task_struct *t;
2397 /* signr will be recorded in task->jobctl for retries */
2398 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2400 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2401 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2402 unlikely(sig->group_exec_task))
2405 * There is no group stop already in progress. We must
2408 * While ptraced, a task may be resumed while group stop is
2409 * still in effect and then receive a stop signal and
2410 * initiate another group stop. This deviates from the
2411 * usual behavior as two consecutive stop signals can't
2412 * cause two group stops when !ptraced. That is why we
2413 * also check !task_is_stopped(t) below.
2415 * The condition can be distinguished by testing whether
2416 * SIGNAL_STOP_STOPPED is already set. Don't generate
2417 * group_exit_code in such case.
2419 * This is not necessary for SIGNAL_STOP_CONTINUED because
2420 * an intervening stop signal is required to cause two
2421 * continued events regardless of ptrace.
2423 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2424 sig->group_exit_code = signr;
2426 sig->group_stop_count = 0;
2428 if (task_set_jobctl_pending(current, signr | gstop))
2429 sig->group_stop_count++;
2432 while_each_thread(current, t) {
2434 * Setting state to TASK_STOPPED for a group
2435 * stop is always done with the siglock held,
2436 * so this check has no races.
2438 if (!task_is_stopped(t) &&
2439 task_set_jobctl_pending(t, signr | gstop)) {
2440 sig->group_stop_count++;
2441 if (likely(!(t->ptrace & PT_SEIZED)))
2442 signal_wake_up(t, 0);
2444 ptrace_trap_notify(t);
2449 if (likely(!current->ptrace)) {
2453 * If there are no other threads in the group, or if there
2454 * is a group stop in progress and we are the last to stop,
2455 * report to the parent.
2457 if (task_participate_group_stop(current))
2458 notify = CLD_STOPPED;
2460 current->jobctl |= JOBCTL_STOPPED;
2461 set_special_state(TASK_STOPPED);
2462 spin_unlock_irq(&current->sighand->siglock);
2465 * Notify the parent of the group stop completion. Because
2466 * we're not holding either the siglock or tasklist_lock
2467 * here, ptracer may attach in between; however, this is for
2468 * group stop and should always be delivered to the real
2469 * parent of the group leader. The new ptracer will get
2470 * its notification when this task transitions into
2474 read_lock(&tasklist_lock);
2475 do_notify_parent_cldstop(current, false, notify);
2476 read_unlock(&tasklist_lock);
2479 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2480 cgroup_enter_frozen();
2485 * While ptraced, group stop is handled by STOP trap.
2486 * Schedule it and let the caller deal with it.
2488 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2494 * do_jobctl_trap - take care of ptrace jobctl traps
2496 * When PT_SEIZED, it's used for both group stop and explicit
2497 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2498 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2499 * the stop signal; otherwise, %SIGTRAP.
2501 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2502 * number as exit_code and no siginfo.
2505 * Must be called with @current->sighand->siglock held, which may be
2506 * released and re-acquired before returning with intervening sleep.
2508 static void do_jobctl_trap(void)
2510 struct signal_struct *signal = current->signal;
2511 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2513 if (current->ptrace & PT_SEIZED) {
2514 if (!signal->group_stop_count &&
2515 !(signal->flags & SIGNAL_STOP_STOPPED))
2517 WARN_ON_ONCE(!signr);
2518 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2521 WARN_ON_ONCE(!signr);
2522 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2527 * do_freezer_trap - handle the freezer jobctl trap
2529 * Puts the task into the frozen state, unless the task is about to quit.
2530 * In that case it drops JOBCTL_TRAP_FREEZE.
2533 * Must be called with @current->sighand->siglock held,
2534 * which is always released before returning.
2536 static void do_freezer_trap(void)
2537 __releases(&current->sighand->siglock)
2540 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2541 * let's make another loop to give it a chance to be handled.
2542 * In any case, we'll return back.
2544 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2545 JOBCTL_TRAP_FREEZE) {
2546 spin_unlock_irq(&current->sighand->siglock);
2551 * Now we're sure that there is no pending fatal signal and no
2552 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2553 * immediately (if there is a non-fatal signal pending), and
2554 * put the task into sleep.
2556 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2557 clear_thread_flag(TIF_SIGPENDING);
2558 spin_unlock_irq(&current->sighand->siglock);
2559 cgroup_enter_frozen();
2563 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2566 * We do not check sig_kernel_stop(signr) but set this marker
2567 * unconditionally because we do not know whether debugger will
2568 * change signr. This flag has no meaning unless we are going
2569 * to stop after return from ptrace_stop(). In this case it will
2570 * be checked in do_signal_stop(), we should only stop if it was
2571 * not cleared by SIGCONT while we were sleeping. See also the
2572 * comment in dequeue_signal().
2574 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2575 signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2577 /* We're back. Did the debugger cancel the sig? */
2582 * Update the siginfo structure if the signal has
2583 * changed. If the debugger wanted something
2584 * specific in the siginfo structure then it should
2585 * have updated *info via PTRACE_SETSIGINFO.
2587 if (signr != info->si_signo) {
2588 clear_siginfo(info);
2589 info->si_signo = signr;
2591 info->si_code = SI_USER;
2593 info->si_pid = task_pid_vnr(current->parent);
2594 info->si_uid = from_kuid_munged(current_user_ns(),
2595 task_uid(current->parent));
2599 /* If the (new) signal is now blocked, requeue it. */
2600 if (sigismember(&current->blocked, signr) ||
2601 fatal_signal_pending(current)) {
2602 send_signal_locked(signr, info, current, type);
2609 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2611 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2613 case SIL_FAULT_TRAPNO:
2614 case SIL_FAULT_MCEERR:
2615 case SIL_FAULT_BNDERR:
2616 case SIL_FAULT_PKUERR:
2617 case SIL_FAULT_PERF_EVENT:
2618 ksig->info.si_addr = arch_untagged_si_addr(
2619 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2631 bool get_signal(struct ksignal *ksig)
2633 struct sighand_struct *sighand = current->sighand;
2634 struct signal_struct *signal = current->signal;
2637 clear_notify_signal();
2638 if (unlikely(task_work_pending(current)))
2641 if (!task_sigpending(current))
2644 if (unlikely(uprobe_deny_signal()))
2648 * Do this once, we can't return to user-mode if freezing() == T.
2649 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2650 * thus do not need another check after return.
2655 spin_lock_irq(&sighand->siglock);
2658 * Every stopped thread goes here after wakeup. Check to see if
2659 * we should notify the parent, prepare_signal(SIGCONT) encodes
2660 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2662 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2665 if (signal->flags & SIGNAL_CLD_CONTINUED)
2666 why = CLD_CONTINUED;
2670 signal->flags &= ~SIGNAL_CLD_MASK;
2672 spin_unlock_irq(&sighand->siglock);
2675 * Notify the parent that we're continuing. This event is
2676 * always per-process and doesn't make a whole lot of sense
2677 * for ptracers, who shouldn't consume the state via
2678 * wait(2) either, but, for backward compatibility, notify
2679 * the ptracer of the group leader too unless it's gonna be
2682 read_lock(&tasklist_lock);
2683 do_notify_parent_cldstop(current, false, why);
2685 if (ptrace_reparented(current->group_leader))
2686 do_notify_parent_cldstop(current->group_leader,
2688 read_unlock(&tasklist_lock);
2694 struct k_sigaction *ka;
2697 /* Has this task already been marked for death? */
2698 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2699 signal->group_exec_task) {
2700 ksig->info.si_signo = signr = SIGKILL;
2701 sigdelset(&current->pending.signal, SIGKILL);
2702 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2703 &sighand->action[SIGKILL - 1]);
2704 recalc_sigpending();
2708 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2712 if (unlikely(current->jobctl &
2713 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2714 if (current->jobctl & JOBCTL_TRAP_MASK) {
2716 spin_unlock_irq(&sighand->siglock);
2717 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2724 * If the task is leaving the frozen state, let's update
2725 * cgroup counters and reset the frozen bit.
2727 if (unlikely(cgroup_task_frozen(current))) {
2728 spin_unlock_irq(&sighand->siglock);
2729 cgroup_leave_frozen(false);
2734 * Signals generated by the execution of an instruction
2735 * need to be delivered before any other pending signals
2736 * so that the instruction pointer in the signal stack
2737 * frame points to the faulting instruction.
2740 signr = dequeue_synchronous_signal(&ksig->info);
2742 signr = dequeue_signal(current, &current->blocked,
2743 &ksig->info, &type);
2746 break; /* will return 0 */
2748 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2749 !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2750 signr = ptrace_signal(signr, &ksig->info, type);
2755 ka = &sighand->action[signr-1];
2757 /* Trace actually delivered signals. */
2758 trace_signal_deliver(signr, &ksig->info, ka);
2760 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2762 if (ka->sa.sa_handler != SIG_DFL) {
2763 /* Run the handler. */
2766 if (ka->sa.sa_flags & SA_ONESHOT)
2767 ka->sa.sa_handler = SIG_DFL;
2769 break; /* will return non-zero "signr" value */
2773 * Now we are doing the default action for this signal.
2775 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2779 * Global init gets no signals it doesn't want.
2780 * Container-init gets no signals it doesn't want from same
2783 * Note that if global/container-init sees a sig_kernel_only()
2784 * signal here, the signal must have been generated internally
2785 * or must have come from an ancestor namespace. In either
2786 * case, the signal cannot be dropped.
2788 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2789 !sig_kernel_only(signr))
2792 if (sig_kernel_stop(signr)) {
2794 * The default action is to stop all threads in
2795 * the thread group. The job control signals
2796 * do nothing in an orphaned pgrp, but SIGSTOP
2797 * always works. Note that siglock needs to be
2798 * dropped during the call to is_orphaned_pgrp()
2799 * because of lock ordering with tasklist_lock.
2800 * This allows an intervening SIGCONT to be posted.
2801 * We need to check for that and bail out if necessary.
2803 if (signr != SIGSTOP) {
2804 spin_unlock_irq(&sighand->siglock);
2806 /* signals can be posted during this window */
2808 if (is_current_pgrp_orphaned())
2811 spin_lock_irq(&sighand->siglock);
2814 if (likely(do_signal_stop(ksig->info.si_signo))) {
2815 /* It released the siglock. */
2820 * We didn't actually stop, due to a race
2821 * with SIGCONT or something like that.
2827 spin_unlock_irq(&sighand->siglock);
2828 if (unlikely(cgroup_task_frozen(current)))
2829 cgroup_leave_frozen(true);
2832 * Anything else is fatal, maybe with a core dump.
2834 current->flags |= PF_SIGNALED;
2836 if (sig_kernel_coredump(signr)) {
2837 if (print_fatal_signals)
2838 print_fatal_signal(ksig->info.si_signo);
2839 proc_coredump_connector(current);
2841 * If it was able to dump core, this kills all
2842 * other threads in the group and synchronizes with
2843 * their demise. If we lost the race with another
2844 * thread getting here, it set group_exit_code
2845 * first and our do_group_exit call below will use
2846 * that value and ignore the one we pass it.
2848 do_coredump(&ksig->info);
2852 * PF_IO_WORKER threads will catch and exit on fatal signals
2853 * themselves. They have cleanup that must be performed, so
2854 * we cannot call do_exit() on their behalf.
2856 if (current->flags & PF_IO_WORKER)
2860 * Death signals, no core dump.
2862 do_group_exit(ksig->info.si_signo);
2865 spin_unlock_irq(&sighand->siglock);
2869 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2870 hide_si_addr_tag_bits(ksig);
2872 return ksig->sig > 0;
2876 * signal_delivered - called after signal delivery to update blocked signals
2877 * @ksig: kernel signal struct
2878 * @stepping: nonzero if debugger single-step or block-step in use
2880 * This function should be called when a signal has successfully been
2881 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2882 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2883 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2885 static void signal_delivered(struct ksignal *ksig, int stepping)
2889 /* A signal was successfully delivered, and the
2890 saved sigmask was stored on the signal frame,
2891 and will be restored by sigreturn. So we can
2892 simply clear the restore sigmask flag. */
2893 clear_restore_sigmask();
2895 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2896 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2897 sigaddset(&blocked, ksig->sig);
2898 set_current_blocked(&blocked);
2899 if (current->sas_ss_flags & SS_AUTODISARM)
2900 sas_ss_reset(current);
2902 ptrace_notify(SIGTRAP, 0);
2905 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2908 force_sigsegv(ksig->sig);
2910 signal_delivered(ksig, stepping);
2914 * It could be that complete_signal() picked us to notify about the
2915 * group-wide signal. Other threads should be notified now to take
2916 * the shared signals in @which since we will not.
2918 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2921 struct task_struct *t;
2923 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2924 if (sigisemptyset(&retarget))
2928 while_each_thread(tsk, t) {
2929 if (t->flags & PF_EXITING)
2932 if (!has_pending_signals(&retarget, &t->blocked))
2934 /* Remove the signals this thread can handle. */
2935 sigandsets(&retarget, &retarget, &t->blocked);
2937 if (!task_sigpending(t))
2938 signal_wake_up(t, 0);
2940 if (sigisemptyset(&retarget))
2945 void exit_signals(struct task_struct *tsk)
2951 * @tsk is about to have PF_EXITING set - lock out users which
2952 * expect stable threadgroup.
2954 cgroup_threadgroup_change_begin(tsk);
2956 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2957 tsk->flags |= PF_EXITING;
2958 cgroup_threadgroup_change_end(tsk);
2962 spin_lock_irq(&tsk->sighand->siglock);
2964 * From now this task is not visible for group-wide signals,
2965 * see wants_signal(), do_signal_stop().
2967 tsk->flags |= PF_EXITING;
2969 cgroup_threadgroup_change_end(tsk);
2971 if (!task_sigpending(tsk))
2974 unblocked = tsk->blocked;
2975 signotset(&unblocked);
2976 retarget_shared_pending(tsk, &unblocked);
2978 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2979 task_participate_group_stop(tsk))
2980 group_stop = CLD_STOPPED;
2982 spin_unlock_irq(&tsk->sighand->siglock);
2985 * If group stop has completed, deliver the notification. This
2986 * should always go to the real parent of the group leader.
2988 if (unlikely(group_stop)) {
2989 read_lock(&tasklist_lock);
2990 do_notify_parent_cldstop(tsk, false, group_stop);
2991 read_unlock(&tasklist_lock);
2996 * System call entry points.
3000 * sys_restart_syscall - restart a system call
3002 SYSCALL_DEFINE0(restart_syscall)
3004 struct restart_block *restart = &current->restart_block;
3005 return restart->fn(restart);
3008 long do_no_restart_syscall(struct restart_block *param)
3013 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3015 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3016 sigset_t newblocked;
3017 /* A set of now blocked but previously unblocked signals. */
3018 sigandnsets(&newblocked, newset, &current->blocked);
3019 retarget_shared_pending(tsk, &newblocked);
3021 tsk->blocked = *newset;
3022 recalc_sigpending();
3026 * set_current_blocked - change current->blocked mask
3029 * It is wrong to change ->blocked directly, this helper should be used
3030 * to ensure the process can't miss a shared signal we are going to block.
3032 void set_current_blocked(sigset_t *newset)
3034 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3035 __set_current_blocked(newset);
3038 void __set_current_blocked(const sigset_t *newset)
3040 struct task_struct *tsk = current;
3043 * In case the signal mask hasn't changed, there is nothing we need
3044 * to do. The current->blocked shouldn't be modified by other task.
3046 if (sigequalsets(&tsk->blocked, newset))
3049 spin_lock_irq(&tsk->sighand->siglock);
3050 __set_task_blocked(tsk, newset);
3051 spin_unlock_irq(&tsk->sighand->siglock);
3055 * This is also useful for kernel threads that want to temporarily
3056 * (or permanently) block certain signals.
3058 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3059 * interface happily blocks "unblockable" signals like SIGKILL
3062 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3064 struct task_struct *tsk = current;
3067 /* Lockless, only current can change ->blocked, never from irq */
3069 *oldset = tsk->blocked;
3073 sigorsets(&newset, &tsk->blocked, set);
3076 sigandnsets(&newset, &tsk->blocked, set);
3085 __set_current_blocked(&newset);
3088 EXPORT_SYMBOL(sigprocmask);
3091 * The api helps set app-provided sigmasks.
3093 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3094 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3096 * Note that it does set_restore_sigmask() in advance, so it must be always
3097 * paired with restore_saved_sigmask_unless() before return from syscall.
3099 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3105 if (sigsetsize != sizeof(sigset_t))
3107 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3110 set_restore_sigmask();
3111 current->saved_sigmask = current->blocked;
3112 set_current_blocked(&kmask);
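/*
 * Illustrative user-space sketch (not part of this file): the atomic mask
 * swap performed by set_user_sigmask() is what the sigmask argument of
 * ppoll(2)/pselect(2) relies on. Hypothetical helper; error handling
 * trimmed, assumes _GNU_SOURCE for the ppoll() prototype.
 *
 *	#define _GNU_SOURCE
 *	#include <poll.h>
 *	#include <signal.h>
 *
 *	int wait_with_sigint_allowed(struct pollfd *fds, nfds_t n)
 *	{
 *		sigset_t during_wait;
 *
 *		// Block everything except SIGINT while sleeping in ppoll().
 *		sigfillset(&during_wait);
 *		sigdelset(&during_wait, SIGINT);
 *
 *		// The kernel installs during_wait, sleeps, and restores the old
 *		// mask on return (see restore_saved_sigmask_unless()).
 *		return ppoll(fds, n, NULL, &during_wait);
 *	}
 */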
3117 #ifdef CONFIG_COMPAT
3118 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3125 if (sigsetsize != sizeof(compat_sigset_t))
3127 if (get_compat_sigset(&kmask, umask))
3130 set_restore_sigmask();
3131 current->saved_sigmask = current->blocked;
3132 set_current_blocked(&kmask);
3139 * sys_rt_sigprocmask - change the list of currently blocked signals
3140 * @how: whether to add, remove, or set signals
3141 * @nset: signals to add, remove, or set (if non-null)
3142 * @oset: previous value of signal mask if non-null
3143 * @sigsetsize: size of sigset_t type
3145 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3146 sigset_t __user *, oset, size_t, sigsetsize)
3148 sigset_t old_set, new_set;
3151 /* XXX: Don't preclude handling different sized sigset_t's. */
3152 if (sigsetsize != sizeof(sigset_t))
3155 old_set = current->blocked;
3158 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3160 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3162 error = sigprocmask(how, &new_set, NULL);
3168 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
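/*
 * Illustrative user-space sketch (not part of this file): libc's
 * sigprocmask(3) wrapper ends up in rt_sigprocmask above. Minimal example
 * of blocking a signal around a critical section and then restoring the
 * previous mask.
 *
 *	#include <signal.h>
 *
 *	void with_sigterm_blocked(void (*critical)(void))
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGTERM);
 *
 *		sigprocmask(SIG_BLOCK, &block, &old);	// SIGTERM now stays pending
 *		critical();
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore previous mask
 *	}
 */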
3175 #ifdef CONFIG_COMPAT
3176 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3177 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3179 sigset_t old_set = current->blocked;
3181 /* XXX: Don't preclude handling different sized sigset_t's. */
3182 if (sigsetsize != sizeof(sigset_t))
3188 if (get_compat_sigset(&new_set, nset))
3190 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3192 error = sigprocmask(how, &new_set, NULL);
3196 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3200 static void do_sigpending(sigset_t *set)
3202 spin_lock_irq(&current->sighand->siglock);
3203 sigorsets(set, &current->pending.signal,
3204 &current->signal->shared_pending.signal);
3205 spin_unlock_irq(&current->sighand->siglock);
3207 /* Outside the lock because only this thread touches it. */
3208 sigandsets(set, &current->blocked, set);
3212 * sys_rt_sigpending - examine a pending signal that has been raised
3214 * @uset: stores pending signals
3215 * @sigsetsize: size of sigset_t type or larger
3217 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3221 if (sigsetsize > sizeof(*uset))
3224 do_sigpending(&set);
3226 if (copy_to_user(uset, &set, sigsetsize))
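/*
 * Illustrative user-space sketch (not part of this file): sigpending(2)
 * returns the same union that do_sigpending() computes above - signals
 * raised while blocked, per-thread and shared alike.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	void report_pending_sigusr1(void)
 *	{
 *		sigset_t pending;
 *
 *		if (sigpending(&pending) == 0 && sigismember(&pending, SIGUSR1))
 *			printf("SIGUSR1 was raised while blocked\n");
 *	}
 */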
3232 #ifdef CONFIG_COMPAT
3233 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3234 compat_size_t, sigsetsize)
3238 if (sigsetsize > sizeof(*uset))
3241 do_sigpending(&set);
3243 return put_compat_sigset(uset, &set, sigsetsize);
3247 static const struct {
3248 unsigned char limit, layout;
3250 [SIGILL] = { NSIGILL, SIL_FAULT },
3251 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3252 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3253 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3254 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3256 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3258 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3259 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3260 [SIGSYS] = { NSIGSYS, SIL_SYS },
3263 static bool known_siginfo_layout(unsigned sig, int si_code)
3265 if (si_code == SI_KERNEL)
3267 else if ((si_code > SI_USER)) {
3268 if (sig_specific_sicodes(sig)) {
3269 if (si_code <= sig_sicodes[sig].limit)
3272 else if (si_code <= NSIGPOLL)
3275 else if (si_code >= SI_DETHREAD)
3277 else if (si_code == SI_ASYNCNL)
3282 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3284 enum siginfo_layout layout = SIL_KILL;
3285 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3286 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3287 (si_code <= sig_sicodes[sig].limit)) {
3288 layout = sig_sicodes[sig].layout;
3289 /* Handle the exceptions */
3290 if ((sig == SIGBUS) &&
3291 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3292 layout = SIL_FAULT_MCEERR;
3293 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3294 layout = SIL_FAULT_BNDERR;
3296 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3297 layout = SIL_FAULT_PKUERR;
3299 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3300 layout = SIL_FAULT_PERF_EVENT;
3301 else if (IS_ENABLED(CONFIG_SPARC) &&
3302 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3303 layout = SIL_FAULT_TRAPNO;
3304 else if (IS_ENABLED(CONFIG_ALPHA) &&
3306 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3307 layout = SIL_FAULT_TRAPNO;
3309 else if (si_code <= NSIGPOLL)
3312 if (si_code == SI_TIMER)
3314 else if (si_code == SI_SIGIO)
3316 else if (si_code < 0)
3322 static inline char __user *si_expansion(const siginfo_t __user *info)
3324 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3327 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3329 char __user *expansion = si_expansion(to);
3330 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3332 if (clear_user(expansion, SI_EXPANSION_SIZE))
3337 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3338 const siginfo_t __user *from)
3340 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3341 char __user *expansion = si_expansion(from);
3342 char buf[SI_EXPANSION_SIZE];
3345 * An unknown si_code might need more than
3346 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3347 * extra bytes are 0. This guarantees copy_siginfo_to_user
3348 * will return this data to userspace exactly.
3350 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3352 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3360 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3361 const siginfo_t __user *from)
3363 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3365 to->si_signo = signo;
3366 return post_copy_siginfo_from_user(to, from);
3369 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3371 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3373 return post_copy_siginfo_from_user(to, from);
3376 #ifdef CONFIG_COMPAT
3378 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3379 * @to: compat siginfo destination
3380 * @from: kernel siginfo source
3382 * Note: This function does not work properly for the SIGCHLD on x32, but
3383 * fortunately it doesn't have to. The only valid callers for this function are
3384 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3385 * The latter does not care because SIGCHLD will never cause a coredump.
3387 void copy_siginfo_to_external32(struct compat_siginfo *to,
3388 const struct kernel_siginfo *from)
3390 memset(to, 0, sizeof(*to));
3392 to->si_signo = from->si_signo;
3393 to->si_errno = from->si_errno;
3394 to->si_code = from->si_code;
3395 switch(siginfo_layout(from->si_signo, from->si_code)) {
3397 to->si_pid = from->si_pid;
3398 to->si_uid = from->si_uid;
3401 to->si_tid = from->si_tid;
3402 to->si_overrun = from->si_overrun;
3403 to->si_int = from->si_int;
3406 to->si_band = from->si_band;
3407 to->si_fd = from->si_fd;
3410 to->si_addr = ptr_to_compat(from->si_addr);
3412 case SIL_FAULT_TRAPNO:
3413 to->si_addr = ptr_to_compat(from->si_addr);
3414 to->si_trapno = from->si_trapno;
3416 case SIL_FAULT_MCEERR:
3417 to->si_addr = ptr_to_compat(from->si_addr);
3418 to->si_addr_lsb = from->si_addr_lsb;
3420 case SIL_FAULT_BNDERR:
3421 to->si_addr = ptr_to_compat(from->si_addr);
3422 to->si_lower = ptr_to_compat(from->si_lower);
3423 to->si_upper = ptr_to_compat(from->si_upper);
3425 case SIL_FAULT_PKUERR:
3426 to->si_addr = ptr_to_compat(from->si_addr);
3427 to->si_pkey = from->si_pkey;
3429 case SIL_FAULT_PERF_EVENT:
3430 to->si_addr = ptr_to_compat(from->si_addr);
3431 to->si_perf_data = from->si_perf_data;
3432 to->si_perf_type = from->si_perf_type;
3433 to->si_perf_flags = from->si_perf_flags;
3436 to->si_pid = from->si_pid;
3437 to->si_uid = from->si_uid;
3438 to->si_status = from->si_status;
3439 to->si_utime = from->si_utime;
3440 to->si_stime = from->si_stime;
3443 to->si_pid = from->si_pid;
3444 to->si_uid = from->si_uid;
3445 to->si_int = from->si_int;
3448 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3449 to->si_syscall = from->si_syscall;
3450 to->si_arch = from->si_arch;
3455 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3456 const struct kernel_siginfo *from)
3458 struct compat_siginfo new;
3460 copy_siginfo_to_external32(&new, from);
3461 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3466 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3467 const struct compat_siginfo *from)
3470 to->si_signo = from->si_signo;
3471 to->si_errno = from->si_errno;
3472 to->si_code = from->si_code;
3473 switch(siginfo_layout(from->si_signo, from->si_code)) {
3475 to->si_pid = from->si_pid;
3476 to->si_uid = from->si_uid;
3479 to->si_tid = from->si_tid;
3480 to->si_overrun = from->si_overrun;
3481 to->si_int = from->si_int;
3484 to->si_band = from->si_band;
3485 to->si_fd = from->si_fd;
3488 to->si_addr = compat_ptr(from->si_addr);
3490 case SIL_FAULT_TRAPNO:
3491 to->si_addr = compat_ptr(from->si_addr);
3492 to->si_trapno = from->si_trapno;
3494 case SIL_FAULT_MCEERR:
3495 to->si_addr = compat_ptr(from->si_addr);
3496 to->si_addr_lsb = from->si_addr_lsb;
3498 case SIL_FAULT_BNDERR:
3499 to->si_addr = compat_ptr(from->si_addr);
3500 to->si_lower = compat_ptr(from->si_lower);
3501 to->si_upper = compat_ptr(from->si_upper);
3503 case SIL_FAULT_PKUERR:
3504 to->si_addr = compat_ptr(from->si_addr);
3505 to->si_pkey = from->si_pkey;
3507 case SIL_FAULT_PERF_EVENT:
3508 to->si_addr = compat_ptr(from->si_addr);
3509 to->si_perf_data = from->si_perf_data;
3510 to->si_perf_type = from->si_perf_type;
3511 to->si_perf_flags = from->si_perf_flags;
3514 to->si_pid = from->si_pid;
3515 to->si_uid = from->si_uid;
3516 to->si_status = from->si_status;
3517 #ifdef CONFIG_X86_X32_ABI
3518 if (in_x32_syscall()) {
3519 to->si_utime = from->_sifields._sigchld_x32._utime;
3520 to->si_stime = from->_sifields._sigchld_x32._stime;
3524 to->si_utime = from->si_utime;
3525 to->si_stime = from->si_stime;
3529 to->si_pid = from->si_pid;
3530 to->si_uid = from->si_uid;
3531 to->si_int = from->si_int;
3534 to->si_call_addr = compat_ptr(from->si_call_addr);
3535 to->si_syscall = from->si_syscall;
3536 to->si_arch = from->si_arch;
3542 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3543 const struct compat_siginfo __user *ufrom)
3545 struct compat_siginfo from;
3547 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3550 from.si_signo = signo;
3551 return post_copy_siginfo_from_user32(to, &from);
3554 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3555 const struct compat_siginfo __user *ufrom)
3557 struct compat_siginfo from;
3559 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3562 return post_copy_siginfo_from_user32(to, &from);
3564 #endif /* CONFIG_COMPAT */
3567 * do_sigtimedwait - wait for queued signals specified in @which
3568 * @which: queued signals to wait for
3569 * @info: if non-null, the signal's siginfo is returned here
3570 * @ts: upper bound on process time suspension
3572 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3573 const struct timespec64 *ts)
3575 ktime_t *to = NULL, timeout = KTIME_MAX;
3576 struct task_struct *tsk = current;
3577 sigset_t mask = *which;
3582 if (!timespec64_valid(ts))
3584 timeout = timespec64_to_ktime(*ts);
3589 * Invert the set of allowed signals to get those we want to block.
3591 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3594 spin_lock_irq(&tsk->sighand->siglock);
3595 sig = dequeue_signal(tsk, &mask, info, &type);
3596 if (!sig && timeout) {
3598 * None ready, temporarily unblock those we're interested
3599 * while we are sleeping in so that we'll be awakened when
3600 * they arrive. Unblocking is always fine, we can avoid
3601 * set_current_blocked().
3603 tsk->real_blocked = tsk->blocked;
3604 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3605 recalc_sigpending();
3606 spin_unlock_irq(&tsk->sighand->siglock);
3608 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3609 ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3611 spin_lock_irq(&tsk->sighand->siglock);
3612 __set_task_blocked(tsk, &tsk->real_blocked);
3613 sigemptyset(&tsk->real_blocked);
3614 sig = dequeue_signal(tsk, &mask, info, &type);
3616 spin_unlock_irq(&tsk->sighand->siglock);
3620 return ret ? -EINTR : -EAGAIN;
3624 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3626 * @uthese: queued signals to wait for
3627 * @uinfo: if non-null, the signal's siginfo is returned here
3628 * @uts: upper bound on process time suspension
3629 * @sigsetsize: size of sigset_t type
3631 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3632 siginfo_t __user *, uinfo,
3633 const struct __kernel_timespec __user *, uts,
3637 struct timespec64 ts;
3638 kernel_siginfo_t info;
3641 /* XXX: Don't preclude handling different sized sigset_t's. */
3642 if (sigsetsize != sizeof(sigset_t))
3645 if (copy_from_user(&these, uthese, sizeof(these)))
3649 if (get_timespec64(&ts, uts))
3653 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3655 if (ret > 0 && uinfo) {
3656 if (copy_siginfo_to_user(uinfo, &info))
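/*
 * Illustrative user-space sketch (not part of this file): the usual
 * sigtimedwait(2) pattern is to block the signal first so it stays queued
 * instead of being delivered asynchronously, then dequeue it here with a
 * timeout. Hypothetical helper; error handling trimmed.
 *
 *	#include <signal.h>
 *
 *	int wait_for_sigusr1(int timeout_sec)
 *	{
 *		sigset_t set;
 *		siginfo_t info;
 *		struct timespec ts = { .tv_sec = timeout_sec };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);	// keep it queued
 *
 *		// Returns the signal number, or -1 with errno == EAGAIN on timeout.
 *		return sigtimedwait(&set, &info, &ts);
 *	}
 */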
3663 #ifdef CONFIG_COMPAT_32BIT_TIME
3664 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3665 siginfo_t __user *, uinfo,
3666 const struct old_timespec32 __user *, uts,
3670 struct timespec64 ts;
3671 kernel_siginfo_t info;
3674 if (sigsetsize != sizeof(sigset_t))
3677 if (copy_from_user(&these, uthese, sizeof(these)))
3681 if (get_old_timespec32(&ts, uts))
3685 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3687 if (ret > 0 && uinfo) {
3688 if (copy_siginfo_to_user(uinfo, &info))
3696 #ifdef CONFIG_COMPAT
3697 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3698 struct compat_siginfo __user *, uinfo,
3699 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3702 struct timespec64 t;
3703 kernel_siginfo_t info;
3706 if (sigsetsize != sizeof(sigset_t))
3709 if (get_compat_sigset(&s, uthese))
3713 if (get_timespec64(&t, uts))
3717 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3719 if (ret > 0 && uinfo) {
3720 if (copy_siginfo_to_user32(uinfo, &info))
3727 #ifdef CONFIG_COMPAT_32BIT_TIME
3728 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3729 struct compat_siginfo __user *, uinfo,
3730 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3733 struct timespec64 t;
3734 kernel_siginfo_t info;
3737 if (sigsetsize != sizeof(sigset_t))
3740 if (get_compat_sigset(&s, uthese))
3744 if (get_old_timespec32(&t, uts))
3748 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3750 if (ret > 0 && uinfo) {
3751 if (copy_siginfo_to_user32(uinfo, &info))
3760 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3762 clear_siginfo(info);
3763 info->si_signo = sig;
3765 info->si_code = SI_USER;
3766 info->si_pid = task_tgid_vnr(current);
3767 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3771 * sys_kill - send a signal to a process
3772 * @pid: the PID of the process
3773 * @sig: signal to be sent
3775 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3777 struct kernel_siginfo info;
3779 prepare_kill_siginfo(sig, &info);
3781 return kill_something_info(sig, &info, pid);
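/*
 * Illustrative user-space sketch (not part of this file): besides normal
 * delivery, kill(2) with signal 0 performs only the permission and
 * existence checks, which makes it the usual "is this PID alive?" probe.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <stdbool.h>
 *
 *	bool pid_exists(pid_t pid)
 *	{
 *		if (kill(pid, 0) == 0)
 *			return true;		// exists and we may signal it
 *		return errno == EPERM;		// exists, but owned by someone else
 *	}
 */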
3785 * Verify that the signaler and signalee either are in the same pid namespace
3786 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3789 static bool access_pidfd_pidns(struct pid *pid)
3791 struct pid_namespace *active = task_active_pid_ns(current);
3792 struct pid_namespace *p = ns_of_pid(pid);
3805 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3806 siginfo_t __user *info)
3808 #ifdef CONFIG_COMPAT
3810 * Avoid hooking up compat syscalls and instead handle necessary
3811 * conversions here. Note, this is a stop-gap measure and should not be
3812 * considered a generic solution.
3814 if (in_compat_syscall())
3815 return copy_siginfo_from_user32(
3816 kinfo, (struct compat_siginfo __user *)info);
3818 return copy_siginfo_from_user(kinfo, info);
3821 static struct pid *pidfd_to_pid(const struct file *file)
3825 pid = pidfd_pid(file);
3829 return tgid_pidfd_to_pid(file);
3833 * sys_pidfd_send_signal - Signal a process through a pidfd
3834 * @pidfd: file descriptor of the process
3835 * @sig: signal to send
3836 * @info: signal info
3837 * @flags: future flags
3839 * The syscall currently only signals via PIDTYPE_PID which covers
3840 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3842 * In order to extend the syscall to threads and process groups the @flags
3843 * argument should be used. In essence, the @flags argument will determine
3844 * what is signaled and not the file descriptor itself. Put in other words,
3845 * grouping is a property of the flags argument not a property of the file
3848 * Return: 0 on success, negative errno on failure
3850 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3851 siginfo_t __user *, info, unsigned int, flags)
3856 kernel_siginfo_t kinfo;
3858 /* Enforce flags be set to 0 until we add an extension. */
3866 /* Is this a pidfd? */
3867 pid = pidfd_to_pid(f.file);
3874 if (!access_pidfd_pidns(pid))
3878 ret = copy_siginfo_from_user_any(&kinfo, info);
3883 if (unlikely(sig != kinfo.si_signo))
3886 /* Only allow sending arbitrary signals to yourself. */
3888 if ((task_pid(current) != pid) &&
3889 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3892 prepare_kill_siginfo(sig, &kinfo);
3895 ret = kill_pid_info(sig, &kinfo, pid);
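/*
 * Illustrative user-space sketch (not part of this file): signalling
 * through a pidfd avoids PID-reuse races. Assumes a libc whose headers
 * define SYS_pidfd_open and SYS_pidfd_send_signal; dedicated wrappers may
 * not exist.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int terminate_via_pidfd(pid_t pid)
 *	{
 *		int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *		if (pidfd < 0)
 *			return -1;
 *		// NULL siginfo: the kernel builds a kill()-style SI_USER record.
 *		ret = syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 */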
3903 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3905 struct task_struct *p;
3909 p = find_task_by_vpid(pid);
3910 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3911 error = check_kill_permission(sig, info, p);
3913 * The null signal is a permissions and process existence
3914 * probe. No signal is actually delivered.
3916 if (!error && sig) {
3917 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3919 * If lock_task_sighand() failed we pretend the task
3920 * dies after receiving the signal. The window is tiny,
3921 * and the signal is private anyway.
3923 if (unlikely(error == -ESRCH))
3932 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3934 struct kernel_siginfo info;
3936 clear_siginfo(&info);
3937 info.si_signo = sig;
3939 info.si_code = SI_TKILL;
3940 info.si_pid = task_tgid_vnr(current);
3941 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3943 return do_send_specific(tgid, pid, sig, &info);
3947 * sys_tgkill - send signal to one specific thread
3948 * @tgid: the thread group ID of the thread
3949 * @pid: the PID of the thread
3950 * @sig: signal to be sent
3952 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3953 * exists but no longer belongs to the target process. This
3954 * method solves the problem of threads exiting and PIDs getting reused.
3956 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3958 /* This is only valid for single tasks */
3959 if (pid <= 0 || tgid <= 0)
3962 return do_tkill(tgid, pid, sig);
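/*
 * Illustrative user-space sketch (not part of this file): pthread_kill(3)
 * is built on tgkill so that a recycled TID in a different process cannot
 * be hit by mistake. Assumes @tid was obtained via gettid() in a thread of
 * the calling process.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int signal_own_thread(pid_t tid, int sig)
 *	{
 *		// The tgid argument makes this fail with ESRCH if the TID now
 *		// belongs to a thread outside our thread group.
 *		return syscall(SYS_tgkill, getpid(), tid, sig);
 *	}
 */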
3966 * sys_tkill - send signal to one specific task
3967 * @pid: the PID of the task
3968 * @sig: signal to be sent
3970 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3972 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3974 /* This is only valid for single tasks */
3978 return do_tkill(0, pid, sig);
3981 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3983 /* Not even root can pretend to send signals from the kernel.
3984 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3986 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3987 (task_pid_vnr(current) != pid))
3990 /* POSIX.1b doesn't mention process groups. */
3991 return kill_proc_info(sig, info, pid);
3995 * sys_rt_sigqueueinfo - send signal information to a signal
3996 * @pid: the PID of the thread
3997 * @sig: signal to be sent
3998 * @uinfo: signal info to be sent
4000 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4001 siginfo_t __user *, uinfo)
4003 kernel_siginfo_t info;
4004 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4007 return do_rt_sigqueueinfo(pid, sig, &info);
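/*
 * Illustrative user-space sketch (not part of this file): sigqueue(3) is
 * the portable front end for rt_sigqueueinfo - the sender attaches an
 * integer or pointer payload that the receiver sees in si_value, with
 * si_code set to SI_QUEUE.
 *
 *	#include <signal.h>
 *
 *	int send_progress(pid_t pid, int percent)
 *	{
 *		union sigval v = { .sival_int = percent };
 *
 *		return sigqueue(pid, SIGRTMIN, v);
 *	}
 */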
4010 #ifdef CONFIG_COMPAT
4011 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4014 struct compat_siginfo __user *, uinfo)
4016 kernel_siginfo_t info;
4017 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4020 return do_rt_sigqueueinfo(pid, sig, &info);
4024 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4026 /* This is only valid for single tasks */
4027 if (pid <= 0 || tgid <= 0)
4030 /* Not even root can pretend to send signals from the kernel.
4031 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4033 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4034 (task_pid_vnr(current) != pid))
4037 return do_send_specific(tgid, pid, sig, info);
4040 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4041 siginfo_t __user *, uinfo)
4043 kernel_siginfo_t info;
4044 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4047 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4050 #ifdef CONFIG_COMPAT
4051 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4055 struct compat_siginfo __user *, uinfo)
4057 kernel_siginfo_t info;
4058 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4061 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4066 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4068 void kernel_sigaction(int sig, __sighandler_t action)
4070 spin_lock_irq(&current->sighand->siglock);
4071 current->sighand->action[sig - 1].sa.sa_handler = action;
4072 if (action == SIG_IGN) {
4076 sigaddset(&mask, sig);
4078 flush_sigqueue_mask(&mask, ¤t->signal->shared_pending);
4079 flush_sigqueue_mask(&mask, ¤t->pending);
4080 recalc_sigpending();
4082 spin_unlock_irq(&current->sighand->siglock);
4084 EXPORT_SYMBOL(kernel_sigaction);
4086 void __weak sigaction_compat_abi(struct k_sigaction *act,
4087 struct k_sigaction *oact)
4091 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4093 struct task_struct *p = current, *t;
4094 struct k_sigaction *k;
4097 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4100 k = &p->sighand->action[sig-1];
4102 spin_lock_irq(&p->sighand->siglock);
4103 if (k->sa.sa_flags & SA_IMMUTABLE) {
4104 spin_unlock_irq(&p->sighand->siglock);
4111 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4112 * e.g. by having an architecture use the bit in their uapi.
4114 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4117 * Clear unknown flag bits in order to allow userspace to detect missing
4118 * support for flag bits and to allow the kernel to use non-uapi bits
4122 act->sa.sa_flags &= UAPI_SA_FLAGS;
4124 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4126 sigaction_compat_abi(act, oact);
4129 sigdelsetmask(&act->sa.sa_mask,
4130 sigmask(SIGKILL) | sigmask(SIGSTOP));
4134 * "Setting a signal action to SIG_IGN for a signal that is
4135 * pending shall cause the pending signal to be discarded,
4136 * whether or not it is blocked."
4138 * "Setting a signal action to SIG_DFL for a signal that is
4139 * pending and whose default action is to ignore the signal
4140 * (for example, SIGCHLD), shall cause the pending signal to
4141 * be discarded, whether or not it is blocked"
4143 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4145 sigaddset(&mask, sig);
4146 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4147 for_each_thread(p, t)
4148 flush_sigqueue_mask(&mask, &t->pending);
4152 spin_unlock_irq(&p->sighand->siglock);
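/*
 * Illustrative user-space sketch (not part of this file): the POSIX rule
 * quoted above is observable from userspace - a signal left pending while
 * blocked disappears once its action is set to SIG_IGN.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	void discard_pending_demo(void)
 *	{
 *		sigset_t set, pending;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		raise(SIGUSR1);				// now pending
 *
 *		signal(SIGUSR1, SIG_IGN);		// flushes the pending instance
 *
 *		sigpending(&pending);
 *		printf("still pending: %d\n", sigismember(&pending, SIGUSR1));	// 0
 *	}
 */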
4156 #ifdef CONFIG_DYNAMIC_SIGFRAME
4157 static inline void sigaltstack_lock(void)
4158 __acquires(&current->sighand->siglock)
4160 spin_lock_irq(&current->sighand->siglock);
4163 static inline void sigaltstack_unlock(void)
4164 __releases(&current->sighand->siglock)
4166 spin_unlock_irq(&current->sighand->siglock);
4169 static inline void sigaltstack_lock(void) { }
4170 static inline void sigaltstack_unlock(void) { }
4174 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4177 struct task_struct *t = current;
4181 memset(oss, 0, sizeof(stack_t));
4182 oss->ss_sp = (void __user *) t->sas_ss_sp;
4183 oss->ss_size = t->sas_ss_size;
4184 oss->ss_flags = sas_ss_flags(sp) |
4185 (current->sas_ss_flags & SS_FLAG_BITS);
4189 void __user *ss_sp = ss->ss_sp;
4190 size_t ss_size = ss->ss_size;
4191 unsigned ss_flags = ss->ss_flags;
4194 if (unlikely(on_sig_stack(sp)))
4197 ss_mode = ss_flags & ~SS_FLAG_BITS;
4198 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4203 * Return before taking any locks if no actual
4204 * sigaltstack changes were requested.
4206 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4207 t->sas_ss_size == ss_size &&
4208 t->sas_ss_flags == ss_flags)
4212 if (ss_mode == SS_DISABLE) {
4216 if (unlikely(ss_size < min_ss_size))
4218 if (!sigaltstack_size_valid(ss_size))
4222 t->sas_ss_sp = (unsigned long) ss_sp;
4223 t->sas_ss_size = ss_size;
4224 t->sas_ss_flags = ss_flags;
4226 sigaltstack_unlock();
4231 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4235 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4237 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4238 current_user_stack_pointer(),
4240 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
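/*
 * Illustrative user-space sketch (not part of this file): the classic use
 * of sigaltstack(2) is handling stack-overflow SIGSEGV, which is only
 * possible if the handler runs on a separate stack via SA_ONSTACK.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	void install_segv_handler(void (*handler)(int))
 *	{
 *		static stack_t ss;
 *		struct sigaction sa = { 0 };
 *
 *		ss.ss_sp = malloc(SIGSTKSZ);
 *		ss.ss_size = SIGSTKSZ;
 *		ss.ss_flags = 0;
 *		sigaltstack(&ss, NULL);			// register the alternate stack
 *
 *		sa.sa_handler = handler;
 *		sa.sa_flags = SA_ONSTACK;		// deliver SIGSEGV on that stack
 *		sigemptyset(&sa.sa_mask);
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */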
4245 int restore_altstack(const stack_t __user *uss)
4248 if (copy_from_user(&new, uss, sizeof(stack_t)))
4250 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4252 /* squash all but EFAULT for now */
4256 int __save_altstack(stack_t __user *uss, unsigned long sp)
4258 struct task_struct *t = current;
4259 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4260 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4261 __put_user(t->sas_ss_size, &uss->ss_size);
4265 #ifdef CONFIG_COMPAT
4266 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4267 compat_stack_t __user *uoss_ptr)
4273 compat_stack_t uss32;
4274 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4276 uss.ss_sp = compat_ptr(uss32.ss_sp);
4277 uss.ss_flags = uss32.ss_flags;
4278 uss.ss_size = uss32.ss_size;
4280 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4281 compat_user_stack_pointer(),
4282 COMPAT_MINSIGSTKSZ);
4283 if (ret >= 0 && uoss_ptr) {
4285 memset(&old, 0, sizeof(old));
4286 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4287 old.ss_flags = uoss.ss_flags;
4288 old.ss_size = uoss.ss_size;
4289 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4295 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4296 const compat_stack_t __user *, uss_ptr,
4297 compat_stack_t __user *, uoss_ptr)
4299 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4302 int compat_restore_altstack(const compat_stack_t __user *uss)
4304 int err = do_compat_sigaltstack(uss, NULL);
4305 /* squash all but -EFAULT for now */
4306 return err == -EFAULT ? err : 0;
4309 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4312 struct task_struct *t = current;
4313 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4315 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4316 __put_user(t->sas_ss_size, &uss->ss_size);
4321 #ifdef __ARCH_WANT_SYS_SIGPENDING
4324 * sys_sigpending - examine pending signals
4325 * @uset: where mask of pending signal is returned
4327 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4331 if (sizeof(old_sigset_t) > sizeof(*uset))
4334 do_sigpending(&set);
4336 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4342 #ifdef CONFIG_COMPAT
4343 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4347 do_sigpending(&set);
4349 return put_user(set.sig[0], set32);
4355 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4357 * sys_sigprocmask - examine and change blocked signals
4358 * @how: whether to add, remove, or set signals
4359 * @nset: signals to add or remove (if non-null)
4360 * @oset: previous value of signal mask if non-null
4362 * Some platforms have their own version with special arguments;
4363 * others support only sys_rt_sigprocmask.
4366 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4367 old_sigset_t __user *, oset)
4369 old_sigset_t old_set, new_set;
4370 sigset_t new_blocked;
4372 old_set = current->blocked.sig[0];
4375 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4378 new_blocked = current->blocked;
4382 sigaddsetmask(&new_blocked, new_set);
4385 sigdelsetmask(&new_blocked, new_set);
4388 new_blocked.sig[0] = new_set;
4394 set_current_blocked(&new_blocked);
4398 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4404 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4406 #ifndef CONFIG_ODD_RT_SIGACTION
4408 * sys_rt_sigaction - alter an action taken by a process
4409 * @sig: signal to be sent
4410 * @act: new sigaction
4411 * @oact: used to save the previous sigaction
4412 * @sigsetsize: size of sigset_t type
4414 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4415 const struct sigaction __user *, act,
4416 struct sigaction __user *, oact,
4419 struct k_sigaction new_sa, old_sa;
4422 /* XXX: Don't preclude handling different sized sigset_t's. */
4423 if (sigsetsize != sizeof(sigset_t))
4426 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4429 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4433 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4438 #ifdef CONFIG_COMPAT
4439 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4440 const struct compat_sigaction __user *, act,
4441 struct compat_sigaction __user *, oact,
4442 compat_size_t, sigsetsize)
4444 struct k_sigaction new_ka, old_ka;
4445 #ifdef __ARCH_HAS_SA_RESTORER
4446 compat_uptr_t restorer;
4450 /* XXX: Don't preclude handling different sized sigset_t's. */
4451 if (sigsetsize != sizeof(compat_sigset_t))
4455 compat_uptr_t handler;
4456 ret = get_user(handler, &act->sa_handler);
4457 new_ka.sa.sa_handler = compat_ptr(handler);
4458 #ifdef __ARCH_HAS_SA_RESTORER
4459 ret |= get_user(restorer, &act->sa_restorer);
4460 new_ka.sa.sa_restorer = compat_ptr(restorer);
4462 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4463 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4468 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4470 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4472 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4473 sizeof(oact->sa_mask));
4474 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4475 #ifdef __ARCH_HAS_SA_RESTORER
4476 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4477 &oact->sa_restorer);
4483 #endif /* !CONFIG_ODD_RT_SIGACTION */
4485 #ifdef CONFIG_OLD_SIGACTION
4486 SYSCALL_DEFINE3(sigaction, int, sig,
4487 const struct old_sigaction __user *, act,
4488 struct old_sigaction __user *, oact)
4490 struct k_sigaction new_ka, old_ka;
4495 if (!access_ok(act, sizeof(*act)) ||
4496 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4497 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4498 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4499 __get_user(mask, &act->sa_mask))
4501 #ifdef __ARCH_HAS_KA_RESTORER
4502 new_ka.ka_restorer = NULL;
4504 siginitset(&new_ka.sa.sa_mask, mask);
4507 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4510 if (!access_ok(oact, sizeof(*oact)) ||
4511 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4512 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4513 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4514 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4521 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4522 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4523 const struct compat_old_sigaction __user *, act,
4524 struct compat_old_sigaction __user *, oact)
4526 struct k_sigaction new_ka, old_ka;
4528 compat_old_sigset_t mask;
4529 compat_uptr_t handler, restorer;
4532 if (!access_ok(act, sizeof(*act)) ||
4533 __get_user(handler, &act->sa_handler) ||
4534 __get_user(restorer, &act->sa_restorer) ||
4535 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4536 __get_user(mask, &act->sa_mask))
4539 #ifdef __ARCH_HAS_KA_RESTORER
4540 new_ka.ka_restorer = NULL;
4542 new_ka.sa.sa_handler = compat_ptr(handler);
4543 new_ka.sa.sa_restorer = compat_ptr(restorer);
4544 siginitset(&new_ka.sa.sa_mask, mask);
4547 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4550 if (!access_ok(oact, sizeof(*oact)) ||
4551 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4552 &oact->sa_handler) ||
4553 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4554 &oact->sa_restorer) ||
4555 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4556 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4563 #ifdef CONFIG_SGETMASK_SYSCALL
4566 * For backwards compatibility. Functionality superseded by sigprocmask.
4568 SYSCALL_DEFINE0(sgetmask)
4571 return current->blocked.sig[0];
4574 SYSCALL_DEFINE1(ssetmask, int, newmask)
4576 int old = current->blocked.sig[0];
4579 siginitset(&newset, newmask);
4580 set_current_blocked(&newset);
4584 #endif /* CONFIG_SGETMASK_SYSCALL */
4586 #ifdef __ARCH_WANT_SYS_SIGNAL
4588 * For backwards compatibility. Functionality superseded by sigaction.
4590 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4592 struct k_sigaction new_sa, old_sa;
4595 new_sa.sa.sa_handler = handler;
4596 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4597 sigemptyset(&new_sa.sa.sa_mask);
4599 ret = do_sigaction(sig, &new_sa, &old_sa);
4601 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4603 #endif /* __ARCH_WANT_SYS_SIGNAL */
4605 #ifdef __ARCH_WANT_SYS_PAUSE
4607 SYSCALL_DEFINE0(pause)
4609 while (!signal_pending(current)) {
4610 __set_current_state(TASK_INTERRUPTIBLE);
4613 return -ERESTARTNOHAND;
4618 static int sigsuspend(sigset_t *set)
4620 current->saved_sigmask = current->blocked;
4621 set_current_blocked(set);
4623 while (!signal_pending(current)) {
4624 __set_current_state(TASK_INTERRUPTIBLE);
4627 set_restore_sigmask();
4628 return -ERESTARTNOHAND;
4632 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4633 * until a signal is received
4634 * @unewset: new signal mask value
4635 * @sigsetsize: size of sigset_t type
4637 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4641 /* XXX: Don't preclude handling different sized sigset_t's. */
4642 if (sigsetsize != sizeof(sigset_t))
4645 if (copy_from_user(&newset, unewset, sizeof(newset)))
4647 return sigsuspend(&newset);
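/*
 * Illustrative user-space sketch (not part of this file): sigsuspend(2)
 * exists to close the race between "test a flag" and "go to sleep" - the
 * mask swap and the sleep happen atomically, exactly as sigsuspend()
 * above does with ->saved_sigmask.
 *
 *	#include <signal.h>
 *
 *	extern volatile sig_atomic_t got_signal;	// set by the handler
 *
 *	void wait_for_handler(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);	// keep SIGUSR1 at bay
 *
 *		while (!got_signal)
 *			sigsuspend(&old);		// atomically unblock and sleep
 *
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */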
4650 #ifdef CONFIG_COMPAT
4651 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4655 /* XXX: Don't preclude handling different sized sigset_t's. */
4656 if (sigsetsize != sizeof(sigset_t))
4659 if (get_compat_sigset(&newset, unewset))
4661 return sigsuspend(&newset);
4665 #ifdef CONFIG_OLD_SIGSUSPEND
4666 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4669 siginitset(&blocked, mask);
4670 return sigsuspend(&blocked);
4673 #ifdef CONFIG_OLD_SIGSUSPEND3
4674 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4677 siginitset(&blocked, mask);
4678 return sigsuspend(&blocked);
4682 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4687 static inline void siginfo_buildtime_checks(void)
4689 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4691 /* Verify the offsets in the two siginfos match */
4692 #define CHECK_OFFSET(field) \
4693 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4696 CHECK_OFFSET(si_pid);
4697 CHECK_OFFSET(si_uid);
4700 CHECK_OFFSET(si_tid);
4701 CHECK_OFFSET(si_overrun);
4702 CHECK_OFFSET(si_value);
4705 CHECK_OFFSET(si_pid);
4706 CHECK_OFFSET(si_uid);
4707 CHECK_OFFSET(si_value);
4710 CHECK_OFFSET(si_pid);
4711 CHECK_OFFSET(si_uid);
4712 CHECK_OFFSET(si_status);
4713 CHECK_OFFSET(si_utime);
4714 CHECK_OFFSET(si_stime);
4717 CHECK_OFFSET(si_addr);
4718 CHECK_OFFSET(si_trapno);
4719 CHECK_OFFSET(si_addr_lsb);
4720 CHECK_OFFSET(si_lower);
4721 CHECK_OFFSET(si_upper);
4722 CHECK_OFFSET(si_pkey);
4723 CHECK_OFFSET(si_perf_data);
4724 CHECK_OFFSET(si_perf_type);
4725 CHECK_OFFSET(si_perf_flags);
4728 CHECK_OFFSET(si_band);
4729 CHECK_OFFSET(si_fd);
4732 CHECK_OFFSET(si_call_addr);
4733 CHECK_OFFSET(si_syscall);
4734 CHECK_OFFSET(si_arch);
4738 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4739 offsetof(struct siginfo, si_addr));
4740 if (sizeof(int) == sizeof(void __user *)) {
4741 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4742 sizeof(void __user *));
4744 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4745 sizeof_field(struct siginfo, si_uid)) !=
4746 sizeof(void __user *));
4747 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4748 offsetof(struct siginfo, si_uid));
4750 #ifdef CONFIG_COMPAT
4751 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4752 offsetof(struct compat_siginfo, si_addr));
4753 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4754 sizeof(compat_uptr_t));
4755 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4756 sizeof_field(struct siginfo, si_pid));
4760 void __init signals_init(void)
4762 siginfo_buildtime_checks();
4764 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4767 #ifdef CONFIG_KGDB_KDB
4768 #include <linux/kdb.h>
4770 * kdb_send_sig - Allows kdb to send signals without exposing
4771 * signal internals. This function checks if the required locks are
4772 * available before calling the main signal code, to avoid kdb
4775 void kdb_send_sig(struct task_struct *t, int sig)
4777 static struct task_struct *kdb_prev_t;
4779 if (!spin_trylock(&t->sighand->siglock)) {
4780 kdb_printf("Can't do kill command now.\n"
4781 "The sigmask lock is held somewhere else in "
4782 "kernel, try again later\n");
4785 new_t = kdb_prev_t != t;
4787 if (!task_is_running(t) && new_t) {
4788 spin_unlock(&t->sighand->siglock);
4789 kdb_printf("Process is not RUNNING, sending a signal from "
4790 "kdb risks deadlock\n"
4791 "on the run queue locks. "
4792 "The signal has _not_ been sent.\n"
4793 "Reissue the kill command if you want to risk "
4797 ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4798 spin_unlock(&t->sighand->siglock);
4800 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4803 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4805 #endif /* CONFIG_KGDB_KDB */