// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

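/*
 * Non-zero when fatal-signal diagnostics are enabled.  Set via the
 * "print-fatal-signals=" boot parameter registered below (see
 * setup_print_fatal_signals()); also gates the ratelimited message in
 * print_dropped_signal().
 */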
int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

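/*
 * Re-check whether @t has anything to react to: pending job control work,
 * a frozen cgroup, or an unblocked signal in either the private or the
 * shared pending set.  Sets TIF_SIGPENDING when it does; it never clears
 * the flag (see the comment at the end of the function).
 */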
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; clearing is left to callers that know
	 * it is safe.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

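/*
 * The synchronous (fault-style) signals above all live in the first word
 * of the signal set, so next_signal() can give them priority over any
 * other pending signal simply by masking the first word before scanning.
 */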
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

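/*
 * The helpers below drop only SI_TIMER-generated entries from a pending
 * queue while retaining every signal that is also pending for another
 * reason, so that stale interval-timer signals are not carried into a
 * context where they no longer apply (across exec, for instance).
 */
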
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case.  We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

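/*
 * SEND_SIG_NOINFO and SEND_SIG_PRIV are small sentinel values rather than
 * real kernel_siginfo pointers; is_si_special() above catches both, and
 * si_fromuser() below treats SEND_SIG_NOINFO as a user-originated send.
 */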
static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

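/*
 * kill(2)-style permission: the sender's real or effective uid must match
 * the target's real or saved uid, or the sender must hold CAP_KILL in the
 * target's user namespace.
 */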
/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, the STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

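/*
 * Non-realtime signals coalesce: if a legacy (sig < SIGRTMIN) signal is
 * already in the pending set, a second instance is dropped rather than
 * queued a second time.
 */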
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			 enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
		       enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
		     enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
		       enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than appears in a 32 bit pointer.  So userspace will
 * not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
ret:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

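/*
 * send_sig() callers pass priv != 0 for kernel-originated signals
 * (SEND_SIG_PRIV, which send_signal() never lets a namespace init ignore)
 * and 0 for user-style sends with SI_USER semantics (SEND_SIG_NOINFO).
 */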
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	return force_sig_info(&info);
}

/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
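/*
 * Typical lifecycle (POSIX timers): sigqueue_alloc() at timer_create()
 * time, send_sigqueue() on each expiry, and sigqueue_free() when the
 * timer is deleted.  send_sigqueue() only bumps si_overrun when the
 * entry is still queued from a previous expiry.
 */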
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

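/*
 * Anyone polling a pidfd for this task's exit is woken above;
 * do_notify_parent() calls this for every exit, whether or not the
 * parent is also sent SIGCHLD.
 */
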
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent, which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

2205 * This must be called with current->sighand->siglock held.
2207 * This should be the path for all ptrace stops.
2208 * We always set current->last_siginfo while stopped here.
2209 * That makes it a way to test a stopped process for
2210 * being ptrace-stopped vs being job-control-stopped.
2212 * If we actually decide not to stop at all because the tracer
2213 * is gone, we keep current->exit_code unless clear_code.
2215 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2216 __releases(¤t->sighand->siglock)
2217 __acquires(¤t->sighand->siglock)
2219 bool gstop_done = false;
2221 if (arch_ptrace_stop_needed(exit_code, info)) {
2223 * The arch code has something special to do before a
2224 * ptrace stop. This is allowed to block, e.g. for faults
2225 * on user stack pages. We can't keep the siglock while
2226 * calling arch_ptrace_stop, so we must release it now.
2227 * To preserve proper semantics, we must do this before
2228 * any signal bookkeeping like checking group_stop_count.
2230 spin_unlock_irq(¤t->sighand->siglock);
2231 arch_ptrace_stop(exit_code, info);
2232 spin_lock_irq(¤t->sighand->siglock);
2236 * schedule() will not sleep if there is a pending signal that
2237 * can awaken the task.
2239 set_special_state(TASK_TRACED);
2242 * We're committing to trapping. TRACED should be visible before
2243 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2244 * Also, transition to TRACED and updates to ->jobctl should be
2245 * atomic with respect to siglock and should be done after the arch
2246 * hook as siglock is released and regrabbed across it.
2251 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2253 * set_current_state() smp_wmb();
2255 * wait_task_stopped()
2256 * task_stopped_code()
2257 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2261 current->last_siginfo = info;
2262 current->exit_code = exit_code;
2265 * If @why is CLD_STOPPED, we're trapping to participate in a group
2266 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
2267 * across siglock relocks since INTERRUPT was scheduled, PENDING
2268 * could be clear now. We act as if SIGCONT is received after
2269 * TASK_TRACED is entered - ignore it.
2271 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2272 gstop_done = task_participate_group_stop(current);
2274 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2275 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2276 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2277 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2279 /* entering a trap, clear TRAPPING */
2280 task_clear_jobctl_trapping(current);
2282 spin_unlock_irq(¤t->sighand->siglock);
2283 read_lock(&tasklist_lock);
2284 if (may_ptrace_stop()) {
2286 * Notify parents of the stop.
2288 * While ptraced, there are two parents - the ptracer and
2289 * the real_parent of the group_leader. The ptracer should
2290 * know about every stop while the real parent is only
2291 * interested in the completion of group stop. The states
2292 * for the two don't interact with each other. Notify
2293 * separately unless they're gonna be duplicates.
2295 do_notify_parent_cldstop(current, true, why);
2296 if (gstop_done && ptrace_reparented(current))
2297 do_notify_parent_cldstop(current, false, why);
2300 * Don't want to allow preemption here, because
2301 * sys_ptrace() needs this task to be inactive.
2303 * XXX: implement read_unlock_no_resched().
2306 read_unlock(&tasklist_lock);
2307 cgroup_enter_frozen();
2308 preempt_enable_no_resched();
2309 freezable_schedule();
2310 cgroup_leave_frozen(true);
2313 * By the time we got the lock, our tracer went away.
2314 * Don't drop the lock yet, another tracer may come.
2316 * If @gstop_done, the ptracer went away between group stop
2317 * completion and here. During detach, it would have set
2318 * JOBCTL_STOP_PENDING on us and we'll re-enter
2319 * TASK_STOPPED in do_signal_stop() on return, so notifying
2320 * the real parent of the group stop completion is enough.
2323 do_notify_parent_cldstop(current, false, why);
2325 /* tasklist protects us from ptrace_freeze_traced() */
2326 __set_current_state(TASK_RUNNING);
2328 current->exit_code = 0;
2329 read_unlock(&tasklist_lock);
2333 * We are back. Now reacquire the siglock before touching
2334 * last_siginfo, so that we are sure to have synchronized with
2335 * any signal-sending on another CPU that wants to examine it.
2337 spin_lock_irq(&current->sighand->siglock);
2338 current->last_siginfo = NULL;
2340 /* LISTENING can be set only during STOP traps, clear it */
2341 current->jobctl &= ~JOBCTL_LISTENING;
2344 * Queued signals ignored us while we were stopped for tracing.
2345 * So check for any that we should take before resuming user mode.
2346 * This sets TIF_SIGPENDING, but never clears it.
2348 recalc_sigpending_tsk(current);
2351 static void ptrace_do_notify(int signr, int exit_code, int why)
2353 kernel_siginfo_t info;
2355 clear_siginfo(&info);
2356 info.si_signo = signr;
2357 info.si_code = exit_code;
2358 info.si_pid = task_pid_vnr(current);
2359 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2361 /* Let the debugger run. */
2362 ptrace_stop(exit_code, why, 1, &info);
2365 void ptrace_notify(int exit_code)
2367 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2368 if (unlikely(current->task_works))
2371 spin_lock_irq(&current->sighand->siglock);
2372 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2373 spin_unlock_irq(&current->sighand->siglock);
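/*
 * Hedged sketch of how callers are expected to build @exit_code: the low
 * byte is SIGTRAP and the ptrace event number occupies bits 8-15, which is
 * exactly what the BUG_ON() above enforces. The ptrace_event() helper in
 * <linux/ptrace.h> follows this pattern:
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
 */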
2377 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2378 * @signr: signr causing group stop if initiating
2380 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2381 * and participate in it. If already set, participate in the existing
2382 * group stop. If participated in a group stop (and thus slept), %true is
2383 * returned with siglock released.
2385 * If ptraced, this function doesn't handle stop itself. Instead,
2386 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2387 * untouched. The caller must ensure that INTERRUPT trap handling takes
2388 * place afterwards.
2391 * Must be called with @current->sighand->siglock held, which is released
2395 * %false if group stop is already cancelled or ptrace trap is scheduled.
2396 * %true if participated in group stop.
2398 static bool do_signal_stop(int signr)
2399 __releases(&current->sighand->siglock)
2401 struct signal_struct *sig = current->signal;
2403 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2404 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2405 struct task_struct *t;
2407 /* signr will be recorded in task->jobctl for retries */
2408 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2410 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2411 unlikely(signal_group_exit(sig)))
2414 * There is no group stop already in progress. We must initiate one now.
2417 * While ptraced, a task may be resumed while group stop is
2418 * still in effect and then receive a stop signal and
2419 * initiate another group stop. This deviates from the
2420 * usual behavior as two consecutive stop signals can't
2421 * cause two group stops when !ptraced. That is why we
2422 * also check !task_is_stopped(t) below.
2424 * The condition can be distinguished by testing whether
2425 * SIGNAL_STOP_STOPPED is already set. Don't generate
2426 * group_exit_code in such case.
2428 * This is not necessary for SIGNAL_STOP_CONTINUED because
2429 * an intervening stop signal is required to cause two
2430 * continued events regardless of ptrace.
2432 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2433 sig->group_exit_code = signr;
2435 sig->group_stop_count = 0;
2437 if (task_set_jobctl_pending(current, signr | gstop))
2438 sig->group_stop_count++;
2441 while_each_thread(current, t) {
2443 * Setting state to TASK_STOPPED for a group
2444 * stop is always done with the siglock held,
2445 * so this check has no races.
2447 if (!task_is_stopped(t) &&
2448 task_set_jobctl_pending(t, signr | gstop)) {
2449 sig->group_stop_count++;
2450 if (likely(!(t->ptrace & PT_SEIZED)))
2451 signal_wake_up(t, 0);
2453 ptrace_trap_notify(t);
2458 if (likely(!current->ptrace)) {
2462 * If there are no other threads in the group, or if there
2463 * is a group stop in progress and we are the last to stop,
2464 * report to the parent.
2466 if (task_participate_group_stop(current))
2467 notify = CLD_STOPPED;
2469 set_special_state(TASK_STOPPED);
2470 spin_unlock_irq(&current->sighand->siglock);
2473 * Notify the parent of the group stop completion. Because
2474 * we're not holding either the siglock or tasklist_lock
2475 * here, the ptracer may attach in between; however, this is for
2476 * group stop and should always be delivered to the real
2477 * parent of the group leader. The new ptracer will get
2478 * its notification when this task transitions into
2482 read_lock(&tasklist_lock);
2483 do_notify_parent_cldstop(current, false, notify);
2484 read_unlock(&tasklist_lock);
2487 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2488 cgroup_enter_frozen();
2489 freezable_schedule();
2493 * While ptraced, group stop is handled by STOP trap.
2494 * Schedule it and let the caller deal with it.
2496 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2502 * do_jobctl_trap - take care of ptrace jobctl traps
2504 * When PT_SEIZED, it's used for both group stop and explicit
2505 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2506 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2507 * the stop signal; otherwise, %SIGTRAP.
2509 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2510 * number as exit_code and no siginfo.
2513 * Must be called with @current->sighand->siglock held, which may be
2514 * released and re-acquired before returning with intervening sleep.
2516 static void do_jobctl_trap(void)
2518 struct signal_struct *signal = current->signal;
2519 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2521 if (current->ptrace & PT_SEIZED) {
2522 if (!signal->group_stop_count &&
2523 !(signal->flags & SIGNAL_STOP_STOPPED))
2525 WARN_ON_ONCE(!signr);
2526 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2529 WARN_ON_ONCE(!signr);
2530 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2531 current->exit_code = 0;
2536 * do_freezer_trap - handle the freezer jobctl trap
2538 * Puts the task into the frozen state, unless the task is about to quit,
2539 * in which case it drops JOBCTL_TRAP_FREEZE.
2542 * Must be called with @current->sighand->siglock held,
2543 * which is always released before returning.
2545 static void do_freezer_trap(void)
2546 __releases(&current->sighand->siglock)
2549 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2550 * let's make another loop to give it a chance to be handled.
2551 * In any case, we'll return back.
2553 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2554 JOBCTL_TRAP_FREEZE) {
2555 spin_unlock_irq(&current->sighand->siglock);
2560 * Now we're sure that there is no pending fatal signal and no
2561 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2562 * immediately (if there is a non-fatal signal pending), and
2563 * put the task into sleep.
2565 __set_current_state(TASK_INTERRUPTIBLE);
2566 clear_thread_flag(TIF_SIGPENDING);
2567 spin_unlock_irq(&current->sighand->siglock);
2568 cgroup_enter_frozen();
2569 freezable_schedule();
2572 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2575 * We do not check sig_kernel_stop(signr) but set this marker
2576 * unconditionally because we do not know whether debugger will
2577 * change signr. This flag has no meaning unless we are going
2578 * to stop after return from ptrace_stop(). In this case it will
2579 * be checked in do_signal_stop(), we should only stop if it was
2580 * not cleared by SIGCONT while we were sleeping. See also the
2581 * comment in dequeue_signal().
2583 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2584 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2586 /* We're back. Did the debugger cancel the sig? */
2587 signr = current->exit_code;
2591 current->exit_code = 0;
2594 * Update the siginfo structure if the signal has
2595 * changed. If the debugger wanted something
2596 * specific in the siginfo structure then it should
2597 * have updated *info via PTRACE_SETSIGINFO.
2599 if (signr != info->si_signo) {
2600 clear_siginfo(info);
2601 info->si_signo = signr;
2603 info->si_code = SI_USER;
2605 info->si_pid = task_pid_vnr(current->parent);
2606 info->si_uid = from_kuid_munged(current_user_ns(),
2607 task_uid(current->parent));
2611 /* If the (new) signal is now blocked, requeue it. */
2612 if (sigismember(&current->blocked, signr)) {
2613 send_signal(signr, info, current, PIDTYPE_PID);
2620 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2622 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2624 case SIL_FAULT_TRAPNO:
2625 case SIL_FAULT_MCEERR:
2626 case SIL_FAULT_BNDERR:
2627 case SIL_FAULT_PKUERR:
2628 case SIL_FAULT_PERF_EVENT:
2629 ksig->info.si_addr = arch_untagged_si_addr(
2630 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2642 bool get_signal(struct ksignal *ksig)
2644 struct sighand_struct *sighand = current->sighand;
2645 struct signal_struct *signal = current->signal;
2648 if (unlikely(current->task_works))
2652 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2653 * that the arch handlers don't all have to do it. If we get here
2654 * without TIF_SIGPENDING, just exit after running signal work.
2656 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2657 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2658 tracehook_notify_signal();
2659 if (!task_sigpending(current))
2663 if (unlikely(uprobe_deny_signal()))
2667 * Do this once, we can't return to user-mode if freezing() == T.
2668 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2669 * thus do not need another check after return.
2674 spin_lock_irq(&sighand->siglock);
2677 * Every stopped thread goes here after wakeup. Check to see if
2678 * we should notify the parent, prepare_signal(SIGCONT) encodes
2679 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2681 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2684 if (signal->flags & SIGNAL_CLD_CONTINUED)
2685 why = CLD_CONTINUED;
2689 signal->flags &= ~SIGNAL_CLD_MASK;
2691 spin_unlock_irq(&sighand->siglock);
2694 * Notify the parent that we're continuing. This event is
2695 * always per-process and doesn't make a whole lot of sense
2696 * for ptracers, who shouldn't consume the state via
2697 * wait(2) either, but, for backward compatibility, notify
2698 * the ptracer of the group leader too unless it's gonna be a duplicate.
2701 read_lock(&tasklist_lock);
2702 do_notify_parent_cldstop(current, false, why);
2704 if (ptrace_reparented(current->group_leader))
2705 do_notify_parent_cldstop(current->group_leader,
2707 read_unlock(&tasklist_lock);
2712 /* Has this task already been marked for death? */
2713 if (signal_group_exit(signal)) {
2714 ksig->info.si_signo = signr = SIGKILL;
2715 sigdelset(&current->pending.signal, SIGKILL);
2716 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2717 &sighand->action[SIGKILL - 1]);
2718 recalc_sigpending();
2723 struct k_sigaction *ka;
2725 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2729 if (unlikely(current->jobctl &
2730 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2731 if (current->jobctl & JOBCTL_TRAP_MASK) {
2733 spin_unlock_irq(&sighand->siglock);
2734 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2741 * If the task is leaving the frozen state, let's update
2742 * cgroup counters and reset the frozen bit.
2744 if (unlikely(cgroup_task_frozen(current))) {
2745 spin_unlock_irq(&sighand->siglock);
2746 cgroup_leave_frozen(false);
2751 * Signals generated by the execution of an instruction
2752 * need to be delivered before any other pending signals
2753 * so that the instruction pointer in the signal stack
2754 * frame points to the faulting instruction.
2756 signr = dequeue_synchronous_signal(&ksig->info);
2758 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2761 break; /* will return 0 */
2763 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2764 !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2765 signr = ptrace_signal(signr, &ksig->info);
2770 ka = &sighand->action[signr - 1];
2772 /* Trace actually delivered signals. */
2773 trace_signal_deliver(signr, &ksig->info, ka);
2775 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2777 if (ka->sa.sa_handler != SIG_DFL) {
2778 /* Run the handler. */
2781 if (ka->sa.sa_flags & SA_ONESHOT)
2782 ka->sa.sa_handler = SIG_DFL;
2784 break; /* will return non-zero "signr" value */
2788 * Now we are doing the default action for this signal.
2790 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2794 * Global init gets no signals it doesn't want.
2795 * Container-init gets no signals it doesn't want from same container.
2798 * Note that if global/container-init sees a sig_kernel_only()
2799 * signal here, the signal must have been generated internally
2800 * or must have come from an ancestor namespace. In either
2801 * case, the signal cannot be dropped.
2803 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2804 !sig_kernel_only(signr))
2807 if (sig_kernel_stop(signr)) {
2809 * The default action is to stop all threads in
2810 * the thread group. The job control signals
2811 * do nothing in an orphaned pgrp, but SIGSTOP
2812 * always works. Note that siglock needs to be
2813 * dropped during the call to is_orphaned_pgrp()
2814 * because of lock ordering with tasklist_lock.
2815 * This allows an intervening SIGCONT to be posted.
2816 * We need to check for that and bail out if necessary.
2818 if (signr != SIGSTOP) {
2819 spin_unlock_irq(&sighand->siglock);
2821 /* signals can be posted during this window */
2823 if (is_current_pgrp_orphaned())
2826 spin_lock_irq(&sighand->siglock);
2829 if (likely(do_signal_stop(ksig->info.si_signo))) {
2830 /* It released the siglock. */
2835 * We didn't actually stop, due to a race
2836 * with SIGCONT or something like that.
2842 spin_unlock_irq(&sighand->siglock);
2843 if (unlikely(cgroup_task_frozen(current)))
2844 cgroup_leave_frozen(true);
2847 * Anything else is fatal, maybe with a core dump.
2849 current->flags |= PF_SIGNALED;
2851 if (sig_kernel_coredump(signr)) {
2852 if (print_fatal_signals)
2853 print_fatal_signal(ksig->info.si_signo);
2854 proc_coredump_connector(current);
2856 * If it was able to dump core, this kills all
2857 * other threads in the group and synchronizes with
2858 * their demise. If we lost the race with another
2859 * thread getting here, it set group_exit_code
2860 * first and our do_group_exit call below will use
2861 * that value and ignore the one we pass it.
2863 do_coredump(&ksig->info);
2867 * PF_IO_WORKER threads will catch and exit on fatal signals
2868 * themselves. They have cleanup that must be performed, so
2869 * we cannot call do_exit() on their behalf.
2871 if (current->flags & PF_IO_WORKER)
2875 * Death signals, no core dump.
2877 do_group_exit(ksig->info.si_signo);
2880 spin_unlock_irq(&sighand->siglock);
2884 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2885 hide_si_addr_tag_bits(ksig);
2887 return ksig->sig > 0;
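/*
 * Illustrative sketch (not taken from any particular architecture) of how
 * arch signal-delivery code consumes get_signal(); arch_setup_frame() is a
 * hypothetical stand-in for the arch's sigframe builder and @stepping for
 * its single-step flag:
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig)) {
 *		// Build the handler frame on the user stack, then report
 *		// delivery so blocked-mask bookkeeping and tracing happen.
 *		int failed = arch_setup_frame(&ksig, regs);
 *		signal_setup_done(failed, &ksig, stepping);
 *	}
 */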
2891 * signal_delivered - finish bookkeeping after a signal was delivered
2892 * @ksig: kernel signal struct
2893 * @stepping: nonzero if debugger single-step or block-step in use
2895 * This function should be called when a signal has successfully been
2896 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2897 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2898 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2900 static void signal_delivered(struct ksignal *ksig, int stepping)
2904 /* A signal was successfully delivered, and the
2905 saved sigmask was stored on the signal frame,
2906 and will be restored by sigreturn. So we can
2907 simply clear the restore sigmask flag. */
2908 clear_restore_sigmask();
2910 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2911 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2912 sigaddset(&blocked, ksig->sig);
2913 set_current_blocked(&blocked);
2914 if (current->sas_ss_flags & SS_AUTODISARM)
2915 sas_ss_reset(current);
2916 tracehook_signal_handler(stepping);
2919 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2922 force_sigsegv(ksig->sig);
2924 signal_delivered(ksig, stepping);
2928 * It could be that complete_signal() picked us to notify about the
2929 * group-wide signal. Other threads should be notified now to take
2930 * the shared signals in @which since we will not.
2932 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2935 struct task_struct *t;
2937 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2938 if (sigisemptyset(&retarget))
2942 while_each_thread(tsk, t) {
2943 if (t->flags & PF_EXITING)
2946 if (!has_pending_signals(&retarget, &t->blocked))
2948 /* Remove the signals this thread can handle. */
2949 sigandsets(&retarget, &retarget, &t->blocked);
2951 if (!task_sigpending(t))
2952 signal_wake_up(t, 0);
2954 if (sigisemptyset(&retarget))
2959 void exit_signals(struct task_struct *tsk)
2965 * @tsk is about to have PF_EXITING set - lock out users which
2966 * expect stable threadgroup.
2968 cgroup_threadgroup_change_begin(tsk);
2970 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2971 tsk->flags |= PF_EXITING;
2972 cgroup_threadgroup_change_end(tsk);
2976 spin_lock_irq(&tsk->sighand->siglock);
2978 * From now this task is not visible for group-wide signals,
2979 * see wants_signal(), do_signal_stop().
2981 tsk->flags |= PF_EXITING;
2983 cgroup_threadgroup_change_end(tsk);
2985 if (!task_sigpending(tsk))
2988 unblocked = tsk->blocked;
2989 signotset(&unblocked);
2990 retarget_shared_pending(tsk, &unblocked);
2992 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2993 task_participate_group_stop(tsk))
2994 group_stop = CLD_STOPPED;
2996 spin_unlock_irq(&tsk->sighand->siglock);
2999 * If group stop has completed, deliver the notification. This
3000 * should always go to the real parent of the group leader.
3002 if (unlikely(group_stop)) {
3003 read_lock(&tasklist_lock);
3004 do_notify_parent_cldstop(tsk, false, group_stop);
3005 read_unlock(&tasklist_lock);
3010 * System call entry points.
3014 * sys_restart_syscall - restart a system call
3016 SYSCALL_DEFINE0(restart_syscall)
3018 struct restart_block *restart = &current->restart_block;
3019 return restart->fn(restart);
3022 long do_no_restart_syscall(struct restart_block *param)
3027 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3029 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3030 sigset_t newblocked;
3031 /* A set of now blocked but previously unblocked signals. */
3032 sigandnsets(&newblocked, newset, &current->blocked);
3033 retarget_shared_pending(tsk, &newblocked);
3035 tsk->blocked = *newset;
3036 recalc_sigpending();
3040 * set_current_blocked - change current->blocked mask
3043 * It is wrong to change ->blocked directly, this helper should be used
3044 * to ensure the process can't miss a shared signal we are going to block.
3046 void set_current_blocked(sigset_t *newset)
3048 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3049 __set_current_blocked(newset);
3052 void __set_current_blocked(const sigset_t *newset)
3054 struct task_struct *tsk = current;
3057 * In case the signal mask hasn't changed, there is nothing we need
3058 * to do. current->blocked shouldn't be modified by any other task.
3060 if (sigequalsets(&tsk->blocked, newset))
3063 spin_lock_irq(&tsk->sighand->siglock);
3064 __set_task_blocked(tsk, newset);
3065 spin_unlock_irq(&tsk->sighand->siglock);
3069 * This is also useful for kernel threads that want to temporarily
3070 * (or permanently) block certain signals.
3072 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3073 * interface happily blocks "unblockable" signals like SIGKILL.
3076 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3078 struct task_struct *tsk = current;
3081 /* Lockless, only current can change ->blocked, never from irq */
3083 *oldset = tsk->blocked;
3087 sigorsets(&newset, &tsk->blocked, set);
3090 sigandnsets(&newset, &tsk->blocked, set);
3099 __set_current_blocked(&newset);
3102 EXPORT_SYMBOL(sigprocmask);
3105 * This API helps set app-provided sigmasks.
3107 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3108 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3110 * Note that it does set_restore_sigmask() in advance, so it must always
3111 * be paired with restore_saved_sigmask_unless() before return from syscall.
3113 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3119 if (sigsetsize != sizeof(sigset_t))
3121 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3124 set_restore_sigmask();
3125 current->saved_sigmask = current->blocked;
3126 set_current_blocked(&kmask);
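/*
 * Minimal sketch of the pairing the comment above requires, assuming a
 * ppoll-like caller; do_poll_wait() is a hypothetical wait helper:
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_poll_wait(...);	// may return -EINTR
 *
 *	// Keep the temporary mask in place when a signal will be delivered
 *	// so the handler runs under it; otherwise restore the saved mask.
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 */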
3131 #ifdef CONFIG_COMPAT
3132 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3139 if (sigsetsize != sizeof(compat_sigset_t))
3141 if (get_compat_sigset(&kmask, umask))
3144 set_restore_sigmask();
3145 current->saved_sigmask = current->blocked;
3146 set_current_blocked(&kmask);
3153 * sys_rt_sigprocmask - change the list of currently blocked signals
3154 * @how: whether to add, remove, or set signals
3155 * @nset: the new set of blocked signals, if non-null
3156 * @oset: previous value of signal mask if non-null
3157 * @sigsetsize: size of sigset_t type
3159 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3160 sigset_t __user *, oset, size_t, sigsetsize)
3162 sigset_t old_set, new_set;
3165 /* XXX: Don't preclude handling different sized sigset_t's. */
3166 if (sigsetsize != sizeof(sigset_t))
3169 old_set = current->blocked;
3172 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3174 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3176 error = sigprocmask(how, &new_set, NULL);
3182 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
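/*
 * Userspace view of this syscall, as a hedged example (glibc's
 * sigprocmask() wraps rt_sigprocmask and passes the kernel's sigset size):
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	if (sigprocmask(SIG_BLOCK, &set, &old) == -1)
 *		perror("sigprocmask");
 *	// SIGINT now stays pending instead of being delivered.
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// restore the previous mask
 */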
3189 #ifdef CONFIG_COMPAT
3190 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3191 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3193 sigset_t old_set = current->blocked;
3195 /* XXX: Don't preclude handling different sized sigset_t's. */
3196 if (sigsetsize != sizeof(sigset_t))
3202 if (get_compat_sigset(&new_set, nset))
3204 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3206 error = sigprocmask(how, &new_set, NULL);
3210 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3214 static void do_sigpending(sigset_t *set)
3216 spin_lock_irq(&current->sighand->siglock);
3217 sigorsets(set, &current->pending.signal,
3218 &current->signal->shared_pending.signal);
3219 spin_unlock_irq(&current->sighand->siglock);
3221 /* Outside the lock because only this thread touches it. */
3222 sigandsets(set, &current->blocked, set);
3226 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
3228 * @uset: stores pending signals
3229 * @sigsetsize: size of sigset_t type or larger
3231 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3235 if (sigsetsize > sizeof(*uset))
3238 do_sigpending(&set);
3240 if (copy_to_user(uset, &set, sigsetsize))
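/*
 * Hedged userspace example: with SIGINT blocked as in the sketch above,
 * sigpending() (implemented via this syscall) reports a raised-but-blocked
 * SIGINT without consuming it:
 *
 *	sigset_t pending;
 *
 *	if (sigpending(&pending) == 0 && sigismember(&pending, SIGINT))
 *		puts("SIGINT is pending");
 */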
3246 #ifdef CONFIG_COMPAT
3247 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3248 compat_size_t, sigsetsize)
3252 if (sigsetsize > sizeof(*uset))
3255 do_sigpending(&set);
3257 return put_compat_sigset(uset, &set, sigsetsize);
3261 static const struct {
3262 unsigned char limit, layout;
3263 } sig_sicodes[] = {
3264 [SIGILL] = { NSIGILL, SIL_FAULT },
3265 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3266 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3267 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3268 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3270 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3272 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3273 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3274 [SIGSYS] = { NSIGSYS, SIL_SYS },
3277 static bool known_siginfo_layout(unsigned sig, int si_code)
3279 if (si_code == SI_KERNEL)
3281 else if (si_code > SI_USER) {
3282 if (sig_specific_sicodes(sig)) {
3283 if (si_code <= sig_sicodes[sig].limit)
3286 else if (si_code <= NSIGPOLL)
3289 else if (si_code >= SI_DETHREAD)
3291 else if (si_code == SI_ASYNCNL)
3296 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3298 enum siginfo_layout layout = SIL_KILL;
3299 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3300 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3301 (si_code <= sig_sicodes[sig].limit)) {
3302 layout = sig_sicodes[sig].layout;
3303 /* Handle the exceptions */
3304 if ((sig == SIGBUS) &&
3305 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3306 layout = SIL_FAULT_MCEERR;
3307 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3308 layout = SIL_FAULT_BNDERR;
3310 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3311 layout = SIL_FAULT_PKUERR;
3313 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3314 layout = SIL_FAULT_PERF_EVENT;
3315 else if (IS_ENABLED(CONFIG_SPARC) &&
3316 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3317 layout = SIL_FAULT_TRAPNO;
3318 else if (IS_ENABLED(CONFIG_ALPHA) &&
3320 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3321 layout = SIL_FAULT_TRAPNO;
3323 else if (si_code <= NSIGPOLL)
3326 if (si_code == SI_TIMER)
3328 else if (si_code == SI_SIGIO)
3330 else if (si_code < 0)
3336 static inline char __user *si_expansion(const siginfo_t __user *info)
3338 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3341 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3343 char __user *expansion = si_expansion(to);
3344 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3346 if (clear_user(expansion, SI_EXPANSION_SIZE))
3351 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3352 const siginfo_t __user *from)
3354 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3355 char __user *expansion = si_expansion(from);
3356 char buf[SI_EXPANSION_SIZE];
3359 * An unknown si_code might need more than
3360 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3361 * extra bytes are 0. This guarantees copy_siginfo_to_user
3362 * will return this data to userspace exactly.
3364 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3366 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3374 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3375 const siginfo_t __user *from)
3377 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3379 to->si_signo = signo;
3380 return post_copy_siginfo_from_user(to, from);
3383 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3385 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3387 return post_copy_siginfo_from_user(to, from);
3390 #ifdef CONFIG_COMPAT
3392 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3393 * @to: compat siginfo destination
3394 * @from: kernel siginfo source
3396 * Note: This function does not work properly for SIGCHLD on x32, but
3397 * fortunately it doesn't have to. The only valid callers of this function are
3398 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3399 * The latter does not care because SIGCHLD will never cause a coredump.
3401 void copy_siginfo_to_external32(struct compat_siginfo *to,
3402 const struct kernel_siginfo *from)
3404 memset(to, 0, sizeof(*to));
3406 to->si_signo = from->si_signo;
3407 to->si_errno = from->si_errno;
3408 to->si_code = from->si_code;
3409 switch (siginfo_layout(from->si_signo, from->si_code)) {
3411 to->si_pid = from->si_pid;
3412 to->si_uid = from->si_uid;
3415 to->si_tid = from->si_tid;
3416 to->si_overrun = from->si_overrun;
3417 to->si_int = from->si_int;
3420 to->si_band = from->si_band;
3421 to->si_fd = from->si_fd;
3424 to->si_addr = ptr_to_compat(from->si_addr);
3426 case SIL_FAULT_TRAPNO:
3427 to->si_addr = ptr_to_compat(from->si_addr);
3428 to->si_trapno = from->si_trapno;
3430 case SIL_FAULT_MCEERR:
3431 to->si_addr = ptr_to_compat(from->si_addr);
3432 to->si_addr_lsb = from->si_addr_lsb;
3434 case SIL_FAULT_BNDERR:
3435 to->si_addr = ptr_to_compat(from->si_addr);
3436 to->si_lower = ptr_to_compat(from->si_lower);
3437 to->si_upper = ptr_to_compat(from->si_upper);
3439 case SIL_FAULT_PKUERR:
3440 to->si_addr = ptr_to_compat(from->si_addr);
3441 to->si_pkey = from->si_pkey;
3443 case SIL_FAULT_PERF_EVENT:
3444 to->si_addr = ptr_to_compat(from->si_addr);
3445 to->si_perf_data = from->si_perf_data;
3446 to->si_perf_type = from->si_perf_type;
3449 to->si_pid = from->si_pid;
3450 to->si_uid = from->si_uid;
3451 to->si_status = from->si_status;
3452 to->si_utime = from->si_utime;
3453 to->si_stime = from->si_stime;
3456 to->si_pid = from->si_pid;
3457 to->si_uid = from->si_uid;
3458 to->si_int = from->si_int;
3461 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3462 to->si_syscall = from->si_syscall;
3463 to->si_arch = from->si_arch;
3468 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3469 const struct kernel_siginfo *from)
3471 struct compat_siginfo new;
3473 copy_siginfo_to_external32(&new, from);
3474 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3479 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3480 const struct compat_siginfo *from)
3483 to->si_signo = from->si_signo;
3484 to->si_errno = from->si_errno;
3485 to->si_code = from->si_code;
3486 switch (siginfo_layout(from->si_signo, from->si_code)) {
3488 to->si_pid = from->si_pid;
3489 to->si_uid = from->si_uid;
3492 to->si_tid = from->si_tid;
3493 to->si_overrun = from->si_overrun;
3494 to->si_int = from->si_int;
3497 to->si_band = from->si_band;
3498 to->si_fd = from->si_fd;
3501 to->si_addr = compat_ptr(from->si_addr);
3503 case SIL_FAULT_TRAPNO:
3504 to->si_addr = compat_ptr(from->si_addr);
3505 to->si_trapno = from->si_trapno;
3507 case SIL_FAULT_MCEERR:
3508 to->si_addr = compat_ptr(from->si_addr);
3509 to->si_addr_lsb = from->si_addr_lsb;
3511 case SIL_FAULT_BNDERR:
3512 to->si_addr = compat_ptr(from->si_addr);
3513 to->si_lower = compat_ptr(from->si_lower);
3514 to->si_upper = compat_ptr(from->si_upper);
3516 case SIL_FAULT_PKUERR:
3517 to->si_addr = compat_ptr(from->si_addr);
3518 to->si_pkey = from->si_pkey;
3520 case SIL_FAULT_PERF_EVENT:
3521 to->si_addr = compat_ptr(from->si_addr);
3522 to->si_perf_data = from->si_perf_data;
3523 to->si_perf_type = from->si_perf_type;
3526 to->si_pid = from->si_pid;
3527 to->si_uid = from->si_uid;
3528 to->si_status = from->si_status;
3529 #ifdef CONFIG_X86_X32_ABI
3530 if (in_x32_syscall()) {
3531 to->si_utime = from->_sifields._sigchld_x32._utime;
3532 to->si_stime = from->_sifields._sigchld_x32._stime;
3536 to->si_utime = from->si_utime;
3537 to->si_stime = from->si_stime;
3541 to->si_pid = from->si_pid;
3542 to->si_uid = from->si_uid;
3543 to->si_int = from->si_int;
3546 to->si_call_addr = compat_ptr(from->si_call_addr);
3547 to->si_syscall = from->si_syscall;
3548 to->si_arch = from->si_arch;
3554 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3555 const struct compat_siginfo __user *ufrom)
3557 struct compat_siginfo from;
3559 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3562 from.si_signo = signo;
3563 return post_copy_siginfo_from_user32(to, &from);
3566 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3567 const struct compat_siginfo __user *ufrom)
3569 struct compat_siginfo from;
3571 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3574 return post_copy_siginfo_from_user32(to, &from);
3576 #endif /* CONFIG_COMPAT */
3579 * do_sigtimedwait - wait for queued signals specified in @which
3580 * @which: queued signals to wait for
3581 * @info: if non-null, the signal's siginfo is returned here
3582 * @ts: upper bound on process time suspension
3584 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3585 const struct timespec64 *ts)
3587 ktime_t *to = NULL, timeout = KTIME_MAX;
3588 struct task_struct *tsk = current;
3589 sigset_t mask = *which;
3593 if (!timespec64_valid(ts))
3595 timeout = timespec64_to_ktime(*ts);
3600 * Invert the set of allowed signals to get those we want to block.
3602 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3605 spin_lock_irq(&tsk->sighand->siglock);
3606 sig = dequeue_signal(tsk, &mask, info);
3607 if (!sig && timeout) {
3609 * None ready; temporarily unblock those we're interested in
3610 * while we are sleeping, so that we'll be awakened when
3611 * they arrive. Unblocking is always fine, we can avoid
3612 * set_current_blocked().
3614 tsk->real_blocked = tsk->blocked;
3615 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3616 recalc_sigpending();
3617 spin_unlock_irq(&tsk->sighand->siglock);
3619 __set_current_state(TASK_INTERRUPTIBLE);
3620 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3622 spin_lock_irq(&tsk->sighand->siglock);
3623 __set_task_blocked(tsk, &tsk->real_blocked);
3624 sigemptyset(&tsk->real_blocked);
3625 sig = dequeue_signal(tsk, &mask, info);
3627 spin_unlock_irq(&tsk->sighand->siglock);
3631 return ret ? -EINTR : -EAGAIN;
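/*
 * Hedged userspace example of the synchronous wait this implements. The
 * signal must be blocked first or it may be delivered to a handler instead
 * of being dequeued here:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", (int)si.si_pid);
 *	// on timeout: -1 with errno == EAGAIN; interrupted: errno == EINTR
 */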
3635 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3637 * @uthese: queued signals to wait for
3638 * @uinfo: if non-null, the signal's siginfo is returned here
3639 * @uts: upper bound on process time suspension
3640 * @sigsetsize: size of sigset_t type
3642 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3643 siginfo_t __user *, uinfo,
3644 const struct __kernel_timespec __user *, uts,
3648 struct timespec64 ts;
3649 kernel_siginfo_t info;
3652 /* XXX: Don't preclude handling different sized sigset_t's. */
3653 if (sigsetsize != sizeof(sigset_t))
3656 if (copy_from_user(&these, uthese, sizeof(these)))
3660 if (get_timespec64(&ts, uts))
3664 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3666 if (ret > 0 && uinfo) {
3667 if (copy_siginfo_to_user(uinfo, &info))
3674 #ifdef CONFIG_COMPAT_32BIT_TIME
3675 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3676 siginfo_t __user *, uinfo,
3677 const struct old_timespec32 __user *, uts,
3681 struct timespec64 ts;
3682 kernel_siginfo_t info;
3685 if (sigsetsize != sizeof(sigset_t))
3688 if (copy_from_user(&these, uthese, sizeof(these)))
3692 if (get_old_timespec32(&ts, uts))
3696 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3698 if (ret > 0 && uinfo) {
3699 if (copy_siginfo_to_user(uinfo, &info))
3707 #ifdef CONFIG_COMPAT
3708 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3709 struct compat_siginfo __user *, uinfo,
3710 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3713 struct timespec64 t;
3714 kernel_siginfo_t info;
3717 if (sigsetsize != sizeof(sigset_t))
3720 if (get_compat_sigset(&s, uthese))
3724 if (get_timespec64(&t, uts))
3728 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3730 if (ret > 0 && uinfo) {
3731 if (copy_siginfo_to_user32(uinfo, &info))
3738 #ifdef CONFIG_COMPAT_32BIT_TIME
3739 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3740 struct compat_siginfo __user *, uinfo,
3741 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3744 struct timespec64 t;
3745 kernel_siginfo_t info;
3748 if (sigsetsize != sizeof(sigset_t))
3751 if (get_compat_sigset(&s, uthese))
3755 if (get_old_timespec32(&t, uts))
3759 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3761 if (ret > 0 && uinfo) {
3762 if (copy_siginfo_to_user32(uinfo, &info))
3771 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3773 clear_siginfo(info);
3774 info->si_signo = sig;
3776 info->si_code = SI_USER;
3777 info->si_pid = task_tgid_vnr(current);
3778 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3782 * sys_kill - send a signal to a process
3783 * @pid: the PID of the process
3784 * @sig: signal to be sent
3786 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3788 struct kernel_siginfo info;
3790 prepare_kill_siginfo(sig, &info);
3792 return kill_something_info(sig, &info, pid);
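/*
 * Reminder of the pid encoding that kill_something_info() decodes, shown
 * from the userspace side (standard kill(2) semantics):
 *
 *	kill(1234, SIGTERM);	// pid > 0: exactly that process
 *	kill(0, SIGTERM);	// pid == 0: the caller's process group
 *	kill(-1, SIGTERM);	// pid == -1: every process we may signal
 *	kill(-1234, SIGTERM);	// pid < -1: process group 1234
 *	kill(1234, 0);		// sig == 0: existence/permission probe only
 */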
3796 * Verify that the signaler and signalee either are in the same pid namespace
3797 * or that the signaler's pid namespace is an ancestor of the signalee's pid namespace.
3800 static bool access_pidfd_pidns(struct pid *pid)
3802 struct pid_namespace *active = task_active_pid_ns(current);
3803 struct pid_namespace *p = ns_of_pid(pid);
3816 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3817 siginfo_t __user *info)
3819 #ifdef CONFIG_COMPAT
3821 * Avoid hooking up compat syscalls and instead handle necessary
3822 * conversions here. Note, this is a stop-gap measure and should not be
3823 * considered a generic solution.
3825 if (in_compat_syscall())
3826 return copy_siginfo_from_user32(
3827 kinfo, (struct compat_siginfo __user *)info);
3829 return copy_siginfo_from_user(kinfo, info);
3832 static struct pid *pidfd_to_pid(const struct file *file)
3836 pid = pidfd_pid(file);
3840 return tgid_pidfd_to_pid(file);
3844 * sys_pidfd_send_signal - Signal a process through a pidfd
3845 * @pidfd: file descriptor of the process
3846 * @sig: signal to send
3847 * @info: signal info
3848 * @flags: future flags
3850 * The syscall currently only signals via PIDTYPE_PID which covers
3851 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3853 * In order to extend the syscall to threads and process groups the @flags
3854 * argument should be used. In essence, the @flags argument will determine
3855 * what is signaled and not the file descriptor itself. Put another way,
3856 * grouping is a property of the flags argument, not a property of the file
3857 * descriptor.
3859 * Return: 0 on success, negative errno on failure
3861 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3862 siginfo_t __user *, info, unsigned int, flags)
3867 kernel_siginfo_t kinfo;
3869 /* Enforce flags to be 0 until we add an extension. */
3877 /* Is this a pidfd? */
3878 pid = pidfd_to_pid(f.file);
3885 if (!access_pidfd_pidns(pid))
3889 ret = copy_siginfo_from_user_any(&kinfo, info);
3894 if (unlikely(sig != kinfo.si_signo))
3897 /* Only allow sending arbitrary signals to yourself. */
3899 if ((task_pid(current) != pid) &&
3900 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3903 prepare_kill_siginfo(sig, &kinfo);
3906 ret = kill_pid_info(sig, &kinfo, pid);
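/*
 * Hedged userspace sketch using raw syscall(2) (glibc wrappers for the
 * pidfd calls are comparatively recent):
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *	if (pidfd >= 0 &&
 *	    syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) == 0)
 *		;	// delivered without pid-reuse races
 */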
3914 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3916 struct task_struct *p;
3920 p = find_task_by_vpid(pid);
3921 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3922 error = check_kill_permission(sig, info, p);
3924 * The null signal is a permissions and process existence
3925 * probe. No signal is actually delivered.
3927 if (!error && sig) {
3928 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3930 * If lock_task_sighand() failed we pretend the task
3931 * dies after receiving the signal. The window is tiny,
3932 * and the signal is private anyway.
3934 if (unlikely(error == -ESRCH))
3943 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3945 struct kernel_siginfo info;
3947 clear_siginfo(&info);
3948 info.si_signo = sig;
3950 info.si_code = SI_TKILL;
3951 info.si_pid = task_tgid_vnr(current);
3952 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3954 return do_send_specific(tgid, pid, sig, &info);
3958 * sys_tgkill - send signal to one specific thread
3959 * @tgid: the thread group ID of the thread
3960 * @pid: the PID of the thread
3961 * @sig: signal to be sent
3963 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3964 * exists but no longer belongs to the target process. This
3965 * method solves the problem of threads exiting and PIDs getting reused.
3967 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3969 /* This is only valid for single tasks */
3970 if (pid <= 0 || tgid <= 0)
3973 return do_tkill(tgid, pid, sig);
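/*
 * Hedged userspace example: one thread signaling a specific sibling thread
 * (target_tid is assumed to come from gettid() in the target); the tgid
 * check is what removes the pid-reuse hazard of plain tkill():
 *
 *	if (syscall(SYS_tgkill, getpid(), target_tid, SIGUSR1) == -1 &&
 *	    errno == ESRCH)
 *		;	// tid exited or was recycled into another process
 */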
3977 * sys_tkill - send signal to one specific task
3978 * @pid: the PID of the task
3979 * @sig: signal to be sent
3981 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3983 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3985 /* This is only valid for single tasks */
3989 return do_tkill(0, pid, sig);
3992 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3994 /* Not even root can pretend to send signals from the kernel.
3995 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3997 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3998 (task_pid_vnr(current) != pid))
4001 /* POSIX.1b doesn't mention process groups. */
4002 return kill_proc_info(sig, info, pid);
4006 * sys_rt_sigqueueinfo - send signal information to a process
4007 * @pid: the PID of the thread
4008 * @sig: signal to be sent
4009 * @uinfo: signal info to be sent
4011 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4012 siginfo_t __user *, uinfo)
4014 kernel_siginfo_t info;
4015 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4018 return do_rt_sigqueueinfo(pid, sig, &info);
4021 #ifdef CONFIG_COMPAT
4022 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4025 struct compat_siginfo __user *, uinfo)
4027 kernel_siginfo_t info;
4028 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4031 return do_rt_sigqueueinfo(pid, sig, &info);
4035 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4037 /* This is only valid for single tasks */
4038 if (pid <= 0 || tgid <= 0)
4041 /* Not even root can pretend to send signals from the kernel.
4042 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4044 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4045 (task_pid_vnr(current) != pid))
4048 return do_send_specific(tgid, pid, sig, info);
4051 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4052 siginfo_t __user *, uinfo)
4054 kernel_siginfo_t info;
4055 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4058 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4061 #ifdef CONFIG_COMPAT
4062 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4066 struct compat_siginfo __user *, uinfo)
4068 kernel_siginfo_t info;
4069 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4072 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4077 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4079 void kernel_sigaction(int sig, __sighandler_t action)
4081 spin_lock_irq(&current->sighand->siglock);
4082 current->sighand->action[sig - 1].sa.sa_handler = action;
4083 if (action == SIG_IGN) {
4087 sigaddset(&mask, sig);
4089 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4090 flush_sigqueue_mask(&mask, &current->pending);
4091 recalc_sigpending();
4093 spin_unlock_irq(&current->sighand->siglock);
4095 EXPORT_SYMBOL(kernel_sigaction);
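/*
 * Sketch of typical in-kernel use, assuming the allow_signal() wrapper
 * from <linux/signal.h>, which funnels into kernel_sigaction():
 *
 *	static int my_worker(void *arg)		// hypothetical kthread
 *	{
 *		allow_signal(SIGTERM);
 *		while (!kthread_should_stop()) {
 *			...
 *			if (signal_pending(current))
 *				break;		// told to wind down
 *		}
 *		return 0;
 *	}
 */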
4097 void __weak sigaction_compat_abi(struct k_sigaction *act,
4098 struct k_sigaction *oact)
4102 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4104 struct task_struct *p = current, *t;
4105 struct k_sigaction *k;
4108 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4111 k = &p->sighand->action[sig - 1];
4113 spin_lock_irq(&p->sighand->siglock);
4114 if (k->sa.sa_flags & SA_IMMUTABLE) {
4115 spin_unlock_irq(&p->sighand->siglock);
4122 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4123 * e.g. by having an architecture use the bit in their uapi.
4125 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4128 * Clear unknown flag bits in order to allow userspace to detect missing
4129 * support for flag bits and to allow the kernel to use non-uapi bits internally.
4133 act->sa.sa_flags &= UAPI_SA_FLAGS;
4135 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4137 sigaction_compat_abi(act, oact);
4140 sigdelsetmask(&act->sa.sa_mask,
4141 sigmask(SIGKILL) | sigmask(SIGSTOP));
4145 * "Setting a signal action to SIG_IGN for a signal that is
4146 * pending shall cause the pending signal to be discarded,
4147 * whether or not it is blocked."
4149 * "Setting a signal action to SIG_DFL for a signal that is
4150 * pending and whose default action is to ignore the signal
4151 * (for example, SIGCHLD), shall cause the pending signal to
4152 * be discarded, whether or not it is blocked"
4154 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4156 sigaddset(&mask, sig);
4157 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4158 for_each_thread(p, t)
4159 flush_sigqueue_mask(&mask, &t->pending);
4163 spin_unlock_irq(&p->sighand->siglock);
4168 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4171 struct task_struct *t = current;
4174 memset(oss, 0, sizeof(stack_t));
4175 oss->ss_sp = (void __user *) t->sas_ss_sp;
4176 oss->ss_size = t->sas_ss_size;
4177 oss->ss_flags = sas_ss_flags(sp) |
4178 (current->sas_ss_flags & SS_FLAG_BITS);
4182 void __user *ss_sp = ss->ss_sp;
4183 size_t ss_size = ss->ss_size;
4184 unsigned ss_flags = ss->ss_flags;
4187 if (unlikely(on_sig_stack(sp)))
4190 ss_mode = ss_flags & ~SS_FLAG_BITS;
4191 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4195 if (ss_mode == SS_DISABLE) {
4199 if (unlikely(ss_size < min_ss_size))
4203 t->sas_ss_sp = (unsigned long) ss_sp;
4204 t->sas_ss_size = ss_size;
4205 t->sas_ss_flags = ss_flags;
4210 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
4214 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4216 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4217 current_user_stack_pointer(),
4219 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
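/*
 * Hedged userspace example: installing an alternate stack so a SIGSEGV
 * handler can still run after the main stack overflows (the handler must
 * also be registered with SA_ONSTACK):
 *
 *	static char altstack[SIGSTKSZ];
 *	stack_t ss = {
 *		.ss_sp = altstack,
 *		.ss_size = sizeof(altstack),
 *		.ss_flags = 0,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	// ...then sigaction(SIGSEGV, ...) with sa_flags |= SA_ONSTACK
 */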
4224 int restore_altstack(const stack_t __user *uss)
4227 if (copy_from_user(&new, uss, sizeof(stack_t)))
4229 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4231 /* squash all but EFAULT for now */
4235 int __save_altstack(stack_t __user *uss, unsigned long sp)
4237 struct task_struct *t = current;
4238 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4239 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4240 __put_user(t->sas_ss_size, &uss->ss_size);
4244 #ifdef CONFIG_COMPAT
4245 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4246 compat_stack_t __user *uoss_ptr)
4252 compat_stack_t uss32;
4253 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4255 uss.ss_sp = compat_ptr(uss32.ss_sp);
4256 uss.ss_flags = uss32.ss_flags;
4257 uss.ss_size = uss32.ss_size;
4259 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4260 compat_user_stack_pointer(),
4261 COMPAT_MINSIGSTKSZ);
4262 if (ret >= 0 && uoss_ptr) {
4264 memset(&old, 0, sizeof(old));
4265 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4266 old.ss_flags = uoss.ss_flags;
4267 old.ss_size = uoss.ss_size;
4268 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4274 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4275 const compat_stack_t __user *, uss_ptr,
4276 compat_stack_t __user *, uoss_ptr)
4278 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4281 int compat_restore_altstack(const compat_stack_t __user *uss)
4283 int err = do_compat_sigaltstack(uss, NULL);
4284 /* squash all but -EFAULT for now */
4285 return err == -EFAULT ? err : 0;
4288 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4291 struct task_struct *t = current;
4292 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4294 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4295 __put_user(t->sas_ss_size, &uss->ss_size);
4300 #ifdef __ARCH_WANT_SYS_SIGPENDING
4303 * sys_sigpending - examine pending signals
4304 * @uset: where the mask of pending signals is returned
4306 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4310 if (sizeof(old_sigset_t) > sizeof(*uset))
4313 do_sigpending(&set);
4315 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4321 #ifdef CONFIG_COMPAT
4322 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4326 do_sigpending(&set);
4328 return put_user(set.sig[0], set32);
4334 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4336 * sys_sigprocmask - examine and change blocked signals
4337 * @how: whether to add, remove, or set signals
4338 * @nset: signals to add or remove (if non-null)
4339 * @oset: previous value of signal mask if non-null
4341 * Some platforms have their own version with special arguments;
4342 * others support only sys_rt_sigprocmask.
4345 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4346 old_sigset_t __user *, oset)
4348 old_sigset_t old_set, new_set;
4349 sigset_t new_blocked;
4351 old_set = current->blocked.sig[0];
4354 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4357 new_blocked = current->blocked;
4361 sigaddsetmask(&new_blocked, new_set);
4364 sigdelsetmask(&new_blocked, new_set);
4367 new_blocked.sig[0] = new_set;
4373 set_current_blocked(&new_blocked);
4377 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4383 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4385 #ifndef CONFIG_ODD_RT_SIGACTION
4387 * sys_rt_sigaction - alter an action taken by a process
4388 * @sig: signal to be sent
4389 * @act: new sigaction
4390 * @oact: used to save the previous sigaction
4391 * @sigsetsize: size of sigset_t type
4393 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4394 const struct sigaction __user *, act,
4395 struct sigaction __user *, oact,
4398 struct k_sigaction new_sa, old_sa;
4401 /* XXX: Don't preclude handling different sized sigset_t's. */
4402 if (sigsetsize != sizeof(sigset_t))
4405 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4408 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4412 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
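/*
 * Hedged userspace example (glibc's sigaction() wraps this syscall and
 * supplies sa_restorer where the architecture needs one); on_sigint is a
 * hypothetical handler:
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_handler = on_sigint;
 *	sigemptyset(&sa.sa_mask);
 *	sa.sa_flags = SA_RESTART;	// restart slow syscalls after handling
 *	if (sigaction(SIGINT, &sa, NULL) == -1)
 *		perror("sigaction");
 */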
4417 #ifdef CONFIG_COMPAT
4418 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4419 const struct compat_sigaction __user *, act,
4420 struct compat_sigaction __user *, oact,
4421 compat_size_t, sigsetsize)
4423 struct k_sigaction new_ka, old_ka;
4424 #ifdef __ARCH_HAS_SA_RESTORER
4425 compat_uptr_t restorer;
4429 /* XXX: Don't preclude handling different sized sigset_t's. */
4430 if (sigsetsize != sizeof(compat_sigset_t))
4434 compat_uptr_t handler;
4435 ret = get_user(handler, &act->sa_handler);
4436 new_ka.sa.sa_handler = compat_ptr(handler);
4437 #ifdef __ARCH_HAS_SA_RESTORER
4438 ret |= get_user(restorer, &act->sa_restorer);
4439 new_ka.sa.sa_restorer = compat_ptr(restorer);
4441 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4442 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4447 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4449 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4451 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4452 sizeof(oact->sa_mask));
4453 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4454 #ifdef __ARCH_HAS_SA_RESTORER
4455 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4456 &oact->sa_restorer);
4462 #endif /* !CONFIG_ODD_RT_SIGACTION */
4464 #ifdef CONFIG_OLD_SIGACTION
4465 SYSCALL_DEFINE3(sigaction, int, sig,
4466 const struct old_sigaction __user *, act,
4467 struct old_sigaction __user *, oact)
4469 struct k_sigaction new_ka, old_ka;
4474 if (!access_ok(act, sizeof(*act)) ||
4475 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4476 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4477 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4478 __get_user(mask, &act->sa_mask))
4480 #ifdef __ARCH_HAS_KA_RESTORER
4481 new_ka.ka_restorer = NULL;
4483 siginitset(&new_ka.sa.sa_mask, mask);
4486 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4489 if (!access_ok(oact, sizeof(*oact)) ||
4490 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4491 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4492 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4493 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4500 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4501 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4502 const struct compat_old_sigaction __user *, act,
4503 struct compat_old_sigaction __user *, oact)
4505 struct k_sigaction new_ka, old_ka;
4507 compat_old_sigset_t mask;
4508 compat_uptr_t handler, restorer;
4511 if (!access_ok(act, sizeof(*act)) ||
4512 __get_user(handler, &act->sa_handler) ||
4513 __get_user(restorer, &act->sa_restorer) ||
4514 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4515 __get_user(mask, &act->sa_mask))
4518 #ifdef __ARCH_HAS_KA_RESTORER
4519 new_ka.ka_restorer = NULL;
4521 new_ka.sa.sa_handler = compat_ptr(handler);
4522 new_ka.sa.sa_restorer = compat_ptr(restorer);
4523 siginitset(&new_ka.sa.sa_mask, mask);
4526 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4529 if (!access_ok(oact, sizeof(*oact)) ||
4530 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4531 &oact->sa_handler) ||
4532 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4533 &oact->sa_restorer) ||
4534 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4535 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4542 #ifdef CONFIG_SGETMASK_SYSCALL
4545 * For backwards compatibility. Functionality superseded by sigprocmask.
4547 SYSCALL_DEFINE0(sgetmask)
4550 return current->blocked.sig[0];
4553 SYSCALL_DEFINE1(ssetmask, int, newmask)
4555 int old = current->blocked.sig[0];
4558 siginitset(&newset, newmask);
4559 set_current_blocked(&newset);
4563 #endif /* CONFIG_SGETMASK_SYSCALL */
4565 #ifdef __ARCH_WANT_SYS_SIGNAL
4567 * For backwards compatibility. Functionality superseded by sigaction.
4569 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4571 struct k_sigaction new_sa, old_sa;
4574 new_sa.sa.sa_handler = handler;
4575 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4576 sigemptyset(&new_sa.sa.sa_mask);
4578 ret = do_sigaction(sig, &new_sa, &old_sa);
4580 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4582 #endif /* __ARCH_WANT_SYS_SIGNAL */
4584 #ifdef __ARCH_WANT_SYS_PAUSE
4586 SYSCALL_DEFINE0(pause)
4588 while (!signal_pending(current)) {
4589 __set_current_state(TASK_INTERRUPTIBLE);
4592 return -ERESTARTNOHAND;
4597 static int sigsuspend(sigset_t *set)
4599 current->saved_sigmask = current->blocked;
4600 set_current_blocked(set);
4602 while (!signal_pending(current)) {
4603 __set_current_state(TASK_INTERRUPTIBLE);
4606 set_restore_sigmask();
4607 return -ERESTARTNOHAND;
4611 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4612 * until a signal is received
4613 * @unewset: new signal mask value
4614 * @sigsetsize: size of sigset_t type
4616 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4620 /* XXX: Don't preclude handling different sized sigset_t's. */
4621 if (sigsetsize != sizeof(sigset_t))
4624 if (copy_from_user(&newset, unewset, sizeof(newset)))
4626 return sigsuspend(&newset);
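/*
 * Hedged userspace example of the race-free wait this enables: block the
 * signal, test the flag, then atomically unblock and sleep in one step so
 * no wakeup can slip in between the test and the sleep. got_usr1 is a
 * volatile sig_atomic_t set by the SIGUSR1 handler:
 *
 *	sigset_t block, wait_mask;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &wait_mask);
 *	sigdelset(&wait_mask, SIGUSR1);
 *	while (!got_usr1)
 *		sigsuspend(&wait_mask);	// returns -1/EINTR after the handler
 */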
4629 #ifdef CONFIG_COMPAT
4630 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4634 /* XXX: Don't preclude handling different sized sigset_t's. */
4635 if (sigsetsize != sizeof(sigset_t))
4638 if (get_compat_sigset(&newset, unewset))
4640 return sigsuspend(&newset);
4644 #ifdef CONFIG_OLD_SIGSUSPEND
4645 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4648 siginitset(&blocked, mask);
4649 return sigsuspend(&blocked);
4652 #ifdef CONFIG_OLD_SIGSUSPEND3
4653 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4656 siginitset(&blocked, mask);
4657 return sigsuspend(&blocked);
4661 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4666 static inline void siginfo_buildtime_checks(void)
4668 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4670 /* Verify the offsets in the two siginfos match */
4671 #define CHECK_OFFSET(field) \
4672 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4675 CHECK_OFFSET(si_pid);
4676 CHECK_OFFSET(si_uid);
4679 CHECK_OFFSET(si_tid);
4680 CHECK_OFFSET(si_overrun);
4681 CHECK_OFFSET(si_value);
4684 CHECK_OFFSET(si_pid);
4685 CHECK_OFFSET(si_uid);
4686 CHECK_OFFSET(si_value);
4689 CHECK_OFFSET(si_pid);
4690 CHECK_OFFSET(si_uid);
4691 CHECK_OFFSET(si_status);
4692 CHECK_OFFSET(si_utime);
4693 CHECK_OFFSET(si_stime);
4696 CHECK_OFFSET(si_addr);
4697 CHECK_OFFSET(si_trapno);
4698 CHECK_OFFSET(si_addr_lsb);
4699 CHECK_OFFSET(si_lower);
4700 CHECK_OFFSET(si_upper);
4701 CHECK_OFFSET(si_pkey);
4702 CHECK_OFFSET(si_perf_data);
4703 CHECK_OFFSET(si_perf_type);
4706 CHECK_OFFSET(si_band);
4707 CHECK_OFFSET(si_fd);
4710 CHECK_OFFSET(si_call_addr);
4711 CHECK_OFFSET(si_syscall);
4712 CHECK_OFFSET(si_arch);
4716 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4717 offsetof(struct siginfo, si_addr));
4718 if (sizeof(int) == sizeof(void __user *)) {
4719 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4720 sizeof(void __user *));
4722 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4723 sizeof_field(struct siginfo, si_uid)) !=
4724 sizeof(void __user *));
4725 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4726 offsetof(struct siginfo, si_uid));
4728 #ifdef CONFIG_COMPAT
4729 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4730 offsetof(struct compat_siginfo, si_addr));
4731 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4732 sizeof(compat_uptr_t));
4733 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4734 sizeof_field(struct siginfo, si_pid));
4738 void __init signals_init(void)
4740 siginfo_buildtime_checks();
4742 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
4745 #ifdef CONFIG_KGDB_KDB
4746 #include <linux/kdb.h>
4748 * kdb_send_sig - Allows kdb to send signals without exposing
4749 * signal internals. This function checks if the required locks are
4750 * available before calling the main signal code, to avoid kdb deadlocks.
4753 void kdb_send_sig(struct task_struct *t, int sig)
4755 static struct task_struct *kdb_prev_t;
4757 if (!spin_trylock(&t->sighand->siglock)) {
4758 kdb_printf("Can't do kill command now.\n"
4759 "The sigmask lock is held somewhere else in "
4760 "kernel, try again later\n");
4763 new_t = kdb_prev_t != t;
4765 if (!task_is_running(t) && new_t) {
4766 spin_unlock(&t->sighand->siglock);
4767 kdb_printf("Process is not RUNNING, sending a signal from "
4768 "kdb risks deadlock\n"
4769 "on the run queue locks. "
4770 "The signal has _not_ been sent.\n"
4771 "Reissue the kill command if you want to risk "
4775 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4776 spin_unlock(&t->sighand->siglock);
4778 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4781 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4783 #endif /* CONFIG_KGDB_KDB */