1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/kernel/signal.c
5 * Copyright (C) 1991, 1992 Linus Torvalds
7 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
9 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
10 * Changes to use preallocated sigqueue structures
11 * to allow signals to be sent reliably.
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
25 #include <linux/proc_fs.h>
26 #include <linux/tty.h>
27 #include <linux/binfmts.h>
28 #include <linux/coredump.h>
29 #include <linux/security.h>
30 #include <linux/syscalls.h>
31 #include <linux/ptrace.h>
32 #include <linux/signal.h>
33 #include <linux/signalfd.h>
34 #include <linux/ratelimit.h>
35 #include <linux/task_work.h>
36 #include <linux/capability.h>
37 #include <linux/freezer.h>
38 #include <linux/pid_namespace.h>
39 #include <linux/nsproxy.h>
40 #include <linux/user_namespace.h>
41 #include <linux/uprobes.h>
42 #include <linux/compat.h>
43 #include <linux/cn_proc.h>
44 #include <linux/compiler.h>
45 #include <linux/posix-timers.h>
46 #include <linux/cgroup.h>
47 #include <linux/audit.h>
48 #include <linux/sysctl.h>
50 #define CREATE_TRACE_POINTS
51 #include <trace/events/signal.h>
53 #include <asm/param.h>
54 #include <linux/uaccess.h>
55 #include <asm/unistd.h>
56 #include <asm/siginfo.h>
57 #include <asm/cacheflush.h>
58 #include <asm/syscall.h> /* for syscall_get_* */
61 * SLAB caches for signal bits.
64 static struct kmem_cache *sigqueue_cachep;
66 int print_fatal_signals __read_mostly;
68 static void __user *sig_handler(struct task_struct *t, int sig)
70 return t->sighand->action[sig - 1].sa.sa_handler;
73 static inline bool sig_handler_ignored(void __user *handler, int sig)
75 /* Is it explicitly or implicitly ignored? */
76 return handler == SIG_IGN ||
77 (handler == SIG_DFL && sig_kernel_ignore(sig));
80 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
84 handler = sig_handler(t, sig);
86 /* SIGKILL and SIGSTOP may not be sent to the global init */
87 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
90 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
91 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
94 /* Only allow kernel generated signals to this kthread */
95 if (unlikely((t->flags & PF_KTHREAD) &&
96 (handler == SIG_KTHREAD_KERNEL) && !force))
99 return sig_handler_ignored(handler, sig);
102 static bool sig_ignored(struct task_struct *t, int sig, bool force)
105 * Blocked signals are never ignored, since the
106 * signal handler may change by the time it is
109 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
113 * Tracers may want to know about even ignored signals, unless the
114 * signal is SIGKILL, which can't be reported anyway but can be ignored
115 * by a SIGNAL_UNKILLABLE task.
117 if (t->ptrace && sig != SIGKILL)
120 return sig_task_ignored(t, sig, force);
124 * Re-calculate pending state from the set of locally pending
125 * signals, globally pending signals, and blocked signals.
127 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
132 switch (_NSIG_WORDS) {
134 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
135 ready |= signal->sig[i] &~ blocked->sig[i];
138 case 4: ready = signal->sig[3] &~ blocked->sig[3];
139 ready |= signal->sig[2] &~ blocked->sig[2];
140 ready |= signal->sig[1] &~ blocked->sig[1];
141 ready |= signal->sig[0] &~ blocked->sig[0];
144 case 2: ready = signal->sig[1] &~ blocked->sig[1];
145 ready |= signal->sig[0] &~ blocked->sig[0];
148 case 1: ready = signal->sig[0] &~ blocked->sig[0];
153 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
155 static bool recalc_sigpending_tsk(struct task_struct *t)
157 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
158 PENDING(&t->pending, &t->blocked) ||
159 PENDING(&t->signal->shared_pending, &t->blocked) ||
160 cgroup_task_frozen(t)) {
161 set_tsk_thread_flag(t, TIF_SIGPENDING);
166 * We must never clear the flag in another thread, or in current
167 * when it's possible the current syscall is returning -ERESTART*.
168 * So we don't clear it here; only callers who know they should clear it do so.
174 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
175 * This is superfluous when called on current; the wakeup is then a harmless no-op.
177 void recalc_sigpending_and_wake(struct task_struct *t)
179 if (recalc_sigpending_tsk(t))
180 signal_wake_up(t, 0);
183 void recalc_sigpending(void)
185 if (!recalc_sigpending_tsk(current) && !freezing(current))
186 clear_thread_flag(TIF_SIGPENDING);
189 EXPORT_SYMBOL(recalc_sigpending);
191 void calculate_sigpending(void)
193 /* Have any signals or users of TIF_SIGPENDING been delayed until after fork? */
196 spin_lock_irq(&current->sighand->siglock);
197 set_tsk_thread_flag(current, TIF_SIGPENDING);
199 spin_unlock_irq(&current->sighand->siglock);
202 /* Given the mask, find the first available signal that should be serviced. */
204 #define SYNCHRONOUS_MASK \
205 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
206 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
208 int next_signal(struct sigpending *pending, sigset_t *mask)
210 unsigned long i, *s, *m, x;
213 s = pending->signal.sig;
217 * Handle the first word specially: it contains the
218 * synchronous signals that need to be dequeued first.
222 if (x & SYNCHRONOUS_MASK)
223 x &= SYNCHRONOUS_MASK;
228 switch (_NSIG_WORDS) {
230 for (i = 1; i < _NSIG_WORDS; ++i) {
234 sig = ffz(~x) + i*_NSIG_BPW + 1;
243 sig = ffz(~x) + _NSIG_BPW + 1;
254 static inline void print_dropped_signal(int sig)
256 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
258 if (!print_fatal_signals)
261 if (!__ratelimit(&ratelimit_state))
264 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
265 current->comm, current->pid, sig);
269 * task_set_jobctl_pending - set jobctl pending bits
271 * @mask: pending bits to set
273 * Set @mask on @task->jobctl. @mask must be a subset of
274 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
275 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
276 * cleared. If @task is already being killed or exiting, this function becomes a no-op.
280 * Must be called with @task->sighand->siglock held.
283 * %true if @mask is set, %false if made noop because @task was dying.
285 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
287 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
288 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
289 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
291 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
294 if (mask & JOBCTL_STOP_SIGMASK)
295 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
297 task->jobctl |= mask;
302 * task_clear_jobctl_trapping - clear jobctl trapping bit
305 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
306 * Clear it and wake up the ptracer. Note that we don't need any further
307 * locking. @task->siglock guarantees that @task->parent points to the ptracer.
311 * Must be called with @task->sighand->siglock held.
313 void task_clear_jobctl_trapping(struct task_struct *task)
315 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
316 task->jobctl &= ~JOBCTL_TRAPPING;
317 smp_mb(); /* advised by wake_up_bit() */
318 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
323 * task_clear_jobctl_pending - clear jobctl pending bits
325 * @mask: pending bits to clear
327 * Clear @mask from @task->jobctl. @mask must be subset of
328 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
329 * STOP bits are cleared together.
331 * If clearing of @mask leaves no stop or trap pending, this function calls
332 * task_clear_jobctl_trapping().
335 * Must be called with @task->sighand->siglock held.
337 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
339 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
341 if (mask & JOBCTL_STOP_PENDING)
342 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
344 task->jobctl &= ~mask;
346 if (!(task->jobctl & JOBCTL_PENDING_MASK))
347 task_clear_jobctl_trapping(task);
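/*
 * Editor's illustrative sketch (not part of the original file): the expected
 * calling pattern for the jobctl helpers above. The helper name and the
 * choice of SIGSTOP are hypothetical; the caller is assumed to already hold
 * @t->sighand->siglock.
 */
static inline void example_schedule_and_cancel_stop(struct task_struct *t)
{
	lockdep_assert_held(&t->sighand->siglock);

	/* Request a group stop, with SIGSTOP encoded in JOBCTL_STOP_SIGMASK. */
	task_set_jobctl_pending(t, SIGSTOP | JOBCTL_STOP_PENDING |
				   JOBCTL_STOP_CONSUME);

	/* Later, e.g. when a SIGCONT arrives, retract the pending stop. */
	task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
}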
351 * task_participate_group_stop - participate in a group stop
352 * @task: task participating in a group stop
354 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
355 * Group stop states are cleared and the group stop count is consumed if
356 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
357 * stop, the appropriate `SIGNAL_*` flags are set.
360 * Must be called with @task->sighand->siglock held.
363 * %true if group stop completion should be notified to the parent, %false
366 static bool task_participate_group_stop(struct task_struct *task)
368 struct signal_struct *sig = task->signal;
369 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
371 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
373 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
378 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
379 sig->group_stop_count--;
382 * Tell the caller to notify completion iff we are entering into a
383 * fresh group stop. Read comment in do_signal_stop() for details.
385 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
386 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
392 void task_join_group_stop(struct task_struct *task)
394 unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
395 struct signal_struct *sig = current->signal;
397 if (sig->group_stop_count) {
398 sig->group_stop_count++;
399 mask |= JOBCTL_STOP_CONSUME;
400 } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
403 /* Have the new thread join an on-going signal group stop */
404 task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
408 * allocate a new signal queue record
409 * - this may be called without locks if and only if t == current, otherwise an
410 * appropriate lock must be held to stop the target task from exiting
412 static struct sigqueue *
413 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
414 int override_rlimit, const unsigned int sigqueue_flags)
416 struct sigqueue *q = NULL;
417 struct ucounts *ucounts = NULL;
421 * Protect access to @t credentials. This can go away when all
422 * callers hold rcu read lock.
424 * NOTE! A pending signal will hold on to the user refcount,
425 * and we get/put the refcount only when the sigpending count
426 * changes from/to zero.
429 ucounts = task_ucounts(t);
430 sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
435 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
436 q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
438 print_dropped_signal(sig);
441 if (unlikely(q == NULL)) {
442 dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
444 INIT_LIST_HEAD(&q->list);
445 q->flags = sigqueue_flags;
446 q->ucounts = ucounts;
451 static void __sigqueue_free(struct sigqueue *q)
453 if (q->flags & SIGQUEUE_PREALLOC)
456 dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
459 kmem_cache_free(sigqueue_cachep, q);
462 void flush_sigqueue(struct sigpending *queue)
466 sigemptyset(&queue->signal);
467 while (!list_empty(&queue->list)) {
468 q = list_entry(queue->list.next, struct sigqueue, list);
469 list_del_init(&q->list);
475 * Flush all pending signals for this kthread.
477 void flush_signals(struct task_struct *t)
481 spin_lock_irqsave(&t->sighand->siglock, flags);
482 clear_tsk_thread_flag(t, TIF_SIGPENDING);
483 flush_sigqueue(&t->pending);
484 flush_sigqueue(&t->signal->shared_pending);
485 spin_unlock_irqrestore(&t->sighand->siglock, flags);
487 EXPORT_SYMBOL(flush_signals);
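/*
 * Editor's illustrative sketch (not part of the original file): a kernel
 * thread that enabled signal delivery with allow_signal() typically drains
 * whatever is pending before re-checking its own stop condition. The helper
 * below is hypothetical.
 */
static inline void example_kthread_drain_signals(void)
{
	if (signal_pending(current))
		flush_signals(current);
}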
489 #ifdef CONFIG_POSIX_TIMERS
490 static void __flush_itimer_signals(struct sigpending *pending)
492 sigset_t signal, retain;
493 struct sigqueue *q, *n;
495 signal = pending->signal;
496 sigemptyset(&retain);
498 list_for_each_entry_safe(q, n, &pending->list, list) {
499 int sig = q->info.si_signo;
501 if (likely(q->info.si_code != SI_TIMER)) {
502 sigaddset(&retain, sig);
504 sigdelset(&signal, sig);
505 list_del_init(&q->list);
510 sigorsets(&pending->signal, &signal, &retain);
513 void flush_itimer_signals(void)
515 struct task_struct *tsk = current;
518 spin_lock_irqsave(&tsk->sighand->siglock, flags);
519 __flush_itimer_signals(&tsk->pending);
520 __flush_itimer_signals(&tsk->signal->shared_pending);
521 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
525 void ignore_signals(struct task_struct *t)
529 for (i = 0; i < _NSIG; ++i)
530 t->sighand->action[i].sa.sa_handler = SIG_IGN;
536 * Flush all handlers for a task.
540 flush_signal_handlers(struct task_struct *t, int force_default)
543 struct k_sigaction *ka = &t->sighand->action[0];
544 for (i = _NSIG ; i != 0 ; i--) {
545 if (force_default || ka->sa.sa_handler != SIG_IGN)
546 ka->sa.sa_handler = SIG_DFL;
548 #ifdef __ARCH_HAS_SA_RESTORER
549 ka->sa.sa_restorer = NULL;
551 sigemptyset(&ka->sa.sa_mask);
556 bool unhandled_signal(struct task_struct *tsk, int sig)
558 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
559 if (is_global_init(tsk))
562 if (handler != SIG_IGN && handler != SIG_DFL)
565 /* if ptraced, let the tracer determine */
569 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
572 struct sigqueue *q, *first = NULL;
575 * Collect the siginfo appropriate to this signal. Check if
576 * there is another siginfo for the same signal.
578 list_for_each_entry(q, &list->list, list) {
579 if (q->info.si_signo == sig) {
586 sigdelset(&list->signal, sig);
590 list_del_init(&first->list);
591 copy_siginfo(info, &first->info);
594 (first->flags & SIGQUEUE_PREALLOC) &&
595 (info->si_code == SI_TIMER) &&
596 (info->si_sys_private);
598 __sigqueue_free(first);
601 * Ok, it wasn't in the queue. This must be
602 * a fast-pathed signal or we must have been
603 * out of queue space. So zero out the info.
606 info->si_signo = sig;
608 info->si_code = SI_USER;
614 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
615 kernel_siginfo_t *info, bool *resched_timer)
617 int sig = next_signal(pending, mask);
620 collect_signal(sig, pending, info, resched_timer);
625 * Dequeue a signal and return the element to the caller, which is
626 * expected to free it.
628 * All callers have to hold the siglock.
630 int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
631 kernel_siginfo_t *info, enum pid_type *type)
633 bool resched_timer = false;
636 /* We only dequeue private signals from ourselves, we don't let
637 * signalfd steal them
640 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
642 *type = PIDTYPE_TGID;
643 signr = __dequeue_signal(&tsk->signal->shared_pending,
644 mask, info, &resched_timer);
645 #ifdef CONFIG_POSIX_TIMERS
649 * itimers are process shared and we restart periodic
650 * itimers in the signal delivery path to prevent DoS
651 * attacks in the high resolution timer case. This is
652 * compliant with the old way of self-restarting
653 * itimers, as the SIGALRM is a legacy signal and only
654 * queued once. Changing the restart behaviour to
655 * restart the timer in the signal dequeue path is
656 * reducing the timer noise on heavy loaded !highres
659 if (unlikely(signr == SIGALRM)) {
660 struct hrtimer *tmr = &tsk->signal->real_timer;
662 if (!hrtimer_is_queued(tmr) &&
663 tsk->signal->it_real_incr != 0) {
664 hrtimer_forward(tmr, tmr->base->get_time(),
665 tsk->signal->it_real_incr);
666 hrtimer_restart(tmr);
676 if (unlikely(sig_kernel_stop(signr))) {
678 * Set a marker that we have dequeued a stop signal. Our
679 * caller might release the siglock and then the pending
680 * stop signal it is about to process is no longer in the
681 * pending bitmasks, but must still be cleared by a SIGCONT
682 * (and overruled by a SIGKILL). So those cases clear this
683 * shared flag after we've set it. Note that this flag may
684 * remain set after the signal we return is ignored or
685 * handled. That doesn't matter because its only purpose
686 * is to alert stop-signal processing code when another
687 * processor has come along and cleared the flag.
689 current->jobctl |= JOBCTL_STOP_DEQUEUED;
691 #ifdef CONFIG_POSIX_TIMERS
694 * Release the siglock to ensure proper locking order
695 * of timer locks outside of siglocks. Note, we leave
696 * irqs disabled here, since the posix-timers code is
697 * about to disable them again anyway.
699 spin_unlock(&tsk->sighand->siglock);
700 posixtimer_rearm(info);
701 spin_lock(&tsk->sighand->siglock);
703 /* Don't expose the si_sys_private value to userspace */
704 info->si_sys_private = 0;
709 EXPORT_SYMBOL_GPL(dequeue_signal);
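/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * shape of a dequeue loop over the caller's own signals, in the spirit of
 * get_signal() and signalfd. Reacting to each signal is left out; the helper
 * name is hypothetical.
 */
static inline void example_drain_own_signals(void)
{
	kernel_siginfo_t info;
	enum pid_type type;

	spin_lock_irq(&current->sighand->siglock);
	while (dequeue_signal(current, &current->blocked, &info, &type)) {
		/* info.si_signo now holds the dequeued signal number. */
	}
	spin_unlock_irq(&current->sighand->siglock);
}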
711 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
713 struct task_struct *tsk = current;
714 struct sigpending *pending = &tsk->pending;
715 struct sigqueue *q, *sync = NULL;
718 * Might a synchronous signal be in the queue?
720 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
724 * Return the first synchronous signal in the queue.
726 list_for_each_entry(q, &pending->list, list) {
727 /* Synchronous signals have a positive si_code */
728 if ((q->info.si_code > SI_USER) &&
729 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
737 * Check if there is another siginfo for the same signal.
739 list_for_each_entry_continue(q, &pending->list, list) {
740 if (q->info.si_signo == sync->info.si_signo)
744 sigdelset(&pending->signal, sync->info.si_signo);
747 list_del_init(&sync->list);
748 copy_siginfo(info, &sync->info);
749 __sigqueue_free(sync);
750 return info->si_signo;
754 * Tell a process that it has a new active signal..
756 * NOTE! we rely on the previous spin_lock to
757 * lock interrupts for us! We can only be called with
758 * "siglock" held, and the local interrupt must
759 * have been disabled when that got acquired!
761 * No need to set need_resched since signal event passing
762 * goes through ->blocked
764 void signal_wake_up_state(struct task_struct *t, unsigned int state)
766 lockdep_assert_held(&t->sighand->siglock);
768 set_tsk_thread_flag(t, TIF_SIGPENDING);
771 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
772 * case. We don't check t->state here because there is a race with it
773 * executing on another processor and just now entering the stopped state.
774 * By using wake_up_state, we ensure the process will wake up and
775 * handle its death signal.
777 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
782 * Remove signals in mask from the pending set and queue.
783 * The matching sigqueue entries are removed from the queue as well.
785 * All callers must be holding the siglock.
787 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
789 struct sigqueue *q, *n;
792 sigandsets(&m, mask, &s->signal);
793 if (sigisemptyset(&m))
796 sigandnsets(&s->signal, &s->signal, mask);
797 list_for_each_entry_safe(q, n, &s->list, list) {
798 if (sigismember(mask, q->info.si_signo)) {
799 list_del_init(&q->list);
805 static inline int is_si_special(const struct kernel_siginfo *info)
807 return info <= SEND_SIG_PRIV;
810 static inline bool si_fromuser(const struct kernel_siginfo *info)
812 return info == SEND_SIG_NOINFO ||
813 (!is_si_special(info) && SI_FROMUSER(info));
817 * called with RCU read lock from check_kill_permission()
819 static bool kill_ok_by_cred(struct task_struct *t)
821 const struct cred *cred = current_cred();
822 const struct cred *tcred = __task_cred(t);
824 return uid_eq(cred->euid, tcred->suid) ||
825 uid_eq(cred->euid, tcred->uid) ||
826 uid_eq(cred->uid, tcred->suid) ||
827 uid_eq(cred->uid, tcred->uid) ||
828 ns_capable(tcred->user_ns, CAP_KILL);
832 * Bad permissions for sending the signal
833 * - the caller must hold the RCU read lock
835 static int check_kill_permission(int sig, struct kernel_siginfo *info,
836 struct task_struct *t)
841 if (!valid_signal(sig))
844 if (!si_fromuser(info))
847 error = audit_signal_info(sig, t); /* Let audit system see the signal */
851 if (!same_thread_group(current, t) &&
852 !kill_ok_by_cred(t)) {
855 sid = task_session(t);
857 * We don't return the error if sid == NULL. The
858 * task was unhashed, the caller must notice this.
860 if (!sid || sid == task_session(current))
868 return security_task_kill(t, info, sig, NULL);
872 * ptrace_trap_notify - schedule trap to notify ptracer
873 * @t: tracee wanting to notify tracer
875 * This function schedules sticky ptrace trap which is cleared on the next
876 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
879 * If @t is running, STOP trap will be taken. If trapped for STOP and
880 * ptracer is listening for events, tracee is woken up so that it can
881 * re-trap for the new event. If trapped otherwise, STOP trap will be
882 * eventually taken without returning to userland after the existing traps
883 * are finished by PTRACE_CONT.
886 * Must be called with @task->sighand->siglock held.
888 static void ptrace_trap_notify(struct task_struct *t)
890 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
891 lockdep_assert_held(&t->sighand->siglock);
893 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
894 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
898 * Handle magic process-wide effects of stop/continue signals. Unlike
899 * the signal actions, these happen immediately at signal-generation
900 * time regardless of blocking, ignoring, or handling. This does the
901 * actual continuing for SIGCONT, but not the actual stopping for stop
902 * signals. The process stop is done as a signal action for SIG_DFL.
904 * Returns true if the signal should be actually delivered, otherwise
905 * it should be dropped.
907 static bool prepare_signal(int sig, struct task_struct *p, bool force)
909 struct signal_struct *signal = p->signal;
910 struct task_struct *t;
913 if (signal->flags & SIGNAL_GROUP_EXIT) {
914 if (signal->core_state)
915 return sig == SIGKILL;
917 * The process is in the middle of dying, drop the signal.
920 } else if (sig_kernel_stop(sig)) {
922 * This is a stop signal. Remove SIGCONT from all queues.
924 siginitset(&flush, sigmask(SIGCONT));
925 flush_sigqueue_mask(&flush, &signal->shared_pending);
926 for_each_thread(p, t)
927 flush_sigqueue_mask(&flush, &t->pending);
928 } else if (sig == SIGCONT) {
931 * Remove all stop signals from all queues, wake all threads.
933 siginitset(&flush, SIG_KERNEL_STOP_MASK);
934 flush_sigqueue_mask(&flush, &signal->shared_pending);
935 for_each_thread(p, t) {
936 flush_sigqueue_mask(&flush, &t->pending);
937 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
938 if (likely(!(t->ptrace & PT_SEIZED))) {
939 t->jobctl &= ~JOBCTL_STOPPED;
940 wake_up_state(t, __TASK_STOPPED);
942 ptrace_trap_notify(t);
946 * Notify the parent with CLD_CONTINUED if we were stopped.
948 * If we were in the middle of a group stop, we pretend it
949 * was already finished, and then continued. Since SIGCHLD
950 * doesn't queue we report only CLD_STOPPED, as if the next
951 * CLD_CONTINUED was dropped.
954 if (signal->flags & SIGNAL_STOP_STOPPED)
955 why |= SIGNAL_CLD_CONTINUED;
956 else if (signal->group_stop_count)
957 why |= SIGNAL_CLD_STOPPED;
961 * The first thread which returns from do_signal_stop()
962 * will take ->siglock, notice SIGNAL_CLD_MASK, and
963 * notify its parent. See get_signal().
965 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
966 signal->group_stop_count = 0;
967 signal->group_exit_code = 0;
971 return !sig_ignored(p, sig, force);
975 * Test if P wants to take SIG. After we've checked all threads with this,
976 * it's equivalent to finding no threads not blocking SIG. Any threads not
977 * blocking SIG were ruled out because they are not running and already
978 * have pending signals. Such threads will dequeue from the shared queue
979 * as soon as they're available, so putting the signal on the shared queue
980 * will be equivalent to sending it to one such thread.
982 static inline bool wants_signal(int sig, struct task_struct *p)
984 if (sigismember(&p->blocked, sig))
987 if (p->flags & PF_EXITING)
993 if (task_is_stopped_or_traced(p))
996 return task_curr(p) || !task_sigpending(p);
999 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
1001 struct signal_struct *signal = p->signal;
1002 struct task_struct *t;
1005 * Now find a thread we can wake up to take the signal off the queue.
1007 * Try the suggested task first (may or may not be the main thread).
1009 if (wants_signal(sig, p))
1011 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1013 * There is just one thread and it does not need to be woken.
1014 * It will dequeue unblocked signals before it runs again.
1019 * Otherwise try to find a suitable thread.
1021 t = signal->curr_target;
1022 while (!wants_signal(sig, t)) {
1024 if (t == signal->curr_target)
1026 * No thread needs to be woken.
1027 * Any eligible threads will see
1028 * the signal in the queue soon.
1032 signal->curr_target = t;
1036 * Found a killable thread. If the signal will be fatal,
1037 * then start taking the whole group down immediately.
1039 if (sig_fatal(p, sig) &&
1040 (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
1041 !sigismember(&t->real_blocked, sig) &&
1042 (sig == SIGKILL || !p->ptrace)) {
1044 * This signal will be fatal to the whole group.
1046 if (!sig_kernel_coredump(sig)) {
1048 * Start a group exit and wake everybody up.
1049 * This way we don't have other threads
1050 * running and doing things after a slower
1051 * thread has the fatal signal pending.
1053 signal->flags = SIGNAL_GROUP_EXIT;
1054 signal->group_exit_code = sig;
1055 signal->group_stop_count = 0;
1058 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1059 sigaddset(&t->pending.signal, SIGKILL);
1060 signal_wake_up(t, 1);
1061 } while_each_thread(p, t);
1067 * The signal is already in the shared-pending queue.
1068 * Tell the chosen thread to wake up and dequeue it.
1070 signal_wake_up(t, sig == SIGKILL);
1074 static inline bool legacy_queue(struct sigpending *signals, int sig)
1076 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1079 static int __send_signal_locked(int sig, struct kernel_siginfo *info,
1080 struct task_struct *t, enum pid_type type, bool force)
1082 struct sigpending *pending;
1084 int override_rlimit;
1085 int ret = 0, result;
1087 lockdep_assert_held(&t->sighand->siglock);
1089 result = TRACE_SIGNAL_IGNORED;
1090 if (!prepare_signal(sig, t, force))
1093 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1095 * Short-circuit ignored signals and support queuing
1096 * exactly one non-rt signal, so that we can get more
1097 * detailed information about the cause of the signal.
1099 result = TRACE_SIGNAL_ALREADY_PENDING;
1100 if (legacy_queue(pending, sig))
1103 result = TRACE_SIGNAL_DELIVERED;
1105 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1107 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1111 * Real-time signals must be queued if sent by sigqueue, or
1112 * some other real-time mechanism. It is implementation
1113 * defined whether kill() does so. We attempt to do so, on
1114 * the principle of least surprise, but since kill is not
1115 * allowed to fail with EAGAIN when low on memory we just
1116 * make sure at least one signal gets delivered and don't
1117 * pass on the info struct.
1120 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1122 override_rlimit = 0;
1124 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1127 list_add_tail(&q->list, &pending->list);
1128 switch ((unsigned long) info) {
1129 case (unsigned long) SEND_SIG_NOINFO:
1130 clear_siginfo(&q->info);
1131 q->info.si_signo = sig;
1132 q->info.si_errno = 0;
1133 q->info.si_code = SI_USER;
1134 q->info.si_pid = task_tgid_nr_ns(current,
1135 task_active_pid_ns(t));
1138 from_kuid_munged(task_cred_xxx(t, user_ns),
1142 case (unsigned long) SEND_SIG_PRIV:
1143 clear_siginfo(&q->info);
1144 q->info.si_signo = sig;
1145 q->info.si_errno = 0;
1146 q->info.si_code = SI_KERNEL;
1151 copy_siginfo(&q->info, info);
1154 } else if (!is_si_special(info) &&
1155 sig >= SIGRTMIN && info->si_code != SI_USER) {
1157 * Queue overflow, abort. We may abort if the
1158 * signal was rt and sent by user using something
1159 * other than kill().
1161 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1166 * This is a silent loss of information. We still
1167 * send the signal, but the *info bits are lost.
1169 result = TRACE_SIGNAL_LOSE_INFO;
1173 signalfd_notify(t, sig);
1174 sigaddset(&pending->signal, sig);
1176 /* Let multiprocess signals appear after on-going forks */
1177 if (type > PIDTYPE_TGID) {
1178 struct multiprocess_signals *delayed;
1179 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1180 sigset_t *signal = &delayed->signal;
1181 /* Can't queue both a stop and a continue signal */
1183 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1184 else if (sig_kernel_stop(sig))
1185 sigdelset(signal, SIGCONT);
1186 sigaddset(signal, sig);
1190 complete_signal(sig, t, type);
1192 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1196 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1199 switch (siginfo_layout(info->si_signo, info->si_code)) {
1208 case SIL_FAULT_TRAPNO:
1209 case SIL_FAULT_MCEERR:
1210 case SIL_FAULT_BNDERR:
1211 case SIL_FAULT_PKUERR:
1212 case SIL_FAULT_PERF_EVENT:
1220 int send_signal_locked(int sig, struct kernel_siginfo *info,
1221 struct task_struct *t, enum pid_type type)
1223 /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1226 if (info == SEND_SIG_NOINFO) {
1227 /* Force if sent from an ancestor pid namespace */
1228 force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1229 } else if (info == SEND_SIG_PRIV) {
1230 /* Don't ignore kernel generated signals */
1232 } else if (has_si_pid_and_uid(info)) {
1233 /* SIGKILL and SIGSTOP are special or have ids */
1234 struct user_namespace *t_user_ns;
1237 t_user_ns = task_cred_xxx(t, user_ns);
1238 if (current_user_ns() != t_user_ns) {
1239 kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1240 info->si_uid = from_kuid_munged(t_user_ns, uid);
1244 /* A kernel generated signal? */
1245 force = (info->si_code == SI_KERNEL);
1247 /* From an ancestor pid namespace? */
1248 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1253 return __send_signal_locked(sig, info, t, type, force);
1256 static void print_fatal_signal(int signr)
1258 struct pt_regs *regs = task_pt_regs(current);
1259 pr_info("potentially unexpected fatal signal %d.\n", signr);
1261 #if defined(__i386__) && !defined(__arch_um__)
1262 pr_info("code at %08lx: ", regs->ip);
1265 for (i = 0; i < 16; i++) {
1268 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1270 pr_cont("%02x ", insn);
1280 static int __init setup_print_fatal_signals(char *str)
1282 get_option (&str, &print_fatal_signals);
1287 __setup("print-fatal-signals=", setup_print_fatal_signals);
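/*
 * Editor's note (not part of the original file): the flag above can be set
 * on the kernel command line, e.g. "print-fatal-signals=1", and - assuming
 * CONFIG_SYSCTL - at run time via the corresponding sysctl:
 *
 *	echo 1 > /proc/sys/kernel/print-fatal-signals
 */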
1289 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1292 unsigned long flags;
1295 if (lock_task_sighand(p, &flags)) {
1296 ret = send_signal_locked(sig, info, p, type);
1297 unlock_task_sighand(p, &flags);
1304 HANDLER_CURRENT, /* If reachable use the current handler */
1305 HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
1306 HANDLER_EXIT, /* Only visible as the process exit code */
1310 * Force a signal that the process can't ignore: if necessary
1311 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1313 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1314 * since we do not want to have a signal handler that was blocked
1315 * be invoked when user space had explicitly blocked it.
1317 * We don't want to have recursive SIGSEGV's etc, for example,
1318 * that is why we also clear SIGNAL_UNKILLABLE.
1321 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
1322 enum sig_handler handler)
1324 unsigned long int flags;
1325 int ret, blocked, ignored;
1326 struct k_sigaction *action;
1327 int sig = info->si_signo;
1329 spin_lock_irqsave(&t->sighand->siglock, flags);
1330 action = &t->sighand->action[sig-1];
1331 ignored = action->sa.sa_handler == SIG_IGN;
1332 blocked = sigismember(&t->blocked, sig);
1333 if (blocked || ignored || (handler != HANDLER_CURRENT)) {
1334 action->sa.sa_handler = SIG_DFL;
1335 if (handler == HANDLER_EXIT)
1336 action->sa.sa_flags |= SA_IMMUTABLE;
1338 sigdelset(&t->blocked, sig);
1339 recalc_sigpending_and_wake(t);
1343 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1344 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
1346 if (action->sa.sa_handler == SIG_DFL &&
1347 (!t->ptrace || (handler == HANDLER_EXIT)))
1348 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1349 ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
1350 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1355 int force_sig_info(struct kernel_siginfo *info)
1357 return force_sig_info_to_task(info, current, HANDLER_CURRENT);
1361 * Nuke all other threads in the group.
1363 int zap_other_threads(struct task_struct *p)
1365 struct task_struct *t = p;
1368 p->signal->group_stop_count = 0;
1370 while_each_thread(p, t) {
1371 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1372 /* Don't require de_thread to wait for the vhost_worker */
1373 if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
1376 /* Don't bother with already dead threads */
1379 sigaddset(&t->pending.signal, SIGKILL);
1380 signal_wake_up(t, 1);
1386 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1387 unsigned long *flags)
1389 struct sighand_struct *sighand;
1393 sighand = rcu_dereference(tsk->sighand);
1394 if (unlikely(sighand == NULL))
1398 * This sighand can be already freed and even reused, but
1399 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1400 * initializes ->siglock: this slab can't go away, it has
1401 * the same object type, ->siglock can't be reinitialized.
1403 * We need to ensure that tsk->sighand is still the same
1404 * after we take the lock, we can race with de_thread() or
1405 * __exit_signal(). In the latter case the next iteration
1406 * must see ->sighand == NULL.
1408 spin_lock_irqsave(&sighand->siglock, *flags);
1409 if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1411 spin_unlock_irqrestore(&sighand->siglock, *flags);
1418 #ifdef CONFIG_LOCKDEP
1419 void lockdep_assert_task_sighand_held(struct task_struct *task)
1421 struct sighand_struct *sighand;
1424 sighand = rcu_dereference(task->sighand);
1426 lockdep_assert_held(&sighand->siglock);
1434 * send signal info to all the members of a group
1436 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1437 struct task_struct *p, enum pid_type type)
1442 ret = check_kill_permission(sig, info, p);
1446 ret = do_send_sig_info(sig, info, p, type);
1452 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1453 * control characters do (^C, ^Z etc)
1454 * - the caller must hold at least a readlock on tasklist_lock
1456 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1458 struct task_struct *p = NULL;
1459 int retval, success;
1463 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1464 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1467 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1468 return success ? 0 : retval;
1471 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1474 struct task_struct *p;
1478 p = pid_task(pid, PIDTYPE_PID);
1480 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1482 if (likely(!p || error != -ESRCH))
1486 * The task was unhashed in between, try again. If it
1487 * is dead, pid_task() will return NULL, if we race with
1488 * de_thread() it will find the new leader.
1493 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1497 error = kill_pid_info(sig, info, find_vpid(pid));
1502 static inline bool kill_as_cred_perm(const struct cred *cred,
1503 struct task_struct *target)
1505 const struct cred *pcred = __task_cred(target);
1507 return uid_eq(cred->euid, pcred->suid) ||
1508 uid_eq(cred->euid, pcred->uid) ||
1509 uid_eq(cred->uid, pcred->suid) ||
1510 uid_eq(cred->uid, pcred->uid);
1514 * The usb asyncio usage of siginfo is wrong. The glibc support
1515 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1516 * AKA after the generic fields:
1517 * kernel_pid_t si_pid;
1518 * kernel_uid32_t si_uid;
1519 * sigval_t si_value;
1521 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1522 * after the generic fields is:
1523 * void __user *si_addr;
1525 * This is a practical problem when there is a 64bit big endian kernel
1526 * and a 32bit userspace. The 32bit address will be encoded in the low
1527 * 32bits of the pointer, and those low 32bits will be stored at a higher
1528 * address than a 32bit pointer expects. So userspace will not
1529 * see the address it was expecting for its completions.
1531 * There is nothing in the encoding that can allow
1532 * copy_siginfo_to_user32 to detect this confusion of formats, so
1533 * handle this by requiring the caller of kill_pid_usb_asyncio to
1534 * notice when this situation takes place and to store the 32bit
1535 * pointer in sival_int instead of sival_ptr of the sigval_t addr
1538 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1539 struct pid *pid, const struct cred *cred)
1541 struct kernel_siginfo info;
1542 struct task_struct *p;
1543 unsigned long flags;
1546 if (!valid_signal(sig))
1549 clear_siginfo(&info);
1550 info.si_signo = sig;
1551 info.si_errno = errno;
1552 info.si_code = SI_ASYNCIO;
1553 *((sigval_t *)&info.si_pid) = addr;
1556 p = pid_task(pid, PIDTYPE_PID);
1561 if (!kill_as_cred_perm(cred, p)) {
1565 ret = security_task_kill(p, &info, sig, cred);
1570 if (lock_task_sighand(p, &flags)) {
1571 ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
1572 unlock_task_sighand(p, &flags);
1580 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
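/*
 * Editor's illustrative sketch (not part of the original file): how a caller
 * is expected to build the sigval per the comment above, storing a 32bit
 * user pointer in sival_int for compat tasks. The helper and its "is_compat"
 * flag are hypothetical; real callers use ptr_to_compat() under CONFIG_COMPAT.
 */
static inline sigval_t example_asyncio_sigval(bool is_compat,
					      void __user *uaddr)
{
	sigval_t v;

	if (is_compat)
		v.sival_int = (u32)(unsigned long)uaddr;
	else
		v.sival_ptr = uaddr;
	return v;
}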
1583 * kill_something_info() interprets pid in interesting ways just like kill(2).
1585 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1586 * is probably wrong. Should make it like BSD or SYSV.
1589 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1594 return kill_proc_info(sig, info, pid);
1596 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1600 read_lock(&tasklist_lock);
1602 ret = __kill_pgrp_info(sig, info,
1603 pid ? find_vpid(-pid) : task_pgrp(current));
1605 int retval = 0, count = 0;
1606 struct task_struct * p;
1608 for_each_process(p) {
1609 if (task_pid_vnr(p) > 1 &&
1610 !same_thread_group(p, current)) {
1611 int err = group_send_sig_info(sig, info, p,
1618 ret = count ? retval : -ESRCH;
1620 read_unlock(&tasklist_lock);
1626 * These are for backward compatibility with the rest of the kernel source.
1629 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1632 * Make sure legacy kernel users don't send in bad values
1633 * (normal paths check this in check_kill_permission).
1635 if (!valid_signal(sig))
1638 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1640 EXPORT_SYMBOL(send_sig_info);
1642 #define __si_special(priv) \
1643 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1646 send_sig(int sig, struct task_struct *p, int priv)
1648 return send_sig_info(sig, __si_special(priv), p);
1650 EXPORT_SYMBOL(send_sig);
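/*
 * Editor's illustrative sketch (not part of the original file): a
 * driver-style caller notifying the owning task, in the spirit of
 * kill_fasync(). priv=1 marks the signal as kernel generated
 * (SEND_SIG_PRIV); priv=0 would make it look like a user-sent signal.
 */
static inline int example_notify_owner(struct task_struct *owner)
{
	return send_sig(SIGIO, owner, 1);
}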
1652 void force_sig(int sig)
1654 struct kernel_siginfo info;
1656 clear_siginfo(&info);
1657 info.si_signo = sig;
1659 info.si_code = SI_KERNEL;
1662 force_sig_info(&info);
1664 EXPORT_SYMBOL(force_sig);
1666 void force_fatal_sig(int sig)
1668 struct kernel_siginfo info;
1670 clear_siginfo(&info);
1671 info.si_signo = sig;
1673 info.si_code = SI_KERNEL;
1676 force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
1679 void force_exit_sig(int sig)
1681 struct kernel_siginfo info;
1683 clear_siginfo(&info);
1684 info.si_signo = sig;
1686 info.si_code = SI_KERNEL;
1689 force_sig_info_to_task(&info, current, HANDLER_EXIT);
1693 * When things go south during signal handling, we
1694 * will force a SIGSEGV. And if the signal that caused
1695 * the problem was already a SIGSEGV, we'll want to
1696 * make sure we don't even try to deliver the signal..
1698 void force_sigsegv(int sig)
1701 force_fatal_sig(SIGSEGV);
1706 int force_sig_fault_to_task(int sig, int code, void __user *addr
1707 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1708 , struct task_struct *t)
1710 struct kernel_siginfo info;
1712 clear_siginfo(&info);
1713 info.si_signo = sig;
1715 info.si_code = code;
1716 info.si_addr = addr;
1719 info.si_flags = flags;
1722 return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
1725 int force_sig_fault(int sig, int code, void __user *addr
1726 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1728 return force_sig_fault_to_task(sig, code, addr
1729 ___ARCH_SI_IA64(imm, flags, isr), current);
1732 int send_sig_fault(int sig, int code, void __user *addr
1733 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1734 , struct task_struct *t)
1736 struct kernel_siginfo info;
1738 clear_siginfo(&info);
1739 info.si_signo = sig;
1741 info.si_code = code;
1742 info.si_addr = addr;
1745 info.si_flags = flags;
1748 return send_sig_info(info.si_signo, &info, t);
1751 int force_sig_mceerr(int code, void __user *addr, short lsb)
1753 struct kernel_siginfo info;
1755 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1756 clear_siginfo(&info);
1757 info.si_signo = SIGBUS;
1759 info.si_code = code;
1760 info.si_addr = addr;
1761 info.si_addr_lsb = lsb;
1762 return force_sig_info(&info);
1765 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1767 struct kernel_siginfo info;
1769 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1770 clear_siginfo(&info);
1771 info.si_signo = SIGBUS;
1773 info.si_code = code;
1774 info.si_addr = addr;
1775 info.si_addr_lsb = lsb;
1776 return send_sig_info(info.si_signo, &info, t);
1778 EXPORT_SYMBOL(send_sig_mceerr);
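/*
 * Editor's illustrative sketch (not part of the original file): memory
 * poisoning reports pass the faulting user address plus the granularity of
 * the poisoned mapping as the "lsb" argument; PAGE_SHIFT corresponds to a
 * single base page. The helper is hypothetical.
 */
static inline int example_report_memory_poison(void __user *addr,
					       struct task_struct *t)
{
	return send_sig_mceerr(BUS_MCEERR_AO, addr, PAGE_SHIFT, t);
}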
1780 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1782 struct kernel_siginfo info;
1784 clear_siginfo(&info);
1785 info.si_signo = SIGSEGV;
1787 info.si_code = SEGV_BNDERR;
1788 info.si_addr = addr;
1789 info.si_lower = lower;
1790 info.si_upper = upper;
1791 return force_sig_info(&info);
1795 int force_sig_pkuerr(void __user *addr, u32 pkey)
1797 struct kernel_siginfo info;
1799 clear_siginfo(&info);
1800 info.si_signo = SIGSEGV;
1802 info.si_code = SEGV_PKUERR;
1803 info.si_addr = addr;
1804 info.si_pkey = pkey;
1805 return force_sig_info(&info);
1809 int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
1811 struct kernel_siginfo info;
1813 clear_siginfo(&info);
1814 info.si_signo = SIGTRAP;
1816 info.si_code = TRAP_PERF;
1817 info.si_addr = addr;
1818 info.si_perf_data = sig_data;
1819 info.si_perf_type = type;
1822 * Signals generated by perf events should not terminate the whole
1823 * process if SIGTRAP is blocked, however, delivering the signal
1824 * asynchronously is better than not delivering at all. But tell user
1825 * space if the signal was asynchronous, so it can clearly be
1826 * distinguished from normal synchronous ones.
1828 info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
1829 TRAP_PERF_FLAG_ASYNC :
1832 return send_sig_info(info.si_signo, &info, current);
1836 * force_sig_seccomp - signals the task to allow in-process syscall emulation
1837 * @syscall: syscall number to send to userland
1838 * @reason: filter-supplied reason code to send to userland (via si_errno)
1839 * @force_coredump: true to trigger a coredump
1841 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1843 int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1845 struct kernel_siginfo info;
1847 clear_siginfo(&info);
1848 info.si_signo = SIGSYS;
1849 info.si_code = SYS_SECCOMP;
1850 info.si_call_addr = (void __user *)KSTK_EIP(current);
1851 info.si_errno = reason;
1852 info.si_arch = syscall_get_arch(current);
1853 info.si_syscall = syscall;
1854 return force_sig_info_to_task(&info, current,
1855 force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
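/*
 * Editor's illustrative sketch (not part of the original file): roughly how
 * a SECCOMP_RET_TRAP-style action maps onto the helper above. The syscall
 * number and the filter-supplied data value are placeholders.
 */
static inline int example_seccomp_ret_trap(int this_syscall, u16 ret_data)
{
	/* Non-fatal SIGSYS: let a handler in the task emulate the syscall. */
	return force_sig_seccomp(this_syscall, ret_data, false);
}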
1858 /* For the crazy architectures that include trap information in
1859 * the errno field, instead of an actual errno value.
1861 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1863 struct kernel_siginfo info;
1865 clear_siginfo(&info);
1866 info.si_signo = SIGTRAP;
1867 info.si_errno = errno;
1868 info.si_code = TRAP_HWBKPT;
1869 info.si_addr = addr;
1870 return force_sig_info(&info);
1873 /* For the rare architectures that include trap information using
1876 int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1878 struct kernel_siginfo info;
1880 clear_siginfo(&info);
1881 info.si_signo = sig;
1883 info.si_code = code;
1884 info.si_addr = addr;
1885 info.si_trapno = trapno;
1886 return force_sig_info(&info);
1889 /* For the rare architectures that include trap information using
1892 int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1893 struct task_struct *t)
1895 struct kernel_siginfo info;
1897 clear_siginfo(&info);
1898 info.si_signo = sig;
1900 info.si_code = code;
1901 info.si_addr = addr;
1902 info.si_trapno = trapno;
1903 return send_sig_info(info.si_signo, &info, t);
1906 int kill_pgrp(struct pid *pid, int sig, int priv)
1910 read_lock(&tasklist_lock);
1911 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1912 read_unlock(&tasklist_lock);
1916 EXPORT_SYMBOL(kill_pgrp);
1918 int kill_pid(struct pid *pid, int sig, int priv)
1920 return kill_pid_info(sig, __si_special(priv), pid);
1922 EXPORT_SYMBOL(kill_pid);
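/*
 * Editor's illustrative sketch (not part of the original file): kernel code
 * that holds a struct pid reference (for instance obtained from a pidfd) can
 * signal the process without worrying about task_struct lifetime. The helper
 * is hypothetical.
 */
static inline int example_terminate_by_pid(struct pid *pid)
{
	return kill_pid(pid, SIGTERM, 1);	/* priv=1: kernel generated */
}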
1925 * These functions support sending signals using preallocated sigqueue
1926 * structures. This is needed "because realtime applications cannot
1927 * afford to lose notifications of asynchronous events, like timer
1928 * expirations or I/O completions". In the case of POSIX Timers
1929 * we allocate the sigqueue structure from the timer_create. If this
1930 * allocation fails we are able to report the failure to the application
1931 * with an EAGAIN error.
1933 struct sigqueue *sigqueue_alloc(void)
1935 return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1938 void sigqueue_free(struct sigqueue *q)
1940 unsigned long flags;
1941 spinlock_t *lock = &current->sighand->siglock;
1943 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1945 * We must hold ->siglock while testing q->list
1946 * to serialize with collect_signal() or with
1947 * __exit_signal()->flush_sigqueue().
1949 spin_lock_irqsave(lock, flags);
1950 q->flags &= ~SIGQUEUE_PREALLOC;
1952 * If it is queued it will be freed when dequeued,
1953 * like the "regular" sigqueue.
1955 if (!list_empty(&q->list))
1957 spin_unlock_irqrestore(lock, flags);
1963 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1965 int sig = q->info.si_signo;
1966 struct sigpending *pending;
1967 struct task_struct *t;
1968 unsigned long flags;
1971 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1977 * This function is used by POSIX timers to deliver a timer signal.
1978 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
1979 * set), the signal must be delivered to the specific thread (queues
1982 * Where type is not PIDTYPE_PID, signals must be delivered to the
1983 * process. In this case, prefer to deliver to current if it is in
1984 * the same thread group as the target process, which avoids
1985 * unnecessarily waking up a potentially idle task.
1987 t = pid_task(pid, type);
1990 if (type != PIDTYPE_PID && same_thread_group(t, current))
1992 if (!likely(lock_task_sighand(t, &flags)))
1995 ret = 1; /* the signal is ignored */
1996 result = TRACE_SIGNAL_IGNORED;
1997 if (!prepare_signal(sig, t, false))
2001 if (unlikely(!list_empty(&q->list))) {
2003 * If an SI_TIMER entry is already queued, just increment
2004 * the overrun count.
2006 BUG_ON(q->info.si_code != SI_TIMER);
2007 q->info.si_overrun++;
2008 result = TRACE_SIGNAL_ALREADY_PENDING;
2011 q->info.si_overrun = 0;
2013 signalfd_notify(t, sig);
2014 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
2015 list_add_tail(&q->list, &pending->list);
2016 sigaddset(&pending->signal, sig);
2017 complete_signal(sig, t, type);
2018 result = TRACE_SIGNAL_DELIVERED;
2020 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
2021 unlock_task_sighand(t, &flags);
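/*
 * Editor's illustrative sketch (not part of the original file): the
 * preallocation lifecycle described before sigqueue_alloc() above, in the
 * order a posix-timer style user would follow it. Error handling and the
 * real timer bookkeeping are omitted; "pid"/"type" name the target.
 */
static inline int example_prealloc_and_fire(struct pid *pid, enum pid_type type)
{
	struct sigqueue *q = sigqueue_alloc();		/* at timer_create() time */
	int ret;

	if (!q)
		return -EAGAIN;

	clear_siginfo(&q->info);
	q->info.si_signo = SIGALRM;
	q->info.si_code = SI_TIMER;

	ret = send_sigqueue(q, pid, type);		/* at timer expiry */
	sigqueue_free(q);				/* at timer_delete() time */
	return ret;
}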
2027 static void do_notify_pidfd(struct task_struct *task)
2031 WARN_ON(task->exit_state == 0);
2032 pid = task_pid(task);
2033 wake_up_all(&pid->wait_pidfd);
2037 * Let a parent know about the death of a child.
2038 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2040 * Returns true if our parent ignored us and so we've switched to
2043 bool do_notify_parent(struct task_struct *tsk, int sig)
2045 struct kernel_siginfo info;
2046 unsigned long flags;
2047 struct sighand_struct *psig;
2048 bool autoreap = false;
2051 WARN_ON_ONCE(sig == -1);
2053 /* do_notify_parent_cldstop should have been called instead. */
2054 WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
2056 WARN_ON_ONCE(!tsk->ptrace &&
2057 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2059 /* Wake up all pidfd waiters */
2060 do_notify_pidfd(tsk);
2062 if (sig != SIGCHLD) {
2064 * This is only possible if parent == real_parent.
2065 * Check if it has changed security domain.
2067 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
2071 clear_siginfo(&info);
2072 info.si_signo = sig;
2075 * We are under tasklist_lock here so our parent is tied to
2076 * us and cannot change.
2078 * task_active_pid_ns will always return the same pid namespace
2079 * until a task passes through release_task.
2081 * write_lock() currently calls preempt_disable() which is the
2082 * same as rcu_read_lock(), but according to Oleg it is not
2083 * correct to rely on this
2086 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
2087 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2091 task_cputime(tsk, &utime, &stime);
2092 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2093 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
2095 info.si_status = tsk->exit_code & 0x7f;
2096 if (tsk->exit_code & 0x80)
2097 info.si_code = CLD_DUMPED;
2098 else if (tsk->exit_code & 0x7f)
2099 info.si_code = CLD_KILLED;
2101 info.si_code = CLD_EXITED;
2102 info.si_status = tsk->exit_code >> 8;
2105 psig = tsk->parent->sighand;
2106 spin_lock_irqsave(&psig->siglock, flags);
2107 if (!tsk->ptrace && sig == SIGCHLD &&
2108 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2109 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2111 * We are exiting and our parent doesn't care. POSIX.1
2112 * defines special semantics for setting SIGCHLD to SIG_IGN
2113 * or setting the SA_NOCLDWAIT flag: we should be reaped
2114 * automatically and not left for our parent's wait4 call.
2115 * Rather than having the parent do it as a magic kind of
2116 * signal handler, we just set this to tell do_exit that we
2117 * can be cleaned up without becoming a zombie. Note that
2118 * we still call __wake_up_parent in this case, because a
2119 * blocked sys_wait4 might now return -ECHILD.
2121 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2122 * is implementation-defined: we do (if you don't want
2123 * it, just use SIG_IGN instead).
2126 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
2130 * Send with __send_signal_locked() as si_pid and si_uid are in the
2131 * parent's namespaces.
2133 if (valid_signal(sig) && sig)
2134 __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
2135 __wake_up_parent(tsk, tsk->parent);
2136 spin_unlock_irqrestore(&psig->siglock, flags);
2142 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2143 * @tsk: task reporting the state change
2144 * @for_ptracer: the notification is for ptracer
2145 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2147 * Notify @tsk's parent that the stopped/continued state has changed. If
2148 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2149 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2152 * Must be called with tasklist_lock at least read locked.
2154 static void do_notify_parent_cldstop(struct task_struct *tsk,
2155 bool for_ptracer, int why)
2157 struct kernel_siginfo info;
2158 unsigned long flags;
2159 struct task_struct *parent;
2160 struct sighand_struct *sighand;
2164 parent = tsk->parent;
2166 tsk = tsk->group_leader;
2167 parent = tsk->real_parent;
2170 clear_siginfo(&info);
2171 info.si_signo = SIGCHLD;
2174 * see comment in do_notify_parent() about the following 4 lines
2177 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2178 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2181 task_cputime(tsk, &utime, &stime);
2182 info.si_utime = nsec_to_clock_t(utime);
2183 info.si_stime = nsec_to_clock_t(stime);
2188 info.si_status = SIGCONT;
2191 info.si_status = tsk->signal->group_exit_code & 0x7f;
2194 info.si_status = tsk->exit_code & 0x7f;
2200 sighand = parent->sighand;
2201 spin_lock_irqsave(&sighand->siglock, flags);
2202 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2203 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2204 send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
2206 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2208 __wake_up_parent(tsk, parent);
2209 spin_unlock_irqrestore(&sighand->siglock, flags);
2213 * This must be called with current->sighand->siglock held.
2215 * This should be the path for all ptrace stops.
2216 * We always set current->last_siginfo while stopped here.
2217 * That makes it a way to test a stopped process for
2218 * being ptrace-stopped vs being job-control-stopped.
2220 * Returns the signal the ptracer requested the code resume
2221 * with. If the code did not stop because the tracer is gone,
2222 * the stop signal remains unchanged unless clear_code.
2224 static int ptrace_stop(int exit_code, int why, unsigned long message,
2225 kernel_siginfo_t *info)
2226 __releases(&current->sighand->siglock)
2227 __acquires(&current->sighand->siglock)
2229 bool gstop_done = false;
2231 if (arch_ptrace_stop_needed()) {
2233 * The arch code has something special to do before a
2234 * ptrace stop. This is allowed to block, e.g. for faults
2235 * on user stack pages. We can't keep the siglock while
2236 * calling arch_ptrace_stop, so we must release it now.
2237 * To preserve proper semantics, we must do this before
2238 * any signal bookkeeping like checking group_stop_count.
2240 spin_unlock_irq(&current->sighand->siglock);
2242 spin_lock_irq(&current->sighand->siglock);
2246 * After this point ptrace_signal_wake_up or signal_wake_up
2247 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2248 * signal comes in. Handle previous ptrace_unlinks and fatal
2249 * signals here to prevent ptrace_stop sleeping in schedule.
2251 if (!current->ptrace || __fatal_signal_pending(current))
2254 set_special_state(TASK_TRACED);
2255 current->jobctl |= JOBCTL_TRACED;
2258 * We're committing to trapping. TRACED should be visible before
2259 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2260 * Also, transition to TRACED and updates to ->jobctl should be
2261 * atomic with respect to siglock and should be done after the arch
2262 * hook as siglock is released and regrabbed across it.
2267 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2269 * set_current_state() smp_wmb();
2271 * wait_task_stopped()
2272 * task_stopped_code()
2273 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2277 current->ptrace_message = message;
2278 current->last_siginfo = info;
2279 current->exit_code = exit_code;
2282 * If @why is CLD_STOPPED, we're trapping to participate in a group
2283 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2284 * across siglock relocks since INTERRUPT was scheduled, PENDING
2285 * could be clear now. We act as if SIGCONT is received after
2286 * TASK_TRACED is entered - ignore it.
2288 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2289 gstop_done = task_participate_group_stop(current);
2291 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2292 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2293 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2294 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2296 /* entering a trap, clear TRAPPING */
2297 task_clear_jobctl_trapping(current);
2299 spin_unlock_irq(&current->sighand->siglock);
2300 read_lock(&tasklist_lock);
2302 * Notify parents of the stop.
2304 * While ptraced, there are two parents - the ptracer and
2305 * the real_parent of the group_leader. The ptracer should
2306 * know about every stop while the real parent is only
2307 * interested in the completion of group stop. The states
2308 * for the two don't interact with each other. Notify
2309 * separately unless they're gonna be duplicates.
2311 if (current->ptrace)
2312 do_notify_parent_cldstop(current, true, why);
2313 if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2314 do_notify_parent_cldstop(current, false, why);
2317 * Don't want to allow preemption here, because
2318 * sys_ptrace() needs this task to be inactive.
2320 * XXX: implement read_unlock_no_resched().
2323 read_unlock(&tasklist_lock);
2324 cgroup_enter_frozen();
2325 preempt_enable_no_resched();
2327 cgroup_leave_frozen(true);
2330 * We are back. Now reacquire the siglock before touching
2331 * last_siginfo, so that we are sure to have synchronized with
2332 * any signal-sending on another CPU that wants to examine it.
2334 spin_lock_irq(&current->sighand->siglock);
2335 exit_code = current->exit_code;
2336 current->last_siginfo = NULL;
2337 current->ptrace_message = 0;
2338 current->exit_code = 0;
2340 /* LISTENING can be set only during STOP traps, clear it */
2341 current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2344 * Queued signals ignored us while we were stopped for tracing.
2345 * So check for any that we should take before resuming user mode.
2346 * This sets TIF_SIGPENDING, but never clears it.
2348 recalc_sigpending_tsk(current);
2352 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2354 kernel_siginfo_t info;
2356 clear_siginfo(&info);
2357 info.si_signo = signr;
2358 info.si_code = exit_code;
2359 info.si_pid = task_pid_vnr(current);
2360 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2362 /* Let the debugger run. */
2363 return ptrace_stop(exit_code, why, message, &info);
2366 int ptrace_notify(int exit_code, unsigned long message)
2370 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2371 if (unlikely(task_work_pending(current)))
2374 spin_lock_irq(&current->sighand->siglock);
2375 signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2376 spin_unlock_irq(&current->sighand->siglock);
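/*
 * Illustrative user-space sketch (not part of this file): roughly how a
 * tracer consumes the ptrace-stops reported above. Only standard
 * ptrace(2)/waitpid(2) calls are used; error handling is omitted.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <sys/types.h>
 *	#include <signal.h>
 *
 *	void observe_one_stop(pid_t child)
 *	{
 *		int status, sig = 0;
 *
 *		ptrace(PTRACE_ATTACH, child, 0, 0);
 *		waitpid(child, &status, 0);        // tracee entered a ptrace-stop
 *		if (WIFSTOPPED(status))
 *			sig = WSTOPSIG(status);    // signal that caused the stop
 *		// resume; re-inject the signal unless it was the attach SIGSTOP
 *		ptrace(PTRACE_CONT, child, 0, sig == SIGSTOP ? 0 : sig);
 *	}
 */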
2381 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2382 * @signr: signr causing group stop if initiating
2384 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2385 * and participate in it. If already set, participate in the existing
2386 * group stop. If participated in a group stop (and thus slept), %true is
2387 * returned with siglock released.
2389 * If ptraced, this function doesn't handle stop itself. Instead,
2390 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2391 * untouched. The caller must ensure that INTERRUPT trap handling takes
2392 * place afterwards.
2395 * Must be called with @current->sighand->siglock held, which is released
2399 * %false if group stop is already cancelled or ptrace trap is scheduled.
2400 * %true if participated in group stop.
2402 static bool do_signal_stop(int signr)
2403 __releases(&current->sighand->siglock)
2405 struct signal_struct *sig = current->signal;
2407 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2408 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2409 struct task_struct *t;
2411 /* signr will be recorded in task->jobctl for retries */
2412 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2414 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2415 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2416 unlikely(sig->group_exec_task))
2419 * There is no group stop already in progress. We must
2422 * While ptraced, a task may be resumed while group stop is
2423 * still in effect and then receive a stop signal and
2424 * initiate another group stop. This deviates from the
2425 * usual behavior as two consecutive stop signals can't
2426 * cause two group stops when !ptraced. That is why we
2427 * also check !task_is_stopped(t) below.
2429 * The condition can be distinguished by testing whether
2430 * SIGNAL_STOP_STOPPED is already set. Don't generate
2431 * group_exit_code in such case.
2433 * This is not necessary for SIGNAL_STOP_CONTINUED because
2434 * an intervening stop signal is required to cause two
2435 * continued events regardless of ptrace.
2437 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2438 sig->group_exit_code = signr;
2440 sig->group_stop_count = 0;
2442 if (task_set_jobctl_pending(current, signr | gstop))
2443 sig->group_stop_count++;
2446 while_each_thread(current, t) {
2448 * Setting state to TASK_STOPPED for a group
2449 * stop is always done with the siglock held,
2450 * so this check has no races.
2452 if (!task_is_stopped(t) &&
2453 task_set_jobctl_pending(t, signr | gstop)) {
2454 sig->group_stop_count++;
2455 if (likely(!(t->ptrace & PT_SEIZED)))
2456 signal_wake_up(t, 0);
2458 ptrace_trap_notify(t);
2463 if (likely(!current->ptrace)) {
2467 * If there are no other threads in the group, or if there
2468 * is a group stop in progress and we are the last to stop,
2469 * report to the parent.
2471 if (task_participate_group_stop(current))
2472 notify = CLD_STOPPED;
2474 current->jobctl |= JOBCTL_STOPPED;
2475 set_special_state(TASK_STOPPED);
2476 spin_unlock_irq(&current->sighand->siglock);
2479 * Notify the parent of the group stop completion. Because
2480 * we're not holding either the siglock or tasklist_lock
2481 * here, a ptracer may attach in between; however, this is for
2482 * group stop and should always be delivered to the real
2483 * parent of the group leader. The new ptracer will get
2484 * its notification when this task transitions into
2488 read_lock(&tasklist_lock);
2489 do_notify_parent_cldstop(current, false, notify);
2490 read_unlock(&tasklist_lock);
2493 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2494 cgroup_enter_frozen();
2499 * While ptraced, group stop is handled by STOP trap.
2500 * Schedule it and let the caller deal with it.
2502 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
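/*
 * Illustrative user-space sketch (not part of this file): what the group
 * stop above looks like from a parent process. SIGSTOP stops every thread
 * in the child's thread group and the parent gets a single CLD_STOPPED
 * notification; SIGCONT resumes the whole group.
 *
 *	#include <sys/wait.h>
 *	#include <sys/types.h>
 *	#include <signal.h>
 *
 *	void stop_and_resume(pid_t child)
 *	{
 *		int status;
 *
 *		kill(child, SIGSTOP);                // stops the whole thread group
 *		waitpid(child, &status, WUNTRACED);  // WIFSTOPPED(status) is true
 *		kill(child, SIGCONT);                // resumes all stopped threads
 *		waitpid(child, &status, WCONTINUED); // WIFCONTINUED(status) is true
 *	}
 */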
2508 * do_jobctl_trap - take care of ptrace jobctl traps
2510 * When PT_SEIZED, it's used for both group stop and explicit
2511 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2512 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2513 * the stop signal; otherwise, %SIGTRAP.
2515 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2516 * number as exit_code and no siginfo.
2519 * Must be called with @current->sighand->siglock held, which may be
2520 * released and re-acquired before returning with intervening sleep.
2522 static void do_jobctl_trap(void)
2524 struct signal_struct *signal = current->signal;
2525 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2527 if (current->ptrace & PT_SEIZED) {
2528 if (!signal->group_stop_count &&
2529 !(signal->flags & SIGNAL_STOP_STOPPED))
2531 WARN_ON_ONCE(!signr);
2532 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2535 WARN_ON_ONCE(!signr);
2536 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2541 * do_freezer_trap - handle the freezer jobctl trap
2543 * Puts the task into a frozen state, unless the task is about to quit.
2544 * In that case it drops JOBCTL_TRAP_FREEZE.
2547 * Must be called with @current->sighand->siglock held,
2548 * which is always released before returning.
2550 static void do_freezer_trap(void)
2551 __releases(&current->sighand->siglock)
2554 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2555 * let's make another loop to give it a chance to be handled.
2556 * In any case, we'll return back.
2558 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2559 JOBCTL_TRAP_FREEZE) {
2560 spin_unlock_irq(&current->sighand->siglock);
2565 * Now we're sure that there is no pending fatal signal and no
2566 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2567 * immediately (if there is a non-fatal signal pending), and
2568 * put the task into sleep.
2570 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2571 clear_thread_flag(TIF_SIGPENDING);
2572 spin_unlock_irq(&current->sighand->siglock);
2573 cgroup_enter_frozen();
2577 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2580 * We do not check sig_kernel_stop(signr) but set this marker
2581 * unconditionally because we do not know whether debugger will
2582 * change signr. This flag has no meaning unless we are going
2583 * to stop after return from ptrace_stop(). In this case it will
2584 * be checked in do_signal_stop(), we should only stop if it was
2585 * not cleared by SIGCONT while we were sleeping. See also the
2586 * comment in dequeue_signal().
2588 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2589 signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2591 /* We're back. Did the debugger cancel the sig? */
2596 * Update the siginfo structure if the signal has
2597 * changed. If the debugger wanted something
2598 * specific in the siginfo structure then it should
2599 * have updated *info via PTRACE_SETSIGINFO.
2601 if (signr != info->si_signo) {
2602 clear_siginfo(info);
2603 info->si_signo = signr;
2605 info->si_code = SI_USER;
2607 info->si_pid = task_pid_vnr(current->parent);
2608 info->si_uid = from_kuid_munged(current_user_ns(),
2609 task_uid(current->parent));
2613 /* If the (new) signal is now blocked, requeue it. */
2614 if (sigismember(&current->blocked, signr) ||
2615 fatal_signal_pending(current)) {
2616 send_signal_locked(signr, info, current, type);
2623 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2625 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2627 case SIL_FAULT_TRAPNO:
2628 case SIL_FAULT_MCEERR:
2629 case SIL_FAULT_BNDERR:
2630 case SIL_FAULT_PKUERR:
2631 case SIL_FAULT_PERF_EVENT:
2632 ksig->info.si_addr = arch_untagged_si_addr(
2633 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2645 bool get_signal(struct ksignal *ksig)
2647 struct sighand_struct *sighand = current->sighand;
2648 struct signal_struct *signal = current->signal;
2651 clear_notify_signal();
2652 if (unlikely(task_work_pending(current)))
2655 if (!task_sigpending(current))
2658 if (unlikely(uprobe_deny_signal()))
2662 * Do this once, we can't return to user-mode if freezing() == T.
2663 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2664 * thus do not need another check after return.
2669 spin_lock_irq(&sighand->siglock);
2672 * Every stopped thread goes here after wakeup. Check to see if
2673 * we should notify the parent, prepare_signal(SIGCONT) encodes
2674 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2676 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2679 if (signal->flags & SIGNAL_CLD_CONTINUED)
2680 why = CLD_CONTINUED;
2684 signal->flags &= ~SIGNAL_CLD_MASK;
2686 spin_unlock_irq(&sighand->siglock);
2689 * Notify the parent that we're continuing. This event is
2690 * always per-process and doesn't make a whole lot of sense
2691 * for ptracers, who shouldn't consume the state via
2692 * wait(2) either, but, for backward compatibility, notify
2693 * the ptracer of the group leader too unless it's gonna be a duplicate.
2696 read_lock(&tasklist_lock);
2697 do_notify_parent_cldstop(current, false, why);
2699 if (ptrace_reparented(current->group_leader))
2700 do_notify_parent_cldstop(current->group_leader,
2702 read_unlock(&tasklist_lock);
2708 struct k_sigaction *ka;
2711 /* Has this task already been marked for death? */
2712 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2713 signal->group_exec_task) {
2714 clear_siginfo(&ksig->info);
2715 ksig->info.si_signo = signr = SIGKILL;
2716 sigdelset(&current->pending.signal, SIGKILL);
2717 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2718 &sighand->action[SIGKILL - 1]);
2719 recalc_sigpending();
2723 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2727 if (unlikely(current->jobctl &
2728 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2729 if (current->jobctl & JOBCTL_TRAP_MASK) {
2731 spin_unlock_irq(&sighand->siglock);
2732 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2739 * If the task is leaving the frozen state, let's update
2740 * cgroup counters and reset the frozen bit.
2742 if (unlikely(cgroup_task_frozen(current))) {
2743 spin_unlock_irq(&sighand->siglock);
2744 cgroup_leave_frozen(false);
2749 * Signals generated by the execution of an instruction
2750 * need to be delivered before any other pending signals
2751 * so that the instruction pointer in the signal stack
2752 * frame points to the faulting instruction.
2755 signr = dequeue_synchronous_signal(&ksig->info);
2757 signr = dequeue_signal(current, &current->blocked,
2758 &ksig->info, &type);
2761 break; /* will return 0 */
2763 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2764 !(sighand->action[signr - 1].sa.sa_flags & SA_IMMUTABLE)) {
2765 signr = ptrace_signal(signr, &ksig->info, type);
2770 ka = &sighand->action[signr-1];
2772 /* Trace actually delivered signals. */
2773 trace_signal_deliver(signr, &ksig->info, ka);
2775 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2777 if (ka->sa.sa_handler != SIG_DFL) {
2778 /* Run the handler. */
2781 if (ka->sa.sa_flags & SA_ONESHOT)
2782 ka->sa.sa_handler = SIG_DFL;
2784 break; /* will return non-zero "signr" value */
2788 * Now we are doing the default action for this signal.
2790 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2794 * Global init gets no signals it doesn't want.
2795 * Container-init gets no signals it doesn't want from the same container.
2798 * Note that if global/container-init sees a sig_kernel_only()
2799 * signal here, the signal must have been generated internally
2800 * or must have come from an ancestor namespace. In either
2801 * case, the signal cannot be dropped.
2803 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2804 !sig_kernel_only(signr))
2807 if (sig_kernel_stop(signr)) {
2809 * The default action is to stop all threads in
2810 * the thread group. The job control signals
2811 * do nothing in an orphaned pgrp, but SIGSTOP
2812 * always works. Note that siglock needs to be
2813 * dropped during the call to is_orphaned_pgrp()
2814 * because of lock ordering with tasklist_lock.
2815 * This allows an intervening SIGCONT to be posted.
2816 * We need to check for that and bail out if necessary.
2818 if (signr != SIGSTOP) {
2819 spin_unlock_irq(&sighand->siglock);
2821 /* signals can be posted during this window */
2823 if (is_current_pgrp_orphaned())
2826 spin_lock_irq(&sighand->siglock);
2829 if (likely(do_signal_stop(ksig->info.si_signo))) {
2830 /* It released the siglock. */
2835 * We didn't actually stop, due to a race
2836 * with SIGCONT or something like that.
2842 spin_unlock_irq(&sighand->siglock);
2843 if (unlikely(cgroup_task_frozen(current)))
2844 cgroup_leave_frozen(true);
2847 * Anything else is fatal, maybe with a core dump.
2849 current->flags |= PF_SIGNALED;
2851 if (sig_kernel_coredump(signr)) {
2852 if (print_fatal_signals)
2853 print_fatal_signal(ksig->info.si_signo);
2854 proc_coredump_connector(current);
2856 * If it was able to dump core, this kills all
2857 * other threads in the group and synchronizes with
2858 * their demise. If we lost the race with another
2859 * thread getting here, it set group_exit_code
2860 * first and our do_group_exit call below will use
2861 * that value and ignore the one we pass it.
2863 do_coredump(&ksig->info);
2867 * PF_USER_WORKER threads will catch and exit on fatal signals
2868 * themselves. They have cleanup that must be performed, so
2869 * we cannot call do_exit() on their behalf.
2871 if (current->flags & PF_USER_WORKER)
2875 * Death signals, no core dump.
2877 do_group_exit(ksig->info.si_signo);
2880 spin_unlock_irq(&sighand->siglock);
2884 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2885 hide_si_addr_tag_bits(ksig);
2887 return ksig->sig > 0;
2891 * signal_delivered - called after signal delivery to update blocked signals
2892 * @ksig: kernel signal struct
2893 * @stepping: nonzero if debugger single-step or block-step in use
2895 * This function should be called when a signal has successfully been
2896 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2897 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2898 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2900 static void signal_delivered(struct ksignal *ksig, int stepping)
2904 /* A signal was successfully delivered, and the
2905 saved sigmask was stored on the signal frame,
2906 and will be restored by sigreturn. So we can
2907 simply clear the restore sigmask flag. */
2908 clear_restore_sigmask();
2910 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2911 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2912 sigaddset(&blocked, ksig->sig);
2913 set_current_blocked(&blocked);
2914 if (current->sas_ss_flags & SS_AUTODISARM)
2915 sas_ss_reset(current);
2917 ptrace_notify(SIGTRAP, 0);
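/*
 * Illustrative user-space sketch (not part of this file): the mask update
 * performed above is what an application controls through sa_mask and
 * SA_NODEFER. With the setup below, SIGUSR2 is blocked while the handler
 * runs, and SIGUSR1 itself is blocked too because SA_NODEFER is not set.
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void handler(int sig) { }
 *
 *	void install_usr1(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = handler;
 *		sigemptyset(&sa.sa_mask);
 *		sigaddset(&sa.sa_mask, SIGUSR2);  // additionally blocked in handler
 *		sa.sa_flags = 0;                  // no SA_NODEFER, no SA_RESTART
 *		sigaction(SIGUSR1, &sa, NULL);
 *	}
 */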
2920 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2923 force_sigsegv(ksig->sig);
2925 signal_delivered(ksig, stepping);
2929 * It could be that complete_signal() picked us to notify about the
2930 * group-wide signal. Other threads should be notified now to take
2931 * the shared signals in @which since we will not.
2933 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2936 struct task_struct *t;
2938 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2939 if (sigisemptyset(&retarget))
2943 while_each_thread(tsk, t) {
2944 if (t->flags & PF_EXITING)
2947 if (!has_pending_signals(&retarget, &t->blocked))
2949 /* Remove the signals this thread can handle. */
2950 sigandsets(&retarget, &retarget, &t->blocked);
2952 if (!task_sigpending(t))
2953 signal_wake_up(t, 0);
2955 if (sigisemptyset(&retarget))
2960 void exit_signals(struct task_struct *tsk)
2966 * @tsk is about to have PF_EXITING set - lock out users which
2967 * expect stable threadgroup.
2969 cgroup_threadgroup_change_begin(tsk);
2971 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2972 sched_mm_cid_exit_signals(tsk);
2973 tsk->flags |= PF_EXITING;
2974 cgroup_threadgroup_change_end(tsk);
2978 spin_lock_irq(&tsk->sighand->siglock);
2980 * From now this task is not visible for group-wide signals,
2981 * see wants_signal(), do_signal_stop().
2983 sched_mm_cid_exit_signals(tsk);
2984 tsk->flags |= PF_EXITING;
2986 cgroup_threadgroup_change_end(tsk);
2988 if (!task_sigpending(tsk))
2991 unblocked = tsk->blocked;
2992 signotset(&unblocked);
2993 retarget_shared_pending(tsk, &unblocked);
2995 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2996 task_participate_group_stop(tsk))
2997 group_stop = CLD_STOPPED;
2999 spin_unlock_irq(&tsk->sighand->siglock);
3002 * If group stop has completed, deliver the notification. This
3003 * should always go to the real parent of the group leader.
3005 if (unlikely(group_stop)) {
3006 read_lock(&tasklist_lock);
3007 do_notify_parent_cldstop(tsk, false, group_stop);
3008 read_unlock(&tasklist_lock);
3013 * System call entry points.
3017 * sys_restart_syscall - restart a system call
3019 SYSCALL_DEFINE0(restart_syscall)
3021 struct restart_block *restart = &current->restart_block;
3022 return restart->fn(restart);
3025 long do_no_restart_syscall(struct restart_block *param)
3030 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3032 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3033 sigset_t newblocked;
3034 /* A set of now blocked but previously unblocked signals. */
3035 sigandnsets(&newblocked, newset, &current->blocked);
3036 retarget_shared_pending(tsk, &newblocked);
3038 tsk->blocked = *newset;
3039 recalc_sigpending();
3043 * set_current_blocked - change current->blocked mask
3046 * It is wrong to change ->blocked directly, this helper should be used
3047 * to ensure the process can't miss a shared signal we are going to block.
3049 void set_current_blocked(sigset_t *newset)
3051 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3052 __set_current_blocked(newset);
3055 void __set_current_blocked(const sigset_t *newset)
3057 struct task_struct *tsk = current;
3060 * In case the signal mask hasn't changed, there is nothing we need
3061 * to do. The current->blocked shouldn't be modified by other task.
3063 if (sigequalsets(&tsk->blocked, newset))
3066 spin_lock_irq(&tsk->sighand->siglock);
3067 __set_task_blocked(tsk, newset);
3068 spin_unlock_irq(&tsk->sighand->siglock);
3072 * This is also useful for kernel threads that want to temporarily
3073 * (or permanently) block certain signals.
3075 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3076 * interface happily blocks "unblockable" signals like SIGKILL
3079 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3081 struct task_struct *tsk = current;
3084 /* Lockless, only current can change ->blocked, never from irq */
3086 *oldset = tsk->blocked;
3090 sigorsets(&newset, &tsk->blocked, set);
3093 sigandnsets(&newset, &tsk->blocked, set);
3102 __set_current_blocked(&newset);
3105 EXPORT_SYMBOL(sigprocmask);
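/*
 * Illustrative user-space sketch (not part of this file): the user-mode use
 * of the same operation via sigprocmask(2). As noted above, the user
 * interface silently refuses to block SIGKILL and SIGSTOP.
 *
 *	#include <signal.h>
 *
 *	void run_uninterrupted(void (*work)(void))
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGINT);
 *		sigprocmask(SIG_BLOCK, &block, &old);  // defer SIGINT
 *		work();                                // not interrupted by ^C
 *		sigprocmask(SIG_SETMASK, &old, NULL);  // restore previous mask
 *	}
 */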
3108 * The API helps set app-provided sigmasks.
3110 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3111 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3113 * Note that it does set_restore_sigmask() in advance, so it must be always
3114 * paired with restore_saved_sigmask_unless() before return from syscall.
3116 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3122 if (sigsetsize != sizeof(sigset_t))
3124 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3127 set_restore_sigmask();
3128 current->saved_sigmask = current->blocked;
3129 set_current_blocked(&kmask);
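/*
 * Illustrative user-space sketch (not part of this file): the pattern that
 * set_user_sigmask() exists to support. The caller keeps SIGTERM blocked and
 * only lets it through atomically inside ppoll(), closing the usual
 * check-then-sleep race. Assumes a SIGTERM handler is installed so delivery
 * shows up as -1/EINTR; ppoll() may require _GNU_SOURCE.
 *
 *	#include <poll.h>
 *	#include <signal.h>
 *
 *	int wait_fd_or_sigterm(int fd)
 *	{
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *		sigset_t block, during;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGTERM);
 *		sigprocmask(SIG_BLOCK, &block, &during);  // 'during' = old mask
 *		sigdelset(&during, SIGTERM);              // deliverable only in ppoll()
 *		return ppoll(&pfd, 1, NULL, &during);
 *	}
 */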
3134 #ifdef CONFIG_COMPAT
3135 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3142 if (sigsetsize != sizeof(compat_sigset_t))
3144 if (get_compat_sigset(&kmask, umask))
3147 set_restore_sigmask();
3148 current->saved_sigmask = current->blocked;
3149 set_current_blocked(&kmask);
3156 * sys_rt_sigprocmask - change the list of currently blocked signals
3157 * @how: whether to add, remove, or set signals
3158 * @nset: new set of blocked signals to apply according to @how, or NULL
3159 * @oset: previous value of signal mask if non-null
3160 * @sigsetsize: size of sigset_t type
3162 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3163 sigset_t __user *, oset, size_t, sigsetsize)
3165 sigset_t old_set, new_set;
3168 /* XXX: Don't preclude handling different sized sigset_t's. */
3169 if (sigsetsize != sizeof(sigset_t))
3172 old_set = current->blocked;
3175 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3177 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3179 error = sigprocmask(how, &new_set, NULL);
3185 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3192 #ifdef CONFIG_COMPAT
3193 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3194 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3196 sigset_t old_set = current->blocked;
3198 /* XXX: Don't preclude handling different sized sigset_t's. */
3199 if (sigsetsize != sizeof(sigset_t))
3205 if (get_compat_sigset(&new_set, nset))
3207 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3209 error = sigprocmask(how, &new_set, NULL);
3213 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3217 static void do_sigpending(sigset_t *set)
3219 spin_lock_irq(&current->sighand->siglock);
3220 sigorsets(set, &current->pending.signal,
3221 &current->signal->shared_pending.signal);
3222 spin_unlock_irq(&current->sighand->siglock);
3224 /* Outside the lock because only this thread touches it. */
3225 sigandsets(set, &current->blocked, set);
3229 * sys_rt_sigpending - examine a pending signal that has been raised
3231 * @uset: where the set of pending signals is returned
3232 * @sigsetsize: size of sigset_t type or larger
3234 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3238 if (sigsetsize > sizeof(*uset))
3241 do_sigpending(&set);
3243 if (copy_to_user(uset, &set, sigsetsize))
3249 #ifdef CONFIG_COMPAT
3250 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3251 compat_size_t, sigsetsize)
3255 if (sigsetsize > sizeof(*uset))
3258 do_sigpending(&set);
3260 return put_compat_sigset(uset, &set, sigsetsize);
3264 static const struct {
3265 unsigned char limit, layout;
3267 [SIGILL] = { NSIGILL, SIL_FAULT },
3268 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3269 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3270 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3271 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3273 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3275 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3276 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3277 [SIGSYS] = { NSIGSYS, SIL_SYS },
3280 static bool known_siginfo_layout(unsigned sig, int si_code)
3282 if (si_code == SI_KERNEL)
3284 else if ((si_code > SI_USER)) {
3285 if (sig_specific_sicodes(sig)) {
3286 if (si_code <= sig_sicodes[sig].limit)
3289 else if (si_code <= NSIGPOLL)
3292 else if (si_code >= SI_DETHREAD)
3294 else if (si_code == SI_ASYNCNL)
3299 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3301 enum siginfo_layout layout = SIL_KILL;
3302 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3303 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3304 (si_code <= sig_sicodes[sig].limit)) {
3305 layout = sig_sicodes[sig].layout;
3306 /* Handle the exceptions */
3307 if ((sig == SIGBUS) &&
3308 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3309 layout = SIL_FAULT_MCEERR;
3310 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3311 layout = SIL_FAULT_BNDERR;
3313 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3314 layout = SIL_FAULT_PKUERR;
3316 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3317 layout = SIL_FAULT_PERF_EVENT;
3318 else if (IS_ENABLED(CONFIG_SPARC) &&
3319 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3320 layout = SIL_FAULT_TRAPNO;
3321 else if (IS_ENABLED(CONFIG_ALPHA) &&
3323 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3324 layout = SIL_FAULT_TRAPNO;
3326 else if (si_code <= NSIGPOLL)
3329 if (si_code == SI_TIMER)
3331 else if (si_code == SI_SIGIO)
3333 else if (si_code < 0)
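/*
 * Illustrative user-space sketch (not part of this file): the SIL_FAULT
 * family selected above determines which siginfo_t fields an SA_SIGINFO
 * handler may read. For SIGSEGV, si_code and si_addr describe the fault.
 * (fprintf() is not async-signal-safe; it is used here only to keep the
 * example short.)
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void segv_handler(int sig, siginfo_t *info, void *uctx)
 *	{
 *		fprintf(stderr, "SIGSEGV code=%d addr=%p\n",
 *			info->si_code, info->si_addr);  // SEGV_MAPERR, SEGV_ACCERR, ...
 *		_exit(1);
 *	}
 *
 *	void install_segv(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_sigaction = segv_handler;
 *		sa.sa_flags = SA_SIGINFO;
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */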
3339 static inline char __user *si_expansion(const siginfo_t __user *info)
3341 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3344 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3346 char __user *expansion = si_expansion(to);
3347 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3349 if (clear_user(expansion, SI_EXPANSION_SIZE))
3354 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3355 const siginfo_t __user *from)
3357 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3358 char __user *expansion = si_expansion(from);
3359 char buf[SI_EXPANSION_SIZE];
3362 * An unknown si_code might need more than
3363 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3364 * extra bytes are 0. This guarantees copy_siginfo_to_user
3365 * will return this data to userspace exactly.
3367 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3369 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3377 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3378 const siginfo_t __user *from)
3380 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3382 to->si_signo = signo;
3383 return post_copy_siginfo_from_user(to, from);
3386 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3388 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3390 return post_copy_siginfo_from_user(to, from);
3393 #ifdef CONFIG_COMPAT
3395 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3396 * @to: compat siginfo destination
3397 * @from: kernel siginfo source
3399 * Note: This function does not work properly for the SIGCHLD on x32, but
3400 * fortunately it doesn't have to. The only valid callers for this function are
3401 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3402 * The latter does not care because SIGCHLD will never cause a coredump.
3404 void copy_siginfo_to_external32(struct compat_siginfo *to,
3405 const struct kernel_siginfo *from)
3407 memset(to, 0, sizeof(*to));
3409 to->si_signo = from->si_signo;
3410 to->si_errno = from->si_errno;
3411 to->si_code = from->si_code;
3412 switch(siginfo_layout(from->si_signo, from->si_code)) {
3414 to->si_pid = from->si_pid;
3415 to->si_uid = from->si_uid;
3418 to->si_tid = from->si_tid;
3419 to->si_overrun = from->si_overrun;
3420 to->si_int = from->si_int;
3423 to->si_band = from->si_band;
3424 to->si_fd = from->si_fd;
3427 to->si_addr = ptr_to_compat(from->si_addr);
3429 case SIL_FAULT_TRAPNO:
3430 to->si_addr = ptr_to_compat(from->si_addr);
3431 to->si_trapno = from->si_trapno;
3433 case SIL_FAULT_MCEERR:
3434 to->si_addr = ptr_to_compat(from->si_addr);
3435 to->si_addr_lsb = from->si_addr_lsb;
3437 case SIL_FAULT_BNDERR:
3438 to->si_addr = ptr_to_compat(from->si_addr);
3439 to->si_lower = ptr_to_compat(from->si_lower);
3440 to->si_upper = ptr_to_compat(from->si_upper);
3442 case SIL_FAULT_PKUERR:
3443 to->si_addr = ptr_to_compat(from->si_addr);
3444 to->si_pkey = from->si_pkey;
3446 case SIL_FAULT_PERF_EVENT:
3447 to->si_addr = ptr_to_compat(from->si_addr);
3448 to->si_perf_data = from->si_perf_data;
3449 to->si_perf_type = from->si_perf_type;
3450 to->si_perf_flags = from->si_perf_flags;
3453 to->si_pid = from->si_pid;
3454 to->si_uid = from->si_uid;
3455 to->si_status = from->si_status;
3456 to->si_utime = from->si_utime;
3457 to->si_stime = from->si_stime;
3460 to->si_pid = from->si_pid;
3461 to->si_uid = from->si_uid;
3462 to->si_int = from->si_int;
3465 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3466 to->si_syscall = from->si_syscall;
3467 to->si_arch = from->si_arch;
3472 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3473 const struct kernel_siginfo *from)
3475 struct compat_siginfo new;
3477 copy_siginfo_to_external32(&new, from);
3478 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3483 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3484 const struct compat_siginfo *from)
3487 to->si_signo = from->si_signo;
3488 to->si_errno = from->si_errno;
3489 to->si_code = from->si_code;
3490 switch(siginfo_layout(from->si_signo, from->si_code)) {
3492 to->si_pid = from->si_pid;
3493 to->si_uid = from->si_uid;
3496 to->si_tid = from->si_tid;
3497 to->si_overrun = from->si_overrun;
3498 to->si_int = from->si_int;
3501 to->si_band = from->si_band;
3502 to->si_fd = from->si_fd;
3505 to->si_addr = compat_ptr(from->si_addr);
3507 case SIL_FAULT_TRAPNO:
3508 to->si_addr = compat_ptr(from->si_addr);
3509 to->si_trapno = from->si_trapno;
3511 case SIL_FAULT_MCEERR:
3512 to->si_addr = compat_ptr(from->si_addr);
3513 to->si_addr_lsb = from->si_addr_lsb;
3515 case SIL_FAULT_BNDERR:
3516 to->si_addr = compat_ptr(from->si_addr);
3517 to->si_lower = compat_ptr(from->si_lower);
3518 to->si_upper = compat_ptr(from->si_upper);
3520 case SIL_FAULT_PKUERR:
3521 to->si_addr = compat_ptr(from->si_addr);
3522 to->si_pkey = from->si_pkey;
3524 case SIL_FAULT_PERF_EVENT:
3525 to->si_addr = compat_ptr(from->si_addr);
3526 to->si_perf_data = from->si_perf_data;
3527 to->si_perf_type = from->si_perf_type;
3528 to->si_perf_flags = from->si_perf_flags;
3531 to->si_pid = from->si_pid;
3532 to->si_uid = from->si_uid;
3533 to->si_status = from->si_status;
3534 #ifdef CONFIG_X86_X32_ABI
3535 if (in_x32_syscall()) {
3536 to->si_utime = from->_sifields._sigchld_x32._utime;
3537 to->si_stime = from->_sifields._sigchld_x32._stime;
3541 to->si_utime = from->si_utime;
3542 to->si_stime = from->si_stime;
3546 to->si_pid = from->si_pid;
3547 to->si_uid = from->si_uid;
3548 to->si_int = from->si_int;
3551 to->si_call_addr = compat_ptr(from->si_call_addr);
3552 to->si_syscall = from->si_syscall;
3553 to->si_arch = from->si_arch;
3559 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3560 const struct compat_siginfo __user *ufrom)
3562 struct compat_siginfo from;
3564 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3567 from.si_signo = signo;
3568 return post_copy_siginfo_from_user32(to, &from);
3571 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3572 const struct compat_siginfo __user *ufrom)
3574 struct compat_siginfo from;
3576 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3579 return post_copy_siginfo_from_user32(to, &from);
3581 #endif /* CONFIG_COMPAT */
3584 * do_sigtimedwait - wait for queued signals specified in @which
3585 * @which: queued signals to wait for
3586 * @info: if non-null, the signal's siginfo is returned here
3587 * @ts: upper bound on process time suspension
3589 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3590 const struct timespec64 *ts)
3592 ktime_t *to = NULL, timeout = KTIME_MAX;
3593 struct task_struct *tsk = current;
3594 sigset_t mask = *which;
3599 if (!timespec64_valid(ts))
3601 timeout = timespec64_to_ktime(*ts);
3606 * Invert the set of allowed signals to get those we want to block.
3608 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3611 spin_lock_irq(&tsk->sighand->siglock);
3612 sig = dequeue_signal(tsk, &mask, info, &type);
3613 if (!sig && timeout) {
3615 * None ready, temporarily unblock the signals we're interested in
3616 * while we are sleeping, so that we'll be awakened when
3617 * they arrive. Unblocking is always fine, we can avoid
3618 * set_current_blocked().
3620 tsk->real_blocked = tsk->blocked;
3621 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3622 recalc_sigpending();
3623 spin_unlock_irq(&tsk->sighand->siglock);
3625 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3626 ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3628 spin_lock_irq(&tsk->sighand->siglock);
3629 __set_task_blocked(tsk, &tsk->real_blocked);
3630 sigemptyset(&tsk->real_blocked);
3631 sig = dequeue_signal(tsk, &mask, info, &type);
3633 spin_unlock_irq(&tsk->sighand->siglock);
3637 return ret ? -EINTR : -EAGAIN;
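/*
 * Illustrative user-space sketch (not part of this file): the user-visible
 * behaviour of the helper above, reached through sigtimedwait(2). The signal
 * must be blocked first so it stays queued instead of being delivered.
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	int wait_for_usr1(siginfo_t *info)
 *	{
 *		sigset_t set;
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		return sigtimedwait(&set, info, &ts);  // SIGUSR1, or -1/EAGAIN on timeout
 *	}
 */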
3641 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3643 * @uthese: queued signals to wait for
3644 * @uinfo: if non-null, the signal's siginfo is returned here
3645 * @uts: upper bound on process time suspension
3646 * @sigsetsize: size of sigset_t type
3648 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3649 siginfo_t __user *, uinfo,
3650 const struct __kernel_timespec __user *, uts,
3654 struct timespec64 ts;
3655 kernel_siginfo_t info;
3658 /* XXX: Don't preclude handling different sized sigset_t's. */
3659 if (sigsetsize != sizeof(sigset_t))
3662 if (copy_from_user(&these, uthese, sizeof(these)))
3666 if (get_timespec64(&ts, uts))
3670 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3672 if (ret > 0 && uinfo) {
3673 if (copy_siginfo_to_user(uinfo, &info))
3680 #ifdef CONFIG_COMPAT_32BIT_TIME
3681 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3682 siginfo_t __user *, uinfo,
3683 const struct old_timespec32 __user *, uts,
3687 struct timespec64 ts;
3688 kernel_siginfo_t info;
3691 if (sigsetsize != sizeof(sigset_t))
3694 if (copy_from_user(&these, uthese, sizeof(these)))
3698 if (get_old_timespec32(&ts, uts))
3702 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3704 if (ret > 0 && uinfo) {
3705 if (copy_siginfo_to_user(uinfo, &info))
3713 #ifdef CONFIG_COMPAT
3714 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3715 struct compat_siginfo __user *, uinfo,
3716 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3719 struct timespec64 t;
3720 kernel_siginfo_t info;
3723 if (sigsetsize != sizeof(sigset_t))
3726 if (get_compat_sigset(&s, uthese))
3730 if (get_timespec64(&t, uts))
3734 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3736 if (ret > 0 && uinfo) {
3737 if (copy_siginfo_to_user32(uinfo, &info))
3744 #ifdef CONFIG_COMPAT_32BIT_TIME
3745 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3746 struct compat_siginfo __user *, uinfo,
3747 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3750 struct timespec64 t;
3751 kernel_siginfo_t info;
3754 if (sigsetsize != sizeof(sigset_t))
3757 if (get_compat_sigset(&s, uthese))
3761 if (get_old_timespec32(&t, uts))
3765 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3767 if (ret > 0 && uinfo) {
3768 if (copy_siginfo_to_user32(uinfo, &info))
3777 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3779 clear_siginfo(info);
3780 info->si_signo = sig;
3782 info->si_code = SI_USER;
3783 info->si_pid = task_tgid_vnr(current);
3784 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3788 * sys_kill - send a signal to a process
3789 * @pid: the PID of the process
3790 * @sig: signal to be sent
3792 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3794 struct kernel_siginfo info;
3796 prepare_kill_siginfo(sig, &info);
3798 return kill_something_info(sig, &info, pid);
3802 * Verify that the signaler and signalee either are in the same pid namespace
3803 * or that the signaler's pid namespace is an ancestor of the signalee's pid namespace.
3806 static bool access_pidfd_pidns(struct pid *pid)
3808 struct pid_namespace *active = task_active_pid_ns(current);
3809 struct pid_namespace *p = ns_of_pid(pid);
3822 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3823 siginfo_t __user *info)
3825 #ifdef CONFIG_COMPAT
3827 * Avoid hooking up compat syscalls and instead handle necessary
3828 * conversions here. Note, this is a stop-gap measure and should not be
3829 * considered a generic solution.
3831 if (in_compat_syscall())
3832 return copy_siginfo_from_user32(
3833 kinfo, (struct compat_siginfo __user *)info);
3835 return copy_siginfo_from_user(kinfo, info);
3838 static struct pid *pidfd_to_pid(const struct file *file)
3842 pid = pidfd_pid(file);
3846 return tgid_pidfd_to_pid(file);
3850 * sys_pidfd_send_signal - Signal a process through a pidfd
3851 * @pidfd: file descriptor of the process
3852 * @sig: signal to send
3853 * @info: signal info
3854 * @flags: future flags
3856 * The syscall currently only signals via PIDTYPE_PID which covers
3857 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3859 * In order to extend the syscall to threads and process groups the @flags
3860 * argument should be used. In essence, the @flags argument will determine
3861 * what is signaled and not the file descriptor itself. In other words,
3862 * grouping is a property of the flags argument, not a property of the file descriptor.
3865 * Return: 0 on success, negative errno on failure
3867 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3868 siginfo_t __user *, info, unsigned int, flags)
3873 kernel_siginfo_t kinfo;
3875 /* Enforce flags be set to 0 until we add an extension. */
3883 /* Is this a pidfd? */
3884 pid = pidfd_to_pid(f.file);
3891 if (!access_pidfd_pidns(pid))
3895 ret = copy_siginfo_from_user_any(&kinfo, info);
3900 if (unlikely(sig != kinfo.si_signo))
3903 /* Only allow sending arbitrary signals to yourself. */
3905 if ((task_pid(current) != pid) &&
3906 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3909 prepare_kill_siginfo(sig, &kinfo);
3912 ret = kill_pid_info(sig, &kinfo, pid);
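/*
 * Illustrative user-space sketch (not part of this file): raw use of the
 * syscall above. It assumes the libc headers provide SYS_pidfd_open and
 * SYS_pidfd_send_signal; dedicated libc wrappers may or may not exist.
 * Passing a NULL siginfo lets the kernel fill in kill()-style SI_USER info.
 *
 *	#include <sys/syscall.h>
 *	#include <sys/types.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int terminate_via_pidfd(pid_t pid)
 *	{
 *		int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *		if (pidfd < 0)
 *			return -1;
 *		return syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *	}
 */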
3920 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3922 struct task_struct *p;
3926 p = find_task_by_vpid(pid);
3927 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3928 error = check_kill_permission(sig, info, p);
3930 * The null signal is a permissions and process existence
3931 * probe. No signal is actually delivered.
3933 if (!error && sig) {
3934 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3936 * If lock_task_sighand() failed we pretend the task
3937 * dies after receiving the signal. The window is tiny,
3938 * and the signal is private anyway.
3940 if (unlikely(error == -ESRCH))
3949 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3951 struct kernel_siginfo info;
3953 clear_siginfo(&info);
3954 info.si_signo = sig;
3956 info.si_code = SI_TKILL;
3957 info.si_pid = task_tgid_vnr(current);
3958 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3960 return do_send_specific(tgid, pid, sig, &info);
3964 * sys_tgkill - send signal to one specific thread
3965 * @tgid: the thread group ID of the thread
3966 * @pid: the PID of the thread
3967 * @sig: signal to be sent
3969 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3970 * exists but no longer belongs to the target process. This
3971 * method solves the problem of threads exiting and PIDs getting reused.
3973 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3975 /* This is only valid for single tasks */
3976 if (pid <= 0 || tgid <= 0)
3979 return do_tkill(tgid, pid, sig);
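/*
 * Illustrative user-space sketch (not part of this file): targeting one
 * specific thread with tgkill(). The tgid check described above protects
 * against the tid having been recycled into another process. Older libcs
 * need the raw syscall() form used here.
 *
 *	#include <sys/syscall.h>
 *	#include <sys/types.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int signal_thread(pid_t tgid, pid_t tid)
 *	{
 *		// fails with ESRCH if tid no longer belongs to tgid
 *		return syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 *	}
 */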
3983 * sys_tkill - send signal to one specific task
3984 * @pid: the PID of the task
3985 * @sig: signal to be sent
3987 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3989 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3991 /* This is only valid for single tasks */
3995 return do_tkill(0, pid, sig);
3998 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4000 /* Not even root can pretend to send signals from the kernel.
4001 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4003 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4004 (task_pid_vnr(current) != pid))
4007 /* POSIX.1b doesn't mention process groups. */
4008 return kill_proc_info(sig, info, pid);
4012 * sys_rt_sigqueueinfo - queue a signal and accompanying info to a process
4013 * @pid: the PID of the process
4014 * @sig: signal to be sent
4015 * @uinfo: signal info to be sent
4017 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4018 siginfo_t __user *, uinfo)
4020 kernel_siginfo_t info;
4021 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4024 return do_rt_sigqueueinfo(pid, sig, &info);
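/*
 * Illustrative user-space sketch (not part of this file): sigqueue(3) is the
 * usual libc entry point to this syscall. The queued value appears as
 * si_value (or si_int) in the receiver's SA_SIGINFO handler.
 *
 *	#include <signal.h>
 *	#include <sys/types.h>
 *
 *	int send_value(pid_t pid, int value)
 *	{
 *		union sigval sv = { .sival_int = value };
 *
 *		return sigqueue(pid, SIGRTMIN, sv);
 *	}
 */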
4027 #ifdef CONFIG_COMPAT
4028 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4031 struct compat_siginfo __user *, uinfo)
4033 kernel_siginfo_t info;
4034 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4037 return do_rt_sigqueueinfo(pid, sig, &info);
4041 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4043 /* This is only valid for single tasks */
4044 if (pid <= 0 || tgid <= 0)
4047 /* Not even root can pretend to send signals from the kernel.
4048 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4050 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4051 (task_pid_vnr(current) != pid))
4054 return do_send_specific(tgid, pid, sig, info);
4057 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4058 siginfo_t __user *, uinfo)
4060 kernel_siginfo_t info;
4061 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4064 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4067 #ifdef CONFIG_COMPAT
4068 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4072 struct compat_siginfo __user *, uinfo)
4074 kernel_siginfo_t info;
4075 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4078 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4083 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4085 void kernel_sigaction(int sig, __sighandler_t action)
4087 spin_lock_irq(&current->sighand->siglock);
4088 current->sighand->action[sig - 1].sa.sa_handler = action;
4089 if (action == SIG_IGN) {
4093 sigaddset(&mask, sig);
4095 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4096 flush_sigqueue_mask(&mask, &current->pending);
4097 recalc_sigpending();
4099 spin_unlock_irq(&current->sighand->siglock);
4101 EXPORT_SYMBOL(kernel_sigaction);
4103 void __weak sigaction_compat_abi(struct k_sigaction *act,
4104 struct k_sigaction *oact)
4108 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4110 struct task_struct *p = current, *t;
4111 struct k_sigaction *k;
4114 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4117 k = &p->sighand->action[sig-1];
4119 spin_lock_irq(&p->sighand->siglock);
4120 if (k->sa.sa_flags & SA_IMMUTABLE) {
4121 spin_unlock_irq(&p->sighand->siglock);
4128 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4129 * e.g. by having an architecture use the bit in their uapi.
4131 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4134 * Clear unknown flag bits in order to allow userspace to detect missing
4135 * support for flag bits and to allow the kernel to use non-uapi bits
4139 act->sa.sa_flags &= UAPI_SA_FLAGS;
4141 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4143 sigaction_compat_abi(act, oact);
4146 sigdelsetmask(&act->sa.sa_mask,
4147 sigmask(SIGKILL) | sigmask(SIGSTOP));
4151 * "Setting a signal action to SIG_IGN for a signal that is
4152 * pending shall cause the pending signal to be discarded,
4153 * whether or not it is blocked."
4155 * "Setting a signal action to SIG_DFL for a signal that is
4156 * pending and whose default action is to ignore the signal
4157 * (for example, SIGCHLD), shall cause the pending signal to
4158 * be discarded, whether or not it is blocked"
4160 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4162 sigaddset(&mask, sig);
4163 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4164 for_each_thread(p, t)
4165 flush_sigqueue_mask(&mask, &t->pending);
4169 spin_unlock_irq(&p->sighand->siglock);
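/*
 * Illustrative user-space sketch (not part of this file): the POSIX rule
 * quoted above is observable directly; a signal that is already pending
 * (even while blocked) is discarded the moment its disposition becomes
 * SIG_IGN.
 *
 *	#include <signal.h>
 *
 *	void drop_pending_usr1(void)
 *	{
 *		sigset_t set;
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);    // let SIGUSR1 queue up
 *		raise(SIGUSR1);                        // now pending
 *		signal(SIGUSR1, SIG_IGN);              // pending instance is discarded
 *		sigprocmask(SIG_UNBLOCK, &set, NULL);  // nothing is delivered
 *	}
 */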
4173 #ifdef CONFIG_DYNAMIC_SIGFRAME
4174 static inline void sigaltstack_lock(void)
4175 __acquires(&current->sighand->siglock)
4177 spin_lock_irq(&current->sighand->siglock);
4180 static inline void sigaltstack_unlock(void)
4181 __releases(&current->sighand->siglock)
4183 spin_unlock_irq(&current->sighand->siglock);
4186 static inline void sigaltstack_lock(void) { }
4187 static inline void sigaltstack_unlock(void) { }
4191 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4194 struct task_struct *t = current;
4198 memset(oss, 0, sizeof(stack_t));
4199 oss->ss_sp = (void __user *) t->sas_ss_sp;
4200 oss->ss_size = t->sas_ss_size;
4201 oss->ss_flags = sas_ss_flags(sp) |
4202 (current->sas_ss_flags & SS_FLAG_BITS);
4206 void __user *ss_sp = ss->ss_sp;
4207 size_t ss_size = ss->ss_size;
4208 unsigned ss_flags = ss->ss_flags;
4211 if (unlikely(on_sig_stack(sp)))
4214 ss_mode = ss_flags & ~SS_FLAG_BITS;
4215 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4220 * Return before taking any locks if no actual
4221 * sigaltstack changes were requested.
4223 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4224 t->sas_ss_size == ss_size &&
4225 t->sas_ss_flags == ss_flags)
4229 if (ss_mode == SS_DISABLE) {
4233 if (unlikely(ss_size < min_ss_size))
4235 if (!sigaltstack_size_valid(ss_size))
4239 t->sas_ss_sp = (unsigned long) ss_sp;
4240 t->sas_ss_size = ss_size;
4241 t->sas_ss_flags = ss_flags;
4243 sigaltstack_unlock();
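/*
 * Illustrative user-space sketch (not part of this file): the usual pairing
 * of sigaltstack(2) with SA_ONSTACK, so that a SIGSEGV caused by stack
 * overflow still has a stack to run its handler on.
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static char altstack[64 * 1024];  // comfortably above MINSIGSTKSZ
 *
 *	void install_segv_on_altstack(void (*handler)(int))
 *	{
 *		stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack) };
 *		struct sigaction sa;
 *
 *		sigaltstack(&ss, NULL);
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_handler = handler;
 *		sa.sa_flags = SA_ONSTACK;  // deliver SIGSEGV on the alternate stack
 *		sigaction(SIGSEGV, &sa, NULL);
 *	}
 */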
4248 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4252 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4254 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4255 current_user_stack_pointer(),
4257 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4262 int restore_altstack(const stack_t __user *uss)
4265 if (copy_from_user(&new, uss, sizeof(stack_t)))
4267 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4269 /* squash all but EFAULT for now */
4273 int __save_altstack(stack_t __user *uss, unsigned long sp)
4275 struct task_struct *t = current;
4276 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4277 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4278 __put_user(t->sas_ss_size, &uss->ss_size);
4282 #ifdef CONFIG_COMPAT
4283 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4284 compat_stack_t __user *uoss_ptr)
4290 compat_stack_t uss32;
4291 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4293 uss.ss_sp = compat_ptr(uss32.ss_sp);
4294 uss.ss_flags = uss32.ss_flags;
4295 uss.ss_size = uss32.ss_size;
4297 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4298 compat_user_stack_pointer(),
4299 COMPAT_MINSIGSTKSZ);
4300 if (ret >= 0 && uoss_ptr) {
4302 memset(&old, 0, sizeof(old));
4303 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4304 old.ss_flags = uoss.ss_flags;
4305 old.ss_size = uoss.ss_size;
4306 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4312 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4313 const compat_stack_t __user *, uss_ptr,
4314 compat_stack_t __user *, uoss_ptr)
4316 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4319 int compat_restore_altstack(const compat_stack_t __user *uss)
4321 int err = do_compat_sigaltstack(uss, NULL);
4322 /* squash all but -EFAULT for now */
4323 return err == -EFAULT ? err : 0;
4326 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4329 struct task_struct *t = current;
4330 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4332 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4333 __put_user(t->sas_ss_size, &uss->ss_size);
4338 #ifdef __ARCH_WANT_SYS_SIGPENDING
4341 * sys_sigpending - examine pending signals
4342 * @uset: where the mask of pending signals is returned
4344 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4348 if (sizeof(old_sigset_t) > sizeof(*uset))
4351 do_sigpending(&set);
4353 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4359 #ifdef CONFIG_COMPAT
4360 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4364 do_sigpending(&set);
4366 return put_user(set.sig[0], set32);
4372 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4374 * sys_sigprocmask - examine and change blocked signals
4375 * @how: whether to add, remove, or set signals
4376 * @nset: signals to add or remove (if non-null)
4377 * @oset: previous value of signal mask if non-null
4379 * Some platforms have their own version with special arguments;
4380 * others support only sys_rt_sigprocmask.
4383 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4384 old_sigset_t __user *, oset)
4386 old_sigset_t old_set, new_set;
4387 sigset_t new_blocked;
4389 old_set = current->blocked.sig[0];
4392 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4395 new_blocked = current->blocked;
4399 sigaddsetmask(&new_blocked, new_set);
4402 sigdelsetmask(&new_blocked, new_set);
4405 new_blocked.sig[0] = new_set;
4411 set_current_blocked(&new_blocked);
4415 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4421 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4423 #ifndef CONFIG_ODD_RT_SIGACTION
4425 * sys_rt_sigaction - alter an action taken by a process
4426 * @sig: signal to be sent
4427 * @act: new sigaction
4428 * @oact: used to save the previous sigaction
4429 * @sigsetsize: size of sigset_t type
4431 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4432 const struct sigaction __user *, act,
4433 struct sigaction __user *, oact,
4436 struct k_sigaction new_sa, old_sa;
4439 /* XXX: Don't preclude handling different sized sigset_t's. */
4440 if (sigsetsize != sizeof(sigset_t))
4443 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4446 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4450 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4455 #ifdef CONFIG_COMPAT
4456 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4457 const struct compat_sigaction __user *, act,
4458 struct compat_sigaction __user *, oact,
4459 compat_size_t, sigsetsize)
4461 struct k_sigaction new_ka, old_ka;
4462 #ifdef __ARCH_HAS_SA_RESTORER
4463 compat_uptr_t restorer;
4467 /* XXX: Don't preclude handling different sized sigset_t's. */
4468 if (sigsetsize != sizeof(compat_sigset_t))
4472 compat_uptr_t handler;
4473 ret = get_user(handler, &act->sa_handler);
4474 new_ka.sa.sa_handler = compat_ptr(handler);
4475 #ifdef __ARCH_HAS_SA_RESTORER
4476 ret |= get_user(restorer, &act->sa_restorer);
4477 new_ka.sa.sa_restorer = compat_ptr(restorer);
4479 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4480 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4485 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4487 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4489 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4490 sizeof(oact->sa_mask));
4491 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4492 #ifdef __ARCH_HAS_SA_RESTORER
4493 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4494 &oact->sa_restorer);
4500 #endif /* !CONFIG_ODD_RT_SIGACTION */
4502 #ifdef CONFIG_OLD_SIGACTION
4503 SYSCALL_DEFINE3(sigaction, int, sig,
4504 const struct old_sigaction __user *, act,
4505 struct old_sigaction __user *, oact)
4507 struct k_sigaction new_ka, old_ka;
4512 if (!access_ok(act, sizeof(*act)) ||
4513 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4514 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4515 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4516 __get_user(mask, &act->sa_mask))
4518 #ifdef __ARCH_HAS_KA_RESTORER
4519 new_ka.ka_restorer = NULL;
4521 siginitset(&new_ka.sa.sa_mask, mask);
4524 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4527 if (!access_ok(oact, sizeof(*oact)) ||
4528 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4529 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4530 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4531 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4538 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4539 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4540 const struct compat_old_sigaction __user *, act,
4541 struct compat_old_sigaction __user *, oact)
4543 struct k_sigaction new_ka, old_ka;
4545 compat_old_sigset_t mask;
4546 compat_uptr_t handler, restorer;
4549 if (!access_ok(act, sizeof(*act)) ||
4550 __get_user(handler, &act->sa_handler) ||
4551 __get_user(restorer, &act->sa_restorer) ||
4552 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4553 __get_user(mask, &act->sa_mask))
4556 #ifdef __ARCH_HAS_KA_RESTORER
4557 new_ka.ka_restorer = NULL;
4559 new_ka.sa.sa_handler = compat_ptr(handler);
4560 new_ka.sa.sa_restorer = compat_ptr(restorer);
4561 siginitset(&new_ka.sa.sa_mask, mask);
4564 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4567 if (!access_ok(oact, sizeof(*oact)) ||
4568 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4569 &oact->sa_handler) ||
4570 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4571 &oact->sa_restorer) ||
4572 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4573 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4580 #ifdef CONFIG_SGETMASK_SYSCALL
4583 * For backwards compatibility. Functionality superseded by sigprocmask.
4585 SYSCALL_DEFINE0(sgetmask)
4588 return current->blocked.sig[0];
4591 SYSCALL_DEFINE1(ssetmask, int, newmask)
4593 int old = current->blocked.sig[0];
4596 siginitset(&newset, newmask);
4597 set_current_blocked(&newset);
4601 #endif /* CONFIG_SGETMASK_SYSCALL */
4603 #ifdef __ARCH_WANT_SYS_SIGNAL
4605 * For backwards compatibility. Functionality superseded by sigaction.
4607 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4609 struct k_sigaction new_sa, old_sa;
4612 new_sa.sa.sa_handler = handler;
4613 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4614 sigemptyset(&new_sa.sa.sa_mask);
4616 ret = do_sigaction(sig, &new_sa, &old_sa);
4618 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4620 #endif /* __ARCH_WANT_SYS_SIGNAL */
4622 #ifdef __ARCH_WANT_SYS_PAUSE
4624 SYSCALL_DEFINE0(pause)
4626 while (!signal_pending(current)) {
4627 __set_current_state(TASK_INTERRUPTIBLE);
4630 return -ERESTARTNOHAND;
4635 static int sigsuspend(sigset_t *set)
4637 current->saved_sigmask = current->blocked;
4638 set_current_blocked(set);
4640 while (!signal_pending(current)) {
4641 __set_current_state(TASK_INTERRUPTIBLE);
4644 set_restore_sigmask();
4645 return -ERESTARTNOHAND;
4649 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4650 * until a signal is received
4651 * @unewset: new signal mask value
4652 * @sigsetsize: size of sigset_t type
4654 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4658 /* XXX: Don't preclude handling different sized sigset_t's. */
4659 if (sigsetsize != sizeof(sigset_t))
4662 if (copy_from_user(&newset, unewset, sizeof(newset)))
4664 return sigsuspend(&newset);
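/*
 * Illustrative user-space sketch (not part of this file): the classic
 * race-free wait that sigsuspend(2) enables. The flag is only examined while
 * SIGCHLD is blocked, and sigsuspend() atomically installs the old mask and
 * sleeps, so a signal arriving between the test and the sleep is not lost.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_child;
 *	static void chld(int sig) { got_child = 1; }
 *
 *	void wait_for_child_exit(void)
 *	{
 *		sigset_t block, old;
 *
 *		signal(SIGCHLD, chld);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGCHLD);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *		while (!got_child)
 *			sigsuspend(&old);              // atomically unblock and sleep
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */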
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif
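/*
 * Legacy single-word sigsuspend() entry points: old_sigset_t covers only
 * the first word of the signal set, so these expand the mask with
 * siginitset() and reuse the common helper above.
 */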
#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
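/*
 * Build-time layout checks: userspace siginfo_t and the in-kernel
 * kernel_siginfo_t must agree on the offset and size of every field that
 * is copied between them, and the compat layout must match as well.
 */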
static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_trapno);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);
	CHECK_OFFSET(si_perf_data);
	CHECK_OFFSET(si_perf_type);
	CHECK_OFFSET(si_perf_flags);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}
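/*
 * The "exception-trace" entry is exposed as /proc/sys/debug/exception-trace
 * and toggles show_unhandled_signals, i.e. whether unhandled fatal signals
 * and faults are logged.
 */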
#if defined(CONFIG_SYSCTL)
static struct ctl_table signal_debug_table[] = {
#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
	{
		.procname	= "exception-trace",
		.data		= &show_unhandled_signals,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
#endif
	{ }
};

static int __init init_signal_sysctls(void)
{
	register_sysctl_init("debug", signal_debug_table);
	return 0;
}
early_initcall(init_signal_sysctls);
#endif /* CONFIG_SYSCTL */
void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
}
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (!task_is_running(t) && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */