1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/kernel/signal.c
5 * Copyright (C) 1991, 1992 Linus Torvalds
7 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
9 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
10 * Changes to use preallocated sigqueue structures
11 * to allow signals to be sent reliably.
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/sched/mm.h>
18 #include <linux/sched/user.h>
19 #include <linux/sched/debug.h>
20 #include <linux/sched/task.h>
21 #include <linux/sched/task_stack.h>
22 #include <linux/sched/cputime.h>
23 #include <linux/file.h>
25 #include <linux/proc_fs.h>
26 #include <linux/tty.h>
27 #include <linux/binfmts.h>
28 #include <linux/coredump.h>
29 #include <linux/security.h>
30 #include <linux/syscalls.h>
31 #include <linux/ptrace.h>
32 #include <linux/signal.h>
33 #include <linux/signalfd.h>
34 #include <linux/ratelimit.h>
35 #include <linux/tracehook.h>
36 #include <linux/capability.h>
37 #include <linux/freezer.h>
38 #include <linux/pid_namespace.h>
39 #include <linux/nsproxy.h>
40 #include <linux/user_namespace.h>
41 #include <linux/uprobes.h>
42 #include <linux/compat.h>
43 #include <linux/cn_proc.h>
44 #include <linux/compiler.h>
45 #include <linux/posix-timers.h>
46 #include <linux/livepatch.h>
47 #include <linux/cgroup.h>
48 #include <linux/audit.h>
50 #define CREATE_TRACE_POINTS
51 #include <trace/events/signal.h>
53 #include <asm/param.h>
54 #include <linux/uaccess.h>
55 #include <asm/unistd.h>
56 #include <asm/siginfo.h>
57 #include <asm/cacheflush.h>
60 * SLAB caches for signal bits.
63 static struct kmem_cache *sigqueue_cachep;
65 int print_fatal_signals __read_mostly;
67 static void __user *sig_handler(struct task_struct *t, int sig)
69 return t->sighand->action[sig - 1].sa.sa_handler;
72 static inline bool sig_handler_ignored(void __user *handler, int sig)
74 /* Is it explicitly or implicitly ignored? */
75 return handler == SIG_IGN ||
76 (handler == SIG_DFL && sig_kernel_ignore(sig));
79 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
83 handler = sig_handler(t, sig);
85 /* SIGKILL and SIGSTOP may not be sent to the global init */
86 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
89 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
90 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
93 /* Only allow kernel generated signals to this kthread */
94 if (unlikely((t->flags & PF_KTHREAD) &&
95 (handler == SIG_KTHREAD_KERNEL) && !force))
98 return sig_handler_ignored(handler, sig);
101 static bool sig_ignored(struct task_struct *t, int sig, bool force)
104 * Blocked signals are never ignored, since the
105 * signal handler may change by the time it is
108 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
112 * Tracers may want to know about even ignored signals, unless it
113 * is SIGKILL, which can't be reported anyway but can be ignored
114 * by a SIGNAL_UNKILLABLE task.
116 if (t->ptrace && sig != SIGKILL)
119 return sig_task_ignored(t, sig, force);
123 * Re-calculate pending state from the set of locally pending
124 * signals, globally pending signals, and blocked signals.
126 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
131 switch (_NSIG_WORDS) {
133 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
134 ready |= signal->sig[i] &~ blocked->sig[i];
137 case 4: ready = signal->sig[3] &~ blocked->sig[3];
138 ready |= signal->sig[2] &~ blocked->sig[2];
139 ready |= signal->sig[1] &~ blocked->sig[1];
140 ready |= signal->sig[0] &~ blocked->sig[0];
143 case 2: ready = signal->sig[1] &~ blocked->sig[1];
144 ready |= signal->sig[0] &~ blocked->sig[0];
147 case 1: ready = signal->sig[0] &~ blocked->sig[0];
152 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
154 static bool recalc_sigpending_tsk(struct task_struct *t)
156 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
157 PENDING(&t->pending, &t->blocked) ||
158 PENDING(&t->signal->shared_pending, &t->blocked) ||
159 cgroup_task_frozen(t)) {
160 set_tsk_thread_flag(t, TIF_SIGPENDING);
165 * We must never clear the flag in another thread, or in current
166 * when it's possible the current syscall is returning -ERESTART*.
167 * So we don't clear it here; it is cleared only by callers that know it is safe.
173 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
174 * This is superfluous when called on current; the wakeup is a harmless no-op.
176 void recalc_sigpending_and_wake(struct task_struct *t)
178 if (recalc_sigpending_tsk(t))
179 signal_wake_up(t, 0);
182 void recalc_sigpending(void)
184 if (!recalc_sigpending_tsk(current) && !freezing(current) &&
185 !klp_patch_pending(current))
186 clear_thread_flag(TIF_SIGPENDING);
189 EXPORT_SYMBOL(recalc_sigpending);
191 void calculate_sigpending(void)
193 /* Have any signals or users of TIF_SIGPENDING been delayed
196 spin_lock_irq(&current->sighand->siglock);
197 set_tsk_thread_flag(current, TIF_SIGPENDING);
199 spin_unlock_irq(&current->sighand->siglock);
202 /* Given the mask, find the first available signal that should be serviced. */
204 #define SYNCHRONOUS_MASK \
205 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
206 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
208 int next_signal(struct sigpending *pending, sigset_t *mask)
210 unsigned long i, *s, *m, x;
213 s = pending->signal.sig;
217 * Handle the first word specially: it contains the
218 * synchronous signals that need to be dequeued first.
222 if (x & SYNCHRONOUS_MASK)
223 x &= SYNCHRONOUS_MASK;
228 switch (_NSIG_WORDS) {
230 for (i = 1; i < _NSIG_WORDS; ++i) {
234 sig = ffz(~x) + i*_NSIG_BPW + 1;
243 sig = ffz(~x) + _NSIG_BPW + 1;
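/*
 * Example: the selection rule above, sketched for a single word of bits in
 * portable C — prefer synchronous signals, then take the lowest set bit.
 * lowest_signal() and sync_mask are illustrative names, not kernel API,
 * and this is not the kernel's ffz()-based implementation.
 */
static int lowest_signal(unsigned long pending, unsigned long blocked,
			 unsigned long sync_mask)
{
	unsigned long x = pending & ~blocked;	/* deliverable bits */

	if (x & sync_mask)
		x &= sync_mask;			/* synchronous signals first */
	return x ? __builtin_ffsl(x) : 0;	/* 1-based bit == signal number */
}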
254 static inline void print_dropped_signal(int sig)
256 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
258 if (!print_fatal_signals)
261 if (!__ratelimit(&ratelimit_state))
264 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
265 current->comm, current->pid, sig);
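/*
 * Example: the limit consulted above is RLIMIT_SIGPENDING, the per-user cap
 * on queued signals. A minimal userspace sketch of raising the soft limit to
 * the hard limit (illustrative only; error handling elided):
 */
#include <sys/resource.h>

static void raise_sigpending_limit(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_SIGPENDING, &rl) == 0) {
		rl.rlim_cur = rl.rlim_max;
		setrlimit(RLIMIT_SIGPENDING, &rl);
	}
}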
269 * task_set_jobctl_pending - set jobctl pending bits
271 * @mask: pending bits to set
273 * Set @mask in @task->jobctl. @mask must be a subset of
274 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
275 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
276 * cleared. If @task is already being killed or exiting, this function
280 * Must be called with @task->sighand->siglock held.
283 * %true if @mask is set, %false if made noop because @task was dying.
285 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
287 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
288 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
289 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
291 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
294 if (mask & JOBCTL_STOP_SIGMASK)
295 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
297 task->jobctl |= mask;
302 * task_clear_jobctl_trapping - clear jobctl trapping bit
305 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
306 * Clear it and wake up the ptracer. Note that we don't need any further
307 * locking. @task->siglock guarantees that @task->parent points to the
311 * Must be called with @task->sighand->siglock held.
313 void task_clear_jobctl_trapping(struct task_struct *task)
315 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
316 task->jobctl &= ~JOBCTL_TRAPPING;
317 smp_mb(); /* advised by wake_up_bit() */
318 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
323 * task_clear_jobctl_pending - clear jobctl pending bits
325 * @mask: pending bits to clear
327 * Clear @mask from @task->jobctl. @mask must be subset of
328 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
329 * STOP bits are cleared together.
331 * If clearing of @mask leaves no stop or trap pending, this function calls
332 * task_clear_jobctl_trapping().
335 * Must be called with @task->sighand->siglock held.
337 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
339 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
341 if (mask & JOBCTL_STOP_PENDING)
342 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
344 task->jobctl &= ~mask;
346 if (!(task->jobctl & JOBCTL_PENDING_MASK))
347 task_clear_jobctl_trapping(task);
351 * task_participate_group_stop - participate in a group stop
352 * @task: task participating in a group stop
354 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
355 * Group stop states are cleared and the group stop count is consumed if
356 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
357 * stop, the appropriate `SIGNAL_*` flags are set.
360 * Must be called with @task->sighand->siglock held.
363 * %true if group stop completion should be notified to the parent, %false
366 static bool task_participate_group_stop(struct task_struct *task)
368 struct signal_struct *sig = task->signal;
369 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
371 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
373 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
378 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
379 sig->group_stop_count--;
382 * Tell the caller to notify completion iff we are entering into a
383 * fresh group stop. Read comment in do_signal_stop() for details.
385 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
386 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
392 void task_join_group_stop(struct task_struct *task)
394 /* Have the new thread join an on-going signal group stop */
395 unsigned long jobctl = current->jobctl;
396 if (jobctl & JOBCTL_STOP_PENDING) {
397 struct signal_struct *sig = current->signal;
398 unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
399 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
400 if (task_set_jobctl_pending(task, signr | gstop)) {
401 sig->group_stop_count++;
407 * allocate a new signal queue record
408 * - this may be called without locks if and only if t == current, otherwise an
409 * appropriate lock must be held to stop the target task from exiting
411 static struct sigqueue *
412 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
414 struct sigqueue *q = NULL;
415 struct user_struct *user;
419 * Protect access to @t credentials. This can go away when all
420 * callers hold rcu read lock.
422 * NOTE! A pending signal will hold on to the user refcount,
423 * and we get/put the refcount only when the sigpending count
424 * changes from/to zero.
427 user = __task_cred(t)->user;
428 sigpending = atomic_inc_return(&user->sigpending);
433 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
434 q = kmem_cache_alloc(sigqueue_cachep, flags);
436 print_dropped_signal(sig);
439 if (unlikely(q == NULL)) {
440 if (atomic_dec_and_test(&user->sigpending))
443 INIT_LIST_HEAD(&q->list);
451 static void __sigqueue_free(struct sigqueue *q)
453 if (q->flags & SIGQUEUE_PREALLOC)
455 if (atomic_dec_and_test(&q->user->sigpending))
457 kmem_cache_free(sigqueue_cachep, q);
460 void flush_sigqueue(struct sigpending *queue)
464 sigemptyset(&queue->signal);
465 while (!list_empty(&queue->list)) {
466 q = list_entry(queue->list.next, struct sigqueue , list);
467 list_del_init(&q->list);
473 * Flush all pending signals for this kthread.
475 void flush_signals(struct task_struct *t)
479 spin_lock_irqsave(&t->sighand->siglock, flags);
480 clear_tsk_thread_flag(t, TIF_SIGPENDING);
481 flush_sigqueue(&t->pending);
482 flush_sigqueue(&t->signal->shared_pending);
483 spin_unlock_irqrestore(&t->sighand->siglock, flags);
485 EXPORT_SYMBOL(flush_signals);
487 #ifdef CONFIG_POSIX_TIMERS
488 static void __flush_itimer_signals(struct sigpending *pending)
490 sigset_t signal, retain;
491 struct sigqueue *q, *n;
493 signal = pending->signal;
494 sigemptyset(&retain);
496 list_for_each_entry_safe(q, n, &pending->list, list) {
497 int sig = q->info.si_signo;
499 if (likely(q->info.si_code != SI_TIMER)) {
500 sigaddset(&retain, sig);
502 sigdelset(&signal, sig);
503 list_del_init(&q->list);
508 sigorsets(&pending->signal, &signal, &retain);
511 void flush_itimer_signals(void)
513 struct task_struct *tsk = current;
516 spin_lock_irqsave(&tsk->sighand->siglock, flags);
517 __flush_itimer_signals(&tsk->pending);
518 __flush_itimer_signals(&tsk->signal->shared_pending);
519 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
523 void ignore_signals(struct task_struct *t)
527 for (i = 0; i < _NSIG; ++i)
528 t->sighand->action[i].sa.sa_handler = SIG_IGN;
534 * Flush all handlers for a task.
538 flush_signal_handlers(struct task_struct *t, int force_default)
541 struct k_sigaction *ka = &t->sighand->action[0];
542 for (i = _NSIG ; i != 0 ; i--) {
543 if (force_default || ka->sa.sa_handler != SIG_IGN)
544 ka->sa.sa_handler = SIG_DFL;
546 #ifdef __ARCH_HAS_SA_RESTORER
547 ka->sa.sa_restorer = NULL;
549 sigemptyset(&ka->sa.sa_mask);
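/*
 * Example: this reset is what execve() relies on — caught handlers revert to
 * SIG_DFL across an exec while SIG_IGN dispositions survive. A userspace
 * sketch (illustrative only; error handling elided):
 */
#include <signal.h>
#include <unistd.h>

static void ignore_survives_exec(void)
{
	signal(SIGUSR1, SIG_IGN);	/* still SIG_IGN after the exec */
	/* any caught handler would be reset to SIG_DFL by the exec */
	execl("/bin/true", "true", (char *)NULL);
}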
554 bool unhandled_signal(struct task_struct *tsk, int sig)
556 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
557 if (is_global_init(tsk))
560 if (handler != SIG_IGN && handler != SIG_DFL)
563 /* if ptraced, let the tracer determine */
567 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
570 struct sigqueue *q, *first = NULL;
573 * Collect the siginfo appropriate to this signal. Check if
574 * there is another siginfo for the same signal.
576 list_for_each_entry(q, &list->list, list) {
577 if (q->info.si_signo == sig) {
584 sigdelset(&list->signal, sig);
588 list_del_init(&first->list);
589 copy_siginfo(info, &first->info);
592 (first->flags & SIGQUEUE_PREALLOC) &&
593 (info->si_code == SI_TIMER) &&
594 (info->si_sys_private);
596 __sigqueue_free(first);
599 * Ok, it wasn't in the queue. This must be
600 * a fast-pathed signal or we must have been
601 * out of queue space. So zero out the info.
604 info->si_signo = sig;
606 info->si_code = SI_USER;
612 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
613 kernel_siginfo_t *info, bool *resched_timer)
615 int sig = next_signal(pending, mask);
618 collect_signal(sig, pending, info, resched_timer);
623 * Dequeue a signal and return the element to the caller, which is
624 * expected to free it.
626 * All callers have to hold the siglock.
628 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
630 bool resched_timer = false;
633 /* We only dequeue private signals from ourselves, we don't let
634 * signalfd steal them
636 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
638 signr = __dequeue_signal(&tsk->signal->shared_pending,
639 mask, info, &resched_timer);
640 #ifdef CONFIG_POSIX_TIMERS
644 * itimers are process shared and we restart periodic
645 * itimers in the signal delivery path to prevent DoS
646 * attacks in the high resolution timer case. This is
647 * compliant with the old way of self-restarting
648 * itimers, as the SIGALRM is a legacy signal and only
649 * queued once. Changing the restart behaviour to
650 * restart the timer in the signal dequeue path is
651 * reducing the timer noise on heavy loaded !highres
654 if (unlikely(signr == SIGALRM)) {
655 struct hrtimer *tmr = &tsk->signal->real_timer;
657 if (!hrtimer_is_queued(tmr) &&
658 tsk->signal->it_real_incr != 0) {
659 hrtimer_forward(tmr, tmr->base->get_time(),
660 tsk->signal->it_real_incr);
661 hrtimer_restart(tmr);
671 if (unlikely(sig_kernel_stop(signr))) {
673 * Set a marker that we have dequeued a stop signal. Our
674 * caller might release the siglock and then the pending
675 * stop signal it is about to process is no longer in the
676 * pending bitmasks, but must still be cleared by a SIGCONT
677 * (and overruled by a SIGKILL). So those cases clear this
678 * shared flag after we've set it. Note that this flag may
679 * remain set after the signal we return is ignored or
680 * handled. That doesn't matter because its only purpose
681 * is to alert stop-signal processing code when another
682 * processor has come along and cleared the flag.
684 current->jobctl |= JOBCTL_STOP_DEQUEUED;
686 #ifdef CONFIG_POSIX_TIMERS
689 * Release the siglock to ensure proper locking order
690 * of timer locks outside of siglocks. Note, we leave
691 * irqs disabled here, since the posix-timers code is
692 * about to disable them again anyway.
694 spin_unlock(&tsk->sighand->siglock);
695 posixtimer_rearm(info);
696 spin_lock(&tsk->sighand->siglock);
698 /* Don't expose the si_sys_private value to userspace */
699 info->si_sys_private = 0;
704 EXPORT_SYMBOL_GPL(dequeue_signal);
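/*
 * Example: the SIGALRM rearming above is what keeps the classic setitimer()
 * interval timer periodic without ever queueing more than one SIGALRM. A
 * userspace sketch (illustrative only):
 */
#include <sys/time.h>

static void periodic_alarm(void)
{
	struct itimerval it = {
		.it_interval = { .tv_sec = 1 },	/* becomes it_real_incr above */
		.it_value    = { .tv_sec = 1 },
	};

	setitimer(ITIMER_REAL, &it, NULL);	/* SIGALRM every second */
}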
706 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
708 struct task_struct *tsk = current;
709 struct sigpending *pending = &tsk->pending;
710 struct sigqueue *q, *sync = NULL;
713 * Might a synchronous signal be in the queue?
715 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
719 * Return the first synchronous signal in the queue.
721 list_for_each_entry(q, &pending->list, list) {
722 /* Synchronous signals have a positive si_code */
723 if ((q->info.si_code > SI_USER) &&
724 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
732 * Check if there is another siginfo for the same signal.
734 list_for_each_entry_continue(q, &pending->list, list) {
735 if (q->info.si_signo == sync->info.si_signo)
739 sigdelset(&pending->signal, sync->info.si_signo);
742 list_del_init(&sync->list);
743 copy_siginfo(info, &sync->info);
744 __sigqueue_free(sync);
745 return info->si_signo;
749 * Tell a process that it has a new active signal.
751 * NOTE! we rely on the previous spin_lock to
752 * lock interrupts for us! We can only be called with
753 * "siglock" held, and the local interrupt must
754 * have been disabled when that got acquired!
756 * No need to set need_resched since signal event passing
757 * goes through ->blocked
759 void signal_wake_up_state(struct task_struct *t, unsigned int state)
761 set_tsk_thread_flag(t, TIF_SIGPENDING);
763 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
764 * case. We don't check t->state here because there is a race with it
765 * executing on another processor and just now entering stopped state.
766 * By using wake_up_state, we ensure the process will wake up and
767 * handle its death signal.
769 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
774 * Remove signals in mask from the pending set and queue.
777 * All callers must be holding the siglock.
779 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
781 struct sigqueue *q, *n;
784 sigandsets(&m, mask, &s->signal);
785 if (sigisemptyset(&m))
788 sigandnsets(&s->signal, &s->signal, mask);
789 list_for_each_entry_safe(q, n, &s->list, list) {
790 if (sigismember(mask, q->info.si_signo)) {
791 list_del_init(&q->list);
797 static inline int is_si_special(const struct kernel_siginfo *info)
799 return info <= SEND_SIG_PRIV;
802 static inline bool si_fromuser(const struct kernel_siginfo *info)
804 return info == SEND_SIG_NOINFO ||
805 (!is_si_special(info) && SI_FROMUSER(info));
809 * called with RCU read lock from check_kill_permission()
811 static bool kill_ok_by_cred(struct task_struct *t)
813 const struct cred *cred = current_cred();
814 const struct cred *tcred = __task_cred(t);
816 return uid_eq(cred->euid, tcred->suid) ||
817 uid_eq(cred->euid, tcred->uid) ||
818 uid_eq(cred->uid, tcred->suid) ||
819 uid_eq(cred->uid, tcred->uid) ||
820 ns_capable(tcred->user_ns, CAP_KILL);
824 * Bad permissions for sending the signal
825 * - the caller must hold the RCU read lock
827 static int check_kill_permission(int sig, struct kernel_siginfo *info,
828 struct task_struct *t)
833 if (!valid_signal(sig))
836 if (!si_fromuser(info))
839 error = audit_signal_info(sig, t); /* Let audit system see the signal */
843 if (!same_thread_group(current, t) &&
844 !kill_ok_by_cred(t)) {
847 sid = task_session(t);
849 * We don't return the error if sid == NULL. The
850 * task was unhashed; the caller must notice this.
852 if (!sid || sid == task_session(current))
860 return security_task_kill(t, info, sig, NULL);
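/*
 * Example: from userspace, a failed credential check here surfaces as EPERM
 * from kill(2). A sketch (illustrative only):
 */
#include <errno.h>
#include <signal.h>

static int try_signal(pid_t pid)
{
	if (kill(pid, SIGTERM) == -1 && errno == EPERM)
		return -1;	/* no uid/euid match and no CAP_KILL */
	return 0;
}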
864 * ptrace_trap_notify - schedule trap to notify ptracer
865 * @t: tracee wanting to notify tracer
867 * This function schedules sticky ptrace trap which is cleared on the next
868 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
871 * If @t is running, STOP trap will be taken. If trapped for STOP and
872 * ptracer is listening for events, tracee is woken up so that it can
873 * re-trap for the new event. If trapped otherwise, STOP trap will be
874 * eventually taken without returning to userland after the existing traps
875 * are finished by PTRACE_CONT.
878 * Must be called with @task->sighand->siglock held.
880 static void ptrace_trap_notify(struct task_struct *t)
882 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
883 assert_spin_locked(&t->sighand->siglock);
885 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
886 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
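/*
 * Example: the sticky trap above only matters for tracers that attached with
 * PTRACE_SEIZE. A userspace sketch of such a tracer parking a group-stopped
 * tracee with PTRACE_LISTEN so it can re-trap for new events (illustrative
 * only; assumes the tracee has just reported a stop):
 */
#include <sys/ptrace.h>
#include <sys/wait.h>

static void seize_and_listen(pid_t pid)
{
	int status;

	ptrace(PTRACE_SEIZE, pid, 0, 0);
	waitpid(pid, &status, 0);		/* a stop is reported */
	ptrace(PTRACE_LISTEN, pid, 0, 0);	/* stay stopped, await events */
}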
890 * Handle magic process-wide effects of stop/continue signals. Unlike
891 * the signal actions, these happen immediately at signal-generation
892 * time regardless of blocking, ignoring, or handling. This does the
893 * actual continuing for SIGCONT, but not the actual stopping for stop
894 * signals. The process stop is done as a signal action for SIG_DFL.
896 * Returns true if the signal should be actually delivered, otherwise
897 * it should be dropped.
899 static bool prepare_signal(int sig, struct task_struct *p, bool force)
901 struct signal_struct *signal = p->signal;
902 struct task_struct *t;
905 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
906 if (!(signal->flags & SIGNAL_GROUP_EXIT))
907 return sig == SIGKILL;
909 * The process is in the middle of dying, nothing to do.
911 } else if (sig_kernel_stop(sig)) {
913 * This is a stop signal. Remove SIGCONT from all queues.
915 siginitset(&flush, sigmask(SIGCONT));
916 flush_sigqueue_mask(&flush, &signal->shared_pending);
917 for_each_thread(p, t)
918 flush_sigqueue_mask(&flush, &t->pending);
919 } else if (sig == SIGCONT) {
922 * Remove all stop signals from all queues, wake all threads.
924 siginitset(&flush, SIG_KERNEL_STOP_MASK);
925 flush_sigqueue_mask(&flush, &signal->shared_pending);
926 for_each_thread(p, t) {
927 flush_sigqueue_mask(&flush, &t->pending);
928 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
929 if (likely(!(t->ptrace & PT_SEIZED)))
930 wake_up_state(t, __TASK_STOPPED);
932 ptrace_trap_notify(t);
936 * Notify the parent with CLD_CONTINUED if we were stopped.
938 * If we were in the middle of a group stop, we pretend it
939 * was already finished, and then continued. Since SIGCHLD
940 * doesn't queue we report only CLD_STOPPED, as if the next
941 * CLD_CONTINUED was dropped.
944 if (signal->flags & SIGNAL_STOP_STOPPED)
945 why |= SIGNAL_CLD_CONTINUED;
946 else if (signal->group_stop_count)
947 why |= SIGNAL_CLD_STOPPED;
951 * The first thread which returns from do_signal_stop()
952 * will take ->siglock, notice SIGNAL_CLD_MASK, and
953 * notify its parent. See get_signal().
955 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
956 signal->group_stop_count = 0;
957 signal->group_exit_code = 0;
961 return !sig_ignored(p, sig, force);
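/*
 * Example: the process-wide stop/continue effects above are what a parent
 * observes through waitpid(). A userspace sketch (illustrative only):
 */
#include <signal.h>
#include <sys/wait.h>

static void stop_then_continue(pid_t child)
{
	int status;

	kill(child, SIGSTOP);
	waitpid(child, &status, WUNTRACED);	/* WIFSTOPPED(status) */
	kill(child, SIGCONT);
	waitpid(child, &status, WCONTINUED);	/* WIFCONTINUED(status) */
}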
965 * Test if P wants to take SIG. After we've checked all threads with this,
966 * it's equivalent to finding no threads not blocking SIG. Any threads not
967 * blocking SIG were ruled out because they are not running and already
968 * have pending signals. Such threads will dequeue from the shared queue
969 * as soon as they're available, so putting the signal on the shared queue
970 * will be equivalent to sending it to one such thread.
972 static inline bool wants_signal(int sig, struct task_struct *p)
974 if (sigismember(&p->blocked, sig))
977 if (p->flags & PF_EXITING)
983 if (task_is_stopped_or_traced(p))
986 return task_curr(p) || !signal_pending(p);
989 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
991 struct signal_struct *signal = p->signal;
992 struct task_struct *t;
995 * Now find a thread we can wake up to take the signal off the queue.
997 * If the main thread wants the signal, it gets first crack.
998 * Probably the least surprising to the average bear.
1000 if (wants_signal(sig, p))
1002 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
1004 * There is just one thread and it does not need to be woken.
1005 * It will dequeue unblocked signals before it runs again.
1010 * Otherwise try to find a suitable thread.
1012 t = signal->curr_target;
1013 while (!wants_signal(sig, t)) {
1015 if (t == signal->curr_target)
1017 * No thread needs to be woken.
1018 * Any eligible threads will see
1019 * the signal in the queue soon.
1023 signal->curr_target = t;
1027 * Found a killable thread. If the signal will be fatal,
1028 * then start taking the whole group down immediately.
1030 if (sig_fatal(p, sig) &&
1031 !(signal->flags & SIGNAL_GROUP_EXIT) &&
1032 !sigismember(&t->real_blocked, sig) &&
1033 (sig == SIGKILL || !p->ptrace)) {
1035 * This signal will be fatal to the whole group.
1037 if (!sig_kernel_coredump(sig)) {
1039 * Start a group exit and wake everybody up.
1040 * This way we don't have other threads
1041 * running and doing things after a slower
1042 * thread has the fatal signal pending.
1044 signal->flags = SIGNAL_GROUP_EXIT;
1045 signal->group_exit_code = sig;
1046 signal->group_stop_count = 0;
1049 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1050 sigaddset(&t->pending.signal, SIGKILL);
1051 signal_wake_up(t, 1);
1052 } while_each_thread(p, t);
1058 * The signal is already in the shared-pending queue.
1059 * Tell the chosen thread to wake up and dequeue it.
1061 signal_wake_up(t, sig == SIGKILL);
1065 static inline bool legacy_queue(struct sigpending *signals, int sig)
1067 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
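/*
 * Example: the legacy_queue() test above is why classic signals coalesce
 * while real-time signals accumulate. A userspace sketch raising each twice
 * while blocked (illustrative only):
 */
#include <signal.h>
#include <unistd.h>

static void coalesce_demo(void)
{
	union sigval v = { .sival_int = 0 };
	sigset_t s;

	sigemptyset(&s);
	sigaddset(&s, SIGUSR1);
	sigaddset(&s, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &s, NULL);

	raise(SIGUSR1);
	raise(SIGUSR1);				/* coalesced: pending once */
	sigqueue(getpid(), SIGRTMIN, v);
	sigqueue(getpid(), SIGRTMIN, v);	/* both instances queued */
}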
1070 static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1071 enum pid_type type, bool force)
1073 struct sigpending *pending;
1075 int override_rlimit;
1076 int ret = 0, result;
1078 assert_spin_locked(&t->sighand->siglock);
1080 result = TRACE_SIGNAL_IGNORED;
1081 if (!prepare_signal(sig, t, force))
1084 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1086 * Short-circuit ignored signals and support queuing
1087 * exactly one non-rt signal, so that we can get more
1088 * detailed information about the cause of the signal.
1090 result = TRACE_SIGNAL_ALREADY_PENDING;
1091 if (legacy_queue(pending, sig))
1094 result = TRACE_SIGNAL_DELIVERED;
1096 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1098 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1102 * Real-time signals must be queued if sent by sigqueue, or
1103 * some other real-time mechanism. It is implementation
1104 * defined whether kill() does so. We attempt to do so, on
1105 * the principle of least surprise, but since kill is not
1106 * allowed to fail with EAGAIN when low on memory we just
1107 * make sure at least one signal gets delivered and don't
1108 * pass on the info struct.
1111 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1113 override_rlimit = 0;
1115 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1117 list_add_tail(&q->list, &pending->list);
1118 switch ((unsigned long) info) {
1119 case (unsigned long) SEND_SIG_NOINFO:
1120 clear_siginfo(&q->info);
1121 q->info.si_signo = sig;
1122 q->info.si_errno = 0;
1123 q->info.si_code = SI_USER;
1124 q->info.si_pid = task_tgid_nr_ns(current,
1125 task_active_pid_ns(t));
1128 from_kuid_munged(task_cred_xxx(t, user_ns),
1132 case (unsigned long) SEND_SIG_PRIV:
1133 clear_siginfo(&q->info);
1134 q->info.si_signo = sig;
1135 q->info.si_errno = 0;
1136 q->info.si_code = SI_KERNEL;
1141 copy_siginfo(&q->info, info);
1144 } else if (!is_si_special(info) &&
1145 sig >= SIGRTMIN && info->si_code != SI_USER) {
1147 * Queue overflow, abort. We may abort if the
1148 * signal was rt and sent by user using something
1149 * other than kill().
1151 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1156 * This is a silent loss of information. We still
1157 * send the signal, but the *info bits are lost.
1159 result = TRACE_SIGNAL_LOSE_INFO;
1163 signalfd_notify(t, sig);
1164 sigaddset(&pending->signal, sig);
1166 /* Let multiprocess signals appear after on-going forks */
1167 if (type > PIDTYPE_TGID) {
1168 struct multiprocess_signals *delayed;
1169 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1170 sigset_t *signal = &delayed->signal;
1171 /* Can't queue both a stop and a continue signal */
1173 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1174 else if (sig_kernel_stop(sig))
1175 sigdelset(signal, SIGCONT);
1176 sigaddset(signal, sig);
1180 complete_signal(sig, t, type);
1182 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1186 static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1189 switch (siginfo_layout(info->si_signo, info->si_code)) {
1198 case SIL_FAULT_MCEERR:
1199 case SIL_FAULT_BNDERR:
1200 case SIL_FAULT_PKUERR:
1208 static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1211 /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1214 if (info == SEND_SIG_NOINFO) {
1215 /* Force if sent from an ancestor pid namespace */
1216 force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1217 } else if (info == SEND_SIG_PRIV) {
1218 /* Don't ignore kernel generated signals */
1220 } else if (has_si_pid_and_uid(info)) {
1221 /* SIGKILL and SIGSTOP are special or have ids */
1222 struct user_namespace *t_user_ns;
1225 t_user_ns = task_cred_xxx(t, user_ns);
1226 if (current_user_ns() != t_user_ns) {
1227 kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1228 info->si_uid = from_kuid_munged(t_user_ns, uid);
1232 /* A kernel generated signal? */
1233 force = (info->si_code == SI_KERNEL);
1235 /* From an ancestor pid namespace? */
1236 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
1241 return __send_signal(sig, info, t, type, force);
1244 static void print_fatal_signal(int signr)
1246 struct pt_regs *regs = signal_pt_regs();
1247 pr_info("potentially unexpected fatal signal %d.\n", signr);
1249 #if defined(__i386__) && !defined(__arch_um__)
1250 pr_info("code at %08lx: ", regs->ip);
1253 for (i = 0; i < 16; i++) {
1256 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1258 pr_cont("%02x ", insn);
1268 static int __init setup_print_fatal_signals(char *str)
1270 get_option (&str, &print_fatal_signals);
1275 __setup("print-fatal-signals=", setup_print_fatal_signals);
1278 __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1280 return send_signal(sig, info, p, PIDTYPE_TGID);
1283 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1286 unsigned long flags;
1289 if (lock_task_sighand(p, &flags)) {
1290 ret = send_signal(sig, info, p, type);
1291 unlock_task_sighand(p, &flags);
1298 * Force a signal that the process can't ignore: if necessary
1299 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1301 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1302 * since we do not want to have a signal handler that was blocked
1303 * be invoked when user space had explicitly blocked it.
1305 * We don't want to have recursive SIGSEGV's etc, for example,
1306 * that is why we also clear SIGNAL_UNKILLABLE.
1309 force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
1311 unsigned long int flags;
1312 int ret, blocked, ignored;
1313 struct k_sigaction *action;
1314 int sig = info->si_signo;
1316 spin_lock_irqsave(&t->sighand->siglock, flags);
1317 action = &t->sighand->action[sig-1];
1318 ignored = action->sa.sa_handler == SIG_IGN;
1319 blocked = sigismember(&t->blocked, sig);
1320 if (blocked || ignored) {
1321 action->sa.sa_handler = SIG_DFL;
1323 sigdelset(&t->blocked, sig);
1324 recalc_sigpending_and_wake(t);
1328 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1329 * debugging to leave init killable.
1331 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1332 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1333 ret = send_signal(sig, info, t, PIDTYPE_PID);
1334 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1339 int force_sig_info(struct kernel_siginfo *info)
1341 return force_sig_info_to_task(info, current);
1345 * Nuke all other threads in the group.
1347 int zap_other_threads(struct task_struct *p)
1349 struct task_struct *t = p;
1352 p->signal->group_stop_count = 0;
1354 while_each_thread(p, t) {
1355 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1358 /* Don't bother with already dead threads */
1361 sigaddset(&t->pending.signal, SIGKILL);
1362 signal_wake_up(t, 1);
1368 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1369 unsigned long *flags)
1371 struct sighand_struct *sighand;
1375 sighand = rcu_dereference(tsk->sighand);
1376 if (unlikely(sighand == NULL))
1380 * This sighand can be already freed and even reused, but
1381 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1382 * initializes ->siglock: this slab can't go away, it has
1383 * the same object type, ->siglock can't be reinitialized.
1385 * We need to ensure that tsk->sighand is still the same
1386 * after we take the lock, we can race with de_thread() or
1387 * __exit_signal(). In the latter case the next iteration
1388 * must see ->sighand == NULL.
1390 spin_lock_irqsave(&sighand->siglock, *flags);
1391 if (likely(sighand == rcu_access_pointer(tsk->sighand)))
1393 spin_unlock_irqrestore(&sighand->siglock, *flags);
1401 * send signal info to all the members of a group
1403 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1404 struct task_struct *p, enum pid_type type)
1409 ret = check_kill_permission(sig, info, p);
1413 ret = do_send_sig_info(sig, info, p, type);
1419 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1420 * control characters do (^C, ^Z etc)
1421 * - the caller must hold at least a readlock on tasklist_lock
1423 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1425 struct task_struct *p = NULL;
1426 int retval, success;
1430 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1431 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1434 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1435 return success ? 0 : retval;
1438 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1441 struct task_struct *p;
1445 p = pid_task(pid, PIDTYPE_PID);
1447 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1449 if (likely(!p || error != -ESRCH))
1453 * The task was unhashed in between, try again. If it
1454 * is dead, pid_task() will return NULL, if we race with
1455 * de_thread() it will find the new leader.
1460 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1464 error = kill_pid_info(sig, info, find_vpid(pid));
1469 static inline bool kill_as_cred_perm(const struct cred *cred,
1470 struct task_struct *target)
1472 const struct cred *pcred = __task_cred(target);
1474 return uid_eq(cred->euid, pcred->suid) ||
1475 uid_eq(cred->euid, pcred->uid) ||
1476 uid_eq(cred->uid, pcred->suid) ||
1477 uid_eq(cred->uid, pcred->uid);
1481 * The usb asyncio usage of siginfo is wrong. The glibc support
1482 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1483 * AKA after the generic fields:
1484 * kernel_pid_t si_pid;
1485 * kernel_uid32_t si_uid;
1486 * sigval_t si_value;
1488 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1489 * after the generic fields is:
1490 * void __user *si_addr;
1492 * This is a practical problem when there is a 64bit big endian kernel
1493 * and a 32bit userspace, as the 32bit address will be encoded in the
1494 * low 32bits of the pointer. Those low 32bits will be stored at a
1495 * higher address than they would appear at in a 32bit pointer, so
1496 * userspace will not see the address it was expecting for its completions.
1498 * There is nothing in the encoding that can allow
1499 * copy_siginfo_to_user32 to detect this confusion of formats, so
1500 * handle this by requiring the caller of kill_pid_usb_asyncio to
1501 * notice when this situation takes place and to store the 32bit
1502 * pointer in sival_int, instead of sival_ptr of the sigval_t addr
1505 int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1506 struct pid *pid, const struct cred *cred)
1508 struct kernel_siginfo info;
1509 struct task_struct *p;
1510 unsigned long flags;
1513 clear_siginfo(&info);
1514 info.si_signo = sig;
1515 info.si_errno = errno;
1516 info.si_code = SI_ASYNCIO;
1517 *((sigval_t *)&info.si_pid) = addr;
1519 if (!valid_signal(sig))
1523 p = pid_task(pid, PIDTYPE_PID);
1528 if (!kill_as_cred_perm(cred, p)) {
1532 ret = security_task_kill(p, &info, sig, cred);
1537 if (lock_task_sighand(p, &flags)) {
1538 ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
1539 unlock_task_sighand(p, &flags);
1547 EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
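/*
 * Example: a sketch of the caller-side convention described above — a
 * hypothetical driver packing the completion address for a 32-bit task on a
 * 64-bit kernel. queue_asyncio_signal() is an illustrative name, not kernel
 * API, and whether in_compat_syscall() is usable depends on the call site.
 */
static void queue_asyncio_signal(int sig, int errno_val, void __user *uaddr,
				 struct pid *pid, const struct cred *cred)
{
	sigval_t addr;

	if (in_compat_syscall())
		addr.sival_int = (int)(unsigned long)uaddr;	/* low 32 bits */
	else
		addr.sival_ptr = uaddr;
	kill_pid_usb_asyncio(sig, errno_val, addr, pid, cred);
}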
1550 * kill_something_info() interprets pid in interesting ways just like kill(2).
1552 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1553 * is probably wrong. Should make it like BSD or SYSV.
1556 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1562 ret = kill_pid_info(sig, info, find_vpid(pid));
1567 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1571 read_lock(&tasklist_lock);
1573 ret = __kill_pgrp_info(sig, info,
1574 pid ? find_vpid(-pid) : task_pgrp(current));
1576 int retval = 0, count = 0;
1577 struct task_struct * p;
1579 for_each_process(p) {
1580 if (task_pid_vnr(p) > 1 &&
1581 !same_thread_group(p, current)) {
1582 int err = group_send_sig_info(sig, info, p,
1589 ret = count ? retval : -ESRCH;
1591 read_unlock(&tasklist_lock);
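/*
 * Example: the pid interpretations above, as seen from userspace kill(2)
 * (illustrative only):
 */
#include <signal.h>

static void kill_forms(pid_t pid, pid_t pgrp)
{
	kill(pid, SIGTERM);	/* one process */
	kill(0, SIGTERM);	/* every process in the caller's group */
	kill(-pgrp, SIGTERM);	/* every process in group pgrp */
	kill(-1, SIGTERM);	/* everything we may signal, bar init and us */
}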
1597 * These are for backward compatibility with the rest of the kernel source.
1600 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1603 * Make sure legacy kernel users don't send in bad values
1604 * (normal paths check this in check_kill_permission).
1606 if (!valid_signal(sig))
1609 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1611 EXPORT_SYMBOL(send_sig_info);
1613 #define __si_special(priv) \
1614 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1617 send_sig(int sig, struct task_struct *p, int priv)
1619 return send_sig_info(sig, __si_special(priv), p);
1621 EXPORT_SYMBOL(send_sig);
1623 void force_sig(int sig)
1625 struct kernel_siginfo info;
1627 clear_siginfo(&info);
1628 info.si_signo = sig;
1630 info.si_code = SI_KERNEL;
1633 force_sig_info(&info);
1635 EXPORT_SYMBOL(force_sig);
1638 * When things go south during signal handling, we
1639 * will force a SIGSEGV. And if the signal that caused
1640 * the problem was already a SIGSEGV, we'll want to
1641 * make sure we don't even try to deliver the signal.
1643 void force_sigsegv(int sig)
1645 struct task_struct *p = current;
1647 if (sig == SIGSEGV) {
1648 unsigned long flags;
1649 spin_lock_irqsave(&p->sighand->siglock, flags);
1650 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1651 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1656 int force_sig_fault_to_task(int sig, int code, void __user *addr
1657 ___ARCH_SI_TRAPNO(int trapno)
1658 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1659 , struct task_struct *t)
1661 struct kernel_siginfo info;
1663 clear_siginfo(&info);
1664 info.si_signo = sig;
1666 info.si_code = code;
1667 info.si_addr = addr;
1668 #ifdef __ARCH_SI_TRAPNO
1669 info.si_trapno = trapno;
1673 info.si_flags = flags;
1676 return force_sig_info_to_task(&info, t);
1679 int force_sig_fault(int sig, int code, void __user *addr
1680 ___ARCH_SI_TRAPNO(int trapno)
1681 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
1683 return force_sig_fault_to_task(sig, code, addr
1684 ___ARCH_SI_TRAPNO(trapno)
1685 ___ARCH_SI_IA64(imm, flags, isr), current);
1688 int send_sig_fault(int sig, int code, void __user *addr
1689 ___ARCH_SI_TRAPNO(int trapno)
1690 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1691 , struct task_struct *t)
1693 struct kernel_siginfo info;
1695 clear_siginfo(&info);
1696 info.si_signo = sig;
1698 info.si_code = code;
1699 info.si_addr = addr;
1700 #ifdef __ARCH_SI_TRAPNO
1701 info.si_trapno = trapno;
1705 info.si_flags = flags;
1708 return send_sig_info(info.si_signo, &info, t);
1711 int force_sig_mceerr(int code, void __user *addr, short lsb)
1713 struct kernel_siginfo info;
1715 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1716 clear_siginfo(&info);
1717 info.si_signo = SIGBUS;
1719 info.si_code = code;
1720 info.si_addr = addr;
1721 info.si_addr_lsb = lsb;
1722 return force_sig_info(&info);
1725 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1727 struct kernel_siginfo info;
1729 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1730 clear_siginfo(&info);
1731 info.si_signo = SIGBUS;
1733 info.si_code = code;
1734 info.si_addr = addr;
1735 info.si_addr_lsb = lsb;
1736 return send_sig_info(info.si_signo, &info, t);
1738 EXPORT_SYMBOL(send_sig_mceerr);
1740 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1742 struct kernel_siginfo info;
1744 clear_siginfo(&info);
1745 info.si_signo = SIGSEGV;
1747 info.si_code = SEGV_BNDERR;
1748 info.si_addr = addr;
1749 info.si_lower = lower;
1750 info.si_upper = upper;
1751 return force_sig_info(&info);
1755 int force_sig_pkuerr(void __user *addr, u32 pkey)
1757 struct kernel_siginfo info;
1759 clear_siginfo(&info);
1760 info.si_signo = SIGSEGV;
1762 info.si_code = SEGV_PKUERR;
1763 info.si_addr = addr;
1764 info.si_pkey = pkey;
1765 return force_sig_info(&info);
1769 /* For the crazy architectures that include trap information in
1770 * the errno field, instead of an actual errno value.
1772 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1774 struct kernel_siginfo info;
1776 clear_siginfo(&info);
1777 info.si_signo = SIGTRAP;
1778 info.si_errno = errno;
1779 info.si_code = TRAP_HWBKPT;
1780 info.si_addr = addr;
1781 return force_sig_info(&info);
1784 int kill_pgrp(struct pid *pid, int sig, int priv)
1788 read_lock(&tasklist_lock);
1789 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1790 read_unlock(&tasklist_lock);
1794 EXPORT_SYMBOL(kill_pgrp);
1796 int kill_pid(struct pid *pid, int sig, int priv)
1798 return kill_pid_info(sig, __si_special(priv), pid);
1800 EXPORT_SYMBOL(kill_pid);
1803 * These functions support sending signals using preallocated sigqueue
1804 * structures. This is needed "because realtime applications cannot
1805 * afford to lose notifications of asynchronous events, like timer
1806 * expirations or I/O completions". In the case of POSIX Timers
1807 * we allocate the sigqueue structure from the timer_create. If this
1808 * allocation fails we are able to report the failure to the application
1809 * with an EAGAIN error.
1811 struct sigqueue *sigqueue_alloc(void)
1813 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1816 q->flags |= SIGQUEUE_PREALLOC;
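/*
 * Example: this preallocation is what lets timer_create() report queue
 * exhaustion up front with EAGAIN. A userspace sketch (illustrative only;
 * link with -lrt on older glibc):
 */
#include <signal.h>
#include <time.h>

static int make_rt_timer(timer_t *tid)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGRTMIN,
	};

	/* fails with EAGAIN if the sigqueue cannot be preallocated */
	return timer_create(CLOCK_MONOTONIC, &sev, tid);
}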
1821 void sigqueue_free(struct sigqueue *q)
1823 unsigned long flags;
1824 spinlock_t *lock = &current->sighand->siglock;
1826 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1828 * We must hold ->siglock while testing q->list
1829 * to serialize with collect_signal() or with
1830 * __exit_signal()->flush_sigqueue().
1832 spin_lock_irqsave(lock, flags);
1833 q->flags &= ~SIGQUEUE_PREALLOC;
1835 * If it is queued it will be freed when dequeued,
1836 * like the "regular" sigqueue.
1838 if (!list_empty(&q->list))
1840 spin_unlock_irqrestore(lock, flags);
1846 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1848 int sig = q->info.si_signo;
1849 struct sigpending *pending;
1850 struct task_struct *t;
1851 unsigned long flags;
1854 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1858 t = pid_task(pid, type);
1859 if (!t || !likely(lock_task_sighand(t, &flags)))
1862 ret = 1; /* the signal is ignored */
1863 result = TRACE_SIGNAL_IGNORED;
1864 if (!prepare_signal(sig, t, false))
1868 if (unlikely(!list_empty(&q->list))) {
1870 * If an SI_TIMER entry is already queued, just increment
1871 * the overrun count.
1873 BUG_ON(q->info.si_code != SI_TIMER);
1874 q->info.si_overrun++;
1875 result = TRACE_SIGNAL_ALREADY_PENDING;
1878 q->info.si_overrun = 0;
1880 signalfd_notify(t, sig);
1881 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1882 list_add_tail(&q->list, &pending->list);
1883 sigaddset(&pending->signal, sig);
1884 complete_signal(sig, t, type);
1885 result = TRACE_SIGNAL_DELIVERED;
1887 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1888 unlock_task_sighand(t, &flags);
1894 static void do_notify_pidfd(struct task_struct *task)
1898 WARN_ON(task->exit_state == 0);
1899 pid = task_pid(task);
1900 wake_up_all(&pid->wait_pidfd);
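/*
 * Example: the wakeup above is what makes a pidfd readable when the process
 * exits. A userspace sketch using pidfd_open(2) and poll(2) (illustrative
 * only; pidfd_open needs Linux 5.3+):
 */
#include <poll.h>
#include <sys/syscall.h>
#include <unistd.h>

static void wait_via_pidfd(pid_t pid)
{
	int pidfd = syscall(SYS_pidfd_open, pid, 0);
	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };

	poll(&pfd, 1, -1);	/* POLLIN once the process has exited */
	close(pidfd);
}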
1904 * Let a parent know about the death of a child.
1905 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1907 * Returns true if our parent ignored us and so we've switched to
1910 bool do_notify_parent(struct task_struct *tsk, int sig)
1912 struct kernel_siginfo info;
1913 unsigned long flags;
1914 struct sighand_struct *psig;
1915 bool autoreap = false;
1920 /* do_notify_parent_cldstop should have been called instead. */
1921 BUG_ON(task_is_stopped_or_traced(tsk));
1923 BUG_ON(!tsk->ptrace &&
1924 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1926 /* Wake up all pidfd waiters */
1927 do_notify_pidfd(tsk);
1929 if (sig != SIGCHLD) {
1931 * This is only possible if parent == real_parent.
1932 * Check if it has changed security domain.
1934 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
1938 clear_siginfo(&info);
1939 info.si_signo = sig;
1942 * We are under tasklist_lock here so our parent is tied to
1943 * us and cannot change.
1945 * task_active_pid_ns will always return the same pid namespace
1946 * until a task passes through release_task.
1948 * write_lock() currently calls preempt_disable() which is the
1949 * same as rcu_read_lock(), but according to Oleg it is not
1950 * correct to rely on this
1953 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1954 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1958 task_cputime(tsk, &utime, &stime);
1959 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1960 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1962 info.si_status = tsk->exit_code & 0x7f;
1963 if (tsk->exit_code & 0x80)
1964 info.si_code = CLD_DUMPED;
1965 else if (tsk->exit_code & 0x7f)
1966 info.si_code = CLD_KILLED;
1968 info.si_code = CLD_EXITED;
1969 info.si_status = tsk->exit_code >> 8;
1972 psig = tsk->parent->sighand;
1973 spin_lock_irqsave(&psig->siglock, flags);
1974 if (!tsk->ptrace && sig == SIGCHLD &&
1975 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1976 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1978 * We are exiting and our parent doesn't care. POSIX.1
1979 * defines special semantics for setting SIGCHLD to SIG_IGN
1980 * or setting the SA_NOCLDWAIT flag: we should be reaped
1981 * automatically and not left for our parent's wait4 call.
1982 * Rather than having the parent do it as a magic kind of
1983 * signal handler, we just set this to tell do_exit that we
1984 * can be cleaned up without becoming a zombie. Note that
1985 * we still call __wake_up_parent in this case, because a
1986 * blocked sys_wait4 might now return -ECHILD.
1988 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1989 * is implementation-defined: we do (if you don't want
1990 * it, just use SIG_IGN instead).
1993 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1996 if (valid_signal(sig) && sig)
1997 __group_send_sig_info(sig, &info, tsk->parent);
1998 __wake_up_parent(tsk, tsk->parent);
1999 spin_unlock_irqrestore(&psig->siglock, flags);
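/*
 * Example: the autoreap behaviour above, from the parent's side — with
 * SIGCHLD set to SIG_IGN, children are reaped automatically and wait()
 * fails with ECHILD. A userspace sketch (illustrative only):
 */
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

static void autoreap_demo(void)
{
	signal(SIGCHLD, SIG_IGN);	/* children never become zombies */
	if (fork() == 0)
		_exit(0);
	wait(NULL);	/* -1 with errno == ECHILD once no children remain */
}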
2005 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2006 * @tsk: task reporting the state change
2007 * @for_ptracer: the notification is for ptracer
2008 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2010 * Notify @tsk's parent that the stopped/continued state has changed. If
2011 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
2012 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2015 * Must be called with tasklist_lock at least read locked.
2017 static void do_notify_parent_cldstop(struct task_struct *tsk,
2018 bool for_ptracer, int why)
2020 struct kernel_siginfo info;
2021 unsigned long flags;
2022 struct task_struct *parent;
2023 struct sighand_struct *sighand;
2027 parent = tsk->parent;
2029 tsk = tsk->group_leader;
2030 parent = tsk->real_parent;
2033 clear_siginfo(&info);
2034 info.si_signo = SIGCHLD;
2037 * see comment in do_notify_parent() about the following 4 lines
2040 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
2041 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
2044 task_cputime(tsk, &utime, &stime);
2045 info.si_utime = nsec_to_clock_t(utime);
2046 info.si_stime = nsec_to_clock_t(stime);
2051 info.si_status = SIGCONT;
2054 info.si_status = tsk->signal->group_exit_code & 0x7f;
2057 info.si_status = tsk->exit_code & 0x7f;
2063 sighand = parent->sighand;
2064 spin_lock_irqsave(&sighand->siglock, flags);
2065 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2066 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2067 __group_send_sig_info(SIGCHLD, &info, parent);
2069 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2071 __wake_up_parent(tsk, parent);
2072 spin_unlock_irqrestore(&sighand->siglock, flags);
2075 static inline bool may_ptrace_stop(void)
2077 if (!likely(current->ptrace))
2080 * Are we in the middle of do_coredump?
2081 * If so, and our tracer is also part of the coredump, stopping
2082 * is a deadlock situation and pointless because our tracer
2083 * is dead, so don't allow us to stop.
2084 * If SIGKILL was already sent before the caller unlocked
2085 * ->siglock we must see ->core_state != NULL. Otherwise it
2086 * is safe to enter schedule().
2088 * This is almost outdated, a task with the pending SIGKILL can't
2089 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2090 * after SIGKILL was already dequeued.
2092 if (unlikely(current->mm->core_state) &&
2093 unlikely(current->mm == current->parent->mm))
2100 * Return true if there is a SIGKILL that should be waking us up.
2101 * Called with the siglock held.
2103 static bool sigkill_pending(struct task_struct *tsk)
2105 return sigismember(&tsk->pending.signal, SIGKILL) ||
2106 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2110 * This must be called with current->sighand->siglock held.
2112 * This should be the path for all ptrace stops.
2113 * We always set current->last_siginfo while stopped here.
2114 * That makes it a way to test a stopped process for
2115 * being ptrace-stopped vs being job-control-stopped.
2117 * If we actually decide not to stop at all because the tracer
2118 * is gone, we keep current->exit_code unless clear_code.
2120 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2121 __releases(&current->sighand->siglock)
2122 __acquires(&current->sighand->siglock)
2124 bool gstop_done = false;
2126 if (arch_ptrace_stop_needed(exit_code, info)) {
2128 * The arch code has something special to do before a
2129 * ptrace stop. This is allowed to block, e.g. for faults
2130 * on user stack pages. We can't keep the siglock while
2131 * calling arch_ptrace_stop, so we must release it now.
2132 * To preserve proper semantics, we must do this before
2133 * any signal bookkeeping like checking group_stop_count.
2134 * Meanwhile, a SIGKILL could come in before we retake the
2135 * siglock. That must prevent us from sleeping in TASK_TRACED.
2136 * So after regaining the lock, we must check for SIGKILL.
2138 spin_unlock_irq(&current->sighand->siglock);
2139 arch_ptrace_stop(exit_code, info);
2140 spin_lock_irq(&current->sighand->siglock);
2141 if (sigkill_pending(current))
2145 set_special_state(TASK_TRACED);
2148 * We're committing to trapping. TRACED should be visible before
2149 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2150 * Also, transition to TRACED and updates to ->jobctl should be
2151 * atomic with respect to siglock and should be done after the arch
2152 * hook as siglock is released and regrabbed across it.
2157 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2159 * set_current_state() smp_wmb();
2161 * wait_task_stopped()
2162 * task_stopped_code()
2163 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2167 current->last_siginfo = info;
2168 current->exit_code = exit_code;
2171 * If @why is CLD_STOPPED, we're trapping to participate in a group
2172 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
2173 * across siglock relocks since INTERRUPT was scheduled, PENDING
2174 * could be clear now. We act as if SIGCONT is received after
2175 * TASK_TRACED is entered - ignore it.
2177 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2178 gstop_done = task_participate_group_stop(current);
2180 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2181 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2182 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2183 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2185 /* entering a trap, clear TRAPPING */
2186 task_clear_jobctl_trapping(current);
2188 spin_unlock_irq(&current->sighand->siglock);
2189 read_lock(&tasklist_lock);
2190 if (may_ptrace_stop()) {
2192 * Notify parents of the stop.
2194 * While ptraced, there are two parents - the ptracer and
2195 * the real_parent of the group_leader. The ptracer should
2196 * know about every stop while the real parent is only
2197 * interested in the completion of group stop. The states
2198 * for the two don't interact with each other. Notify
2199 * separately unless they're gonna be duplicates.
2201 do_notify_parent_cldstop(current, true, why);
2202 if (gstop_done && ptrace_reparented(current))
2203 do_notify_parent_cldstop(current, false, why);
2206 * Don't want to allow preemption here, because
2207 * sys_ptrace() needs this task to be inactive.
2209 * XXX: implement read_unlock_no_resched().
2212 read_unlock(&tasklist_lock);
2213 cgroup_enter_frozen();
2214 preempt_enable_no_resched();
2215 freezable_schedule();
2216 cgroup_leave_frozen(true);
2219 * By the time we got the lock, our tracer went away.
2220 * Don't drop the lock yet, another tracer may come.
2222 * If @gstop_done, the ptracer went away between group stop
2223 * completion and here. During detach, it would have set
2224 * JOBCTL_STOP_PENDING on us and we'll re-enter
2225 * TASK_STOPPED in do_signal_stop() on return, so notifying
2226 * the real parent of the group stop completion is enough.
2229 do_notify_parent_cldstop(current, false, why);
2231 /* tasklist protects us from ptrace_freeze_traced() */
2232 __set_current_state(TASK_RUNNING);
2234 current->exit_code = 0;
2235 read_unlock(&tasklist_lock);
2239 * We are back. Now reacquire the siglock before touching
2240 * last_siginfo, so that we are sure to have synchronized with
2241 * any signal-sending on another CPU that wants to examine it.
2243 spin_lock_irq(&current->sighand->siglock);
2244 current->last_siginfo = NULL;
2246 /* LISTENING can be set only during STOP traps, clear it */
2247 current->jobctl &= ~JOBCTL_LISTENING;
2250 * Queued signals ignored us while we were stopped for tracing.
2251 * So check for any that we should take before resuming user mode.
2252 * This sets TIF_SIGPENDING, but never clears it.
2254 recalc_sigpending_tsk(current);
2257 static void ptrace_do_notify(int signr, int exit_code, int why)
2259 kernel_siginfo_t info;
2261 clear_siginfo(&info);
2262 info.si_signo = signr;
2263 info.si_code = exit_code;
2264 info.si_pid = task_pid_vnr(current);
2265 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2267 /* Let the debugger run. */
2268 ptrace_stop(exit_code, why, 1, &info);
2271 void ptrace_notify(int exit_code)
2273 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2274 if (unlikely(current->task_works))
2277 spin_lock_irq(&current->sighand->siglock);
2278 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2279 spin_unlock_irq(&current->sighand->siglock);
2283 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2284 * @signr: signr causing group stop if initiating
2286 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2287 * and participate in it. If already set, participate in the existing
2288 * group stop. If participated in a group stop (and thus slept), %true is
2289 * returned with siglock released.
2291 * If ptraced, this function doesn't handle stop itself. Instead,
2292 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2293 * untouched. The caller must ensure that INTERRUPT trap handling takes
2294 * place afterwards.
2297 * Must be called with @current->sighand->siglock held, which is released
2301 * %false if group stop is already cancelled or ptrace trap is scheduled.
2302 * %true if participated in group stop.
2304 static bool do_signal_stop(int signr)
2305 __releases(&current->sighand->siglock)
2307 struct signal_struct *sig = current->signal;
2309 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2310 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2311 struct task_struct *t;
2313 /* signr will be recorded in task->jobctl for retries */
2314 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2316 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2317 unlikely(signal_group_exit(sig)))
2320 * There is no group stop already in progress. We must initiate one now.
2323 * While ptraced, a task may be resumed while group stop is
2324 * still in effect and then receive a stop signal and
2325 * initiate another group stop. This deviates from the
2326 * usual behavior as two consecutive stop signals can't
2327 * cause two group stops when !ptraced. That is why we
2328 * also check !task_is_stopped(t) below.
2330 * The condition can be distinguished by testing whether
2331 * SIGNAL_STOP_STOPPED is already set. Don't generate
2332 * group_exit_code in such case.
2334 * This is not necessary for SIGNAL_STOP_CONTINUED because
2335 * an intervening stop signal is required to cause two
2336 * continued events regardless of ptrace.
2338 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2339 sig->group_exit_code = signr;
2341 sig->group_stop_count = 0;
2343 if (task_set_jobctl_pending(current, signr | gstop))
2344 sig->group_stop_count++;
2347 while_each_thread(current, t) {
2349 * Setting state to TASK_STOPPED for a group
2350 * stop is always done with the siglock held,
2351 * so this check has no races.
2353 if (!task_is_stopped(t) &&
2354 task_set_jobctl_pending(t, signr | gstop)) {
2355 sig->group_stop_count++;
2356 if (likely(!(t->ptrace & PT_SEIZED)))
2357 signal_wake_up(t, 0);
2359 ptrace_trap_notify(t);
2364 if (likely(!current->ptrace)) {
2368 * If there are no other threads in the group, or if there
2369 * is a group stop in progress and we are the last to stop,
2370 * report to the parent.
2372 if (task_participate_group_stop(current))
2373 notify = CLD_STOPPED;
2375 set_special_state(TASK_STOPPED);
2376 spin_unlock_irq(&current->sighand->siglock);
2379 * Notify the parent of the group stop completion. Because
2380 * we're not holding either the siglock or tasklist_lock
2381 * here, the ptracer may attach in between; however, this is for
2382 * group stop and should always be delivered to the real
2383 * parent of the group leader. The new ptracer will get
2384 * its notification when this task transitions into TASK_TRACED.
2388 read_lock(&tasklist_lock);
2389 do_notify_parent_cldstop(current, false, notify);
2390 read_unlock(&tasklist_lock);
2393 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2394 cgroup_enter_frozen();
2395 freezable_schedule();
2399 * While ptraced, group stop is handled by STOP trap.
2400 * Schedule it and let the caller deal with it.
2402 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2408 * do_jobctl_trap - take care of ptrace jobctl traps
2410 * When PT_SEIZED, it's used for both group stop and explicit
2411 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2412 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2413 * the stop signal; otherwise, %SIGTRAP.
2415 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2416 * number as exit_code and no siginfo.
2419 * Must be called with @current->sighand->siglock held, which may be
2420 * released and re-acquired before returning with intervening sleep.
2422 static void do_jobctl_trap(void)
2424 struct signal_struct *signal = current->signal;
2425 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2427 if (current->ptrace & PT_SEIZED) {
2428 if (!signal->group_stop_count &&
2429 !(signal->flags & SIGNAL_STOP_STOPPED))
2431 WARN_ON_ONCE(!signr);
2432 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2435 WARN_ON_ONCE(!signr);
2436 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2437 current->exit_code = 0;
2442 * do_freezer_trap - handle the freezer jobctl trap
2444 * Puts the task into the frozen state, unless the task is about to
2445 * quit, in which case JOBCTL_TRAP_FREEZE is dropped instead.
2448 * Must be called with @current->sighand->siglock held,
2449 * which is always released before returning.
2451 static void do_freezer_trap(void)
2452 __releases(&current->sighand->siglock)
2455 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2456 * let's make another loop to give it a chance to be handled.
2457 * In any case, we'll return here.
2459 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2460 JOBCTL_TRAP_FREEZE) {
2461 spin_unlock_irq(&current->sighand->siglock);
2466 * Now we're sure that there is no pending fatal signal and no
2467 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2468 * immediately (if there is a non-fatal signal pending), and
2469 * put the task into sleep.
2471 __set_current_state(TASK_INTERRUPTIBLE);
2472 clear_thread_flag(TIF_SIGPENDING);
2473 spin_unlock_irq(&current->sighand->siglock);
2474 cgroup_enter_frozen();
2475 freezable_schedule();
2478 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2481 * We do not check sig_kernel_stop(signr) but set this marker
2482 * unconditionally because we do not know whether debugger will
2483 * change signr. This flag has no meaning unless we are going
2484 * to stop after return from ptrace_stop(). In this case it will
2485 * be checked in do_signal_stop(), we should only stop if it was
2486 * not cleared by SIGCONT while we were sleeping. See also the
2487 * comment in dequeue_signal().
2489 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2490 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2492 /* We're back. Did the debugger cancel the sig? */
2493 signr = current->exit_code;
2497 current->exit_code = 0;
2500 * Update the siginfo structure if the signal has
2501 * changed. If the debugger wanted something
2502 * specific in the siginfo structure then it should
2503 * have updated *info via PTRACE_SETSIGINFO.
2505 if (signr != info->si_signo) {
2506 clear_siginfo(info);
2507 info->si_signo = signr;
2509 info->si_code = SI_USER;
2511 info->si_pid = task_pid_vnr(current->parent);
2512 info->si_uid = from_kuid_munged(current_user_ns(),
2513 task_uid(current->parent));
2517 /* If the (new) signal is now blocked, requeue it. */
2518 if (sigismember(&current->blocked, signr)) {
2519 send_signal(signr, info, current, PIDTYPE_PID);
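/*
 * Illustrative sketch (not part of this file): the "debugger cancel
 * the sig" path above corresponds to the tracer passing a different
 * signal number (or 0) when resuming the tracee. Assuming "pid" is a
 * stopped tracee that just reported SIGUSR1:
 *
 *	waitpid(pid, &status, 0);			// tracee reports SIGUSR1
 *	ptrace(PTRACE_CONT, pid, NULL, 0);		// suppress the signal, or
 *	ptrace(PTRACE_CONT, pid, NULL, SIGTERM);	// replace it with SIGTERM
 *
 * The value passed as the fourth argument becomes the tracee's
 * ->exit_code, which is read back above after ptrace_stop() returns.
 */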
2526 bool get_signal(struct ksignal *ksig)
2528 struct sighand_struct *sighand = current->sighand;
2529 struct signal_struct *signal = current->signal;
2532 if (unlikely(current->task_works))
2535 if (unlikely(uprobe_deny_signal()))
2539 * Do this once, we can't return to user-mode if freezing() == T.
2540 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2541 * thus do not need another check after return.
2546 spin_lock_irq(&sighand->siglock);
2548 * Every stopped thread goes here after wakeup. Check to see if
2549 * we should notify the parent, prepare_signal(SIGCONT) encodes
2550 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2552 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2555 if (signal->flags & SIGNAL_CLD_CONTINUED)
2556 why = CLD_CONTINUED;
2560 signal->flags &= ~SIGNAL_CLD_MASK;
2562 spin_unlock_irq(&sighand->siglock);
2565 * Notify the parent that we're continuing. This event is
2566 * always per-process and doesn't make a whole lot of sense
2567 * for ptracers, who shouldn't consume the state via
2568 * wait(2) either, but, for backward compatibility, notify
2569 * the ptracer of the group leader too unless it's gonna be a duplicate.
2572 read_lock(&tasklist_lock);
2573 do_notify_parent_cldstop(current, false, why);
2575 if (ptrace_reparented(current->group_leader))
2576 do_notify_parent_cldstop(current->group_leader,
2578 read_unlock(&tasklist_lock);
2583 /* Has this task already been marked for death? */
2584 if (signal_group_exit(signal)) {
2585 ksig->info.si_signo = signr = SIGKILL;
2586 sigdelset(&current->pending.signal, SIGKILL);
2587 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2588 &sighand->action[SIGKILL - 1]);
2589 recalc_sigpending();
2594 struct k_sigaction *ka;
2596 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2600 if (unlikely(current->jobctl &
2601 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2602 if (current->jobctl & JOBCTL_TRAP_MASK) {
2604 spin_unlock_irq(&sighand->siglock);
2605 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2612 * If the task is leaving the frozen state, let's update
2613 * cgroup counters and reset the frozen bit.
2615 if (unlikely(cgroup_task_frozen(current))) {
2616 spin_unlock_irq(&sighand->siglock);
2617 cgroup_leave_frozen(false);
2622 * Signals generated by the execution of an instruction
2623 * need to be delivered before any other pending signals
2624 * so that the instruction pointer in the signal stack
2625 * frame points to the faulting instruction.
2627 signr = dequeue_synchronous_signal(&ksig->info);
2629 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2632 break; /* will return 0 */
2634 if (unlikely(current->ptrace) && signr != SIGKILL) {
2635 signr = ptrace_signal(signr, &ksig->info);
2640 ka = &sighand->action[signr-1];
2642 /* Trace actually delivered signals. */
2643 trace_signal_deliver(signr, &ksig->info, ka);
2645 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2647 if (ka->sa.sa_handler != SIG_DFL) {
2648 /* Run the handler. */
2651 if (ka->sa.sa_flags & SA_ONESHOT)
2652 ka->sa.sa_handler = SIG_DFL;
2654 break; /* will return non-zero "signr" value */
2658 * Now we are doing the default action for this signal.
2660 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2664 * Global init gets no signals it doesn't want.
2665 * Container-init gets no signals it doesn't want from the same container.
2668 * Note that if global/container-init sees a sig_kernel_only()
2669 * signal here, the signal must have been generated internally
2670 * or must have come from an ancestor namespace. In either
2671 * case, the signal cannot be dropped.
2673 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2674 !sig_kernel_only(signr))
2677 if (sig_kernel_stop(signr)) {
2679 * The default action is to stop all threads in
2680 * the thread group. The job control signals
2681 * do nothing in an orphaned pgrp, but SIGSTOP
2682 * always works. Note that siglock needs to be
2683 * dropped during the call to is_orphaned_pgrp()
2684 * because of lock ordering with tasklist_lock.
2685 * This allows an intervening SIGCONT to be posted.
2686 * We need to check for that and bail out if necessary.
2688 if (signr != SIGSTOP) {
2689 spin_unlock_irq(&sighand->siglock);
2691 /* signals can be posted during this window */
2693 if (is_current_pgrp_orphaned())
2696 spin_lock_irq(&sighand->siglock);
2699 if (likely(do_signal_stop(ksig->info.si_signo))) {
2700 /* It released the siglock. */
2705 * We didn't actually stop, due to a race
2706 * with SIGCONT or something like that.
2712 spin_unlock_irq(&sighand->siglock);
2713 if (unlikely(cgroup_task_frozen(current)))
2714 cgroup_leave_frozen(true);
2717 * Anything else is fatal, maybe with a core dump.
2719 current->flags |= PF_SIGNALED;
2721 if (sig_kernel_coredump(signr)) {
2722 if (print_fatal_signals)
2723 print_fatal_signal(ksig->info.si_signo);
2724 proc_coredump_connector(current);
2726 * If it was able to dump core, this kills all
2727 * other threads in the group and synchronizes with
2728 * their demise. If we lost the race with another
2729 * thread getting here, it set group_exit_code
2730 * first and our do_group_exit call below will use
2731 * that value and ignore the one we pass it.
2733 do_coredump(&ksig->info);
2737 * Death signals, no core dump.
2739 do_group_exit(ksig->info.si_signo);
2742 spin_unlock_irq(&sighand->siglock);
2745 return ksig->sig > 0;
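/*
 * Illustrative userspace sketch (not part of this file): the "run the
 * handler" branch above is what a handler installed with sigaction(2)
 * eventually sees. A minimal SA_SIGINFO receiver:
 *
 *	static void handler(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		// si->si_pid / si->si_uid identify the sender for SI_USER
 *	}
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO;
 *	sigaction(SIGUSR1, &sa, NULL);
 */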
2749 * signal_delivered - finish bookkeeping after a signal has been delivered
2750 * @ksig: kernel signal struct
2751 * @stepping: nonzero if debugger single-step or block-step in use
2753 * This function should be called when a signal has successfully been
2754 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2755 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2756 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2758 static void signal_delivered(struct ksignal *ksig, int stepping)
2762 /* A signal was successfully delivered, and the
2763 saved sigmask was stored on the signal frame,
2764 and will be restored by sigreturn. So we can
2765 simply clear the restore sigmask flag. */
2766 clear_restore_sigmask();
2768 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2769 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2770 sigaddset(&blocked, ksig->sig);
2771 set_current_blocked(&blocked);
2772 tracehook_signal_handler(stepping);
2775 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2778 force_sigsegv(ksig->sig);
2780 signal_delivered(ksig, stepping);
2784 * It could be that complete_signal() picked us to notify about the
2785 * group-wide signal. Other threads should be notified now to take
2786 * the shared signals in @which since we will not.
2788 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2791 struct task_struct *t;
2793 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2794 if (sigisemptyset(&retarget))
2798 while_each_thread(tsk, t) {
2799 if (t->flags & PF_EXITING)
2802 if (!has_pending_signals(&retarget, &t->blocked))
2804 /* Remove the signals this thread can handle. */
2805 sigandsets(&retarget, &retarget, &t->blocked);
2807 if (!signal_pending(t))
2808 signal_wake_up(t, 0);
2810 if (sigisemptyset(&retarget))
2815 void exit_signals(struct task_struct *tsk)
2821 * @tsk is about to have PF_EXITING set - lock out users which
2822 * expect stable threadgroup.
2824 cgroup_threadgroup_change_begin(tsk);
2826 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2827 tsk->flags |= PF_EXITING;
2828 cgroup_threadgroup_change_end(tsk);
2832 spin_lock_irq(&tsk->sighand->siglock);
2834 * From now this task is not visible for group-wide signals,
2835 * see wants_signal(), do_signal_stop().
2837 tsk->flags |= PF_EXITING;
2839 cgroup_threadgroup_change_end(tsk);
2841 if (!signal_pending(tsk))
2844 unblocked = tsk->blocked;
2845 signotset(&unblocked);
2846 retarget_shared_pending(tsk, &unblocked);
2848 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2849 task_participate_group_stop(tsk))
2850 group_stop = CLD_STOPPED;
2852 spin_unlock_irq(&tsk->sighand->siglock);
2855 * If group stop has completed, deliver the notification. This
2856 * should always go to the real parent of the group leader.
2858 if (unlikely(group_stop)) {
2859 read_lock(&tasklist_lock);
2860 do_notify_parent_cldstop(tsk, false, group_stop);
2861 read_unlock(&tasklist_lock);
2866 * System call entry points.
2870 * sys_restart_syscall - restart a system call
2872 SYSCALL_DEFINE0(restart_syscall)
2874 struct restart_block *restart = &current->restart_block;
2875 return restart->fn(restart);
2878 long do_no_restart_syscall(struct restart_block *param)
2883 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2885 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2886 sigset_t newblocked;
2887 /* A set of now blocked but previously unblocked signals. */
2888 sigandnsets(&newblocked, newset, &current->blocked);
2889 retarget_shared_pending(tsk, &newblocked);
2891 tsk->blocked = *newset;
2892 recalc_sigpending();
2896 * set_current_blocked - change current->blocked mask
2899 * It is wrong to change ->blocked directly, this helper should be used
2900 * to ensure the process can't miss a shared signal we are going to block.
2902 void set_current_blocked(sigset_t *newset)
2904 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2905 __set_current_blocked(newset);
2908 void __set_current_blocked(const sigset_t *newset)
2910 struct task_struct *tsk = current;
2913 * In case the signal mask hasn't changed, there is nothing we need
2914 * to do. The current->blocked shouldn't be modified by another task.
2916 if (sigequalsets(&tsk->blocked, newset))
2919 spin_lock_irq(&tsk->sighand->siglock);
2920 __set_task_blocked(tsk, newset);
2921 spin_unlock_irq(&tsk->sighand->siglock);
2925 * This is also useful for kernel threads that want to temporarily
2926 * (or permanently) block certain signals.
2928 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2929 * interface happily blocks "unblockable" signals like SIGKILL and friends.
2932 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2934 struct task_struct *tsk = current;
2937 /* Lockless, only current can change ->blocked, never from irq */
2939 *oldset = tsk->blocked;
2943 sigorsets(&newset, &tsk->blocked, set);
2946 sigandnsets(&newset, &tsk->blocked, set);
2955 __set_current_blocked(&newset);
2958 EXPORT_SYMBOL(sigprocmask);
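/*
 * Minimal in-kernel sketch (an assumption, not taken from this file):
 * a kernel thread using the exported sigprocmask() to block SIGHUP,
 * per the NOTE above that unblockable signals are not special-cased:
 *
 *	sigset_t set;
 *
 *	siginitset(&set, sigmask(SIGHUP));
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 */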
2961 * The API helps set app-provided sigmasks.
2963 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2964 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2966 * Note that it does set_restore_sigmask() in advance, so it must always be
2967 * paired with restore_saved_sigmask_unless() before return from syscall.
2969 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
2975 if (sigsetsize != sizeof(sigset_t))
2977 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
2980 set_restore_sigmask();
2981 current->saved_sigmask = current->blocked;
2982 set_current_blocked(&kmask);
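/*
 * Illustrative userspace sketch (not part of this file): ppoll(2) is
 * one caller that reaches set_user_sigmask(). It atomically installs
 * a temporary mask for the duration of the wait:
 *
 *	sigset_t mask;
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGINT);	// allow only SIGINT to wake us
 *	ppoll(fds, nfds, NULL, &mask);	// old mask restored on return
 *
 * "fds"/"nfds" are assumed to be set up by the caller.
 */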
2987 #ifdef CONFIG_COMPAT
2988 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
2995 if (sigsetsize != sizeof(compat_sigset_t))
2997 if (get_compat_sigset(&kmask, umask))
3000 set_restore_sigmask();
3001 current->saved_sigmask = current->blocked;
3002 set_current_blocked(&kmask);
3009 * sys_rt_sigprocmask - change the list of currently blocked signals
3010 * @how: whether to add, remove, or set signals
3011 * @nset: signals to add, remove, or set (if non-null)
3012 * @oset: previous value of signal mask if non-null
3013 * @sigsetsize: size of sigset_t type
3015 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3016 sigset_t __user *, oset, size_t, sigsetsize)
3018 sigset_t old_set, new_set;
3021 /* XXX: Don't preclude handling different sized sigset_t's. */
3022 if (sigsetsize != sizeof(sigset_t))
3025 old_set = current->blocked;
3028 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3030 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3032 error = sigprocmask(how, &new_set, NULL);
3038 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3045 #ifdef CONFIG_COMPAT
3046 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3047 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3049 sigset_t old_set = current->blocked;
3051 /* XXX: Don't preclude handling different sized sigset_t's. */
3052 if (sigsetsize != sizeof(sigset_t))
3058 if (get_compat_sigset(&new_set, nset))
3060 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3062 error = sigprocmask(how, &new_set, NULL);
3066 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3070 static void do_sigpending(sigset_t *set)
3072 spin_lock_irq(&current->sighand->siglock);
3073 sigorsets(set, &current->pending.signal,
3074 &current->signal->shared_pending.signal);
3075 spin_unlock_irq(&current->sighand->siglock);
3077 /* Outside the lock because only this thread touches it. */
3078 sigandsets(set, &current->blocked, set);
3082 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
3084 * @uset: stores pending signals
3085 * @sigsetsize: size of sigset_t type or larger
3087 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3091 if (sigsetsize > sizeof(*uset))
3094 do_sigpending(&set);
3096 if (copy_to_user(uset, &set, sigsetsize))
3102 #ifdef CONFIG_COMPAT
3103 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3104 compat_size_t, sigsetsize)
3108 if (sigsetsize > sizeof(*uset))
3111 do_sigpending(&set);
3113 return put_compat_sigset(uset, &set, sigsetsize);
3117 static const struct {
3118 unsigned char limit, layout;
3119 } sig_sicodes[] = {
3120 [SIGILL] = { NSIGILL, SIL_FAULT },
3121 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3122 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3123 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3124 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3126 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3128 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3129 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3130 [SIGSYS] = { NSIGSYS, SIL_SYS },
3133 static bool known_siginfo_layout(unsigned sig, int si_code)
3135 if (si_code == SI_KERNEL)
3137 else if (si_code > SI_USER) {
3138 if (sig_specific_sicodes(sig)) {
3139 if (si_code <= sig_sicodes[sig].limit)
3142 else if (si_code <= NSIGPOLL)
3145 else if (si_code >= SI_DETHREAD)
3147 else if (si_code == SI_ASYNCNL)
3152 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3154 enum siginfo_layout layout = SIL_KILL;
3155 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3156 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3157 (si_code <= sig_sicodes[sig].limit)) {
3158 layout = sig_sicodes[sig].layout;
3159 /* Handle the exceptions */
3160 if ((sig == SIGBUS) &&
3161 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3162 layout = SIL_FAULT_MCEERR;
3163 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3164 layout = SIL_FAULT_BNDERR;
3166 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3167 layout = SIL_FAULT_PKUERR;
3170 else if (si_code <= NSIGPOLL)
3173 if (si_code == SI_TIMER)
3175 else if (si_code == SI_SIGIO)
3177 else if (si_code < 0)
3183 static inline char __user *si_expansion(const siginfo_t __user *info)
3185 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3188 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3190 char __user *expansion = si_expansion(to);
3191 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3193 if (clear_user(expansion, SI_EXPANSION_SIZE))
3198 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3199 const siginfo_t __user *from)
3201 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3202 char __user *expansion = si_expansion(from);
3203 char buf[SI_EXPANSION_SIZE];
3206 * An unknown si_code might need more than
3207 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3208 * extra bytes are 0. This guarantees copy_siginfo_to_user
3209 * will return this data to userspace exactly.
3211 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3213 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3221 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3222 const siginfo_t __user *from)
3224 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3226 to->si_signo = signo;
3227 return post_copy_siginfo_from_user(to, from);
3230 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3232 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3234 return post_copy_siginfo_from_user(to, from);
3237 #ifdef CONFIG_COMPAT
3238 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3239 const struct kernel_siginfo *from)
3240 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3242 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3244 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3245 const struct kernel_siginfo *from, bool x32_ABI)
3248 struct compat_siginfo new;
3249 memset(&new, 0, sizeof(new));
3251 new.si_signo = from->si_signo;
3252 new.si_errno = from->si_errno;
3253 new.si_code = from->si_code;
3254 switch(siginfo_layout(from->si_signo, from->si_code)) {
3256 new.si_pid = from->si_pid;
3257 new.si_uid = from->si_uid;
3260 new.si_tid = from->si_tid;
3261 new.si_overrun = from->si_overrun;
3262 new.si_int = from->si_int;
3265 new.si_band = from->si_band;
3266 new.si_fd = from->si_fd;
3269 new.si_addr = ptr_to_compat(from->si_addr);
3270 #ifdef __ARCH_SI_TRAPNO
3271 new.si_trapno = from->si_trapno;
3274 case SIL_FAULT_MCEERR:
3275 new.si_addr = ptr_to_compat(from->si_addr);
3276 #ifdef __ARCH_SI_TRAPNO
3277 new.si_trapno = from->si_trapno;
3279 new.si_addr_lsb = from->si_addr_lsb;
3281 case SIL_FAULT_BNDERR:
3282 new.si_addr = ptr_to_compat(from->si_addr);
3283 #ifdef __ARCH_SI_TRAPNO
3284 new.si_trapno = from->si_trapno;
3286 new.si_lower = ptr_to_compat(from->si_lower);
3287 new.si_upper = ptr_to_compat(from->si_upper);
3289 case SIL_FAULT_PKUERR:
3290 new.si_addr = ptr_to_compat(from->si_addr);
3291 #ifdef __ARCH_SI_TRAPNO
3292 new.si_trapno = from->si_trapno;
3294 new.si_pkey = from->si_pkey;
3297 new.si_pid = from->si_pid;
3298 new.si_uid = from->si_uid;
3299 new.si_status = from->si_status;
3300 #ifdef CONFIG_X86_X32_ABI
3302 new._sifields._sigchld_x32._utime = from->si_utime;
3303 new._sifields._sigchld_x32._stime = from->si_stime;
3307 new.si_utime = from->si_utime;
3308 new.si_stime = from->si_stime;
3312 new.si_pid = from->si_pid;
3313 new.si_uid = from->si_uid;
3314 new.si_int = from->si_int;
3317 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3318 new.si_syscall = from->si_syscall;
3319 new.si_arch = from->si_arch;
3323 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3329 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3330 const struct compat_siginfo *from)
3333 to->si_signo = from->si_signo;
3334 to->si_errno = from->si_errno;
3335 to->si_code = from->si_code;
3336 switch(siginfo_layout(from->si_signo, from->si_code)) {
3338 to->si_pid = from->si_pid;
3339 to->si_uid = from->si_uid;
3342 to->si_tid = from->si_tid;
3343 to->si_overrun = from->si_overrun;
3344 to->si_int = from->si_int;
3347 to->si_band = from->si_band;
3348 to->si_fd = from->si_fd;
3351 to->si_addr = compat_ptr(from->si_addr);
3352 #ifdef __ARCH_SI_TRAPNO
3353 to->si_trapno = from->si_trapno;
3356 case SIL_FAULT_MCEERR:
3357 to->si_addr = compat_ptr(from->si_addr);
3358 #ifdef __ARCH_SI_TRAPNO
3359 to->si_trapno = from->si_trapno;
3361 to->si_addr_lsb = from->si_addr_lsb;
3363 case SIL_FAULT_BNDERR:
3364 to->si_addr = compat_ptr(from->si_addr);
3365 #ifdef __ARCH_SI_TRAPNO
3366 to->si_trapno = from->si_trapno;
3368 to->si_lower = compat_ptr(from->si_lower);
3369 to->si_upper = compat_ptr(from->si_upper);
3371 case SIL_FAULT_PKUERR:
3372 to->si_addr = compat_ptr(from->si_addr);
3373 #ifdef __ARCH_SI_TRAPNO
3374 to->si_trapno = from->si_trapno;
3376 to->si_pkey = from->si_pkey;
3379 to->si_pid = from->si_pid;
3380 to->si_uid = from->si_uid;
3381 to->si_status = from->si_status;
3382 #ifdef CONFIG_X86_X32_ABI
3383 if (in_x32_syscall()) {
3384 to->si_utime = from->_sifields._sigchld_x32._utime;
3385 to->si_stime = from->_sifields._sigchld_x32._stime;
3389 to->si_utime = from->si_utime;
3390 to->si_stime = from->si_stime;
3394 to->si_pid = from->si_pid;
3395 to->si_uid = from->si_uid;
3396 to->si_int = from->si_int;
3399 to->si_call_addr = compat_ptr(from->si_call_addr);
3400 to->si_syscall = from->si_syscall;
3401 to->si_arch = from->si_arch;
3407 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3408 const struct compat_siginfo __user *ufrom)
3410 struct compat_siginfo from;
3412 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3415 from.si_signo = signo;
3416 return post_copy_siginfo_from_user32(to, &from);
3419 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3420 const struct compat_siginfo __user *ufrom)
3422 struct compat_siginfo from;
3424 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3427 return post_copy_siginfo_from_user32(to, &from);
3429 #endif /* CONFIG_COMPAT */
3432 * do_sigtimedwait - wait for queued signals specified in @which
3433 * @which: queued signals to wait for
3434 * @info: if non-null, the signal's siginfo is returned here
3435 * @ts: upper bound on process time suspension
3437 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3438 const struct timespec64 *ts)
3440 ktime_t *to = NULL, timeout = KTIME_MAX;
3441 struct task_struct *tsk = current;
3442 sigset_t mask = *which;
3446 if (!timespec64_valid(ts))
3448 timeout = timespec64_to_ktime(*ts);
3453 * Invert the set of allowed signals to get those we want to block.
3455 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3458 spin_lock_irq(&tsk->sighand->siglock);
3459 sig = dequeue_signal(tsk, &mask, info);
3460 if (!sig && timeout) {
3462 * None ready, temporarily unblock those we're interested in
3463 * while we sleep, so that we'll be awakened when
3464 * they arrive. Unblocking is always fine, we can avoid
3465 * set_current_blocked().
3467 tsk->real_blocked = tsk->blocked;
3468 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3469 recalc_sigpending();
3470 spin_unlock_irq(&tsk->sighand->siglock);
3472 __set_current_state(TASK_INTERRUPTIBLE);
3473 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3475 spin_lock_irq(&tsk->sighand->siglock);
3476 __set_task_blocked(tsk, &tsk->real_blocked);
3477 sigemptyset(&tsk->real_blocked);
3478 sig = dequeue_signal(tsk, &mask, info);
3480 spin_unlock_irq(&tsk->sighand->siglock);
3484 return ret ? -EINTR : -EAGAIN;
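/*
 * Illustrative userspace sketch (not part of this file): the
 * rt_sigtimedwait entry points below are reached via sigtimedwait().
 * Waiting up to one second for a blocked SIGUSR1:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// must be blocked first
 *	if (sigtimedwait(&set, &si, &ts) < 0 && errno == EAGAIN)
 *		;				// timed out, as -EAGAIN above
 */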
3488 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3490 * @uthese: queued signals to wait for
3491 * @uinfo: if non-null, the signal's siginfo is returned here
3492 * @uts: upper bound on process time suspension
3493 * @sigsetsize: size of sigset_t type
3495 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3496 siginfo_t __user *, uinfo,
3497 const struct __kernel_timespec __user *, uts,
3501 struct timespec64 ts;
3502 kernel_siginfo_t info;
3505 /* XXX: Don't preclude handling different sized sigset_t's. */
3506 if (sigsetsize != sizeof(sigset_t))
3509 if (copy_from_user(&these, uthese, sizeof(these)))
3513 if (get_timespec64(&ts, uts))
3517 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3519 if (ret > 0 && uinfo) {
3520 if (copy_siginfo_to_user(uinfo, &info))
3527 #ifdef CONFIG_COMPAT_32BIT_TIME
3528 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3529 siginfo_t __user *, uinfo,
3530 const struct old_timespec32 __user *, uts,
3534 struct timespec64 ts;
3535 kernel_siginfo_t info;
3538 if (sigsetsize != sizeof(sigset_t))
3541 if (copy_from_user(&these, uthese, sizeof(these)))
3545 if (get_old_timespec32(&ts, uts))
3549 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3551 if (ret > 0 && uinfo) {
3552 if (copy_siginfo_to_user(uinfo, &info))
3560 #ifdef CONFIG_COMPAT
3561 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3562 struct compat_siginfo __user *, uinfo,
3563 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3566 struct timespec64 t;
3567 kernel_siginfo_t info;
3570 if (sigsetsize != sizeof(sigset_t))
3573 if (get_compat_sigset(&s, uthese))
3577 if (get_timespec64(&t, uts))
3581 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3583 if (ret > 0 && uinfo) {
3584 if (copy_siginfo_to_user32(uinfo, &info))
3591 #ifdef CONFIG_COMPAT_32BIT_TIME
3592 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3593 struct compat_siginfo __user *, uinfo,
3594 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3597 struct timespec64 t;
3598 kernel_siginfo_t info;
3601 if (sigsetsize != sizeof(sigset_t))
3604 if (get_compat_sigset(&s, uthese))
3608 if (get_old_timespec32(&t, uts))
3612 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3614 if (ret > 0 && uinfo) {
3615 if (copy_siginfo_to_user32(uinfo, &info))
3624 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3626 clear_siginfo(info);
3627 info->si_signo = sig;
3629 info->si_code = SI_USER;
3630 info->si_pid = task_tgid_vnr(current);
3631 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3635 * sys_kill - send a signal to a process
3636 * @pid: the PID of the process
3637 * @sig: signal to be sent
3639 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3641 struct kernel_siginfo info;
3643 prepare_kill_siginfo(sig, &info);
3645 return kill_something_info(sig, &info, pid);
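/*
 * Illustrative userspace sketch (not part of this file): the pid
 * argument selects the scope handled by kill_something_info():
 *
 *	kill(1234, SIGTERM);	// pid > 0: one process
 *	kill(0, SIGTERM);	// pid == 0: caller's process group
 *	kill(-5678, SIGTERM);	// pid < -1: process group 5678
 *	kill(-1, SIGTERM);	// pid == -1: every process we may signal
 */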
3649 * Verify that the signaler and signalee either are in the same pid namespace
3650 * or that the signaler's pid namespace is an ancestor of the signalee's pid namespace.
3653 static bool access_pidfd_pidns(struct pid *pid)
3655 struct pid_namespace *active = task_active_pid_ns(current);
3656 struct pid_namespace *p = ns_of_pid(pid);
3669 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3671 #ifdef CONFIG_COMPAT
3673 * Avoid hooking up compat syscalls and instead handle necessary
3674 * conversions here. Note, this is a stop-gap measure and should not be
3675 * considered a generic solution.
3677 if (in_compat_syscall())
3678 return copy_siginfo_from_user32(
3679 kinfo, (struct compat_siginfo __user *)info);
3681 return copy_siginfo_from_user(kinfo, info);
3684 static struct pid *pidfd_to_pid(const struct file *file)
3688 pid = pidfd_pid(file);
3692 return tgid_pidfd_to_pid(file);
3696 * sys_pidfd_send_signal - Signal a process through a pidfd
3697 * @pidfd: file descriptor of the process
3698 * @sig: signal to send
3699 * @info: signal info
3700 * @flags: future flags
3702 * The syscall currently only signals via PIDTYPE_PID which covers
3703 * kill(<positive-pid>, <signal>). It does not signal threads or process groups.
3705 * In order to extend the syscall to threads and process groups the @flags
3706 * argument should be used. In essence, the @flags argument will determine
3707 * what is signaled and not the file descriptor itself. Put another way,
3708 * grouping is a property of the flags argument, not a property of the file descriptor.
3711 * Return: 0 on success, negative errno on failure
3713 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3714 siginfo_t __user *, info, unsigned int, flags)
3719 kernel_siginfo_t kinfo;
3721 /* Enforce flags be set to 0 until we add an extension. */
3729 /* Is this a pidfd? */
3730 pid = pidfd_to_pid(f.file);
3737 if (!access_pidfd_pidns(pid))
3741 ret = copy_siginfo_from_user_any(&kinfo, info);
3746 if (unlikely(sig != kinfo.si_signo))
3749 /* Only allow sending arbitrary signals to yourself. */
3751 if ((task_pid(current) != pid) &&
3752 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3755 prepare_kill_siginfo(sig, &kinfo);
3758 ret = kill_pid_info(sig, &kinfo, pid);
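/*
 * Illustrative userspace sketch (not part of this file): signalling a
 * process race-free through a pidfd. Glibc may lack wrappers, so raw
 * syscall(2) is used here:
 *
 *	#include <sys/syscall.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *
 * Passing a NULL info takes the prepare_kill_siginfo() branch above;
 * flags must be 0, as enforced at the top of the syscall.
 */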
3766 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3768 struct task_struct *p;
3772 p = find_task_by_vpid(pid);
3773 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3774 error = check_kill_permission(sig, info, p);
3776 * The null signal is a permissions and process existence
3777 * probe. No signal is actually delivered.
3779 if (!error && sig) {
3780 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3782 * If lock_task_sighand() failed we pretend the task
3783 * dies after receiving the signal. The window is tiny,
3784 * and the signal is private anyway.
3786 if (unlikely(error == -ESRCH))
3795 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3797 struct kernel_siginfo info;
3799 clear_siginfo(&info);
3800 info.si_signo = sig;
3802 info.si_code = SI_TKILL;
3803 info.si_pid = task_tgid_vnr(current);
3804 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3806 return do_send_specific(tgid, pid, sig, &info);
3810 * sys_tgkill - send signal to one specific thread
3811 * @tgid: the thread group ID of the thread
3812 * @pid: the PID of the thread
3813 * @sig: signal to be sent
3815 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3816 * exists but no longer belongs to the target process. This
3817 * method solves the problem of threads exiting and PIDs getting reused.
3819 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3821 /* This is only valid for single tasks */
3822 if (pid <= 0 || tgid <= 0)
3825 return do_tkill(tgid, pid, sig);
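/*
 * Illustrative userspace sketch (not part of this file): tgkill is
 * typically invoked through syscall(2) to target one thread of the
 * calling process:
 *
 *	pid_t tid = syscall(SYS_gettid);
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 *
 * The @tgid check above makes this safe against tid reuse across
 * processes.
 */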
3829 * sys_tkill - send signal to one specific task
3830 * @pid: the PID of the task
3831 * @sig: signal to be sent
3833 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3835 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3837 /* This is only valid for single tasks */
3841 return do_tkill(0, pid, sig);
3844 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3846 /* Not even root can pretend to send signals from the kernel.
3847 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3849 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3850 (task_pid_vnr(current) != pid))
3853 /* POSIX.1b doesn't mention process groups. */
3854 return kill_proc_info(sig, info, pid);
3858 * sys_rt_sigqueueinfo - send signal information to a process
3859 * @pid: the PID of the thread
3860 * @sig: signal to be sent
3861 * @uinfo: signal info to be sent
3863 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3864 siginfo_t __user *, uinfo)
3866 kernel_siginfo_t info;
3867 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3870 return do_rt_sigqueueinfo(pid, sig, &info);
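/*
 * Illustrative userspace sketch (not part of this file): the usual way
 * into rt_sigqueueinfo is sigqueue(3), which attaches a payload value:
 *
 *	union sigval val = { .sival_int = 42 };
 *	sigqueue(pid, SIGUSR1, val);	// receiver reads si_value
 *
 * glibc fills si_code with SI_QUEUE (negative), which passes the
 * si_code check in do_rt_sigqueueinfo() above; codes >= 0 would look
 * kernel-generated and are rejected when targeting other processes.
 */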
3873 #ifdef CONFIG_COMPAT
3874 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3877 struct compat_siginfo __user *, uinfo)
3879 kernel_siginfo_t info;
3880 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3883 return do_rt_sigqueueinfo(pid, sig, &info);
3887 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3889 /* This is only valid for single tasks */
3890 if (pid <= 0 || tgid <= 0)
3893 /* Not even root can pretend to send signals from the kernel.
3894 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3896 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3897 (task_pid_vnr(current) != pid))
3900 return do_send_specific(tgid, pid, sig, info);
3903 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3904 siginfo_t __user *, uinfo)
3906 kernel_siginfo_t info;
3907 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3910 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3913 #ifdef CONFIG_COMPAT
3914 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3918 struct compat_siginfo __user *, uinfo)
3920 kernel_siginfo_t info;
3921 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3924 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3929 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3931 void kernel_sigaction(int sig, __sighandler_t action)
3933 spin_lock_irq(&current->sighand->siglock);
3934 current->sighand->action[sig - 1].sa.sa_handler = action;
3935 if (action == SIG_IGN) {
3939 sigaddset(&mask, sig);
3941 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3942 flush_sigqueue_mask(&mask, &current->pending);
3943 recalc_sigpending();
3945 spin_unlock_irq(&current->sighand->siglock);
3947 EXPORT_SYMBOL(kernel_sigaction);
3949 void __weak sigaction_compat_abi(struct k_sigaction *act,
3950 struct k_sigaction *oact)
3954 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3956 struct task_struct *p = current, *t;
3957 struct k_sigaction *k;
3960 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3963 k = &p->sighand->action[sig-1];
3965 spin_lock_irq(&p->sighand->siglock);
3969 sigaction_compat_abi(act, oact);
3972 sigdelsetmask(&act->sa.sa_mask,
3973 sigmask(SIGKILL) | sigmask(SIGSTOP));
3977 * "Setting a signal action to SIG_IGN for a signal that is
3978 * pending shall cause the pending signal to be discarded,
3979 * whether or not it is blocked."
3981 * "Setting a signal action to SIG_DFL for a signal that is
3982 * pending and whose default action is to ignore the signal
3983 * (for example, SIGCHLD), shall cause the pending signal to
3984 * be discarded, whether or not it is blocked"
3986 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3988 sigaddset(&mask, sig);
3989 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3990 for_each_thread(p, t)
3991 flush_sigqueue_mask(&mask, &t->pending);
3995 spin_unlock_irq(&p->sighand->siglock);
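/*
 * Illustrative userspace sketch (not part of this file): the POSIX
 * discard rule implemented above is visible when SIGCHLD is set to
 * SIG_IGN, which also discards any already-pending instances:
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = SIG_IGN;
 *	sigaction(SIGCHLD, &sa, NULL);	// pending SIGCHLD are flushed
 */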
4000 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4003 struct task_struct *t = current;
4006 memset(oss, 0, sizeof(stack_t));
4007 oss->ss_sp = (void __user *) t->sas_ss_sp;
4008 oss->ss_size = t->sas_ss_size;
4009 oss->ss_flags = sas_ss_flags(sp) |
4010 (current->sas_ss_flags & SS_FLAG_BITS);
4014 void __user *ss_sp = ss->ss_sp;
4015 size_t ss_size = ss->ss_size;
4016 unsigned ss_flags = ss->ss_flags;
4019 if (unlikely(on_sig_stack(sp)))
4022 ss_mode = ss_flags & ~SS_FLAG_BITS;
4023 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4027 if (ss_mode == SS_DISABLE) {
4031 if (unlikely(ss_size < min_ss_size))
4035 t->sas_ss_sp = (unsigned long) ss_sp;
4036 t->sas_ss_size = ss_size;
4037 t->sas_ss_flags = ss_flags;
4042 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4046 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4048 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4049 current_user_stack_pointer(),
4051 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
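/*
 * Illustrative userspace sketch (not part of this file): pairing
 * sigaltstack(2) with an SA_ONSTACK handler, e.g. to survive a stack
 * overflow delivering SIGSEGV:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = segv_handler;	// assumed handler
 *	sa.sa_flags = SA_ONSTACK;
 *	sigaction(SIGSEGV, &sa, NULL);
 */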
4056 int restore_altstack(const stack_t __user *uss)
4059 if (copy_from_user(&new, uss, sizeof(stack_t)))
4061 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4063 /* squash all but EFAULT for now */
4067 int __save_altstack(stack_t __user *uss, unsigned long sp)
4069 struct task_struct *t = current;
4070 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4071 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4072 __put_user(t->sas_ss_size, &uss->ss_size);
4075 if (t->sas_ss_flags & SS_AUTODISARM)
4080 #ifdef CONFIG_COMPAT
4081 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4082 compat_stack_t __user *uoss_ptr)
4088 compat_stack_t uss32;
4089 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4091 uss.ss_sp = compat_ptr(uss32.ss_sp);
4092 uss.ss_flags = uss32.ss_flags;
4093 uss.ss_size = uss32.ss_size;
4095 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4096 compat_user_stack_pointer(),
4097 COMPAT_MINSIGSTKSZ);
4098 if (ret >= 0 && uoss_ptr) {
4100 memset(&old, 0, sizeof(old));
4101 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4102 old.ss_flags = uoss.ss_flags;
4103 old.ss_size = uoss.ss_size;
4104 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4110 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4111 const compat_stack_t __user *, uss_ptr,
4112 compat_stack_t __user *, uoss_ptr)
4114 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4117 int compat_restore_altstack(const compat_stack_t __user *uss)
4119 int err = do_compat_sigaltstack(uss, NULL);
4120 /* squash all but -EFAULT for now */
4121 return err == -EFAULT ? err : 0;
4124 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4127 struct task_struct *t = current;
4128 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4130 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4131 __put_user(t->sas_ss_size, &uss->ss_size);
4134 if (t->sas_ss_flags & SS_AUTODISARM)
4140 #ifdef __ARCH_WANT_SYS_SIGPENDING
4143 * sys_sigpending - examine pending signals
4144 * @uset: where the mask of pending signals is returned
4146 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4150 if (sizeof(old_sigset_t) > sizeof(*uset))
4153 do_sigpending(&set);
4155 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4161 #ifdef CONFIG_COMPAT
4162 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4166 do_sigpending(&set);
4168 return put_user(set.sig[0], set32);
4174 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4176 * sys_sigprocmask - examine and change blocked signals
4177 * @how: whether to add, remove, or set signals
4178 * @nset: signals to add or remove (if non-null)
4179 * @oset: previous value of signal mask if non-null
4181 * Some platforms have their own version with special arguments;
4182 * others support only sys_rt_sigprocmask.
4185 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4186 old_sigset_t __user *, oset)
4188 old_sigset_t old_set, new_set;
4189 sigset_t new_blocked;
4191 old_set = current->blocked.sig[0];
4194 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4197 new_blocked = current->blocked;
4201 sigaddsetmask(&new_blocked, new_set);
4204 sigdelsetmask(&new_blocked, new_set);
4207 new_blocked.sig[0] = new_set;
4213 set_current_blocked(&new_blocked);
4217 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4223 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4225 #ifndef CONFIG_ODD_RT_SIGACTION
4227 * sys_rt_sigaction - alter an action taken by a process
4228 * @sig: signal whose action is to be changed
4229 * @act: new sigaction
4230 * @oact: used to save the previous sigaction
4231 * @sigsetsize: size of sigset_t type
4233 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4234 const struct sigaction __user *, act,
4235 struct sigaction __user *, oact,
4238 struct k_sigaction new_sa, old_sa;
4241 /* XXX: Don't preclude handling different sized sigset_t's. */
4242 if (sigsetsize != sizeof(sigset_t))
4245 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4248 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4252 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4257 #ifdef CONFIG_COMPAT
4258 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4259 const struct compat_sigaction __user *, act,
4260 struct compat_sigaction __user *, oact,
4261 compat_size_t, sigsetsize)
4263 struct k_sigaction new_ka, old_ka;
4264 #ifdef __ARCH_HAS_SA_RESTORER
4265 compat_uptr_t restorer;
4269 /* XXX: Don't preclude handling different sized sigset_t's. */
4270 if (sigsetsize != sizeof(compat_sigset_t))
4274 compat_uptr_t handler;
4275 ret = get_user(handler, &act->sa_handler);
4276 new_ka.sa.sa_handler = compat_ptr(handler);
4277 #ifdef __ARCH_HAS_SA_RESTORER
4278 ret |= get_user(restorer, &act->sa_restorer);
4279 new_ka.sa.sa_restorer = compat_ptr(restorer);
4281 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4282 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4287 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4289 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4291 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4292 sizeof(oact->sa_mask));
4293 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4294 #ifdef __ARCH_HAS_SA_RESTORER
4295 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4296 &oact->sa_restorer);
4302 #endif /* !CONFIG_ODD_RT_SIGACTION */
4304 #ifdef CONFIG_OLD_SIGACTION
4305 SYSCALL_DEFINE3(sigaction, int, sig,
4306 const struct old_sigaction __user *, act,
4307 struct old_sigaction __user *, oact)
4309 struct k_sigaction new_ka, old_ka;
4314 if (!access_ok(act, sizeof(*act)) ||
4315 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4316 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4317 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4318 __get_user(mask, &act->sa_mask))
4320 #ifdef __ARCH_HAS_KA_RESTORER
4321 new_ka.ka_restorer = NULL;
4323 siginitset(&new_ka.sa.sa_mask, mask);
4326 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4329 if (!access_ok(oact, sizeof(*oact)) ||
4330 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4331 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4332 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4333 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4340 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4341 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4342 const struct compat_old_sigaction __user *, act,
4343 struct compat_old_sigaction __user *, oact)
4345 struct k_sigaction new_ka, old_ka;
4347 compat_old_sigset_t mask;
4348 compat_uptr_t handler, restorer;
4351 if (!access_ok(act, sizeof(*act)) ||
4352 __get_user(handler, &act->sa_handler) ||
4353 __get_user(restorer, &act->sa_restorer) ||
4354 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4355 __get_user(mask, &act->sa_mask))
4358 #ifdef __ARCH_HAS_KA_RESTORER
4359 new_ka.ka_restorer = NULL;
4361 new_ka.sa.sa_handler = compat_ptr(handler);
4362 new_ka.sa.sa_restorer = compat_ptr(restorer);
4363 siginitset(&new_ka.sa.sa_mask, mask);
4366 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4369 if (!access_ok(oact, sizeof(*oact)) ||
4370 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4371 &oact->sa_handler) ||
4372 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4373 &oact->sa_restorer) ||
4374 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4375 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4382 #ifdef CONFIG_SGETMASK_SYSCALL
4385 * For backwards compatibility. Functionality superseded by sigprocmask.
4387 SYSCALL_DEFINE0(sgetmask)
4390 return current->blocked.sig[0];
4393 SYSCALL_DEFINE1(ssetmask, int, newmask)
4395 int old = current->blocked.sig[0];
4398 siginitset(&newset, newmask);
4399 set_current_blocked(&newset);
4403 #endif /* CONFIG_SGETMASK_SYSCALL */
4405 #ifdef __ARCH_WANT_SYS_SIGNAL
4407 * For backwards compatibility. Functionality superseded by sigaction.
4409 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4411 struct k_sigaction new_sa, old_sa;
4414 new_sa.sa.sa_handler = handler;
4415 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4416 sigemptyset(&new_sa.sa.sa_mask);
4418 ret = do_sigaction(sig, &new_sa, &old_sa);
4420 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4422 #endif /* __ARCH_WANT_SYS_SIGNAL */
4424 #ifdef __ARCH_WANT_SYS_PAUSE
4426 SYSCALL_DEFINE0(pause)
4428 while (!signal_pending(current)) {
4429 __set_current_state(TASK_INTERRUPTIBLE);
4432 return -ERESTARTNOHAND;
4437 static int sigsuspend(sigset_t *set)
4439 current->saved_sigmask = current->blocked;
4440 set_current_blocked(set);
4442 while (!signal_pending(current)) {
4443 __set_current_state(TASK_INTERRUPTIBLE);
4446 set_restore_sigmask();
4447 return -ERESTARTNOHAND;
4451 * sys_rt_sigsuspend - replace the signal mask with the @unewset
4452 * value until a signal is received
4453 * @unewset: new signal mask value
4454 * @sigsetsize: size of sigset_t type
4456 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4460 /* XXX: Don't preclude handling different sized sigset_t's. */
4461 if (sigsetsize != sizeof(sigset_t))
4464 if (copy_from_user(&newset, unewset, sizeof(newset)))
4466 return sigsuspend(&newset);
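/*
 * Illustrative userspace sketch (not part of this file): sigsuspend(2)
 * closes the classic unblock-then-pause race by swapping the mask and
 * sleeping atomically:
 *
 *	sigset_t waitmask;
 *	sigfillset(&waitmask);
 *	sigdelset(&waitmask, SIGUSR1);	// sleep until SIGUSR1
 *	sigsuspend(&waitmask);		// always returns -1 with EINTR
 */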
4469 #ifdef CONFIG_COMPAT
4470 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4474 /* XXX: Don't preclude handling different sized sigset_t's. */
4475 if (sigsetsize != sizeof(sigset_t))
4478 if (get_compat_sigset(&newset, unewset))
4480 return sigsuspend(&newset);
4484 #ifdef CONFIG_OLD_SIGSUSPEND
4485 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4488 siginitset(&blocked, mask);
4489 return sigsuspend(&blocked);
4492 #ifdef CONFIG_OLD_SIGSUSPEND3
4493 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4496 siginitset(&blocked, mask);
4497 return sigsuspend(&blocked);
4501 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4506 static inline void siginfo_buildtime_checks(void)
4508 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4510 /* Verify the offsets in the two siginfos match */
4511 #define CHECK_OFFSET(field) \
4512 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4515 CHECK_OFFSET(si_pid);
4516 CHECK_OFFSET(si_uid);
4519 CHECK_OFFSET(si_tid);
4520 CHECK_OFFSET(si_overrun);
4521 CHECK_OFFSET(si_value);
4524 CHECK_OFFSET(si_pid);
4525 CHECK_OFFSET(si_uid);
4526 CHECK_OFFSET(si_value);
4529 CHECK_OFFSET(si_pid);
4530 CHECK_OFFSET(si_uid);
4531 CHECK_OFFSET(si_status);
4532 CHECK_OFFSET(si_utime);
4533 CHECK_OFFSET(si_stime);
4536 CHECK_OFFSET(si_addr);
4537 CHECK_OFFSET(si_addr_lsb);
4538 CHECK_OFFSET(si_lower);
4539 CHECK_OFFSET(si_upper);
4540 CHECK_OFFSET(si_pkey);
4543 CHECK_OFFSET(si_band);
4544 CHECK_OFFSET(si_fd);
4547 CHECK_OFFSET(si_call_addr);
4548 CHECK_OFFSET(si_syscall);
4549 CHECK_OFFSET(si_arch);
4553 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4554 offsetof(struct siginfo, si_addr));
4555 if (sizeof(int) == sizeof(void __user *)) {
4556 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4557 sizeof(void __user *));
4559 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4560 sizeof_field(struct siginfo, si_uid)) !=
4561 sizeof(void __user *));
4562 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4563 offsetof(struct siginfo, si_uid));
4565 #ifdef CONFIG_COMPAT
4566 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4567 offsetof(struct compat_siginfo, si_addr));
4568 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4569 sizeof(compat_uptr_t));
4570 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4571 sizeof_field(struct siginfo, si_pid));
4575 void __init signals_init(void)
4577 siginfo_buildtime_checks();
4579 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4582 #ifdef CONFIG_KGDB_KDB
4583 #include <linux/kdb.h>
4585 * kdb_send_sig - Allows kdb to send signals without exposing
4586 * signal internals. This function checks if the required locks are
4587 * available before calling the main signal code, to avoid kdb deadlocks.
4590 void kdb_send_sig(struct task_struct *t, int sig)
4592 static struct task_struct *kdb_prev_t;
4594 if (!spin_trylock(&t->sighand->siglock)) {
4595 kdb_printf("Can't do kill command now.\n"
4596 "The sigmask lock is held somewhere else in "
4597 "kernel, try again later\n");
4600 new_t = kdb_prev_t != t;
4602 if (t->state != TASK_RUNNING && new_t) {
4603 spin_unlock(&t->sighand->siglock);
4604 kdb_printf("Process is not RUNNING, sending a signal from "
4605 "kdb risks deadlock\n"
4606 "on the run queue locks. "
4607 "The signal has _not_ been sent.\n"
4608 "Reissue the kill command if you want to risk "
4612 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4613 spin_unlock(&t->sighand->siglock);
4615 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4618 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4620 #endif /* CONFIG_KGDB_KDB */