// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/sched/rt.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless the
	 * signal is SIGKILL, which can't be reported anyway but can be
	 * ignored by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; it is cleared only by callers who
	 * know they should clear it.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
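/*
 * Editor's note: an illustrative userspace sketch (not part of this file)
 * of the dequeue ordering implemented above. With both a synchronous and
 * an asynchronous signal pending in the first word, the synchronous one
 * is reported first even though its signal number is higher:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);	// signal 10, asynchronous
 *	sigaddset(&set, SIGSEGV);	// signal 11, in SYNCHRONOUS_MASK
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	raise(SIGUSR1);
 *	raise(SIGSEGV);
 *	sigwaitinfo(&set, &si);		// si.si_signo == SIGSEGV
 */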
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
static inline struct sigqueue *get_task_cache(struct task_struct *t)
{
	struct sigqueue *q = t->sigqueue_cache;

	if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
		return NULL;
	return q;
}

static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
{
	if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
		return 0;
	return 1;
}
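/*
 * Editor's note (illustrative, not from this file): the two helpers above
 * form a lock-free single-slot cache. get_task_cache() atomically swaps
 * the cached entry out (or returns NULL); put_task_cache() installs @q
 * only if the slot is empty and returns non-zero when it was occupied, so
 * the caller knows it still owns @q:
 *
 *	struct sigqueue *q = get_task_cache(t);	// claim the cached entry
 *	if (!q)
 *		q = kmem_cache_alloc(sigqueue_cachep, flags);
 *	...
 *	if (put_task_cache(t, q))		// slot full: still ours,
 *		kmem_cache_free(sigqueue_cachep, q); //  free it ourselves
 *
 * The cmpxchg() calls make each transition atomic, so two contexts can
 * never both believe they own the same cached sigqueue.
 */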
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
		    int override_rlimit, int fromslab)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		if (!fromslab)
			q = get_task_cache(t);
		if (!q)
			q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
		 int override_rlimit)
{
	return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

static void sigqueue_free_current(struct sigqueue *q)
{
	struct user_struct *up;

	if (q->flags & SIGQUEUE_PREALLOC)
		return;

	up = q->user;
	if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
		if (atomic_dec_and_test(&up->sigpending))
			free_uid(up);
	} else
		__sigqueue_free(q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Called from __exit_signal. Flush tsk->pending and
 * tsk->sigqueue_cache
 */
void flush_task_sigqueue(struct task_struct *tsk)
{
	struct sigqueue *q;

	flush_sigqueue(&tsk->pending);

	q = get_task_cache(tsk);
	if (q)
		kmem_cache_free(sigqueue_cachep, q);
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		sigqueue_free_current(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	WARN_ON_ONCE(tsk != current);

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}
static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
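/*
 * Editor's note: an illustrative userspace consequence of legacy_queue()
 * (not part of this file). While blocked, classic signals coalesce and
 * real-time signals accumulate:
 *
 *	kill(pid, SIGUSR1);		// pends
 *	kill(pid, SIGUSR1);		// already pending: coalesced away
 *
 *	union sigval v = { .sival_int = 1 };
 *	sigqueue(pid, SIGRTMIN, v);	// queued
 *	sigqueue(pid, SIGRTMIN, v);	// queued again, delivered in order
 */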
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			 enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
		       enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
		     enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	/*
	 * On some archs, PREEMPT_RT has to delay sending a signal from a trap
	 * since it can not enable preemption, and the signal code's spin_locks
	 * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
	 * send the signal on exit of the trap.
	 */
#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
	if (in_atomic()) {
		struct task_struct *t = current;

		if (WARN_ON_ONCE(t->forced_info.si_signo))
			return 0;

		if (is_si_special(info)) {
			WARN_ON_ONCE(info != SEND_SIG_PRIV);
			t->forced_info.si_signo = info->si_signo;
			t->forced_info.si_errno = 0;
			t->forced_info.si_code = SI_KERNEL;
			t->forced_info.si_pid = 0;
			t->forced_info.si_uid = 0;
		} else {
			t->forced_info = *info;
		}

		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
		return 0;
	}
#endif
	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}
/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than appears in a 32 bit pointer.  So userspace will
 * not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
ret:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
			    !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
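/*
 * Editor's note: a minimal in-kernel usage sketch (illustrative, not part
 * of this file). @priv selects the special siginfo pointer via
 * __si_special(): 0 sends SEND_SIG_NOINFO (SI_USER, attributed to
 * current), non-zero sends SEND_SIG_PRIV (SI_KERNEL, kernel-generated):
 *
 *	send_sig(SIGHUP, task, 0);	// as if sent by current
 *	send_sig(SIGKILL, task, 1);	// kernel-generated
 */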
void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}
int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(&info);
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
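/*
 * Editor's note: illustrative callers, not part of this file. Both
 * helpers take a struct pid, so a typical pattern elsewhere in the
 * kernel is:
 *
 *	struct pid *pid = find_get_pid(nr);	// or get_task_pid()
 *	if (pid) {
 *		kill_pid(pid, SIGTERM, 1);	// priv: kernel-generated
 *		put_pid(pid);
 *	}
 *
 * kill_pgrp() does the same for a whole process group and takes
 * tasklist_lock internally.
 */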
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from timer_create(), so if this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	/* Preallocated sigqueue objects always come from the slab cache! */
	struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
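/*
 * Editor's note: a sketch of the preallocation flow described above
 * (illustrative; the actual user is the POSIX timer code):
 *
 *	q = sigqueue_alloc();		// at timer_create() time; an
 *	if (!q)				// allocation failure can still
 *		return -EAGAIN;		// be reported to the application
 *	...
 *	q->info.si_code = SI_TIMER;	// hypothetical setup by the caller
 *	send_sigqueue(q, pid, type);	// at expiry: no allocation, so no
 *					// notification is ever lost
 *	...
 *	sigqueue_free(q);		// when the timer is destroyed
 */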
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}
2191 * This must be called with current->sighand->siglock held.
2193 * This should be the path for all ptrace stops.
2194 * We always set current->last_siginfo while stopped here.
2195 * That makes it a way to test a stopped process for
2196 * being ptrace-stopped vs being job-control-stopped.
2198 * If we actually decide not to stop at all because the tracer
2199 * is gone, we keep current->exit_code unless clear_code.
2201 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2202 __releases(¤t->sighand->siglock)
2203 __acquires(¤t->sighand->siglock)
2205 bool gstop_done = false;
2207 if (arch_ptrace_stop_needed(exit_code, info)) {
2209 * The arch code has something special to do before a
2210 * ptrace stop. This is allowed to block, e.g. for faults
2211 * on user stack pages. We can't keep the siglock while
2212 * calling arch_ptrace_stop, so we must release it now.
2213 * To preserve proper semantics, we must do this before
2214 * any signal bookkeeping like checking group_stop_count.
2216 spin_unlock_irq(¤t->sighand->siglock);
2217 arch_ptrace_stop(exit_code, info);
2218 spin_lock_irq(¤t->sighand->siglock);
2222 * schedule() will not sleep if there is a pending signal that
2223 * can awaken the task.
2225 set_special_state(TASK_TRACED);
2228 * We're committing to trapping. TRACED should be visible before
2229 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2230 * Also, transition to TRACED and updates to ->jobctl should be
2231 * atomic with respect to siglock and should be done after the arch
2232 * hook as siglock is released and regrabbed across it.
2237 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2239 * set_current_state() smp_wmb();
2241 * wait_task_stopped()
2242 * task_stopped_code()
2243 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2247 current->last_siginfo = info;
2248 current->exit_code = exit_code;
2251 * If @why is CLD_STOPPED, we're trapping to participate in a group
2252 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
2253 * across siglock relocks since INTERRUPT was scheduled, PENDING
2254 * could be clear now. We act as if SIGCONT is received after
2255 * TASK_TRACED is entered - ignore it.
2257 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2258 gstop_done = task_participate_group_stop(current);
2260 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2261 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2262 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2263 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2265 /* entering a trap, clear TRAPPING */
2266 task_clear_jobctl_trapping(current);
2268 spin_unlock_irq(¤t->sighand->siglock);
2269 read_lock(&tasklist_lock);
2270 if (may_ptrace_stop()) {
2272 * Notify parents of the stop.
2274 * While ptraced, there are two parents - the ptracer and
2275 * the real_parent of the group_leader. The ptracer should
2276 * know about every stop while the real parent is only
2277 * interested in the completion of group stop. The states
2278 * for the two don't interact with each other. Notify
2279 * separately unless they're gonna be duplicates.
2281 do_notify_parent_cldstop(current, true, why);
2282 if (gstop_done && ptrace_reparented(current))
2283 do_notify_parent_cldstop(current, false, why);
2285 read_unlock(&tasklist_lock);
2286 cgroup_enter_frozen();
2287 freezable_schedule();
2288 cgroup_leave_frozen(true);
2291 * By the time we got the lock, our tracer went away.
2292 * Don't drop the lock yet, another tracer may come.
2294 * If @gstop_done, the ptracer went away between group stop
2295 * completion and here. During detach, it would have set
2296 * JOBCTL_STOP_PENDING on us and we'll re-enter
2297 * TASK_STOPPED in do_signal_stop() on return, so notifying
2298 * the real parent of the group stop completion is enough.
2301 do_notify_parent_cldstop(current, false, why);
2303 /* tasklist protects us from ptrace_freeze_traced() */
2304 __set_current_state(TASK_RUNNING);
2306 current->exit_code = 0;
2307 read_unlock(&tasklist_lock);
2311 * We are back. Now reacquire the siglock before touching
2312 * last_siginfo, so that we are sure to have synchronized with
2313 * any signal-sending on another CPU that wants to examine it.
2315 spin_lock_irq(&current->sighand->siglock);
2316 current->last_siginfo = NULL;
2318 /* LISTENING can be set only during STOP traps, clear it */
2319 current->jobctl &= ~JOBCTL_LISTENING;
2322 * Queued signals ignored us while we were stopped for tracing.
2323 * So check for any that we should take before resuming user mode.
2324 * This sets TIF_SIGPENDING, but never clears it.
2326 recalc_sigpending_tsk(current);
2329 static void ptrace_do_notify(int signr, int exit_code, int why)
2331 kernel_siginfo_t info;
2333 clear_siginfo(&info);
2334 info.si_signo = signr;
2335 info.si_code = exit_code;
2336 info.si_pid = task_pid_vnr(current);
2337 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2339 /* Let the debugger run. */
2340 ptrace_stop(exit_code, why, 1, &info);
2343 void ptrace_notify(int exit_code)
2345 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2346 if (unlikely(current->task_works))
2349 spin_lock_irq(&current->sighand->siglock);
2350 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2351 spin_unlock_irq(&current->sighand->siglock);
2355 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2356 * @signr: signr causing group stop if initiating
2358 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2359 * and participate in it. If already set, participate in the existing
2360 * group stop. If participated in a group stop (and thus slept), %true is
2361 * returned with siglock released.
2363 * If ptraced, this function doesn't handle stop itself. Instead,
2364 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2365 * untouched. The caller must ensure that INTERRUPT trap handling takes
2366 place afterwards.
2369 * Must be called with @current->sighand->siglock held, which is released
2373 * %false if group stop is already cancelled or ptrace trap is scheduled.
2374 * %true if participated in group stop.
2376 static bool do_signal_stop(int signr)
2377 __releases(&current->sighand->siglock)
2379 struct signal_struct *sig = current->signal;
2381 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2382 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2383 struct task_struct *t;
2385 /* signr will be recorded in task->jobctl for retries */
2386 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2388 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2389 unlikely(signal_group_exit(sig)))
2392 * There is no group stop already in progress. We must
2393 * initiate one now.
2395 * While ptraced, a task may be resumed while group stop is
2396 * still in effect and then receive a stop signal and
2397 * initiate another group stop. This deviates from the
2398 * usual behavior as two consecutive stop signals can't
2399 * cause two group stops when !ptraced. That is why we
2400 * also check !task_is_stopped(t) below.
2402 * The condition can be distinguished by testing whether
2403 * SIGNAL_STOP_STOPPED is already set. Don't generate
2404 * group_exit_code in such case.
2406 * This is not necessary for SIGNAL_STOP_CONTINUED because
2407 * an intervening stop signal is required to cause two
2408 * continued events regardless of ptrace.
2410 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2411 sig->group_exit_code = signr;
2413 sig->group_stop_count = 0;
2415 if (task_set_jobctl_pending(current, signr | gstop))
2416 sig->group_stop_count++;
2419 while_each_thread(current, t) {
2421 * Setting state to TASK_STOPPED for a group
2422 * stop is always done with the siglock held,
2423 * so this check has no races.
2425 if (!task_is_stopped(t) &&
2426 task_set_jobctl_pending(t, signr | gstop)) {
2427 sig->group_stop_count++;
2428 if (likely(!(t->ptrace & PT_SEIZED)))
2429 signal_wake_up(t, 0);
2431 ptrace_trap_notify(t);
2436 if (likely(!current->ptrace)) {
2440 * If there are no other threads in the group, or if there
2441 * is a group stop in progress and we are the last to stop,
2442 * report to the parent.
2444 if (task_participate_group_stop(current))
2445 notify = CLD_STOPPED;
2447 set_special_state(TASK_STOPPED);
2448 spin_unlock_irq(&current->sighand->siglock);
2451 * Notify the parent of the group stop completion. Because
2452 * we're not holding either the siglock or tasklist_lock
2453 * here, ptracer may attach in between; however, this is for
2454 * group stop and should always be delivered to the real
2455 * parent of the group leader. The new ptracer will get
2456 * its notification when this task transitions into
2460 read_lock(&tasklist_lock);
2461 do_notify_parent_cldstop(current, false, notify);
2462 read_unlock(&tasklist_lock);
2465 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2466 cgroup_enter_frozen();
2467 freezable_schedule();
2471 * While ptraced, group stop is handled by STOP trap.
2472 * Schedule it and let the caller deal with it.
2474 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2480 * do_jobctl_trap - take care of ptrace jobctl traps
2482 * When PT_SEIZED, it's used for both group stop and explicit
2483 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2484 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2485 * the stop signal; otherwise, %SIGTRAP.
2487 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2488 * number as exit_code and no siginfo.
2491 * Must be called with @current->sighand->siglock held, which may be
2492 * released and re-acquired before returning with intervening sleep.
2494 static void do_jobctl_trap(void)
2496 struct signal_struct *signal = current->signal;
2497 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2499 if (current->ptrace & PT_SEIZED) {
2500 if (!signal->group_stop_count &&
2501 !(signal->flags & SIGNAL_STOP_STOPPED))
2503 WARN_ON_ONCE(!signr);
2504 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2507 WARN_ON_ONCE(!signr);
2508 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2509 current->exit_code = 0;
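/*
 * Illustrative userspace sketch (not part of signal.c): a PTRACE_SEIZE
 * tracer can tell the PTRACE_EVENT_STOP traps generated above apart from
 * ordinary signal-delivery stops by inspecting the wait status, following
 * the convention documented in ptrace(2). The helper name is ours.
 */
#if 0 /* userspace example */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void classify_stop(pid_t tracee, int status)
{
	if (!WIFSTOPPED(status))
		return;
	if ((status >> 8) == (SIGTRAP | (PTRACE_EVENT_STOP << 8)))
		printf("%d: SEIZE/INTERRUPT trap\n", (int)tracee);
	else if (((status >> 8) & ~0xff) == (PTRACE_EVENT_STOP << 8))
		printf("%d: group-stop, signal %d\n",
		       (int)tracee, WSTOPSIG(status));
	else
		printf("%d: signal-delivery stop, signal %d\n",
		       (int)tracee, WSTOPSIG(status));
}
#endif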
2514 * do_freezer_trap - handle the freezer jobctl trap
2516 * Puts the task into the frozen state, unless the task is about to quit.
2517 * In that case it drops JOBCTL_TRAP_FREEZE.
2520 * Must be called with @current->sighand->siglock held,
2521 * which is always released before returning.
2523 static void do_freezer_trap(void)
2524 __releases(&current->sighand->siglock)
2527 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2528 * let's make another loop to give it a chance to be handled.
2529 * In any case, we'll return back.
2531 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2532 JOBCTL_TRAP_FREEZE) {
2533 spin_unlock_irq(&current->sighand->siglock);
2538 * Now we're sure that there is no pending fatal signal and no
2539 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2540 * immediately (if there is a non-fatal signal pending), and
2541 * put the task into sleep.
2543 __set_current_state(TASK_INTERRUPTIBLE);
2544 clear_thread_flag(TIF_SIGPENDING);
2545 spin_unlock_irq(&current->sighand->siglock);
2546 cgroup_enter_frozen();
2547 freezable_schedule();
2550 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2553 * We do not check sig_kernel_stop(signr) but set this marker
2554 * unconditionally because we do not know whether debugger will
2555 * change signr. This flag has no meaning unless we are going
2556 * to stop after return from ptrace_stop(). In this case it will
2557 * be checked in do_signal_stop(), we should only stop if it was
2558 * not cleared by SIGCONT while we were sleeping. See also the
2559 * comment in dequeue_signal().
2561 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2562 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2564 /* We're back. Did the debugger cancel the sig? */
2565 signr = current->exit_code;
2569 current->exit_code = 0;
2572 * Update the siginfo structure if the signal has
2573 * changed. If the debugger wanted something
2574 * specific in the siginfo structure then it should
2575 * have updated *info via PTRACE_SETSIGINFO.
2577 if (signr != info->si_signo) {
2578 clear_siginfo(info);
2579 info->si_signo = signr;
2581 info->si_code = SI_USER;
2583 info->si_pid = task_pid_vnr(current->parent);
2584 info->si_uid = from_kuid_munged(current_user_ns(),
2585 task_uid(current->parent));
2589 /* If the (new) signal is now blocked, requeue it. */
2590 if (sigismember(&current->blocked, signr)) {
2591 send_signal(signr, info, current, PIDTYPE_PID);
2598 bool get_signal(struct ksignal *ksig)
2600 struct sighand_struct *sighand = current->sighand;
2601 struct signal_struct *signal = current->signal;
2604 if (unlikely(uprobe_deny_signal()))
2608 * Do this once, we can't return to user-mode if freezing() == T.
2609 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2610 * thus do not need another check after return.
2615 spin_lock_irq(&sighand->siglock);
2617 * Make sure we can safely read ->jobctl in task_work_add(). As Oleg
2618 * states:
2620 * It pairs with mb (implied by cmpxchg) before READ_ONCE. So we
2621 * roughly have:
2623 * task_work_add:                       get_signal:
2624 * STORE(task->task_works, new_work);   STORE(task->jobctl);
2625 * mb();                                mb();
2626 * LOAD(task->jobctl);                  LOAD(task->task_works);
2628 * and we can rely on STORE-MB-LOAD [ in task_work_add].
2630 smp_store_mb(current->jobctl, current->jobctl & ~JOBCTL_TASK_WORK);
2631 if (unlikely(current->task_works)) {
2632 spin_unlock_irq(&sighand->siglock);
2638 * Every stopped thread goes here after wakeup. Check to see if
2639 * we should notify the parent, prepare_signal(SIGCONT) encodes
2640 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2642 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2645 if (signal->flags & SIGNAL_CLD_CONTINUED)
2646 why = CLD_CONTINUED;
2650 signal->flags &= ~SIGNAL_CLD_MASK;
2652 spin_unlock_irq(&sighand->siglock);
2655 * Notify the parent that we're continuing. This event is
2656 * always per-process and doesn't make a whole lot of sense
2657 * for ptracers, who shouldn't consume the state via
2658 * wait(2) either, but, for backward compatibility, notify
2659 * the ptracer of the group leader too unless it's gonna be
2660 * a duplicate.
2662 read_lock(&tasklist_lock);
2663 do_notify_parent_cldstop(current, false, why);
2665 if (ptrace_reparented(current->group_leader))
2666 do_notify_parent_cldstop(current->group_leader,
2668 read_unlock(&tasklist_lock);
2673 /* Has this task already been marked for death? */
2674 if (signal_group_exit(signal)) {
2675 ksig->info.si_signo = signr = SIGKILL;
2676 sigdelset(&current->pending.signal, SIGKILL);
2677 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2678 &sighand->action[SIGKILL - 1]);
2679 recalc_sigpending();
2684 struct k_sigaction *ka;
2686 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2690 if (unlikely(current->jobctl &
2691 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2692 if (current->jobctl & JOBCTL_TRAP_MASK) {
2694 spin_unlock_irq(&sighand->siglock);
2695 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2702 * If the task is leaving the frozen state, let's update
2703 * cgroup counters and reset the frozen bit.
2705 if (unlikely(cgroup_task_frozen(current))) {
2706 spin_unlock_irq(&sighand->siglock);
2707 cgroup_leave_frozen(false);
2712 * Signals generated by the execution of an instruction
2713 * need to be delivered before any other pending signals
2714 * so that the instruction pointer in the signal stack
2715 * frame points to the faulting instruction.
2717 signr = dequeue_synchronous_signal(&ksig->info);
2719 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2722 break; /* will return 0 */
2724 if (unlikely(current->ptrace) && signr != SIGKILL) {
2725 signr = ptrace_signal(signr, &ksig->info);
2730 ka = &sighand->action[signr-1];
2732 /* Trace actually delivered signals. */
2733 trace_signal_deliver(signr, &ksig->info, ka);
2735 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2737 if (ka->sa.sa_handler != SIG_DFL) {
2738 /* Run the handler. */
2741 if (ka->sa.sa_flags & SA_ONESHOT)
2742 ka->sa.sa_handler = SIG_DFL;
2744 break; /* will return non-zero "signr" value */
2748 * Now we are doing the default action for this signal.
2750 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2754 * Global init gets no signals it doesn't want.
2755 * Container-init gets no signals it doesn't want from same
2756 * container.
2758 * Note that if global/container-init sees a sig_kernel_only()
2759 * signal here, the signal must have been generated internally
2760 * or must have come from an ancestor namespace. In either
2761 * case, the signal cannot be dropped.
2763 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2764 !sig_kernel_only(signr))
2767 if (sig_kernel_stop(signr)) {
2769 * The default action is to stop all threads in
2770 * the thread group. The job control signals
2771 * do nothing in an orphaned pgrp, but SIGSTOP
2772 * always works. Note that siglock needs to be
2773 * dropped during the call to is_orphaned_pgrp()
2774 * because of lock ordering with tasklist_lock.
2775 * This allows an intervening SIGCONT to be posted.
2776 * We need to check for that and bail out if necessary.
2778 if (signr != SIGSTOP) {
2779 spin_unlock_irq(&sighand->siglock);
2781 /* signals can be posted during this window */
2783 if (is_current_pgrp_orphaned())
2786 spin_lock_irq(&sighand->siglock);
2789 if (likely(do_signal_stop(ksig->info.si_signo))) {
2790 /* It released the siglock. */
2795 * We didn't actually stop, due to a race
2796 * with SIGCONT or something like that.
2802 spin_unlock_irq(&sighand->siglock);
2803 if (unlikely(cgroup_task_frozen(current)))
2804 cgroup_leave_frozen(true);
2807 * Anything else is fatal, maybe with a core dump.
2809 current->flags |= PF_SIGNALED;
2811 if (sig_kernel_coredump(signr)) {
2812 if (print_fatal_signals)
2813 print_fatal_signal(ksig->info.si_signo);
2814 proc_coredump_connector(current);
2816 * If it was able to dump core, this kills all
2817 * other threads in the group and synchronizes with
2818 * their demise. If we lost the race with another
2819 * thread getting here, it set group_exit_code
2820 * first and our do_group_exit call below will use
2821 * that value and ignore the one we pass it.
2823 do_coredump(&ksig->info);
2827 * Death signals, no core dump.
2829 do_group_exit(ksig->info.si_signo);
2832 spin_unlock_irq(&sighand->siglock);
2835 return ksig->sig > 0;
2839 * signal_delivered - called after a signal has been successfully delivered
2840 * @ksig: kernel signal struct
2841 * @stepping: nonzero if debugger single-step or block-step in use
2843 * This function should be called when a signal has successfully been
2844 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2845 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2846 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2848 static void signal_delivered(struct ksignal *ksig, int stepping)
2852 /* A signal was successfully delivered, and the
2853 saved sigmask was stored on the signal frame,
2854 and will be restored by sigreturn. So we can
2855 simply clear the restore sigmask flag. */
2856 clear_restore_sigmask();
2858 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2859 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2860 sigaddset(&blocked, ksig->sig);
2861 set_current_blocked(&blocked);
2862 tracehook_signal_handler(stepping);
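/*
 * Illustrative userspace sketch (not part of signal.c): observing the
 * masking behaviour described above. While the handler runs, sa_mask has
 * been ORed into ->blocked and, absent SA_NODEFER, the delivered signal
 * itself is blocked too.
 */
#if 0 /* userspace example */
#include <signal.h>
#include <stdio.h>

static void handler(int sig)
{
	sigset_t cur;

	sigprocmask(SIG_BLOCK, NULL, &cur);	/* read the current mask */
	/* printf() is not async-signal-safe; fine for a demo only. */
	printf("in handler: USR1 blocked=%d USR2 blocked=%d\n",
	       sigismember(&cur, SIGUSR1), sigismember(&cur, SIGUSR2));
}

int main(void)
{
	struct sigaction sa = { .sa_handler = handler };

	sigemptyset(&sa.sa_mask);
	sigaddset(&sa.sa_mask, SIGUSR2);	/* extra signal to block */
	sigaction(SIGUSR1, &sa, NULL);		/* no SA_NODEFER */
	raise(SIGUSR1);				/* prints 1 and 1 */
	return 0;
}
#endif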
2865 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2868 force_sigsegv(ksig->sig);
2870 signal_delivered(ksig, stepping);
2874 * It could be that complete_signal() picked us to notify about the
2875 * group-wide signal. Other threads should be notified now to take
2876 * the shared signals in @which since we will not.
2878 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2881 struct task_struct *t;
2883 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2884 if (sigisemptyset(&retarget))
2888 while_each_thread(tsk, t) {
2889 if (t->flags & PF_EXITING)
2892 if (!has_pending_signals(&retarget, &t->blocked))
2894 /* Remove the signals this thread can handle. */
2895 sigandsets(&retarget, &retarget, &t->blocked);
2897 if (!signal_pending(t))
2898 signal_wake_up(t, 0);
2900 if (sigisemptyset(&retarget))
2905 void exit_signals(struct task_struct *tsk)
2911 * @tsk is about to have PF_EXITING set - lock out users which
2912 * expect stable threadgroup.
2914 cgroup_threadgroup_change_begin(tsk);
2916 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2917 tsk->flags |= PF_EXITING;
2918 cgroup_threadgroup_change_end(tsk);
2922 spin_lock_irq(&tsk->sighand->siglock);
2924 * From now this task is not visible for group-wide signals,
2925 * see wants_signal(), do_signal_stop().
2927 tsk->flags |= PF_EXITING;
2929 cgroup_threadgroup_change_end(tsk);
2931 if (!signal_pending(tsk))
2934 unblocked = tsk->blocked;
2935 signotset(&unblocked);
2936 retarget_shared_pending(tsk, &unblocked);
2938 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2939 task_participate_group_stop(tsk))
2940 group_stop = CLD_STOPPED;
2942 spin_unlock_irq(&tsk->sighand->siglock);
2945 * If group stop has completed, deliver the notification. This
2946 * should always go to the real parent of the group leader.
2948 if (unlikely(group_stop)) {
2949 read_lock(&tasklist_lock);
2950 do_notify_parent_cldstop(tsk, false, group_stop);
2951 read_unlock(&tasklist_lock);
2956 * System call entry points.
2960 * sys_restart_syscall - restart a system call
2962 SYSCALL_DEFINE0(restart_syscall)
2964 struct restart_block *restart = &current->restart_block;
2965 return restart->fn(restart);
2968 long do_no_restart_syscall(struct restart_block *param)
2973 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2975 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2976 sigset_t newblocked;
2977 /* A set of now blocked but previously unblocked signals. */
2978 sigandnsets(&newblocked, newset, &current->blocked);
2979 retarget_shared_pending(tsk, &newblocked);
2981 tsk->blocked = *newset;
2982 recalc_sigpending();
2986 * set_current_blocked - change current->blocked mask
2987 * @newset: new mask
2989 * It is wrong to change ->blocked directly, this helper should be used
2990 * to ensure the process can't miss a shared signal we are going to block.
2992 void set_current_blocked(sigset_t *newset)
2994 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2995 __set_current_blocked(newset);
2998 void __set_current_blocked(const sigset_t *newset)
3000 struct task_struct *tsk = current;
3003 * In case the signal mask hasn't changed, there is nothing we need
3004 * to do. The current->blocked shouldn't be modified by other task.
3006 if (sigequalsets(&tsk->blocked, newset))
3009 spin_lock_irq(&tsk->sighand->siglock);
3010 __set_task_blocked(tsk, newset);
3011 spin_unlock_irq(&tsk->sighand->siglock);
3015 * This is also useful for kernel threads that want to temporarily
3016 * (or permanently) block certain signals.
3018 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3019 * interface happily blocks "unblockable" signals like SIGKILL
3020 * and friends.
3022 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3024 struct task_struct *tsk = current;
3027 /* Lockless, only current can change ->blocked, never from irq */
3029 *oldset = tsk->blocked;
3033 sigorsets(&newset, &tsk->blocked, set);
3036 sigandnsets(&newset, &tsk->blocked, set);
3045 __set_current_blocked(&newset);
3048 EXPORT_SYMBOL(sigprocmask);
3051 * This API helps set app-provided sigmasks.
3053 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3054 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3056 * Note that it does set_restore_sigmask() in advance, so it must be always
3057 * paired with restore_saved_sigmask_unless() before return from syscall.
3059 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3065 if (sigsetsize != sizeof(sigset_t))
3067 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3070 set_restore_sigmask();
3071 current->saved_sigmask = current->blocked;
3072 set_current_blocked(&kmask);
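/*
 * Illustrative userspace sketch (not part of signal.c): the classic
 * race-free pselect() pattern that set_user_sigmask() exists to support.
 * The kernel installs the caller's mask before sleeping, and the paired
 * restore_saved_sigmask_unless() defers the restore until any pending
 * signal has been delivered.
 */
#if 0 /* userspace example */
#include <signal.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigchld;

static void on_sigchld(int sig)
{
	got_sigchld = 1;
}

int main(void)
{
	struct sigaction sa = { .sa_handler = on_sigchld };
	sigset_t block, orig;
	fd_set rfds;

	sigemptyset(&sa.sa_mask);
	sigaction(SIGCHLD, &sa, NULL);

	/* Keep SIGCHLD blocked except while sleeping in pselect(). */
	sigemptyset(&block);
	sigaddset(&block, SIGCHLD);
	sigprocmask(SIG_BLOCK, &block, &orig);

	FD_ZERO(&rfds);
	FD_SET(STDIN_FILENO, &rfds);
	if (pselect(STDIN_FILENO + 1, &rfds, NULL, NULL, NULL, &orig) < 0)
		perror("pselect");	/* EINTR if SIGCHLD arrived */
	if (got_sigchld)
		printf("woken by SIGCHLD\n");
	return 0;
}
#endif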
3077 #ifdef CONFIG_COMPAT
3078 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3085 if (sigsetsize != sizeof(compat_sigset_t))
3087 if (get_compat_sigset(&kmask, umask))
3090 set_restore_sigmask();
3091 current->saved_sigmask = current->blocked;
3092 set_current_blocked(&kmask);
3099 * sys_rt_sigprocmask - change the list of currently blocked signals
3100 * @how: whether to add, remove, or set signals
3101 * @nset: new signal mask to apply, if non-null
3102 * @oset: previous value of signal mask if non-null
3103 * @sigsetsize: size of sigset_t type
3105 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3106 sigset_t __user *, oset, size_t, sigsetsize)
3108 sigset_t old_set, new_set;
3111 /* XXX: Don't preclude handling different sized sigset_t's. */
3112 if (sigsetsize != sizeof(sigset_t))
3115 old_set = current->blocked;
3118 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3120 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3122 error = sigprocmask(how, &new_set, NULL);
3128 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
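/*
 * Illustrative userspace sketch (not part of signal.c): the
 * sigdelsetmask() above silently strips SIGKILL and SIGSTOP, so a
 * request to block every signal succeeds without error but leaves the
 * unblockable pair alone.
 */
#if 0 /* userspace example */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t all, out;

	sigfillset(&all);
	sigprocmask(SIG_SETMASK, &all, NULL);	/* returns 0 */

	sigprocmask(SIG_SETMASK, NULL, &out);	/* read back ->blocked */
	printf("KILL=%d STOP=%d TERM=%d\n",
	       sigismember(&out, SIGKILL),	/* 0 */
	       sigismember(&out, SIGSTOP),	/* 0 */
	       sigismember(&out, SIGTERM));	/* 1 */
	return 0;
}
#endif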
3135 #ifdef CONFIG_COMPAT
3136 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3137 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3139 sigset_t old_set = current->blocked;
3141 /* XXX: Don't preclude handling different sized sigset_t's. */
3142 if (sigsetsize != sizeof(sigset_t))
3148 if (get_compat_sigset(&new_set, nset))
3150 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3152 error = sigprocmask(how, &new_set, NULL);
3156 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3160 static void do_sigpending(sigset_t *set)
3162 spin_lock_irq(&current->sighand->siglock);
3163 sigorsets(set, &current->pending.signal,
3164 &current->signal->shared_pending.signal);
3165 spin_unlock_irq(&current->sighand->siglock);
3167 /* Outside the lock because only this thread touches it. */
3168 sigandsets(set, &current->blocked, set);
3172 * sys_rt_sigpending - examine a pending signal that has been raised
3174 * @uset: stores pending signals
3175 * @sigsetsize: size of sigset_t type or larger
3177 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3181 if (sigsetsize > sizeof(*uset))
3184 do_sigpending(&set);
3186 if (copy_to_user(uset, &set, sigsetsize))
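/*
 * Illustrative userspace sketch (not part of signal.c): do_sigpending()
 * ORs the private and shared queues and then ANDs with ->blocked, so
 * only signals that are both pending and blocked are reported.
 */
#if 0 /* userspace example */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGUSR1);			/* stays queued: it is blocked */

	sigpending(&pending);
	printf("SIGUSR1 pending: %d\n", sigismember(&pending, SIGUSR1)); /* 1 */
	return 0;
}
#endif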
3192 #ifdef CONFIG_COMPAT
3193 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3194 compat_size_t, sigsetsize)
3198 if (sigsetsize > sizeof(*uset))
3201 do_sigpending(&set);
3203 return put_compat_sigset(uset, &set, sigsetsize);
3207 static const struct {
3208 unsigned char limit, layout;
3209 } sig_sicodes[] = {
3210 [SIGILL] = { NSIGILL, SIL_FAULT },
3211 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3212 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3213 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3214 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3216 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3218 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3219 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3220 [SIGSYS] = { NSIGSYS, SIL_SYS },
3223 static bool known_siginfo_layout(unsigned sig, int si_code)
3225 if (si_code == SI_KERNEL)
3227 else if ((si_code > SI_USER)) {
3228 if (sig_specific_sicodes(sig)) {
3229 if (si_code <= sig_sicodes[sig].limit)
3232 else if (si_code <= NSIGPOLL)
3235 else if (si_code >= SI_DETHREAD)
3237 else if (si_code == SI_ASYNCNL)
3242 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3244 enum siginfo_layout layout = SIL_KILL;
3245 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3246 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3247 (si_code <= sig_sicodes[sig].limit)) {
3248 layout = sig_sicodes[sig].layout;
3249 /* Handle the exceptions */
3250 if ((sig == SIGBUS) &&
3251 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3252 layout = SIL_FAULT_MCEERR;
3253 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3254 layout = SIL_FAULT_BNDERR;
3256 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3257 layout = SIL_FAULT_PKUERR;
3260 else if (si_code <= NSIGPOLL)
3263 if (si_code == SI_TIMER)
3265 else if (si_code == SI_SIGIO)
3267 else if (si_code < 0)
3273 static inline char __user *si_expansion(const siginfo_t __user *info)
3275 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3278 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3280 char __user *expansion = si_expansion(to);
3281 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3283 if (clear_user(expansion, SI_EXPANSION_SIZE))
3288 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3289 const siginfo_t __user *from)
3291 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3292 char __user *expansion = si_expansion(from);
3293 char buf[SI_EXPANSION_SIZE];
3296 * An unknown si_code might need more than
3297 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3298 * extra bytes are 0. This guarantees copy_siginfo_to_user
3299 * will return this data to userspace exactly.
3301 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3303 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3311 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3312 const siginfo_t __user *from)
3314 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3316 to->si_signo = signo;
3317 return post_copy_siginfo_from_user(to, from);
3320 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3322 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3324 return post_copy_siginfo_from_user(to, from);
3327 #ifdef CONFIG_COMPAT
3329 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3330 * @to: compat siginfo destination
3331 * @from: kernel siginfo source
3333 * Note: This function does not work properly for the SIGCHLD on x32, but
3334 * fortunately it doesn't have to. The only valid callers for this function are
3335 * copy_siginfo_to_user32, which is overridden for x32 and the coredump code.
3336 * The latter does not care because SIGCHLD will never cause a coredump.
3338 void copy_siginfo_to_external32(struct compat_siginfo *to,
3339 const struct kernel_siginfo *from)
3341 memset(to, 0, sizeof(*to));
3343 to->si_signo = from->si_signo;
3344 to->si_errno = from->si_errno;
3345 to->si_code = from->si_code;
3346 switch(siginfo_layout(from->si_signo, from->si_code)) {
3348 to->si_pid = from->si_pid;
3349 to->si_uid = from->si_uid;
3352 to->si_tid = from->si_tid;
3353 to->si_overrun = from->si_overrun;
3354 to->si_int = from->si_int;
3357 to->si_band = from->si_band;
3358 to->si_fd = from->si_fd;
3361 to->si_addr = ptr_to_compat(from->si_addr);
3362 #ifdef __ARCH_SI_TRAPNO
3363 to->si_trapno = from->si_trapno;
3366 case SIL_FAULT_MCEERR:
3367 to->si_addr = ptr_to_compat(from->si_addr);
3368 #ifdef __ARCH_SI_TRAPNO
3369 to->si_trapno = from->si_trapno;
3371 to->si_addr_lsb = from->si_addr_lsb;
3373 case SIL_FAULT_BNDERR:
3374 to->si_addr = ptr_to_compat(from->si_addr);
3375 #ifdef __ARCH_SI_TRAPNO
3376 to->si_trapno = from->si_trapno;
3378 to->si_lower = ptr_to_compat(from->si_lower);
3379 to->si_upper = ptr_to_compat(from->si_upper);
3381 case SIL_FAULT_PKUERR:
3382 to->si_addr = ptr_to_compat(from->si_addr);
3383 #ifdef __ARCH_SI_TRAPNO
3384 to->si_trapno = from->si_trapno;
3386 to->si_pkey = from->si_pkey;
3389 to->si_pid = from->si_pid;
3390 to->si_uid = from->si_uid;
3391 to->si_status = from->si_status;
3392 to->si_utime = from->si_utime;
3393 to->si_stime = from->si_stime;
3396 to->si_pid = from->si_pid;
3397 to->si_uid = from->si_uid;
3398 to->si_int = from->si_int;
3401 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3402 to->si_syscall = from->si_syscall;
3403 to->si_arch = from->si_arch;
3408 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3409 const struct kernel_siginfo *from)
3411 struct compat_siginfo new;
3413 copy_siginfo_to_external32(&new, from);
3414 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3419 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3420 const struct compat_siginfo *from)
3423 to->si_signo = from->si_signo;
3424 to->si_errno = from->si_errno;
3425 to->si_code = from->si_code;
3426 switch(siginfo_layout(from->si_signo, from->si_code)) {
3428 to->si_pid = from->si_pid;
3429 to->si_uid = from->si_uid;
3432 to->si_tid = from->si_tid;
3433 to->si_overrun = from->si_overrun;
3434 to->si_int = from->si_int;
3437 to->si_band = from->si_band;
3438 to->si_fd = from->si_fd;
3441 to->si_addr = compat_ptr(from->si_addr);
3442 #ifdef __ARCH_SI_TRAPNO
3443 to->si_trapno = from->si_trapno;
3446 case SIL_FAULT_MCEERR:
3447 to->si_addr = compat_ptr(from->si_addr);
3448 #ifdef __ARCH_SI_TRAPNO
3449 to->si_trapno = from->si_trapno;
3451 to->si_addr_lsb = from->si_addr_lsb;
3453 case SIL_FAULT_BNDERR:
3454 to->si_addr = compat_ptr(from->si_addr);
3455 #ifdef __ARCH_SI_TRAPNO
3456 to->si_trapno = from->si_trapno;
3458 to->si_lower = compat_ptr(from->si_lower);
3459 to->si_upper = compat_ptr(from->si_upper);
3461 case SIL_FAULT_PKUERR:
3462 to->si_addr = compat_ptr(from->si_addr);
3463 #ifdef __ARCH_SI_TRAPNO
3464 to->si_trapno = from->si_trapno;
3466 to->si_pkey = from->si_pkey;
3469 to->si_pid = from->si_pid;
3470 to->si_uid = from->si_uid;
3471 to->si_status = from->si_status;
3472 #ifdef CONFIG_X86_X32_ABI
3473 if (in_x32_syscall()) {
3474 to->si_utime = from->_sifields._sigchld_x32._utime;
3475 to->si_stime = from->_sifields._sigchld_x32._stime;
3479 to->si_utime = from->si_utime;
3480 to->si_stime = from->si_stime;
3484 to->si_pid = from->si_pid;
3485 to->si_uid = from->si_uid;
3486 to->si_int = from->si_int;
3489 to->si_call_addr = compat_ptr(from->si_call_addr);
3490 to->si_syscall = from->si_syscall;
3491 to->si_arch = from->si_arch;
3497 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3498 const struct compat_siginfo __user *ufrom)
3500 struct compat_siginfo from;
3502 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3505 from.si_signo = signo;
3506 return post_copy_siginfo_from_user32(to, &from);
3509 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3510 const struct compat_siginfo __user *ufrom)
3512 struct compat_siginfo from;
3514 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3517 return post_copy_siginfo_from_user32(to, &from);
3519 #endif /* CONFIG_COMPAT */
3522 * do_sigtimedwait - wait for queued signals specified in @which
3523 * @which: queued signals to wait for
3524 * @info: if non-null, the signal's siginfo is returned here
3525 * @ts: upper bound on process time suspension
3527 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3528 const struct timespec64 *ts)
3530 ktime_t *to = NULL, timeout = KTIME_MAX;
3531 struct task_struct *tsk = current;
3532 sigset_t mask = *which;
3536 if (!timespec64_valid(ts))
3538 timeout = timespec64_to_ktime(*ts);
3543 * Invert the set of allowed signals to get those we want to block.
3545 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3548 spin_lock_irq(&tsk->sighand->siglock);
3549 sig = dequeue_signal(tsk, &mask, info);
3550 if (!sig && timeout) {
3552 * None ready, temporarily unblock those we're interested in
3553 * while we are sleeping, so that we'll be awakened when
3554 * they arrive. Unblocking is always fine, we can avoid
3555 * set_current_blocked().
3557 tsk->real_blocked = tsk->blocked;
3558 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3559 recalc_sigpending();
3560 spin_unlock_irq(&tsk->sighand->siglock);
3562 __set_current_state(TASK_INTERRUPTIBLE);
3563 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3565 spin_lock_irq(&tsk->sighand->siglock);
3566 __set_task_blocked(tsk, &tsk->real_blocked);
3567 sigemptyset(&tsk->real_blocked);
3568 sig = dequeue_signal(tsk, &mask, info);
3570 spin_unlock_irq(&tsk->sighand->siglock);
3574 return ret ? -EINTR : -EAGAIN;
3578 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3580 * @uthese: queued signals to wait for
3581 * @uinfo: if non-null, the signal's siginfo is returned here
3582 * @uts: upper bound on process time suspension
3583 * @sigsetsize: size of sigset_t type
3585 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3586 siginfo_t __user *, uinfo,
3587 const struct __kernel_timespec __user *, uts,
3591 struct timespec64 ts;
3592 kernel_siginfo_t info;
3595 /* XXX: Don't preclude handling different sized sigset_t's. */
3596 if (sigsetsize != sizeof(sigset_t))
3599 if (copy_from_user(&these, uthese, sizeof(these)))
3603 if (get_timespec64(&ts, uts))
3607 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3609 if (ret > 0 && uinfo) {
3610 if (copy_siginfo_to_user(uinfo, &info))
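/*
 * Illustrative userspace sketch (not part of signal.c): synchronous
 * signal handling with sigtimedwait(). The signal must be blocked first,
 * otherwise it may be delivered the usual way instead of staying queued
 * for do_sigtimedwait() to dequeue.
 */
#if 0 /* userspace example */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	siginfo_t info;
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);
	if (sigtimedwait(&set, &info, &ts) == SIGUSR1)
		printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
	else
		perror("sigtimedwait");	/* EAGAIN maps to the -EAGAIN above */
	return 0;
}
#endif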
3617 #ifdef CONFIG_COMPAT_32BIT_TIME
3618 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3619 siginfo_t __user *, uinfo,
3620 const struct old_timespec32 __user *, uts,
3624 struct timespec64 ts;
3625 kernel_siginfo_t info;
3628 if (sigsetsize != sizeof(sigset_t))
3631 if (copy_from_user(&these, uthese, sizeof(these)))
3635 if (get_old_timespec32(&ts, uts))
3639 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3641 if (ret > 0 && uinfo) {
3642 if (copy_siginfo_to_user(uinfo, &info))
3650 #ifdef CONFIG_COMPAT
3651 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3652 struct compat_siginfo __user *, uinfo,
3653 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3656 struct timespec64 t;
3657 kernel_siginfo_t info;
3660 if (sigsetsize != sizeof(sigset_t))
3663 if (get_compat_sigset(&s, uthese))
3667 if (get_timespec64(&t, uts))
3671 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3673 if (ret > 0 && uinfo) {
3674 if (copy_siginfo_to_user32(uinfo, &info))
3681 #ifdef CONFIG_COMPAT_32BIT_TIME
3682 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3683 struct compat_siginfo __user *, uinfo,
3684 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3687 struct timespec64 t;
3688 kernel_siginfo_t info;
3691 if (sigsetsize != sizeof(sigset_t))
3694 if (get_compat_sigset(&s, uthese))
3698 if (get_old_timespec32(&t, uts))
3702 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3704 if (ret > 0 && uinfo) {
3705 if (copy_siginfo_to_user32(uinfo, &info))
3714 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3716 clear_siginfo(info);
3717 info->si_signo = sig;
3719 info->si_code = SI_USER;
3720 info->si_pid = task_tgid_vnr(current);
3721 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3725 * sys_kill - send a signal to a process
3726 * @pid: the PID of the process
3727 * @sig: signal to be sent
3729 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3731 struct kernel_siginfo info;
3733 prepare_kill_siginfo(sig, &info);
3735 return kill_something_info(sig, &info, pid);
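/*
 * Illustrative userspace sketch (not part of signal.c): signal 0 runs
 * only the permission and existence checks (see the "null signal" note
 * in do_send_specific() below); nothing is delivered. The helper name
 * is ours.
 */
#if 0 /* userspace example */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

static int pid_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;
	return errno == EPERM;	/* exists, but we may not signal it */
}

int main(void)
{
	printf("pid 1 exists: %d\n", pid_exists(1));	/* 1 */
	return 0;
}
#endif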
3739 * Verify that the signaler and signalee either are in the same pid namespace
3740 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3741 * namespace.
3743 static bool access_pidfd_pidns(struct pid *pid)
3745 struct pid_namespace *active = task_active_pid_ns(current);
3746 struct pid_namespace *p = ns_of_pid(pid);
3759 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3761 #ifdef CONFIG_COMPAT
3763 * Avoid hooking up compat syscalls and instead handle necessary
3764 * conversions here. Note, this is a stop-gap measure and should not be
3765 * considered a generic solution.
3767 if (in_compat_syscall())
3768 return copy_siginfo_from_user32(
3769 kinfo, (struct compat_siginfo __user *)info);
3771 return copy_siginfo_from_user(kinfo, info);
3774 static struct pid *pidfd_to_pid(const struct file *file)
3778 pid = pidfd_pid(file);
3782 return tgid_pidfd_to_pid(file);
3786 * sys_pidfd_send_signal - Signal a process through a pidfd
3787 * @pidfd: file descriptor of the process
3788 * @sig: signal to send
3789 * @info: signal info
3790 * @flags: future flags
3792 * The syscall currently only signals via PIDTYPE_PID which covers
3793 * kill(<positive-pid>, <signal>). It does not signal threads or process
3794 * groups.
3795 * In order to extend the syscall to threads and process groups the @flags
3796 * argument should be used. In essence, the @flags argument will determine
3797 * what is signaled and not the file descriptor itself. Put in other words,
3798 * grouping is a property of the flags argument not a property of the file
3799 * descriptor.
3801 * Return: 0 on success, negative errno on failure
3803 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3804 siginfo_t __user *, info, unsigned int, flags)
3809 kernel_siginfo_t kinfo;
3811 /* Enforce flags be set to 0 until we add an extension. */
3819 /* Is this a pidfd? */
3820 pid = pidfd_to_pid(f.file);
3827 if (!access_pidfd_pidns(pid))
3831 ret = copy_siginfo_from_user_any(&kinfo, info);
3836 if (unlikely(sig != kinfo.si_signo))
3839 /* Only allow sending arbitrary signals to yourself. */
3841 if ((task_pid(current) != pid) &&
3842 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3845 prepare_kill_siginfo(sig, &kinfo);
3848 ret = kill_pid_info(sig, &kinfo, pid);
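/*
 * Illustrative userspace sketch (not part of signal.c): a race-free
 * kill via a pidfd. The descriptor pins the struct pid, so the signal
 * cannot land on a recycled PID. Raw syscall(2) is used on the
 * assumption that libc wrappers may be absent; SYS_pidfd_open needs
 * Linux 5.3 and SYS_pidfd_send_signal needs 5.1.
 */
#if 0 /* userspace example */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();
	int pidfd = syscall(SYS_pidfd_open, pid, 0);

	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}
	/* flags must be 0 for now, as enforced above */
	if (syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0) {
		perror("pidfd_send_signal");
		return 1;
	}
	return 0;
}
#endif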
3856 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3858 struct task_struct *p;
3862 p = find_task_by_vpid(pid);
3863 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3864 error = check_kill_permission(sig, info, p);
3866 * The null signal is a permissions and process existence
3867 * probe. No signal is actually delivered.
3869 if (!error && sig) {
3870 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3872 * If lock_task_sighand() failed we pretend the task
3873 * dies after receiving the signal. The window is tiny,
3874 * and the signal is private anyway.
3876 if (unlikely(error == -ESRCH))
3885 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3887 struct kernel_siginfo info;
3889 clear_siginfo(&info);
3890 info.si_signo = sig;
3892 info.si_code = SI_TKILL;
3893 info.si_pid = task_tgid_vnr(current);
3894 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3896 return do_send_specific(tgid, pid, sig, &info);
3900 * sys_tgkill - send signal to one specific thread
3901 * @tgid: the thread group ID of the thread
3902 * @pid: the PID of the thread
3903 * @sig: signal to be sent
3905 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3906 * exists but no longer belongs to the target process. This
3907 * method solves the problem of threads exiting and PIDs getting reused.
3909 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3911 /* This is only valid for single tasks */
3912 if (pid <= 0 || tgid <= 0)
3915 return do_tkill(tgid, pid, sig);
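/*
 * Illustrative userspace sketch (not part of signal.c): directing a
 * signal at one thread with tgkill(). Passing the tgid alongside the
 * tid is what lets the kernel return -ESRCH if the tid has been
 * recycled into a different process, as the comment above explains.
 */
#if 0 /* userspace example */
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	pid_t tid = syscall(SYS_gettid);

	signal(SIGUSR1, SIG_IGN);
	if (syscall(SYS_tgkill, getpid(), tid, SIGUSR1) < 0)
		perror("tgkill");
	return 0;
}
#endif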
3919 * sys_tkill - send signal to one specific task
3920 * @pid: the PID of the task
3921 * @sig: signal to be sent
3923 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3925 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3927 /* This is only valid for single tasks */
3931 return do_tkill(0, pid, sig);
3934 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3936 /* Not even root can pretend to send signals from the kernel.
3937 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3939 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3940 (task_pid_vnr(current) != pid))
3943 /* POSIX.1b doesn't mention process groups. */
3944 return kill_proc_info(sig, info, pid);
3948 * sys_rt_sigqueueinfo - send signal information to a process
3949 * @pid: the PID of the thread
3950 * @sig: signal to be sent
3951 * @uinfo: signal info to be sent
3953 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3954 siginfo_t __user *, uinfo)
3956 kernel_siginfo_t info;
3957 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3960 return do_rt_sigqueueinfo(pid, sig, &info);
3963 #ifdef CONFIG_COMPAT
3964 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3967 struct compat_siginfo __user *, uinfo)
3969 kernel_siginfo_t info;
3970 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3973 return do_rt_sigqueueinfo(pid, sig, &info);
3977 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3979 /* This is only valid for single tasks */
3980 if (pid <= 0 || tgid <= 0)
3983 /* Not even root can pretend to send signals from the kernel.
3984 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3986 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3987 (task_pid_vnr(current) != pid))
3990 return do_send_specific(tgid, pid, sig, info);
3993 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3994 siginfo_t __user *, uinfo)
3996 kernel_siginfo_t info;
3997 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4000 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4003 #ifdef CONFIG_COMPAT
4004 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4008 struct compat_siginfo __user *, uinfo)
4010 kernel_siginfo_t info;
4011 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4014 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4019 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
4021 void kernel_sigaction(int sig, __sighandler_t action)
4023 spin_lock_irq(&current->sighand->siglock);
4024 current->sighand->action[sig - 1].sa.sa_handler = action;
4025 if (action == SIG_IGN) {
4029 sigaddset(&mask, sig);
4031 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4032 flush_sigqueue_mask(&mask, &current->pending);
4033 recalc_sigpending();
4035 spin_unlock_irq(&current->sighand->siglock);
4037 EXPORT_SYMBOL(kernel_sigaction);
4039 void __weak sigaction_compat_abi(struct k_sigaction *act,
4040 struct k_sigaction *oact)
4044 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4046 struct task_struct *p = current, *t;
4047 struct k_sigaction *k;
4050 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4053 k = &p->sighand->action[sig-1];
4055 spin_lock_irq(&p->sighand->siglock);
4059 sigaction_compat_abi(act, oact);
4062 sigdelsetmask(&act->sa.sa_mask,
4063 sigmask(SIGKILL) | sigmask(SIGSTOP));
4067 * "Setting a signal action to SIG_IGN for a signal that is
4068 * pending shall cause the pending signal to be discarded,
4069 * whether or not it is blocked."
4071 * "Setting a signal action to SIG_DFL for a signal that is
4072 * pending and whose default action is to ignore the signal
4073 * (for example, SIGCHLD), shall cause the pending signal to
4074 * be discarded, whether or not it is blocked"
4076 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4078 sigaddset(&mask, sig);
4079 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4080 for_each_thread(p, t)
4081 flush_sigqueue_mask(&mask, &t->pending);
4085 spin_unlock_irq(&p->sighand->siglock);
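/*
 * Illustrative userspace sketch (not part of signal.c): the POSIX rule
 * quoted above in action. Switching the action to SIG_IGN discards a
 * pending signal even though it is blocked (flush_sigqueue_mask()).
 */
#if 0 /* userspace example */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);
	raise(SIGUSR1);			/* pending and blocked */

	signal(SIGUSR1, SIG_IGN);	/* discards the pending signal */

	sigpending(&pending);
	printf("still pending: %d\n", sigismember(&pending, SIGUSR1)); /* 0 */
	return 0;
}
#endif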
4090 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4093 struct task_struct *t = current;
4096 memset(oss, 0, sizeof(stack_t));
4097 oss->ss_sp = (void __user *) t->sas_ss_sp;
4098 oss->ss_size = t->sas_ss_size;
4099 oss->ss_flags = sas_ss_flags(sp) |
4100 (current->sas_ss_flags & SS_FLAG_BITS);
4104 void __user *ss_sp = ss->ss_sp;
4105 size_t ss_size = ss->ss_size;
4106 unsigned ss_flags = ss->ss_flags;
4109 if (unlikely(on_sig_stack(sp)))
4112 ss_mode = ss_flags & ~SS_FLAG_BITS;
4113 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4117 if (ss_mode == SS_DISABLE) {
4121 if (unlikely(ss_size < min_ss_size))
4125 t->sas_ss_sp = (unsigned long) ss_sp;
4126 t->sas_ss_size = ss_size;
4127 t->sas_ss_flags = ss_flags;
4132 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4136 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4138 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4139 current_user_stack_pointer(),
4141 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
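/*
 * Illustrative userspace sketch (not part of signal.c): installing an
 * alternate stack via the syscall above, then asking for SIGSEGV to be
 * handled on it with SA_ONSTACK - the usual way to survive a stack
 * overflow long enough to report it.
 */
#if 0 /* userspace example */
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
	/* We are running on the alternate stack here. */
	write(2, "SIGSEGV on altstack\n", 20);
	_exit(1);
}

int main(void)
{
	stack_t ss = { .ss_size = SIGSTKSZ };
	struct sigaction sa = { .sa_handler = on_segv, .sa_flags = SA_ONSTACK };

	ss.ss_sp = malloc(ss.ss_size);
	sigaltstack(&ss, NULL);

	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	raise(SIGSEGV);
	return 0;
}
#endif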
4146 int restore_altstack(const stack_t __user *uss)
4149 if (copy_from_user(&new, uss, sizeof(stack_t)))
4151 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4153 /* squash all but EFAULT for now */
4157 int __save_altstack(stack_t __user *uss, unsigned long sp)
4159 struct task_struct *t = current;
4160 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4161 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4162 __put_user(t->sas_ss_size, &uss->ss_size);
4165 if (t->sas_ss_flags & SS_AUTODISARM)
4170 #ifdef CONFIG_COMPAT
4171 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4172 compat_stack_t __user *uoss_ptr)
4178 compat_stack_t uss32;
4179 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4181 uss.ss_sp = compat_ptr(uss32.ss_sp);
4182 uss.ss_flags = uss32.ss_flags;
4183 uss.ss_size = uss32.ss_size;
4185 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4186 compat_user_stack_pointer(),
4187 COMPAT_MINSIGSTKSZ);
4188 if (ret >= 0 && uoss_ptr) {
4190 memset(&old, 0, sizeof(old));
4191 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4192 old.ss_flags = uoss.ss_flags;
4193 old.ss_size = uoss.ss_size;
4194 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4200 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4201 const compat_stack_t __user *, uss_ptr,
4202 compat_stack_t __user *, uoss_ptr)
4204 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4207 int compat_restore_altstack(const compat_stack_t __user *uss)
4209 int err = do_compat_sigaltstack(uss, NULL);
4210 /* squash all but -EFAULT for now */
4211 return err == -EFAULT ? err : 0;
4214 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4217 struct task_struct *t = current;
4218 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4220 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4221 __put_user(t->sas_ss_size, &uss->ss_size);
4224 if (t->sas_ss_flags & SS_AUTODISARM)
4230 #ifdef __ARCH_WANT_SYS_SIGPENDING
4233 * sys_sigpending - examine pending signals
4234 * @uset: where mask of pending signals is returned
4236 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4240 if (sizeof(old_sigset_t) > sizeof(*uset))
4243 do_sigpending(&set);
4245 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4251 #ifdef CONFIG_COMPAT
4252 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4256 do_sigpending(&set);
4258 return put_user(set.sig[0], set32);
4264 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4266 * sys_sigprocmask - examine and change blocked signals
4267 * @how: whether to add, remove, or set signals
4268 * @nset: signals to add or remove (if non-null)
4269 * @oset: previous value of signal mask if non-null
4271 * Some platforms have their own version with special arguments;
4272 * others support only sys_rt_sigprocmask.
4275 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4276 old_sigset_t __user *, oset)
4278 old_sigset_t old_set, new_set;
4279 sigset_t new_blocked;
4281 old_set = current->blocked.sig[0];
4284 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4287 new_blocked = current->blocked;
4291 sigaddsetmask(&new_blocked, new_set);
4294 sigdelsetmask(&new_blocked, new_set);
4297 new_blocked.sig[0] = new_set;
4303 set_current_blocked(&new_blocked);
4307 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4313 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4315 #ifndef CONFIG_ODD_RT_SIGACTION
4317 * sys_rt_sigaction - alter an action taken by a process
4318 * @sig: signal to be sent
4319 * @act: new sigaction
4320 * @oact: used to save the previous sigaction
4321 * @sigsetsize: size of sigset_t type
4323 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4324 const struct sigaction __user *, act,
4325 struct sigaction __user *, oact,
4328 struct k_sigaction new_sa, old_sa;
4331 /* XXX: Don't preclude handling different sized sigset_t's. */
4332 if (sigsetsize != sizeof(sigset_t))
4335 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4338 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4342 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4347 #ifdef CONFIG_COMPAT
4348 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4349 const struct compat_sigaction __user *, act,
4350 struct compat_sigaction __user *, oact,
4351 compat_size_t, sigsetsize)
4353 struct k_sigaction new_ka, old_ka;
4354 #ifdef __ARCH_HAS_SA_RESTORER
4355 compat_uptr_t restorer;
4359 /* XXX: Don't preclude handling different sized sigset_t's. */
4360 if (sigsetsize != sizeof(compat_sigset_t))
4364 compat_uptr_t handler;
4365 ret = get_user(handler, &act->sa_handler);
4366 new_ka.sa.sa_handler = compat_ptr(handler);
4367 #ifdef __ARCH_HAS_SA_RESTORER
4368 ret |= get_user(restorer, &act->sa_restorer);
4369 new_ka.sa.sa_restorer = compat_ptr(restorer);
4371 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4372 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4377 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4379 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4381 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4382 sizeof(oact->sa_mask));
4383 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4384 #ifdef __ARCH_HAS_SA_RESTORER
4385 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4386 &oact->sa_restorer);
4392 #endif /* !CONFIG_ODD_RT_SIGACTION */
4394 #ifdef CONFIG_OLD_SIGACTION
4395 SYSCALL_DEFINE3(sigaction, int, sig,
4396 const struct old_sigaction __user *, act,
4397 struct old_sigaction __user *, oact)
4399 struct k_sigaction new_ka, old_ka;
4404 if (!access_ok(act, sizeof(*act)) ||
4405 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4406 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4407 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4408 __get_user(mask, &act->sa_mask))
4410 #ifdef __ARCH_HAS_KA_RESTORER
4411 new_ka.ka_restorer = NULL;
4413 siginitset(&new_ka.sa.sa_mask, mask);
4416 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4419 if (!access_ok(oact, sizeof(*oact)) ||
4420 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4421 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4422 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4423 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4430 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4431 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4432 const struct compat_old_sigaction __user *, act,
4433 struct compat_old_sigaction __user *, oact)
4435 struct k_sigaction new_ka, old_ka;
4437 compat_old_sigset_t mask;
4438 compat_uptr_t handler, restorer;
4441 if (!access_ok(act, sizeof(*act)) ||
4442 __get_user(handler, &act->sa_handler) ||
4443 __get_user(restorer, &act->sa_restorer) ||
4444 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4445 __get_user(mask, &act->sa_mask))
4448 #ifdef __ARCH_HAS_KA_RESTORER
4449 new_ka.ka_restorer = NULL;
4451 new_ka.sa.sa_handler = compat_ptr(handler);
4452 new_ka.sa.sa_restorer = compat_ptr(restorer);
4453 siginitset(&new_ka.sa.sa_mask, mask);
4456 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4459 if (!access_ok(oact, sizeof(*oact)) ||
4460 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4461 &oact->sa_handler) ||
4462 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4463 &oact->sa_restorer) ||
4464 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4465 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4472 #ifdef CONFIG_SGETMASK_SYSCALL
4475 * For backwards compatibility. Functionality superseded by sigprocmask.
4477 SYSCALL_DEFINE0(sgetmask)
4480 return current->blocked.sig[0];
4483 SYSCALL_DEFINE1(ssetmask, int, newmask)
4485 int old = current->blocked.sig[0];
4488 siginitset(&newset, newmask);
4489 set_current_blocked(&newset);
4493 #endif /* CONFIG_SGETMASK_SYSCALL */
4495 #ifdef __ARCH_WANT_SYS_SIGNAL
4497 * For backwards compatibility. Functionality superseded by sigaction.
4499 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4501 struct k_sigaction new_sa, old_sa;
4504 new_sa.sa.sa_handler = handler;
4505 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4506 sigemptyset(&new_sa.sa.sa_mask);
4508 ret = do_sigaction(sig, &new_sa, &old_sa);
4510 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4512 #endif /* __ARCH_WANT_SYS_SIGNAL */
4514 #ifdef __ARCH_WANT_SYS_PAUSE
4516 SYSCALL_DEFINE0(pause)
4518 while (!signal_pending(current)) {
4519 __set_current_state(TASK_INTERRUPTIBLE);
4522 return -ERESTARTNOHAND;
4527 static int sigsuspend(sigset_t *set)
4529 current->saved_sigmask = current->blocked;
4530 set_current_blocked(set);
4532 while (!signal_pending(current)) {
4533 __set_current_state(TASK_INTERRUPTIBLE);
4536 set_restore_sigmask();
4537 return -ERESTARTNOHAND;
4541 * sys_rt_sigsuspend - replace the signal mask with the @unewset
4542 * value until a signal is received
4543 * @unewset: new signal mask value
4544 * @sigsetsize: size of sigset_t type
4546 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4550 /* XXX: Don't preclude handling different sized sigset_t's. */
4551 if (sigsetsize != sizeof(sigset_t))
4554 if (copy_from_user(&newset, unewset, sizeof(newset)))
4556 return sigsuspend(&newset);
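/*
 * Illustrative userspace sketch (not part of signal.c): the race-free
 * wait that sigsuspend() enables. Block the signal, test the flag, then
 * atomically swap in the old mask and sleep; a bare pause() could miss
 * a signal that lands between the test and the sleep.
 */
#if 0 /* userspace example */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t done;

static void on_usr1(int sig)
{
	done = 1;
}

int main(void)
{
	struct sigaction sa = { .sa_handler = on_usr1 };
	sigset_t block, orig;

	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &orig);
	while (!done)
		sigsuspend(&orig);	/* always returns -1/EINTR */
	sigprocmask(SIG_SETMASK, &orig, NULL);
	printf("got SIGUSR1\n");
	return 0;
}
#endif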
4559 #ifdef CONFIG_COMPAT
4560 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4564 /* XXX: Don't preclude handling different sized sigset_t's. */
4565 if (sigsetsize != sizeof(sigset_t))
4568 if (get_compat_sigset(&newset, unewset))
4570 return sigsuspend(&newset);
4574 #ifdef CONFIG_OLD_SIGSUSPEND
4575 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4578 siginitset(&blocked, mask);
4579 return sigsuspend(&blocked);
4582 #ifdef CONFIG_OLD_SIGSUSPEND3
4583 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4586 siginitset(&blocked, mask);
4587 return sigsuspend(&blocked);
4591 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4596 static inline void siginfo_buildtime_checks(void)
4598 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4600 /* Verify the offsets in the two siginfos match */
4601 #define CHECK_OFFSET(field) \
4602 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4605 CHECK_OFFSET(si_pid);
4606 CHECK_OFFSET(si_uid);
4609 CHECK_OFFSET(si_tid);
4610 CHECK_OFFSET(si_overrun);
4611 CHECK_OFFSET(si_value);
4614 CHECK_OFFSET(si_pid);
4615 CHECK_OFFSET(si_uid);
4616 CHECK_OFFSET(si_value);
4619 CHECK_OFFSET(si_pid);
4620 CHECK_OFFSET(si_uid);
4621 CHECK_OFFSET(si_status);
4622 CHECK_OFFSET(si_utime);
4623 CHECK_OFFSET(si_stime);
4626 CHECK_OFFSET(si_addr);
4627 CHECK_OFFSET(si_addr_lsb);
4628 CHECK_OFFSET(si_lower);
4629 CHECK_OFFSET(si_upper);
4630 CHECK_OFFSET(si_pkey);
4633 CHECK_OFFSET(si_band);
4634 CHECK_OFFSET(si_fd);
4637 CHECK_OFFSET(si_call_addr);
4638 CHECK_OFFSET(si_syscall);
4639 CHECK_OFFSET(si_arch);
4643 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4644 offsetof(struct siginfo, si_addr));
4645 if (sizeof(int) == sizeof(void __user *)) {
4646 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4647 sizeof(void __user *));
4649 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4650 sizeof_field(struct siginfo, si_uid)) !=
4651 sizeof(void __user *));
4652 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4653 offsetof(struct siginfo, si_uid));
4655 #ifdef CONFIG_COMPAT
4656 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4657 offsetof(struct compat_siginfo, si_addr));
4658 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4659 sizeof(compat_uptr_t));
4660 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4661 sizeof_field(struct siginfo, si_pid));
4665 void __init signals_init(void)
4667 siginfo_buildtime_checks();
4669 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4672 #ifdef CONFIG_KGDB_KDB
4673 #include <linux/kdb.h>
4675 * kdb_send_sig - Allows kdb to send signals without exposing
4676 * signal internals. This function checks if the required locks are
4677 * available before calling the main signal code, to avoid kdb
4678 * deadlocks.
4680 void kdb_send_sig(struct task_struct *t, int sig)
4682 static struct task_struct *kdb_prev_t;
4684 if (!spin_trylock(&t->sighand->siglock)) {
4685 kdb_printf("Can't do kill command now.\n"
4686 "The sigmask lock is held somewhere else in "
4687 "kernel, try again later\n");
4690 new_t = kdb_prev_t != t;
4692 if (t->state != TASK_RUNNING && new_t) {
4693 spin_unlock(&t->sighand->siglock);
4694 kdb_printf("Process is not RUNNING, sending a signal from "
4695 "kdb risks deadlock\n"
4696 "on the run queue locks. "
4697 "The signal has _not_ been sent.\n"
4698 "Reissue the kill command if you want to risk "
4702 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4703 spin_unlock(&t->sighand->siglock);
4705 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4708 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4710 #endif /* CONFIG_KGDB_KDB */