/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
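/*
 * Illustrative sketch, not part of the original file: how the
 * PENDING()/has_pending_signals() arithmetic behaves. A signal that is
 * both pending and blocked contributes no "ready" bits, so on its own
 * it never causes TIF_SIGPENDING to be set.
 */
static int __maybe_unused has_pending_signals_example(void)
{
	sigset_t pending, blocked;

	sigemptyset(&pending);
	sigemptyset(&blocked);
	sigaddset(&pending, SIGINT);	/* SIGINT is pending ... */
	sigaddset(&blocked, SIGINT);	/* ... but also blocked */

	/* pending &~ blocked leaves no bits set, so this returns 0. */
	return has_pending_signals(&pending, &blocked);
}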
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * may do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
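/*
 * Illustrative sketch, not part of the original file: with both SIGUSR1
 * and SIGSEGV pending in the first word, next_signal() reports the
 * synchronous SIGSEGV first, even though SIGUSR1 has the lower signal
 * number on most architectures.
 */
static int __maybe_unused next_signal_example(void)
{
	struct sigpending pending;
	sigset_t mask;

	INIT_LIST_HEAD(&pending.list);
	sigemptyset(&pending.signal);
	sigemptyset(&mask);			/* nothing blocked */
	sigaddset(&pending.signal, SIGUSR1);
	sigaddset(&pending.signal, SIGSEGV);

	/* Returns SIGSEGV: SYNCHRONOUS_MASK signals are dequeued first. */
	return next_signal(&pending, &mask);
}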
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking; @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif /* CONFIG_POSIX_TIMERS */
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
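/*
 * Illustrative sketch, not part of the original file: how execve()
 * effectively uses the helper above. With force_default == 0, caught
 * handlers are reset to SIG_DFL while SIG_IGN dispositions survive the
 * exec, which is why an exec'd child can still inherit an ignored
 * SIGCHLD or SIGPIPE.
 */
static void __maybe_unused exec_flush_example(void)
{
	flush_signal_handlers(current, 0);	/* 0: preserve SIG_IGN */
}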
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info);
	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
}
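/*
 * Illustrative sketch, not part of the original file: the canonical
 * dequeue pattern, close to what do_sigtimedwait() does. The mask
 * argument names the signals that must *not* be dequeued, so a caller
 * that wants to wait for a particular set inverts it first.
 */
static int __maybe_unused dequeue_signal_example(const sigset_t *which,
						 siginfo_t *info)
{
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig;

	signotset(&mask);	/* dequeue only signals inside *which */

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	spin_unlock_irq(&tsk->sighand->siglock);

	return sig;
}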
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, the STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
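/*
 * Illustrative sketch, not part of the original file: the translation
 * the helper above performs. A sender uid with no mapping in the
 * receiver's user namespace degrades to that namespace's overflowuid,
 * which is the documented from_kuid_munged() behaviour.
 */
static uid_t __maybe_unused si_uid_translate_example(struct task_struct *t,
						     kuid_t sender)
{
	return from_kuid_munged(task_cred_xxx(t, user_ns), sender);
}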
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
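/*
 * Illustrative sketch, not part of the original file: how architecture
 * page-fault handlers typically feed force_sig_info(). The SEGV_MAPERR
 * code and the faulting address are assumed example values.
 */
static void __maybe_unused fault_force_sig_example(unsigned long address)
{
	struct siginfo info;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;		/* no mapping at that address */
	info.si_addr = (void __user *)address;

	force_sig_info(SIGSEGV, &info, current);
}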
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
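/*
 * Illustrative sketch, not part of the original file: kill(2) is a thin
 * wrapper around the helper above; the real SYSCALL_DEFINE2(kill) fills
 * in an SI_USER siginfo much like this before delegating.
 */
static int __maybe_unused kill_example(pid_t pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return kill_something_info(sig, &info, pid);
}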
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
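/*
 * Illustrative sketch, not part of the original file: roughly what the
 * tty layer does for ^C - deliver SIGINT to the foreground process
 * group. "fg_pgrp" is a hypothetical struct pid pointer for that group.
 */
static void __maybe_unused tty_intr_example(struct pid *fg_pgrp)
{
	if (fg_pgrp)
		kill_pgrp(fg_pgrp, SIGINT, 1);	/* priv != 0: send as kernel */
}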
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
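/*
 * Illustrative sketch, not part of the original file: the preallocation
 * pattern described above, roughly as the posix-timers code uses it.
 * Allocation failure is reported at creation time (EAGAIN) instead of a
 * notification being silently lost at expiry time.
 */
static int __maybe_unused sigqueue_prealloc_example(struct task_struct *t)
{
	struct sigqueue *q = sigqueue_alloc();	/* at timer_create() time */

	if (!q)
		return -EAGAIN;

	q->info.si_signo = SIGALRM;
	q->info.si_code = SI_TIMER;

	/*
	 * At each expiry the same entry is re-sent; a return of 1 means
	 * the signal was ignored, negative means no usable sighand.
	 */
	send_sigqueue(q, t, 1);

	sigqueue_free(q);			/* at timer deletion time */
	return 0;
}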
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
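/*
 * Illustrative sketch, not part of the original file: how callers encode
 * the exit_code checked by the BUG_ON() above - a ptrace event number in
 * the high bits with SIGTRAP in the low seven, as ptrace_event() does.
 */
static void __maybe_unused ptrace_notify_example(void)
{
	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
}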
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, the lower eight bits of exit_code
 * contain the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with the stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
static int ptrace_signal(int signr, siginfo_t *info)
{
	ptrace_signal_deliver();
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr.  This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop().  In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping.  See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig? */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
int get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	if (unlikely(current->task_works))
		task_work_run();

	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
2324 * signal_delivered - update state after a signal has been delivered
2325 * @ksig: kernel signal struct
2326 * @stepping: nonzero if debugger single-step or block-step in use
2328 * This function should be called when a signal has successfully been
2329 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2330 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2331 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2333 static void signal_delivered(struct ksignal *ksig, int stepping)
2337 /* A signal was successfully delivered, and the
2338 saved sigmask was stored on the signal frame,
2339 and will be restored by sigreturn. So we can
2340 simply clear the restore sigmask flag. */
2341 clear_restore_sigmask();
2343 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2344 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2345 sigaddset(&blocked, ksig->sig);
2346 set_current_blocked(&blocked);
2347 tracehook_signal_handler(stepping);
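/*
 * A minimal userspace sketch of the blocking behaviour described above;
 * on_sigint() is a hypothetical handler name. With SA_NODEFER set, the
 * signal itself is not added to the blocked set while its handler runs.
 */
#if 0
#include <signal.h>
#include <stdio.h>

static void on_sigint(int sig)
{
	sigset_t cur;

	/* Inspect the mask the kernel installed for the handler. */
	sigprocmask(SIG_SETMASK, NULL, &cur);
	/* Prints 0 because of SA_NODEFER; 1 without it. */
	printf("SIGINT blocked in handler: %d\n", sigismember(&cur, SIGINT));
}

int main(void)
{
	struct sigaction sa;

	sa.sa_handler = on_sigint;
	sa.sa_flags = SA_NODEFER;	/* do not block SIGINT itself */
	sigemptyset(&sa.sa_mask);	/* nothing extra via sa_mask */
	sigaction(SIGINT, &sa, NULL);
	raise(SIGINT);
	return 0;
}
#endif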
2350 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2353 force_sigsegv(ksig->sig, current);
2355 signal_delivered(ksig, stepping);
2359 * It could be that complete_signal() picked us to notify about the
2360 * group-wide signal. Other threads should be notified now to take
2361 * the shared signals in @which since we will not.
2363 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2366 struct task_struct *t;
2368 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2369 if (sigisemptyset(&retarget))
2373 while_each_thread(tsk, t) {
2374 if (t->flags & PF_EXITING)
2377 if (!has_pending_signals(&retarget, &t->blocked))
2379 /* Remove the signals this thread can handle. */
2380 sigandsets(&retarget, &retarget, &t->blocked);
2382 if (!signal_pending(t))
2383 signal_wake_up(t, 0);
2385 if (sigisemptyset(&retarget))
2390 void exit_signals(struct task_struct *tsk)
2396 * @tsk is about to have PF_EXITING set - lock out users which
2397 * expect a stable threadgroup.
2399 cgroup_threadgroup_change_begin(tsk);
2401 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2402 tsk->flags |= PF_EXITING;
2403 cgroup_threadgroup_change_end(tsk);
2407 spin_lock_irq(&tsk->sighand->siglock);
2409 * From now this task is not visible for group-wide signals,
2410 * see wants_signal(), do_signal_stop().
2412 tsk->flags |= PF_EXITING;
2414 cgroup_threadgroup_change_end(tsk);
2416 if (!signal_pending(tsk))
2419 unblocked = tsk->blocked;
2420 signotset(&unblocked);
2421 retarget_shared_pending(tsk, &unblocked);
2423 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2424 task_participate_group_stop(tsk))
2425 group_stop = CLD_STOPPED;
2427 spin_unlock_irq(&tsk->sighand->siglock);
2430 * If group stop has completed, deliver the notification. This
2431 * should always go to the real parent of the group leader.
2433 if (unlikely(group_stop)) {
2434 read_lock(&tasklist_lock);
2435 do_notify_parent_cldstop(tsk, false, group_stop);
2436 read_unlock(&tasklist_lock);
2440 EXPORT_SYMBOL(recalc_sigpending);
2441 EXPORT_SYMBOL_GPL(dequeue_signal);
2442 EXPORT_SYMBOL(flush_signals);
2443 EXPORT_SYMBOL(force_sig);
2444 EXPORT_SYMBOL(send_sig);
2445 EXPORT_SYMBOL(send_sig_info);
2446 EXPORT_SYMBOL(sigprocmask);
2449 * System call entry points.
2453 * sys_restart_syscall - restart a system call
2455 SYSCALL_DEFINE0(restart_syscall)
2457 struct restart_block *restart = &current->restart_block;
2458 return restart->fn(restart);
2461 long do_no_restart_syscall(struct restart_block *param)
2466 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2468 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2469 sigset_t newblocked;
2470 /* A set of now blocked but previously unblocked signals. */
2471 sigandnsets(&newblocked, newset, &current->blocked);
2472 retarget_shared_pending(tsk, &newblocked);
2474 tsk->blocked = *newset;
2475 recalc_sigpending();
2479 * set_current_blocked - change current->blocked mask
2482 * It is wrong to change ->blocked directly, this helper should be used
2483 * to ensure the process can't miss a shared signal we are going to block.
2485 void set_current_blocked(sigset_t *newset)
2487 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2488 __set_current_blocked(newset);
2491 void __set_current_blocked(const sigset_t *newset)
2493 struct task_struct *tsk = current;
2496 * In case the signal mask hasn't changed, there is nothing we need
2497 * to do. current->blocked shouldn't be modified by any other task.
2499 if (sigequalsets(&tsk->blocked, newset))
2502 spin_lock_irq(&tsk->sighand->siglock);
2503 __set_task_blocked(tsk, newset);
2504 spin_unlock_irq(&tsk->sighand->siglock);
2508 * This is also useful for kernel threads that want to temporarily
2509 * (or permanently) block certain signals.
2511 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2512 * interface happily blocks "unblockable" signals like SIGKILL
2515 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2517 struct task_struct *tsk = current;
2520 /* Lockless, only current can change ->blocked, never from irq */
2522 *oldset = tsk->blocked;
2526 sigorsets(&newset, &tsk->blocked, set);
2529 sigandnsets(&newset, &tsk->blocked, set);
2538 __set_current_blocked(&newset);
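/*
 * A minimal userspace sketch of the three "how" modes handled above,
 * using the sigprocmask(2) wrapper. SIGKILL/SIGSTOP requests are
 * silently dropped by the kernel, just as set_current_blocked() does.
 */
#if 0
#include <signal.h>

int main(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);

	sigprocmask(SIG_BLOCK, &set, &old);	/* blocked |= set  */
	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* blocked &= ~set */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* blocked  = old  */
	return 0;
}
#endif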
2543 * sys_rt_sigprocmask - change the list of currently blocked signals
2544 * @how: whether to add, remove, or set signals
2545 * @nset: signals to add, remove, or set (if non-null)
2546 * @oset: previous value of signal mask if non-null
2547 * @sigsetsize: size of sigset_t type
2549 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2550 sigset_t __user *, oset, size_t, sigsetsize)
2552 sigset_t old_set, new_set;
2555 /* XXX: Don't preclude handling different sized sigset_t's. */
2556 if (sigsetsize != sizeof(sigset_t))
2559 old_set = current->blocked;
2562 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2564 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2566 error = sigprocmask(how, &new_set, NULL);
2572 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2579 #ifdef CONFIG_COMPAT
2580 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2581 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2584 sigset_t old_set = current->blocked;
2586 /* XXX: Don't preclude handling different sized sigset_t's. */
2587 if (sigsetsize != sizeof(sigset_t))
2591 compat_sigset_t new32;
2594 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2597 sigset_from_compat(&new_set, &new32);
2598 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2600 error = sigprocmask(how, &new_set, NULL);
2605 compat_sigset_t old32;
2606 sigset_to_compat(&old32, &old_set);
2607 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2612 return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2613 (sigset_t __user *)oset, sigsetsize);
2618 static int do_sigpending(void *set, unsigned long sigsetsize)
2620 if (sigsetsize > sizeof(sigset_t))
2623 spin_lock_irq(&current->sighand->siglock);
2624 sigorsets(set, &current->pending.signal,
2625 &current->signal->shared_pending.signal);
2626 spin_unlock_irq(&current->sighand->siglock);
2628 /* Outside the lock because only this thread touches it. */
2629 sigandsets(set, &current->blocked, set);
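/*
 * A minimal userspace sketch of the set computed above, using the
 * sigpending(2) wrapper: block SIGUSR1, raise it, and observe it in
 * the pending set.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);			/* stays pending while blocked */
	sigpending(&pending);
	printf("SIGUSR1 pending: %d\n", sigismember(&pending, SIGUSR1));
	return 0;
}
#endif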
2634 * sys_rt_sigpending - examine a pending signal that has been raised while blocked
2636 * @uset: stores pending signals
2637 * @sigsetsize: size of sigset_t type or larger
2639 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2642 int err = do_sigpending(&set, sigsetsize);
2643 if (!err && copy_to_user(uset, &set, sigsetsize))
2648 #ifdef CONFIG_COMPAT
2649 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2650 compat_size_t, sigsetsize)
2654 int err = do_sigpending(&set, sigsetsize);
2656 compat_sigset_t set32;
2657 sigset_to_compat(&set32, &set);
2658 /* we can get here only if sigsetsize <= sizeof(set) */
2659 if (copy_to_user(uset, &set32, sigsetsize))
2664 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2669 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2671 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2675 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2677 if (from->si_code < 0)
2678 return __copy_to_user(to, from, sizeof(siginfo_t)) ? -EFAULT : 0;
2681 * If you change the siginfo_t structure, please be sure
2682 * this code is fixed accordingly.
2683 * Please remember to update the signalfd_copyinfo() function
2684 * inside fs/signalfd.c too, in case siginfo_t changes.
2685 * It should never copy any pad contained in the structure
2686 * to avoid security leaks, but must copy the generic
2687 * 3 ints plus the relevant union member.
2689 err = __put_user(from->si_signo, &to->si_signo);
2690 err |= __put_user(from->si_errno, &to->si_errno);
2691 err |= __put_user((short)from->si_code, &to->si_code);
2692 switch (from->si_code & __SI_MASK) {
2694 err |= __put_user(from->si_pid, &to->si_pid);
2695 err |= __put_user(from->si_uid, &to->si_uid);
2698 err |= __put_user(from->si_tid, &to->si_tid);
2699 err |= __put_user(from->si_overrun, &to->si_overrun);
2700 err |= __put_user(from->si_ptr, &to->si_ptr);
2703 err |= __put_user(from->si_band, &to->si_band);
2704 err |= __put_user(from->si_fd, &to->si_fd);
2707 err |= __put_user(from->si_addr, &to->si_addr);
2708 #ifdef __ARCH_SI_TRAPNO
2709 err |= __put_user(from->si_trapno, &to->si_trapno);
2711 #ifdef BUS_MCEERR_AO
2713 * Other callers might not initialize the si_lsb field,
2714 * so check explicitly for the right codes here.
2716 if (from->si_signo == SIGBUS &&
2717 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2718 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2721 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2722 err |= __put_user(from->si_lower, &to->si_lower);
2723 err |= __put_user(from->si_upper, &to->si_upper);
2727 if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
2728 err |= __put_user(from->si_pkey, &to->si_pkey);
2732 err |= __put_user(from->si_pid, &to->si_pid);
2733 err |= __put_user(from->si_uid, &to->si_uid);
2734 err |= __put_user(from->si_status, &to->si_status);
2735 err |= __put_user(from->si_utime, &to->si_utime);
2736 err |= __put_user(from->si_stime, &to->si_stime);
2738 case __SI_RT: /* This is not generated by the kernel as of now. */
2739 case __SI_MESGQ: /* But this is */
2740 err |= __put_user(from->si_pid, &to->si_pid);
2741 err |= __put_user(from->si_uid, &to->si_uid);
2742 err |= __put_user(from->si_ptr, &to->si_ptr);
2744 #ifdef __ARCH_SIGSYS
2746 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2747 err |= __put_user(from->si_syscall, &to->si_syscall);
2748 err |= __put_user(from->si_arch, &to->si_arch);
2751 default: /* this is just in case for now ... */
2752 err |= __put_user(from->si_pid, &to->si_pid);
2753 err |= __put_user(from->si_uid, &to->si_uid);
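/*
 * A minimal userspace sketch of consuming the fields marshalled above;
 * on_sig() is a hypothetical handler name. An SA_SIGINFO handler
 * receives the si_pid/si_uid pair copied out for SI_USER signals.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_sig(int sig, siginfo_t *info, void *ucontext)
{
	printf("sig %d from pid %d uid %d\n",
	       sig, (int)info->si_pid, (int)info->si_uid);
}

int main(void)
{
	struct sigaction sa;

	sa.sa_sigaction = on_sig;
	sa.sa_flags = SA_SIGINFO;	/* three-argument handler */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);
	kill(getpid(), SIGUSR1);	/* generates an SI_USER siginfo */
	return 0;
}
#endif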
2762 * do_sigtimedwait - wait for queued signals specified in @which
2763 * @which: queued signals to wait for
2764 * @info: if non-null, the signal's siginfo is returned here
2765 * @ts: upper bound on process time suspension
2767 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2768 const struct timespec *ts)
2770 ktime_t *to = NULL, timeout = KTIME_MAX;
2771 struct task_struct *tsk = current;
2772 sigset_t mask = *which;
2776 if (!timespec_valid(ts))
2778 timeout = timespec_to_ktime(*ts);
2783 * Invert the set of allowed signals to get those we want to block.
2785 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2788 spin_lock_irq(&tsk->sighand->siglock);
2789 sig = dequeue_signal(tsk, &mask, info);
2790 if (!sig && timeout) {
2792 * None ready; temporarily unblock those we're interested in
2793 * while we are sleeping, so that we'll be awakened when
2794 * they arrive. Unblocking is always fine; we can avoid
2795 * set_current_blocked().
2797 tsk->real_blocked = tsk->blocked;
2798 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2799 recalc_sigpending();
2800 spin_unlock_irq(&tsk->sighand->siglock);
2802 __set_current_state(TASK_INTERRUPTIBLE);
2803 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
2805 spin_lock_irq(&tsk->sighand->siglock);
2806 __set_task_blocked(tsk, &tsk->real_blocked);
2807 sigemptyset(&tsk->real_blocked);
2808 sig = dequeue_signal(tsk, &mask, info);
2810 spin_unlock_irq(&tsk->sighand->siglock);
2814 return ret ? -EINTR : -EAGAIN;
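/*
 * A minimal userspace sketch of the -EINTR/-EAGAIN contract above,
 * using the sigtimedwait(2) wrapper: the signal must be blocked first,
 * then waited for synchronously with a timeout.
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { 1, 0 };	/* one second */

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* must be blocked first */

	if (sigtimedwait(&set, &info, &ts) < 0)
		perror("sigtimedwait");	/* EAGAIN: timeout; EINTR: interrupted */
	else
		printf("got signal %d\n", info.si_signo);
	return 0;
}
#endif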
2818 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
2820 * @uthese: queued signals to wait for
2821 * @uinfo: if non-null, the signal's siginfo is returned here
2822 * @uts: upper bound on process time suspension
2823 * @sigsetsize: size of sigset_t type
2825 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2826 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2834 /* XXX: Don't preclude handling different sized sigset_t's. */
2835 if (sigsetsize != sizeof(sigset_t))
2838 if (copy_from_user(&these, uthese, sizeof(these)))
2842 if (copy_from_user(&ts, uts, sizeof(ts)))
2846 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2848 if (ret > 0 && uinfo) {
2849 if (copy_siginfo_to_user(uinfo, &info))
2857 * sys_kill - send a signal to a process
2858 * @pid: the PID of the process
2859 * @sig: signal to be sent
2861 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2863 struct siginfo info;
2865 info.si_signo = sig;
2867 info.si_code = SI_USER;
2868 info.si_pid = task_tgid_vnr(current);
2869 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2871 return kill_something_info(sig, &info, pid);
2875 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2877 struct task_struct *p;
2881 p = find_task_by_vpid(pid);
2882 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2883 error = check_kill_permission(sig, info, p);
2885 * The null signal is a permissions and process existence
2886 * probe. No signal is actually delivered.
2888 if (!error && sig) {
2889 error = do_send_sig_info(sig, info, p, false);
2891 * If lock_task_sighand() failed we pretend the task
2892 * dies after receiving the signal. The window is tiny,
2893 * and the signal is private anyway.
2895 if (unlikely(error == -ESRCH))
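/*
 * A minimal userspace sketch of the null-signal probe described above,
 * using the kill(2) wrapper; process_exists() is a hypothetical helper
 * name. Signal 0 performs the permission and existence checks without
 * delivering anything.
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <sys/types.h>

static int process_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;		/* exists and we may signal it */
	return errno == EPERM;		/* EPERM: exists; ESRCH: gone */
}
#endif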
2904 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2906 struct siginfo info = {};
2908 info.si_signo = sig;
2910 info.si_code = SI_TKILL;
2911 info.si_pid = task_tgid_vnr(current);
2912 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2914 return do_send_specific(tgid, pid, sig, &info);
2918 * sys_tgkill - send signal to one specific thread
2919 * @tgid: the thread group ID of the thread
2920 * @pid: the PID of the thread
2921 * @sig: signal to be sent
2923 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2924 * exists but no longer belongs to the target process. This
2925 * solves the problem of threads exiting and PIDs getting reused.
2927 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2929 /* This is only valid for single tasks */
2930 if (pid <= 0 || tgid <= 0)
2933 return do_tkill(tgid, pid, sig);
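/*
 * A minimal userspace sketch of the tgid check described above, using
 * syscall(2) with SYS_tgkill (older glibc has no tgkill() wrapper);
 * tgkill_thread() is a hypothetical helper name.
 */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int tgkill_thread(pid_t tgid, pid_t tid, int sig)
{
	/* Fails with ESRCH if tid no longer belongs to tgid. */
	return syscall(SYS_tgkill, tgid, tid, sig);
}
#endif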
2937 * sys_tkill - send signal to one specific task
2938 * @pid: the PID of the task
2939 * @sig: signal to be sent
2941 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2943 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2945 /* This is only valid for single tasks */
2949 return do_tkill(0, pid, sig);
2952 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2954 /* Not even root can pretend to send signals from the kernel.
2955 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2957 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
2958 (task_pid_vnr(current) != pid))
2961 info->si_signo = sig;
2963 /* POSIX.1b doesn't mention process groups. */
2964 return kill_proc_info(sig, info, pid);
2968 * sys_rt_sigqueueinfo - send signal information to a process
2969 * @pid: the PID of the thread
2970 * @sig: signal to be sent
2971 * @uinfo: signal info to be sent
2973 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2974 siginfo_t __user *, uinfo)
2977 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2979 return do_rt_sigqueueinfo(pid, sig, &info);
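/*
 * A minimal userspace sketch of queueing a payload, using the glibc
 * sigqueue(3) wrapper, which reaches this syscall with si_code set to
 * the negative SI_QUEUE value so the check above passes.
 */
#if 0
#include <signal.h>
#include <unistd.h>

int main(void)
{
	union sigval val;

	val.sival_int = 42;
	/* The receiver sees si_code == SI_QUEUE and si_value == val. */
	return sigqueue(getpid(), SIGUSR1, val);
}
#endif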
2982 #ifdef CONFIG_COMPAT
2983 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
2986 struct compat_siginfo __user *, uinfo)
2988 siginfo_t info = {};
2989 int ret = copy_siginfo_from_user32(&info, uinfo);
2992 return do_rt_sigqueueinfo(pid, sig, &info);
2996 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2998 /* This is only valid for single tasks */
2999 if (pid <= 0 || tgid <= 0)
3002 /* Not even root can pretend to send signals from the kernel.
3003 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3005 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3006 (task_pid_vnr(current) != pid))
3009 info->si_signo = sig;
3011 return do_send_specific(tgid, pid, sig, info);
3014 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3015 siginfo_t __user *, uinfo)
3019 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3022 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3025 #ifdef CONFIG_COMPAT
3026 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3030 struct compat_siginfo __user *, uinfo)
3032 siginfo_t info = {};
3034 if (copy_siginfo_from_user32(&info, uinfo))
3036 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3041 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3043 void kernel_sigaction(int sig, __sighandler_t action)
3045 spin_lock_irq(&current->sighand->siglock);
3046 current->sighand->action[sig - 1].sa.sa_handler = action;
3047 if (action == SIG_IGN) {
3051 sigaddset(&mask, sig);
3053 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3054 flush_sigqueue_mask(&mask, &current->pending);
3055 recalc_sigpending();
3057 spin_unlock_irq(&current->sighand->siglock);
3059 EXPORT_SYMBOL(kernel_sigaction);
3061 void __weak sigaction_compat_abi(struct k_sigaction *act,
3062 struct k_sigaction *oact)
3066 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3068 struct task_struct *p = current, *t;
3069 struct k_sigaction *k;
3072 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3075 k = &p->sighand->action[sig-1];
3077 spin_lock_irq(&p->sighand->siglock);
3081 sigaction_compat_abi(act, oact);
3084 sigdelsetmask(&act->sa.sa_mask,
3085 sigmask(SIGKILL) | sigmask(SIGSTOP));
3089 * "Setting a signal action to SIG_IGN for a signal that is
3090 * pending shall cause the pending signal to be discarded,
3091 * whether or not it is blocked."
3093 * "Setting a signal action to SIG_DFL for a signal that is
3094 * pending and whose default action is to ignore the signal
3095 * (for example, SIGCHLD), shall cause the pending signal to
3096 * be discarded, whether or not it is blocked"
3098 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3100 sigaddset(&mask, sig);
3101 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3102 for_each_thread(p, t)
3103 flush_sigqueue_mask(&mask, &t->pending);
3107 spin_unlock_irq(&p->sighand->siglock);
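/*
 * A minimal userspace sketch of the POSIX rule quoted above: a blocked,
 * pending SIGUSR1 is discarded the moment its action becomes SIG_IGN.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);			/* now pending and blocked */

	signal(SIGUSR1, SIG_IGN);	/* discards the pending signal */
	sigpending(&pending);
	printf("still pending: %d\n", sigismember(&pending, SIGUSR1));
	return 0;
}
#endif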
3112 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3117 oss.ss_sp = (void __user *) current->sas_ss_sp;
3118 oss.ss_size = current->sas_ss_size;
3119 oss.ss_flags = sas_ss_flags(sp) |
3120 (current->sas_ss_flags & SS_FLAG_BITS);
3129 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3131 error = __get_user(ss_sp, &uss->ss_sp) |
3132 __get_user(ss_flags, &uss->ss_flags) |
3133 __get_user(ss_size, &uss->ss_size);
3138 if (on_sig_stack(sp))
3141 ss_mode = ss_flags & ~SS_FLAG_BITS;
3143 if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK && ss_mode != 0)
3145 goto out;
3147 if (ss_mode == SS_DISABLE) {
3152 if (ss_size < MINSIGSTKSZ)
3156 current->sas_ss_sp = (unsigned long) ss_sp;
3157 current->sas_ss_size = ss_size;
3158 current->sas_ss_flags = ss_flags;
3164 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3166 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3167 __put_user(oss.ss_size, &uoss->ss_size) |
3168 __put_user(oss.ss_flags, &uoss->ss_flags);
3174 SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3176 return do_sigaltstack(uss, uoss, current_user_stack_pointer());
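/*
 * A minimal userspace sketch of installing an alternate stack with the
 * sigaltstack(2) wrapper; on_segv() is a hypothetical handler name.
 * SA_ONSTACK makes SIGSEGV delivery switch to the registered stack,
 * which is how stack-overflow handlers survive.
 */
#if 0
#include <signal.h>
#include <unistd.h>

static char stk[SIGSTKSZ];

static void on_segv(int sig)
{
	_exit(1);		/* runs on the alternate stack */
}

int main(void)
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = stk;
	ss.ss_size = sizeof(stk);
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);		/* register the alternate stack */

	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;	/* deliver SIGSEGV on it */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	return 0;
}
#endif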
3179 int restore_altstack(const stack_t __user *uss)
3181 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3182 /* squash all but -EFAULT for now */
3183 return err == -EFAULT ? err : 0;
3186 int __save_altstack(stack_t __user *uss, unsigned long sp)
3188 struct task_struct *t = current;
3189 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3190 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3191 __put_user(t->sas_ss_size, &uss->ss_size);
3194 if (t->sas_ss_flags & SS_AUTODISARM)
3199 #ifdef CONFIG_COMPAT
3200 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3201 const compat_stack_t __user *, uss_ptr,
3202 compat_stack_t __user *, uoss_ptr)
3209 compat_stack_t uss32;
3211 memset(&uss, 0, sizeof(stack_t));
3212 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3214 uss.ss_sp = compat_ptr(uss32.ss_sp);
3215 uss.ss_flags = uss32.ss_flags;
3216 uss.ss_size = uss32.ss_size;
3220 ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3221 (stack_t __force __user *) &uoss,
3222 compat_user_stack_pointer());
3224 if (ret >= 0 && uoss_ptr) {
3225 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3226 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3227 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3228 __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3234 int compat_restore_altstack(const compat_stack_t __user *uss)
3236 int err = compat_sys_sigaltstack(uss, NULL);
3237 /* squash all but -EFAULT for now */
3238 return err == -EFAULT ? err : 0;
3241 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3244 struct task_struct *t = current;
3245 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3247 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3248 __put_user(t->sas_ss_size, &uss->ss_size);
3251 if (t->sas_ss_flags & SS_AUTODISARM)
3257 #ifdef __ARCH_WANT_SYS_SIGPENDING
3260 * sys_sigpending - examine pending signals
3261 * @set: where the mask of pending signals is returned
3263 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3265 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3270 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3272 * sys_sigprocmask - examine and change blocked signals
3273 * @how: whether to add, remove, or set signals
3274 * @nset: signals to add or remove (if non-null)
3275 * @oset: previous value of signal mask if non-null
3277 * Some platforms have their own version with special arguments;
3278 * others support only sys_rt_sigprocmask.
3281 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3282 old_sigset_t __user *, oset)
3284 old_sigset_t old_set, new_set;
3285 sigset_t new_blocked;
3287 old_set = current->blocked.sig[0];
3290 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3293 new_blocked = current->blocked;
3297 sigaddsetmask(&new_blocked, new_set);
3300 sigdelsetmask(&new_blocked, new_set);
3303 new_blocked.sig[0] = new_set;
3309 set_current_blocked(&new_blocked);
3313 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3319 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3321 #ifndef CONFIG_ODD_RT_SIGACTION
3323 * sys_rt_sigaction - alter an action taken by a process
3324 * @sig: signal to be sent
3325 * @act: new sigaction
3326 * @oact: used to save the previous sigaction
3327 * @sigsetsize: size of sigset_t type
3329 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3330 const struct sigaction __user *, act,
3331 struct sigaction __user *, oact,
3334 struct k_sigaction new_sa, old_sa;
3337 /* XXX: Don't preclude handling different sized sigset_t's. */
3338 if (sigsetsize != sizeof(sigset_t))
3342 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3346 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3349 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3355 #ifdef CONFIG_COMPAT
3356 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3357 const struct compat_sigaction __user *, act,
3358 struct compat_sigaction __user *, oact,
3359 compat_size_t, sigsetsize)
3361 struct k_sigaction new_ka, old_ka;
3362 compat_sigset_t mask;
3363 #ifdef __ARCH_HAS_SA_RESTORER
3364 compat_uptr_t restorer;
3368 /* XXX: Don't preclude handling different sized sigset_t's. */
3369 if (sigsetsize != sizeof(compat_sigset_t))
3373 compat_uptr_t handler;
3374 ret = get_user(handler, &act->sa_handler);
3375 new_ka.sa.sa_handler = compat_ptr(handler);
3376 #ifdef __ARCH_HAS_SA_RESTORER
3377 ret |= get_user(restorer, &act->sa_restorer);
3378 new_ka.sa.sa_restorer = compat_ptr(restorer);
3380 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3381 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3384 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3387 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3389 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3390 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3392 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3393 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3394 #ifdef __ARCH_HAS_SA_RESTORER
3395 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3396 &oact->sa_restorer);
3402 #endif /* !CONFIG_ODD_RT_SIGACTION */
3404 #ifdef CONFIG_OLD_SIGACTION
3405 SYSCALL_DEFINE3(sigaction, int, sig,
3406 const struct old_sigaction __user *, act,
3407 struct old_sigaction __user *, oact)
3409 struct k_sigaction new_ka, old_ka;
3414 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3415 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3416 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3417 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3418 __get_user(mask, &act->sa_mask))
3420 #ifdef __ARCH_HAS_KA_RESTORER
3421 new_ka.ka_restorer = NULL;
3423 siginitset(&new_ka.sa.sa_mask, mask);
3426 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3429 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3430 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3431 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3432 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3433 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3440 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3441 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3442 const struct compat_old_sigaction __user *, act,
3443 struct compat_old_sigaction __user *, oact)
3445 struct k_sigaction new_ka, old_ka;
3447 compat_old_sigset_t mask;
3448 compat_uptr_t handler, restorer;
3451 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3452 __get_user(handler, &act->sa_handler) ||
3453 __get_user(restorer, &act->sa_restorer) ||
3454 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3455 __get_user(mask, &act->sa_mask))
3458 #ifdef __ARCH_HAS_KA_RESTORER
3459 new_ka.ka_restorer = NULL;
3461 new_ka.sa.sa_handler = compat_ptr(handler);
3462 new_ka.sa.sa_restorer = compat_ptr(restorer);
3463 siginitset(&new_ka.sa.sa_mask, mask);
3466 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3469 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3470 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3471 &oact->sa_handler) ||
3472 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3473 &oact->sa_restorer) ||
3474 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3475 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3482 #ifdef CONFIG_SGETMASK_SYSCALL
3485 * For backwards compatibility. Functionality superseded by sigprocmask.
3487 SYSCALL_DEFINE0(sgetmask)
3490 return current->blocked.sig[0];
3493 SYSCALL_DEFINE1(ssetmask, int, newmask)
3495 int old = current->blocked.sig[0];
3498 siginitset(&newset, newmask);
3499 set_current_blocked(&newset);
3503 #endif /* CONFIG_SGETMASK_SYSCALL */
3505 #ifdef __ARCH_WANT_SYS_SIGNAL
3507 * For backwards compatibility. Functionality superseded by sigaction.
3509 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3511 struct k_sigaction new_sa, old_sa;
3514 new_sa.sa.sa_handler = handler;
3515 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3516 sigemptyset(&new_sa.sa.sa_mask);
3518 ret = do_sigaction(sig, &new_sa, &old_sa);
3520 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3522 #endif /* __ARCH_WANT_SYS_SIGNAL */
3524 #ifdef __ARCH_WANT_SYS_PAUSE
3526 SYSCALL_DEFINE0(pause)
3528 while (!signal_pending(current)) {
3529 __set_current_state(TASK_INTERRUPTIBLE);
3532 return -ERESTARTNOHAND;
3537 static int sigsuspend(sigset_t *set)
3539 current->saved_sigmask = current->blocked;
3540 set_current_blocked(set);
3542 while (!signal_pending(current)) {
3543 __set_current_state(TASK_INTERRUPTIBLE);
3546 set_restore_sigmask();
3547 return -ERESTARTNOHAND;
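/*
 * A minimal userspace sketch of the classic race-free wait built on the
 * primitive above; on_usr1() is a hypothetical handler name. Block the
 * signal, test the condition, then atomically unblock and sleep in
 * sigsuspend(2).
 */
#if 0
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	got_usr1 = 1;
}

int main(void)
{
	sigset_t block, old;
	struct sigaction sa;

	sa.sa_handler = on_usr1;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);	/* close the race window */

	while (!got_usr1)
		sigsuspend(&old);	/* unblock + sleep atomically */

	sigprocmask(SIG_SETMASK, &old, NULL);
	return 0;
}
#endif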
3551 * sys_rt_sigsuspend - replace the signal mask with the @unewset
3552 * value until a signal is received
3553 * @unewset: new signal mask value
3554 * @sigsetsize: size of sigset_t type
3556 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3560 /* XXX: Don't preclude handling different sized sigset_t's. */
3561 if (sigsetsize != sizeof(sigset_t))
3564 if (copy_from_user(&newset, unewset, sizeof(newset)))
3566 return sigsuspend(&newset);
3569 #ifdef CONFIG_COMPAT
3570 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3574 compat_sigset_t newset32;
3576 /* XXX: Don't preclude handling different sized sigset_t's. */
3577 if (sigsetsize != sizeof(sigset_t))
3580 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3582 sigset_from_compat(&newset, &newset32);
3583 return sigsuspend(&newset);
3585 /* on little-endian, bitmaps don't care about granularity */
3586 return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3591 #ifdef CONFIG_OLD_SIGSUSPEND
3592 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3595 siginitset(&blocked, mask);
3596 return sigsuspend(&blocked);
3599 #ifdef CONFIG_OLD_SIGSUSPEND3
3600 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3603 siginitset(&blocked, mask);
3604 return sigsuspend(&blocked);
3608 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3613 void __init signals_init(void)
3615 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3616 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3617 != offsetof(struct siginfo, _sifields._pad));
3619 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3622 #ifdef CONFIG_KGDB_KDB
3623 #include <linux/kdb.h>
3625 * kdb_send_sig_info - Allows kdb to send signals without exposing
3626 * signal internals. This function checks if the required locks are
3627 * available before calling the main signal code, to avoid kdb
3628 * deadlocks.
3631 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3633 static struct task_struct *kdb_prev_t;
3635 if (!spin_trylock(&t->sighand->siglock)) {
3636 kdb_printf("Can't do kill command now.\n"
3637 "The sigmask lock is held somewhere else in "
3638 "kernel, try again later\n");
3641 spin_unlock(&t->sighand->siglock);
3642 new_t = kdb_prev_t != t;
3644 if (t->state != TASK_RUNNING && new_t) {
3645 kdb_printf("Process is not RUNNING, sending a signal from "
3646 "kdb risks deadlock\n"
3647 "on the run queue locks. "
3648 "The signal has _not_ been sent.\n"
3649 "Reissue the kill command if you want to risk "
3653 sig = info->si_signo;
3654 if (send_sig_info(sig, info, t))
3655 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3658 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3660 #endif /* CONFIG_KGDB_KDB */