media: dw9807-vcm: Add regulator support to the driver
diff --git a/kernel/signal.c b/kernel/signal.c
index 952741f..0bbd89f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -426,22 +426,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
         */
        rcu_read_lock();
        ucounts = task_ucounts(t);
-       sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
-       switch (sigpending) {
-       case 1:
-               if (likely(get_ucounts(ucounts)))
-                       break;
-               fallthrough;
-       case LONG_MAX:
-               /*
-                * we need to decrease the ucount in the userns tree on any
-                * failure to avoid counts leaking.
-                */
-               dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
-               rcu_read_unlock();
-               return NULL;
-       }
+       sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
        rcu_read_unlock();
+       if (!sigpending)
+               return NULL;
 
        if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
                q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
@@ -450,8 +438,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
        }
 
        if (unlikely(q == NULL)) {
-               if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
-                       put_ucounts(ucounts);
+               dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
        } else {
                INIT_LIST_HEAD(&q->list);
                q->flags = sigqueue_flags;
@@ -464,8 +451,8 @@ static void __sigqueue_free(struct sigqueue *q)
 {
        if (q->flags & SIGQUEUE_PREALLOC)
                return;
-       if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
-               put_ucounts(q->ucounts);
+       if (q->ucounts) {
+               dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
                q->ucounts = NULL;
        }
        kmem_cache_free(sigqueue_cachep, q);
@@ -1311,6 +1298,12 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p
        return ret;
 }
 
+enum sig_handler {
+       HANDLER_CURRENT, /* If reachable use the current handler */
+       HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
+       HANDLER_EXIT,    /* Only visible as the process exit code */
+};
+
 /*
  * Force a signal that the process can't ignore: if necessary
  * we unblock the signal and change any SIG_IGN to SIG_DFL.
@@ -1323,19 +1316,50 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p
  * that is why we also clear SIGNAL_UNKILLABLE.
  */
 static int
-force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool sigdfl)
+force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
+       enum sig_handler handler)
 {
        unsigned long int flags;
        int ret, blocked, ignored;
        struct k_sigaction *action;
        int sig = info->si_signo;
 
+       /*
+        * On some archs, PREEMPT_RT has to delay sending a signal from a trap
+        * since it can not enable preemption, and the signal code's spin_locks
+        * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
+        * send the signal on exit of the trap.
+        */
+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
+       if (in_atomic()) {
+               struct task_struct *t = current;
+
+               if (WARN_ON_ONCE(t->forced_info.si_signo))
+                       return 0;
+
+               if (is_si_special(info)) {
+                       WARN_ON_ONCE(info != SEND_SIG_PRIV);
+                       t->forced_info.si_signo = info->si_signo;
+                       t->forced_info.si_errno = 0;
+                       t->forced_info.si_code = SI_KERNEL;
+                       t->forced_info.si_pid = 0;
+                       t->forced_info.si_uid = 0;
+               } else {
+                       t->forced_info = *info;
+               }
+
+               set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+               return 0;
+       }
+#endif
        spin_lock_irqsave(&t->sighand->siglock, flags);
        action = &t->sighand->action[sig-1];
        ignored = action->sa.sa_handler == SIG_IGN;
        blocked = sigismember(&t->blocked, sig);
-       if (blocked || ignored || sigdfl) {
+       if (blocked || ignored || (handler != HANDLER_CURRENT)) {
                action->sa.sa_handler = SIG_DFL;
+               if (handler == HANDLER_EXIT)
+                       action->sa.sa_flags |= SA_IMMUTABLE;
                if (blocked) {
                        sigdelset(&t->blocked, sig);
                        recalc_sigpending_and_wake(t);
@@ -1343,9 +1367,10 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool
        }
        /*
         * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
-        * debugging to leave init killable.
+        * debugging to leave init killable. But HANDLER_EXIT is always fatal.
         */
-       if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
+       if (action->sa.sa_handler == SIG_DFL &&
+           (!t->ptrace || (handler == HANDLER_EXIT)))
                t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = send_signal(sig, info, t, PIDTYPE_PID);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
@@ -1355,7 +1380,7 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool
 
 int force_sig_info(struct kernel_siginfo *info)
 {
-       return force_sig_info_to_task(info, current, false);
+       return force_sig_info_to_task(info, current, HANDLER_CURRENT);
 }
 
 /*
@@ -1662,6 +1687,32 @@ void force_sig(int sig)
 }
 EXPORT_SYMBOL(force_sig);
 
+void force_fatal_sig(int sig)
+{
+       struct kernel_siginfo info;
+
+       clear_siginfo(&info);
+       info.si_signo = sig;
+       info.si_errno = 0;
+       info.si_code = SI_KERNEL;
+       info.si_pid = 0;
+       info.si_uid = 0;
+       force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
+}
+
+void force_exit_sig(int sig)
+{
+       struct kernel_siginfo info;
+
+       clear_siginfo(&info);
+       info.si_signo = sig;
+       info.si_errno = 0;
+       info.si_code = SI_KERNEL;
+       info.si_pid = 0;
+       info.si_uid = 0;
+       force_sig_info_to_task(&info, current, HANDLER_EXIT);
+}
+
 /*
  * When things go south during signal handling, we
  * will force a SIGSEGV. And if the signal that caused
@@ -1670,15 +1721,10 @@ EXPORT_SYMBOL(force_sig);
  */
 void force_sigsegv(int sig)
 {
-       struct task_struct *p = current;
-
-       if (sig == SIGSEGV) {
-               unsigned long flags;
-               spin_lock_irqsave(&p->sighand->siglock, flags);
-               p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
-               spin_unlock_irqrestore(&p->sighand->siglock, flags);
-       }
-       force_sig(SIGSEGV);
+       if (sig == SIGSEGV)
+               force_fatal_sig(SIGSEGV);
+       else
+               force_sig(SIGSEGV);
 }
 
 int force_sig_fault_to_task(int sig, int code, void __user *addr
@@ -1697,7 +1743,7 @@ int force_sig_fault_to_task(int sig, int code, void __user *addr
        info.si_flags = flags;
        info.si_isr = isr;
 #endif
-       return force_sig_info_to_task(&info, t, false);
+       return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
 }
 
 int force_sig_fault(int sig, int code, void __user *addr
@@ -1784,7 +1830,7 @@ int force_sig_pkuerr(void __user *addr, u32 pkey)
 }
 #endif
 
-int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
+int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
 {
        struct kernel_siginfo info;
 
@@ -1796,7 +1842,18 @@ int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
        info.si_perf_data = sig_data;
        info.si_perf_type = type;
 
-       return force_sig_info(&info);
+       /*
+        * Signals generated by perf events should not terminate the whole
+        * process if SIGTRAP is blocked, however, delivering the signal
+        * asynchronously is better than not delivering at all. But tell user
+        * space if the signal was asynchronous, so it can clearly be
+        * distinguished from normal synchronous ones.
+        */
+       info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
+                                    TRAP_PERF_FLAG_ASYNC :
+                                    0;
+
+       return send_sig_info(info.si_signo, &info, current);
 }
 
 /**
@@ -1817,7 +1874,8 @@ int force_sig_seccomp(int syscall, int reason, bool force_coredump)
        info.si_errno = reason;
        info.si_arch = syscall_get_arch(current);
        info.si_syscall = syscall;
-       return force_sig_info_to_task(&info, current, force_coredump);
+       return force_sig_info_to_task(&info, current,
+               force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
 }
 
 /* For the crazy architectures that include trap information in
@@ -1997,12 +2055,12 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
        bool autoreap = false;
        u64 utime, stime;
 
-       BUG_ON(sig == -1);
+       WARN_ON_ONCE(sig == -1);
 
-       /* do_notify_parent_cldstop should have been called instead.  */
-       BUG_ON(task_is_stopped_or_traced(tsk));
+       /* do_notify_parent_cldstop should have been called instead.  */
+       WARN_ON_ONCE(task_is_stopped_or_traced(tsk));
 
-       BUG_ON(!tsk->ptrace &&
+       WARN_ON_ONCE(!tsk->ptrace &&
               (tsk->group_leader != tsk || !thread_group_empty(tsk)));
 
        /* Wake up all pidfd waiters */
@@ -2182,15 +2240,6 @@ static inline bool may_ptrace_stop(void)
        return true;
 }
 
-/*
- * Return non-zero if there is a SIGKILL that should be waking us up.
- * Called with the siglock held.
- */
-static bool sigkill_pending(struct task_struct *tsk)
-{
-       return sigismember(&tsk->pending.signal, SIGKILL) ||
-              sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
-}
 
 /*
  * This must be called with current->sighand->siglock held.
@@ -2217,17 +2266,16 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
                 * calling arch_ptrace_stop, so we must release it now.
                 * To preserve proper semantics, we must do this before
                 * any signal bookkeeping like checking group_stop_count.
-                * Meanwhile, a SIGKILL could come in before we retake the
-                * siglock.  That must prevent us from sleeping in TASK_TRACED.
-                * So after regaining the lock, we must check for SIGKILL.
                 */
                spin_unlock_irq(&current->sighand->siglock);
                arch_ptrace_stop(exit_code, info);
                spin_lock_irq(&current->sighand->siglock);
-               if (sigkill_pending(current))
-                       return;
        }
 
+       /*
+        * schedule() will not sleep if there is a pending signal that
+        * can awaken the task.
+        */
        set_special_state(TASK_TRACED);
 
        /*
@@ -2288,16 +2336,8 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
                if (gstop_done && ptrace_reparented(current))
                        do_notify_parent_cldstop(current, false, why);
 
-               /*
-                * Don't want to allow preemption here, because
-                * sys_ptrace() needs this task to be inactive.
-                *
-                * XXX: implement read_unlock_no_resched().
-                */
-               preempt_disable();
                read_unlock(&tasklist_lock);
                cgroup_enter_frozen();
-               preempt_enable_no_resched();
                freezable_schedule();
                cgroup_leave_frozen(true);
        } else {
@@ -2701,19 +2741,19 @@ relock:
                goto relock;
        }
 
-       /* Has this task already been marked for death? */
-       if (signal_group_exit(signal)) {
-               ksig->info.si_signo = signr = SIGKILL;
-               sigdelset(&current->pending.signal, SIGKILL);
-               trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
-                               &sighand->action[SIGKILL - 1]);
-               recalc_sigpending();
-               goto fatal;
-       }
-
        for (;;) {
                struct k_sigaction *ka;
 
+               /* Has this task already been marked for death? */
+               if (signal_group_exit(signal)) {
+                       ksig->info.si_signo = signr = SIGKILL;
+                       sigdelset(&current->pending.signal, SIGKILL);
+                       trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
+                               &sighand->action[SIGKILL - 1]);
+                       recalc_sigpending();
+                       goto fatal;
+               }
+
                if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
                    do_signal_stop(0))
                        goto relock;
@@ -2752,7 +2792,8 @@ relock:
                if (!signr)
                        break; /* will return 0 */
 
-               if (unlikely(current->ptrace) && signr != SIGKILL) {
+               if (unlikely(current->ptrace) && (signr != SIGKILL) &&
+                   !(sighand->action[signr-1].sa.sa_flags & SA_IMMUTABLE)) {
                        signr = ptrace_signal(signr, &ksig->info);
                        if (!signr)
                                continue;
@@ -3435,6 +3476,7 @@ void copy_siginfo_to_external32(struct compat_siginfo *to,
                to->si_addr = ptr_to_compat(from->si_addr);
                to->si_perf_data = from->si_perf_data;
                to->si_perf_type = from->si_perf_type;
+               to->si_perf_flags = from->si_perf_flags;
                break;
        case SIL_CHLD:
                to->si_pid = from->si_pid;
@@ -3512,6 +3554,7 @@ static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
                to->si_addr = compat_ptr(from->si_addr);
                to->si_perf_data = from->si_perf_data;
                to->si_perf_type = from->si_perf_type;
+               to->si_perf_flags = from->si_perf_flags;
                break;
        case SIL_CHLD:
                to->si_pid    = from->si_pid;
@@ -4102,6 +4145,10 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
        k = &p->sighand->action[sig-1];
 
        spin_lock_irq(&p->sighand->siglock);
+       if (k->sa.sa_flags & SA_IMMUTABLE) {
+               spin_unlock_irq(&p->sighand->siglock);
+               return -EINVAL;
+       }
        if (oact)
                *oact = *k;
 
@@ -4688,6 +4735,7 @@ static inline void siginfo_buildtime_checks(void)
        CHECK_OFFSET(si_pkey);
        CHECK_OFFSET(si_perf_data);
        CHECK_OFFSET(si_perf_type);
+       CHECK_OFFSET(si_perf_flags);
 
        /* sigpoll */
        CHECK_OFFSET(si_band);