diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index f5bf6fb..df96832 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -384,6 +384,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
 {
        int cpu;
        unsigned long flags;
+       bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
        long n;
        long ncbs = 0;
        long ncbsnz = 0;
@@ -425,21 +426,23 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
                        WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
                        smp_store_release(&rtp->percpu_enqueue_lim, 1);
                        rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
+                       gpdone = false;
                        pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
                }
                raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
        }
-       if (rcu_task_cb_adjust && !ncbsnz &&
-           poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) {
+       if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
                raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
                if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
                        WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
                        pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
                }
-               for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
-                       struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
+               if (rtp->percpu_dequeue_lim == 1) {
+                       for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
+                               struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
 
-                       WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
+                               WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
+                       }
                }
                raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
        }
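
The new gpdone flag caches the result of the RCU polling API before the decision logic runs: get_state_synchronize_rcu() returns a cookie describing the current grace-period state, and poll_state_synchronize_rcu() later reports whether a full grace period has elapsed since that cookie was taken. A minimal sketch of that cookie pattern, using illustrative names that are not part of this patch:

/* Illustrative sketch of the grace-period cookie pattern used above. */
static unsigned long switch_gp_cookie;	/* hypothetical state variable */

static void begin_queue_switch(void)
{
	/* Snapshot the grace-period state at the moment of the switch. */
	switch_gp_cookie = get_state_synchronize_rcu();
}

static bool queue_switch_gp_done(void)
{
	/* True once a full grace period has elapsed since the snapshot. */
	return poll_state_synchronize_rcu(switch_gp_cookie);
}
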
@@ -452,6 +455,7 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
 {
        int cpu;
        int cpunext;
+       int cpuwq;
        unsigned long flags;
        int len;
        struct rcu_head *rhp;
@@ -462,11 +466,13 @@ static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu
        cpunext = cpu * 2 + 1;
        if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
                rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
-               queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
+               cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
+               queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
                cpunext++;
                if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
                        rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
-                       queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
+                       cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
+                       queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
                }
        }
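
In this hunk the work item is bound to cpunext only if that CPU has been fully brought online; otherwise WORK_CPU_UNBOUND lets the workqueue run it on any eligible CPU. A hedged sketch of the same guard in isolation, with an illustrative wrapper name:

/* Illustrative sketch: avoid binding work to a CPU not yet fully online. */
static void queue_on_cpu_or_unbound(struct work_struct *work, int cpu)
{
	int target = rcu_cpu_beenfullyonline(cpu) ? cpu : WORK_CPU_UNBOUND;

	queue_work_on(target, system_wq, work);
}
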
 
@@ -560,8 +566,9 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
 {
        /* Complain if the scheduler has not started.  */
-       WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
-                        "synchronize_rcu_tasks called too soon");
+       if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
+                        "synchronize_%s() called too soon", rtp->name))
+               return;
 
        // If the grace-period kthread is running, use it.
        if (READ_ONCE(rtp->kthread_ptr)) {
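
The rewritten warning relies on WARN_ONCE() evaluating to its condition, so a single statement both prints a one-time warning and guards the early return. A minimal sketch of the idiom, with an illustrative function:

/* Illustrative sketch of the WARN_ONCE()-as-guard idiom used above. */
static int must_be_ready(bool ready)
{
	/* WARN_ONCE() returns true when its condition triggered. */
	if (WARN_ONCE(!ready, "called before initialization"))
		return -EAGAIN;

	return 0;
}
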
@@ -827,11 +834,21 @@ static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
 static void rcu_tasks_postscan(struct list_head *hop)
 {
        /*
-        * Wait for tasks that are in the process of exiting.  This
-        * does only part of the job, ensuring that all tasks that were
-        * previously exiting reach the point where they have disabled
-        * preemption, allowing the later synchronize_rcu() to finish
-        * the job.
+        * Exiting tasks may escape the tasklist scan. Those are vulnerable
+        * until their final schedule() with TASK_DEAD state. To cope with
+        * this, divide the fragile exit path part in two intersecting
+        * read side critical sections:
+        *
+        * 1) An _SRCU_ read side starting before calling exit_notify(),
+        *    which may remove the task from the tasklist, and ending after
+        *    the final preempt_disable() call in do_exit().
+        *
+        * 2) An _RCU_ read side starting with the final preempt_disable()
+        *    call in do_exit() and ending with the final call to schedule()
+        *    with TASK_DEAD state.
+        *
+        * This handles part 1). And postgp will handle part 2) with a
+        * call to synchronize_rcu().
         */
        synchronize_srcu(&tasks_rcu_exit_srcu);
 }
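
The comment above lays out two overlapping read-side critical sections along the exit path. The sketch below paraphrases that ordering in terms of the hooks defined later in this file; it is illustrative only and is not copied from kernel/exit.c:

/*
 * Illustrative sketch only: how the two read-side sections described
 * above are meant to overlap along the exit path.
 */
static void exit_path_overlap_sketch(void)
{
	exit_tasks_rcu_start();		/* 1) SRCU read side begins. */

	/* exit_notify() may remove the task from the tasklist here. */

	preempt_disable();		/* 2) Implicit RCU read side begins. */
	exit_tasks_rcu_finish();	/* 1) SRCU read side ends, still covered by 2). */

	/* The final schedule() with TASK_DEAD state ends section 2). */
}
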
@@ -898,7 +915,10 @@ static void rcu_tasks_postgp(struct rcu_tasks *rtp)
         *
         * In addition, this synchronize_rcu() waits for exiting tasks
         * to complete their final preempt_disable() region of execution,
-        * cleaning up after the synchronize_srcu() above.
+        * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
+        * enforcing the whole region before tasklist removal until
+        * the final schedule() with TASK_DEAD state to be an RCU TASKS
+        * read side critical section.
         */
        synchronize_rcu();
 }
@@ -988,27 +1008,42 @@ void show_rcu_tasks_classic_gp_kthread(void)
 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
 #endif // !defined(CONFIG_TINY_RCU)
 
-/* Do the srcu_read_lock() for the above synchronize_srcu().  */
+/*
+ * Contribute to protect against tasklist scan blind spot while the
+ * task is exiting and may be removed from the tasklist. See
+ * corresponding synchronize_srcu() for further details.
+ */
 void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
 {
-       preempt_disable();
        current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
-       preempt_enable();
 }
 
-/* Do the srcu_read_unlock() for the above synchronize_srcu().  */
-void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
+/*
+ * Contribute to protect against tasklist scan blind spot while the
+ * task is exiting and may be removed from the tasklist. See
+ * corresponding synchronize_srcu() for further details.
+ */
+void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
 {
        struct task_struct *t = current;
 
-       preempt_disable();
        __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
-       preempt_enable();
-       exit_tasks_rcu_finish_trace(t);
+}
+
+/*
+ * Contribute to protect against tasklist scan blind spot while the
+ * task is exiting and may be removed from the tasklist. See
+ * corresponding synchronize_srcu() for further details.
+ */
+void exit_tasks_rcu_finish(void)
+{
+       exit_tasks_rcu_stop();
+       exit_tasks_rcu_finish_trace(current);
 }
 
 #else /* #ifdef CONFIG_TASKS_RCU */
 void exit_tasks_rcu_start(void) { }
+void exit_tasks_rcu_stop(void) { }
 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
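
Because the SRCU read side opened in exit_tasks_rcu_start() is closed in a different function, the index returned by __srcu_read_lock() has to be stashed in current->rcu_tasks_idx. For contrast, the ordinary single-function SRCU read-side pattern keeps the index in a local variable; a minimal sketch with an illustrative srcu_struct:

/* Illustrative sketch of the ordinary single-function SRCU read side. */
DEFINE_SRCU(example_srcu);	/* hypothetical SRCU domain, not from this patch */

static void example_srcu_reader(void)
{
	int idx = srcu_read_lock(&example_srcu);

	/* ... access data protected by example_srcu ... */

	srcu_read_unlock(&example_srcu, idx);
}
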
 
@@ -1036,9 +1071,6 @@ static void rcu_tasks_be_rude(struct work_struct *work)
 // Wait for one rude RCU-tasks grace period.
 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
 {
-       if (num_online_cpus() <= 1)
-               return; // Fastpath for only one CPU.
-
        rtp->n_ipis += cpumask_weight(cpu_online_mask);
        schedule_on_each_cpu(rcu_tasks_be_rude);
 }