rcu-tasks: Add data structures for lightweight grace periods
author     Paul E. McKenney <paulmck@kernel.org>
           Tue, 17 May 2022 00:56:16 +0000 (17:56 -0700)
committer  Paul E. McKenney <paulmck@kernel.org>
           Mon, 20 Jun 2022 16:22:28 +0000 (09:22 -0700)
This commit adds fields to task_struct and to rcu_tasks_percpu that will
be used to avoid the task-list scan for RCU Tasks Trace grace periods,
and also initializes these fields.
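
To see how the new fields fit together, here is a minimal sketch of the
enqueue path they are meant to enable, written against the structures
added below.  The helper name rcu_tasks_trace_qs_blkd() and its exact
locking are illustrative assumptions, not part of this commit:

/*
 * Hypothetical sketch, not from this commit: queue a task that blocked
 * within an RCU Tasks Trace read-side critical section onto this CPU's
 * rtp_blkd_tasks list, recording the CPU so that the task can later be
 * dequeued from the correct list.
 */
void rcu_tasks_trace_qs_blkd(struct task_struct *t)
{
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;

	local_irq_save(flags);
	rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
	raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
	t->trc_blkd_cpu = smp_processor_id();
	list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}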

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: KP Singh <kpsingh@kernel.org>
include/linux/sched.h
init/init_task.c
kernel/fork.c
kernel/rcu/tasks.h

diff --git a/include/linux/sched.h b/include/linux/sched.h
index e6eb5871593e9fad6d9552a252b3202e0bf83e79..b88caf54e16868c749646679946dbf9e90b280cc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -844,6 +844,8 @@ struct task_struct {
        int                             trc_ipi_to_cpu;
        union rcu_special               trc_reader_special;
        struct list_head                trc_holdout_list;
+       struct list_head                trc_blkd_node;
+       int                             trc_blkd_cpu;
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 
        struct sched_info               sched_info;
diff --git a/init/init_task.c b/init/init_task.c
index 73cc8f03511a3e66720393c42171337f5e64ec14..ff6c4b9bfe6b1c01c0d99d605d5f0c22693470d3 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -157,6 +157,7 @@ struct task_struct init_task
        .trc_reader_nesting = 0,
        .trc_reader_special.s = 0,
        .trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
+       .trc_blkd_node = LIST_HEAD_INIT(init_task.trc_blkd_node),
 #endif
 #ifdef CONFIG_CPUSETS
        .mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
diff --git a/kernel/fork.c b/kernel/fork.c
index 9d44f2d46c6964d5cf7e29e06ad377b03fe25dc2..1950eb87024416de2dd76c2187d28746d4b666a5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1814,6 +1814,7 @@ static inline void rcu_copy_process(struct task_struct *p)
        p->trc_reader_nesting = 0;
        p->trc_reader_special.s = 0;
        INIT_LIST_HEAD(&p->trc_holdout_list);
+       INIT_LIST_HEAD(&p->trc_blkd_node);
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }
 
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 64eb4d7b142e38011c36e657f7edb955ea279fc4..fd4508af055e6b11e4c76c7a42e86a210e3bb10b 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -29,6 +29,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
  * @rtp_work: Work queue for invoking callbacks.
  * @rtp_irq_work: IRQ work queue for deferred wakeups.
  * @barrier_q_head: RCU callback for barrier operation.
+ * @rtp_blkd_tasks: List of tasks blocked as readers.
  * @cpu: CPU number corresponding to this entry.
  * @rtpp: Pointer to the rcu_tasks structure.
  */
@@ -40,6 +41,7 @@ struct rcu_tasks_percpu {
        struct work_struct rtp_work;
        struct irq_work rtp_irq_work;
        struct rcu_head barrier_q_head;
+       struct list_head rtp_blkd_tasks;
        int cpu;
        struct rcu_tasks *rtpp;
 };
@@ -256,6 +258,8 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
                INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
                rtpcp->cpu = cpu;
                rtpcp->rtpp = rtp;
+               if (!rtpcp->rtp_blkd_tasks.next)
+                       INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
                raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
        }
        raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
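
Note that the guarded INIT_LIST_HEAD() above makes initialization
idempotent: a zero-initialized static per-CPU list_head has a NULL
.next pointer, so the check ensures that a repeat call to
cblist_init_generic() cannot empty a list that might already hold
blocked readers.  For completeness, a speculative sketch of the
consumer side these per-CPU lists enable; the helper name
rcu_tasks_trace_collect_blkd() and the splice-based scan are
illustrative assumptions, with the real grace-period logic arriving
in later commits:

/*
 * Hypothetical sketch, not from this commit: instead of scanning the
 * full task list, walk only the per-CPU lists of readers that blocked,
 * splicing each onto a caller-supplied list.  Tasks on the resulting
 * list are reachable via their trc_blkd_node fields, for example with
 * list_for_each_entry(t, hop, trc_blkd_node).
 */
static void rcu_tasks_trace_collect_blkd(struct rcu_tasks *rtp,
					 struct list_head *hop)
{
	int cpu;
	unsigned long flags;

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		list_splice_init(&rtpcp->rtp_blkd_tasks, hop);
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}
}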