rcu/kvfree: Add debug to check grace periods
author	Paul E. McKenney <paulmck@kernel.org>
Mon, 3 Apr 2023 23:49:14 +0000 (16:49 -0700)
committer	Paul E. McKenney <paulmck@kernel.org>
Wed, 10 May 2023 00:26:21 +0000 (17:26 -0700)
This commit adds debugging checks to verify that the required RCU
grace period has elapsed for each kvfree_rcu_bulk_data structure that
arrives at the kvfree_rcu_bulk() function.  These checks make use
of that structure's ->gp_snap field, which has been upgraded from an
unsigned long to an rcu_gp_oldstate structure.  This upgrade reduces
the chances of false positives to nearly zero, even on 32-bit systems,
for which this structure carries 64 bits of state.

Cc: Ziwei Dai <ziwei.dai@unisoc.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
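
For context, the following is a minimal sketch of the pattern the diff below applies: take a full grace-period snapshot when a block is queued, then warn and skip the free if that grace period has not yet elapsed.  This sketch is not part of the patch; the demo_block, demo_queue_block(), and demo_free_block() names are hypothetical, and only the get_state_synchronize_rcu_full(), poll_state_synchronize_rcu_full(), and struct rcu_gp_oldstate usages mirror the actual change to kernel/rcu/tree.c.

	#include <linux/rcupdate.h>
	#include <linux/bug.h>
	#include <linux/slab.h>

	struct demo_block {
		struct rcu_gp_oldstate gp_snap;	/* 64 bits of GP state even on 32-bit kernels. */
		void *ptr;
	};

	/* Producer side: record the grace-period snapshot at enqueue time. */
	static void demo_queue_block(struct demo_block *b, void *ptr)
	{
		b->ptr = ptr;
		get_state_synchronize_rcu_full(&b->gp_snap);
	}

	/* Consumer side: free only if the recorded grace period has elapsed. */
	static void demo_free_block(struct demo_block *b)
	{
		if (WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&b->gp_snap)))
			return;	/* Grace period not yet over: complain, do not free. */
		kfree(b->ptr);
	}

The actual patch applies this check in kvfree_rcu_bulk(), as shown below.
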
kernel/rcu/tree.c

index f52ff7241041666e0bef539351ad24c0ca326926..91d75fd6c579cee58a32adea8fd77a7e6461556e 100644
@@ -2756,7 +2756,7 @@ EXPORT_SYMBOL_GPL(call_rcu);
  */
 struct kvfree_rcu_bulk_data {
        struct list_head list;
-       unsigned long gp_snap;
+       struct rcu_gp_oldstate gp_snap;
        unsigned long nr_records;
        void *records[];
 };
@@ -2921,23 +2921,24 @@ kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
        int i;
 
        debug_rcu_bhead_unqueue(bnode);
-
-       rcu_lock_acquire(&rcu_callback_map);
-       if (idx == 0) { // kmalloc() / kfree().
-               trace_rcu_invoke_kfree_bulk_callback(
-                       rcu_state.name, bnode->nr_records,
-                       bnode->records);
-
-               kfree_bulk(bnode->nr_records, bnode->records);
-       } else { // vmalloc() / vfree().
-               for (i = 0; i < bnode->nr_records; i++) {
-                       trace_rcu_invoke_kvfree_callback(
-                               rcu_state.name, bnode->records[i], 0);
-
-                       vfree(bnode->records[i]);
+       if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {
+               rcu_lock_acquire(&rcu_callback_map);
+               if (idx == 0) { // kmalloc() / kfree().
+                       trace_rcu_invoke_kfree_bulk_callback(
+                               rcu_state.name, bnode->nr_records,
+                               bnode->records);
+
+                       kfree_bulk(bnode->nr_records, bnode->records);
+               } else { // vmalloc() / vfree().
+                       for (i = 0; i < bnode->nr_records; i++) {
+                               trace_rcu_invoke_kvfree_callback(
+                                       rcu_state.name, bnode->records[i], 0);
+
+                               vfree(bnode->records[i]);
+                       }
                }
+               rcu_lock_release(&rcu_callback_map);
        }
-       rcu_lock_release(&rcu_callback_map);
 
        raw_spin_lock_irqsave(&krcp->lock, flags);
        if (put_cached_bnode(krcp, bnode))
@@ -3081,7 +3082,7 @@ kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
                INIT_LIST_HEAD(&bulk_ready[i]);
 
                list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {
-                       if (!poll_state_synchronize_rcu(bnode->gp_snap))
+                       if (!poll_state_synchronize_rcu_full(&bnode->gp_snap))
                                break;
 
                        atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);
@@ -3285,7 +3286,7 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
 
        // Finally insert and update the GP for this page.
        bnode->records[bnode->nr_records++] = ptr;
-       bnode->gp_snap = get_state_synchronize_rcu();
+       get_state_synchronize_rcu_full(&bnode->gp_snap);
        atomic_inc(&(*krcp)->bulk_count[idx]);
 
        return true;