bpf: Remove bpf_selem_free_fields*_rcu
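
Instead of freeing a selem's special fields from a dedicated RCU
callback, call bpf_obj_free_fields() synchronously in
bpf_selem_unlink_storage_nolock(): at that point the selem has already
been unlinked from the map bucket and from the owner's local_storage,
and smap (and hence smap->map.record) is still valid, so the fields can
be released before the selem itself is handed to RCU. This makes
bpf_selem_free_fields_rcu() and bpf_selem_free_fields_trace_rcu() dead
code, and since no remaining callback dereferences smap->map.record,
the rcu_barrier_tasks_trace()/rcu_barrier() pair in
bpf_local_storage_map_free() can be removed as well.

The use_trace_rcu argument is also replaced by reuse_now, with the
inverted meaning (reuse_now == !use_trace_rcu): the map-free and
owner-destroy paths, where no bpf program can still be accessing the
storage, pass reuse_now == true so the selem is freed with kfree_rcu()
instead of waiting for the slower tasks-trace RCU grace period.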
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 5585dbf..715deaa 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -109,27 +109,6 @@ static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
                kfree_rcu(local_storage, rcu);
 }
 
-static void bpf_selem_free_fields_rcu(struct rcu_head *rcu)
-{
-       struct bpf_local_storage_elem *selem;
-       struct bpf_local_storage_map *smap;
-
-       selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
-       /* protected by the rcu_barrier*() */
-       smap = rcu_dereference_protected(SDATA(selem)->smap, true);
-       bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
-       kfree(selem);
-}
-
-static void bpf_selem_free_fields_trace_rcu(struct rcu_head *rcu)
-{
-       /* Free directly if Tasks Trace RCU GP also implies RCU GP */
-       if (rcu_trace_implies_rcu_gp())
-               bpf_selem_free_fields_rcu(rcu);
-       else
-               call_rcu(rcu, bpf_selem_free_fields_rcu);
-}
-
 static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
 {
        struct bpf_local_storage_elem *selem;
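
For contrast, the surviving bpf_selem_free_trace_rcu() only has to free
the selem itself. A sketch of its shape at this point in the file --
unchanged by this patch, body paraphrased from the context above:

        static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
        {
                struct bpf_local_storage_elem *selem;

                selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
                /* A Tasks Trace RCU GP may or may not also imply a
                 * normal RCU GP: free directly if it does, otherwise
                 * chain a kfree_rcu() so normal RCU readers of selem
                 * are done too.
                 */
                if (rcu_trace_implies_rcu_gp())
                        kfree(selem);
                else
                        kfree_rcu(selem, rcu);
        }

The removed _fields variants were this same dispatch plus a
bpf_obj_free_fields() call, which the hunks below move to the unlink
path.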
@@ -147,11 +126,10 @@ static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
  */
 static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
                                            struct bpf_local_storage_elem *selem,
-                                           bool uncharge_mem, bool use_trace_rcu)
+                                           bool uncharge_mem, bool reuse_now)
 {
        struct bpf_local_storage_map *smap;
        bool free_local_storage;
-       struct btf_record *rec;
        void *owner;
 
        smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
@@ -192,26 +170,11 @@ static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
            SDATA(selem))
                RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
 
-       /* A different RCU callback is chosen whenever we need to free
-        * additional fields in selem data before freeing selem.
-        * bpf_local_storage_map_free only executes rcu_barrier to wait for RCU
-        * callbacks when it has special fields, hence we can only conditionally
-        * dereference smap, as by this time the map might have already been
-        * freed without waiting for our call_rcu callback if it did not have
-        * any special fields.
-        */
-       rec = smap->map.record;
-       if (use_trace_rcu) {
-               if (!IS_ERR_OR_NULL(rec))
-                       call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_fields_trace_rcu);
-               else
-                       call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
-       } else {
-               if (!IS_ERR_OR_NULL(rec))
-                       call_rcu(&selem->rcu, bpf_selem_free_fields_rcu);
-               else
-                       kfree_rcu(selem, rcu);
-       }
+       bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
+       if (!reuse_now)
+               call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
+       else
+               kfree_rcu(selem, rcu);
 
        if (rcu_access_pointer(local_storage->smap) == smap)
                RCU_INIT_POINTER(local_storage->smap, NULL);
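
This is the core of the change: the special fields are released
synchronously, while smap (and therefore smap->map.record) is
guaranteed valid, and only a plain free of the selem is deferred to
RCU. A sleepable bpf program under tasks-trace RCU may still be
reading SDATA(selem)->data at this point, which is why
bpf_obj_free_fields() must never leave a dangling pointer behind. A
simplified sketch of what it does -- referenced kptrs only, other
field types and the exact upstream body elided:

        void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
        {
                int i;

                if (IS_ERR_OR_NULL(rec))        /* value has no special fields */
                        return;
                for (i = 0; i < rec->cnt; i++) {
                        const struct btf_field *field = &rec->fields[i];
                        void *field_ptr = obj + field->offset;

                        switch (field->type) {
                        case BPF_KPTR_REF: {
                                /* xchg() the kptr out so a concurrent
                                 * reader observes NULL, never a pointer
                                 * to a freed object.
                                 */
                                void *p = (void *)xchg((unsigned long *)field_ptr, 0);

                                if (p)
                                        field->kptr.dtor(p);
                                break;
                        }
                        default:
                                /* timers, list heads, ... */
                                break;
                        }
                }
        }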
@@ -220,7 +183,7 @@ static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
 }
 
 static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
-                                    bool use_trace_rcu)
+                                    bool reuse_now)
 {
        struct bpf_local_storage *local_storage;
        bool free_local_storage = false;
@@ -235,11 +198,11 @@ static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
        raw_spin_lock_irqsave(&local_storage->lock, flags);
        if (likely(selem_linked_to_storage(selem)))
                free_local_storage = bpf_selem_unlink_storage_nolock(
-                       local_storage, selem, true, use_trace_rcu);
+                       local_storage, selem, true, reuse_now);
        raw_spin_unlock_irqrestore(&local_storage->lock, flags);
 
        if (free_local_storage) {
-               if (use_trace_rcu)
+               if (!reuse_now)
                        call_rcu_tasks_trace(&local_storage->rcu,
                                     bpf_local_storage_free_rcu);
                else
@@ -284,14 +247,14 @@ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
        raw_spin_unlock_irqrestore(&b->lock, flags);
 }
 
-void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
+void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
 {
        /* Always unlink from map before unlinking from local_storage
         * because selem will be freed after successfully unlinked from
         * the local_storage.
         */
        bpf_selem_unlink_map(selem);
-       bpf_selem_unlink_storage(selem, use_trace_rcu);
+       bpf_selem_unlink_storage(selem, reuse_now);
 }
 
 /* If cacheit_lockit is false, this lookup function is lockless */
@@ -538,7 +501,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
        if (old_sdata) {
                bpf_selem_unlink_map(SELEM(old_sdata));
                bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
-                                               false, true);
+                                               false, false);
        }
 
 unlock:
@@ -651,7 +614,7 @@ void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
                 * of the loop will set the free_cgroup_storage to true.
                 */
                free_storage = bpf_selem_unlink_storage_nolock(
-                       local_storage, selem, false, false);
+                       local_storage, selem, false, true);
        }
        raw_spin_unlock_irqrestore(&local_storage->lock, flags);
 
@@ -745,7 +708,7 @@ void bpf_local_storage_map_free(struct bpf_map *map,
                                migrate_disable();
                                this_cpu_inc(*busy_counter);
                        }
-                       bpf_selem_unlink(selem, false);
+                       bpf_selem_unlink(selem, true);
                        if (busy_counter) {
                                this_cpu_dec(*busy_counter);
                                migrate_enable();
@@ -769,26 +732,6 @@ void bpf_local_storage_map_free(struct bpf_map *map,
         */
        synchronize_rcu();
 
-       /* Only delay freeing of smap, buckets are not needed anymore */
        kvfree(smap->buckets);
-
-       /* When local storage has special fields, callbacks for
-        * bpf_selem_free_fields_rcu and bpf_selem_free_fields_trace_rcu will
-        * keep using the map BTF record, we need to execute an RCU barrier to
-        * wait for them as the record will be freed right after our map_free
-        * callback.
-        */
-       if (!IS_ERR_OR_NULL(smap->map.record)) {
-               rcu_barrier_tasks_trace();
-               /* We cannot skip rcu_barrier() when rcu_trace_implies_rcu_gp()
-                * is true, because while call_rcu invocation is skipped in that
-                * case in bpf_selem_free_fields_trace_rcu (and all local
-                * storage maps pass use_trace_rcu = true), there can be
-                * call_rcu callbacks based on use_trace_rcu = false in the
-                * while ((selem = ...)) loop above or when owner's free path
-                * calls bpf_local_storage_unlink_nolock.
-                */
-               rcu_barrier();
-       }
        bpf_map_area_free(smap);
 }
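
With no RCU callback left that touches smap->map.record, teardown only
has to wait for normal RCU readers of the buckets, which the
synchronize_rcu() above already does. Note the distinction the removed
comment was working around: synchronize_rcu() waits for read-side
critical sections, while rcu_barrier() waits for previously queued
call_rcu() callbacks to finish -- and the remaining selem callbacks
only kfree() the selem, never dereferencing the map's btf_record. The
resulting tail of bpf_local_storage_map_free(), paraphrased:

        /* Wait for bpf programs still walking a bucket list under
         * rcu_read_lock(); after this the buckets can be freed.  No
         * rcu_barrier() is needed before freeing smap->map.record.
         */
        synchronize_rcu();

        kvfree(smap->buckets);
        bpf_map_area_free(smap);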