bpf: Add bpf_selem_free()
author Martin KaFai Lau <martin.lau@kernel.org>
Wed, 8 Mar 2023 06:59:28 +0000 (22:59 -0800)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 10 Mar 2023 19:05:28 +0000 (11:05 -0800)
This patch refactors the selem freeing logic into a new helper,
bpf_selem_free(). It is preparation for a later patch that will use
bpf_mem_cache_alloc/free. The remaining kfree(selem) call sites are
also converted to bpf_selem_free(..., reuse_now = true).
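
In short (a sketch of the intended semantics, not part of the patch
itself): reuse_now == false defers the free behind a tasks-trace RCU
grace period, since sleepable bpf progs may still be reading the selem
under rcu_read_lock_trace(); reuse_now == true defers it behind a
regular RCU grace period only. The converted error path in
bpf_local_storage_update() (see the hunk below) then reads:

	selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
	err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
	if (err) {
		/* Could not publish the new storage: free the selem
		 * without waiting for a tasks-trace RCU grace period.
		 */
		bpf_selem_free(selem, smap, true);
		mem_uncharge(smap, owner, smap->elem_size);
		return ERR_PTR(err);
	}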

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20230308065936.1550103-10-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf_local_storage.h
kernel/bpf/bpf_local_storage.c
net/core/bpf_sk_storage.c

diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
index 18a31ad..a34f614 100644
--- a/include/linux/bpf_local_storage.h
+++ b/include/linux/bpf_local_storage.h
@@ -152,6 +152,10 @@ struct bpf_local_storage_elem *
 bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
                bool charge_mem, gfp_t gfp_flags);
 
+void bpf_selem_free(struct bpf_local_storage_elem *selem,
+                   struct bpf_local_storage_map *smap,
+                   bool reuse_now);
+
 int
 bpf_local_storage_alloc(void *owner,
                        struct bpf_local_storage_map *smap,
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 146e9ca..512943a 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -125,6 +125,17 @@ static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
                call_rcu(rcu, bpf_selem_free_rcu);
 }
 
+void bpf_selem_free(struct bpf_local_storage_elem *selem,
+                   struct bpf_local_storage_map *smap,
+                   bool reuse_now)
+{
+       bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
+       if (!reuse_now)
+               call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
+       else
+               call_rcu(&selem->rcu, bpf_selem_free_rcu);
+}
+
 /* local_storage->lock must be held and selem->local_storage == local_storage.
  * The caller must ensure selem->smap is still valid to be
  * dereferenced for its smap->elem_size and smap->cache_idx.
@@ -175,11 +186,7 @@ static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_stor
            SDATA(selem))
                RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
 
-       bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
-       if (!reuse_now)
-               call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
-       else
-               call_rcu(&selem->rcu, bpf_selem_free_rcu);
+       bpf_selem_free(selem, smap, reuse_now);
 
        if (rcu_access_pointer(local_storage->smap) == smap)
                RCU_INIT_POINTER(local_storage->smap, NULL);
@@ -423,7 +430,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
 
                err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
                if (err) {
-                       kfree(selem);
+                       bpf_selem_free(selem, smap, true);
                        mem_uncharge(smap, owner, smap->elem_size);
                        return ERR_PTR(err);
                }
@@ -517,7 +524,7 @@ unlock_err:
        raw_spin_unlock_irqrestore(&local_storage->lock, flags);
        if (selem) {
                mem_uncharge(smap, owner, smap->elem_size);
-               kfree(selem);
+               bpf_selem_free(selem, smap, true);
        }
        return ERR_PTR(err);
 }
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index a5f185b..24c3dc0 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -197,7 +197,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
                } else {
                        ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
                        if (ret) {
-                               kfree(copy_selem);
+                               bpf_selem_free(copy_selem, smap, true);
                                atomic_sub(smap->elem_size,
                                           &newsk->sk_omem_alloc);
                                bpf_map_put(map);