bpf: Re-enable unit_size checking for global per-cpu allocator
author     Hou Tao <houtao1@huawei.com>
           Fri, 20 Oct 2023 13:31:58 +0000 (21:31 +0800)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 25 Jan 2024 23:35:28 +0000 (15:35 -0800)
[ Upstream commit baa8fdecd87bb8751237b45e3bcb5a179e5a12ca ]

With pcpu_alloc_size() in place, check whether the size of the
dynamic per-cpu area matches unit_size.

Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20231020133202.4043247-4-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Stable-dep-of: 7ac5c53e0073 ("bpf: Use c->unit_size to select target cache during free")
Signed-off-by: Sasha Levin <sashal@kernel.org>
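
For context, a minimal sketch of how pcpu_alloc_size() pairs with a
dynamic per-cpu allocation. The demo function and its pr_info() output
are illustrative assumptions; __alloc_percpu(), free_percpu() and
pcpu_alloc_size() are the kernel helpers the patch relies on.

    #include <linux/percpu.h>
    #include <linux/printk.h>

    static void pcpu_size_demo(void)
    {
            /* 16-byte dynamic per-cpu area; dynamic per-cpu memory is
             * handed out at an 8-byte alignment granularity.
             */
            void __percpu *pptr = __alloc_percpu(16, 8);

            if (!pptr)
                    return;
            /* pcpu_alloc_size() reports the size backing the allocation,
             * which check_obj_size() compares against c->unit_size.
             */
            pr_info("pcpu_alloc_size() = %zu\n", pcpu_alloc_size(pptr));
            free_percpu(pptr);
    }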
kernel/bpf/memalloc.c

index 956f80e..9657d59 100644
@@ -491,21 +491,17 @@ static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
        struct llist_node *first;
        unsigned int obj_size;
 
-       /* For per-cpu allocator, the size of free objects in free list doesn't
-        * match with unit_size and now there is no way to get the size of
-        * per-cpu pointer saved in free object, so just skip the checking.
-        */
-       if (c->percpu_size)
-               return 0;
-
        first = c->free_llist.first;
        if (!first)
                return 0;
 
-       obj_size = ksize(first);
+       if (c->percpu_size)
+               obj_size = pcpu_alloc_size(((void **)first)[1]);
+       else
+               obj_size = ksize(first);
        if (obj_size != c->unit_size) {
-               WARN_ONCE(1, "bpf_mem_cache[%u]: unexpected object size %u, expect %u\n",
-                         idx, obj_size, c->unit_size);
+               WARN_ONCE(1, "bpf_mem_cache[%u]: percpu %d, unexpected object size %u, expect %u\n",
+                         idx, c->percpu_size, obj_size, c->unit_size);
                return -EINVAL;
        }
        return 0;
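
The ((void **)first)[1] access above works because, for per-cpu caches,
bpf_mem_alloc stores the saved per-cpu pointer immediately after the
llist_node in each free object. A sketch of that layout, assuming the
field arrangement from memalloc.c (the struct name here is hypothetical):

    #include <linux/llist.h>

    /* Hypothetical name; mirrors the per-cpu object layout in memalloc.c. */
    struct pcpu_free_obj_sketch {
            struct llist_node node;         /* slot 0: free-list linkage */
            void __percpu *pptr;            /* slot 1: saved per-cpu pointer */
    };

Both slots are pointer-sized, so ((void **)first)[1] recovers pptr and
pcpu_alloc_size(pptr) yields the size of its backing area.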
@@ -967,6 +963,12 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
        return !ret ? NULL : ret + LLIST_NODE_SZ;
 }
 
+/* The alignment of dynamic per-cpu area is 8, so c->unit_size and the
+ * actual size of dynamic per-cpu area will always be matched and there is
+ * no need to adjust size_index for per-cpu allocation. However for the
+ * simplicity of the implementation, use a unified size_index for both
+ * kmalloc and per-cpu allocation.
+ */
 static __init int bpf_mem_cache_adjust_size(void)
 {
        unsigned int size;
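
To make the comment in the second hunk concrete, a sketch of why per-cpu
sizes never need the size_index fixup that kmalloc sizes may; the 96/128
rounding is an assumption about configs without a kmalloc-96 cache:

    /* kmalloc() may round a request up to the next cache size, e.g. on a
     * config without a kmalloc-96 cache:
     *
     *     ksize(kmalloc(96, GFP_KERNEL)) == 128
     *
     * so bpf_mem_cache_adjust_size() must redirect such sizes via
     * size_index. Dynamic per-cpu memory is allocated at an 8-byte
     * granularity and every unit_size is already a multiple of 8, so:
     *
     *     pcpu_alloc_size(__alloc_percpu(96, 8)) == 96
     *
     * and obj_size == c->unit_size always holds for per-cpu objects.
     */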