bpf: minimize number of allocated lsm slots per program
Author:     Stanislav Fomichev <sdf@google.com>
AuthorDate: Tue, 28 Jun 2022 17:43:07 +0000 (10:43 -0700)
Committer:  Alexei Starovoitov <ast@kernel.org>
CommitDate: Wed, 29 Jun 2022 20:21:52 +0000 (13:21 -0700)
The previous patch added a 1:1 mapping between all 211 LSM hooks
and the bpf_cgroup program array. Instead of reserving a slot per
possible hook, reserve only 10 slots per cgroup for LSM programs.
Those slots are dynamically allocated on demand and reclaimed.

struct cgroup_bpf {
struct bpf_prog_array *    effective[33];        /*     0   264 */
/* --- cacheline 4 boundary (256 bytes) was 8 bytes ago --- */
struct hlist_head          progs[33];            /*   264   264 */
/* --- cacheline 8 boundary (512 bytes) was 16 bytes ago --- */
u8                         flags[33];            /*   528    33 */

/* XXX 7 bytes hole, try to pack */

struct list_head           storages;             /*   568    16 */
/* --- cacheline 9 boundary (576 bytes) was 8 bytes ago --- */
struct bpf_prog_array *    inactive;             /*   584     8 */
struct percpu_ref          refcnt;               /*   592    16 */
struct work_struct         release_work;         /*   608    72 */

/* size: 680, cachelines: 11, members: 7 */
/* sum members: 673, holes: 1, sum holes: 7 */
/* last cacheline: 40 bytes */
};

Reviewed-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/r/20220628174314.1216643-5-sdf@google.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf-cgroup-defs.h
include/linux/bpf.h
include/linux/bpf_lsm.h
kernel/bpf/bpf_lsm.c
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/core.c
kernel/bpf/trampoline.c

index b99f8c3..7b121bd 100644 (file)
@@ -11,7 +11,8 @@
 struct bpf_prog_array;
 
 #ifdef CONFIG_BPF_LSM
-#define CGROUP_LSM_NUM 211 /* will be addressed in the next patch */
+/* Maximum number of concurrently attachable per-cgroup LSM hooks. */
+#define CGROUP_LSM_NUM 10
 #else
 #define CGROUP_LSM_NUM 0
 #endif
index 77cd613..5d2afa5 100644 (file)
@@ -2508,7 +2508,6 @@ int bpf_arch_text_invalidate(void *dst, size_t len);
 
 struct btf_id_set;
 bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
-int btf_id_set_index(const struct btf_id_set *set, u32 id);
 
 #define MAX_BPRINTF_VARARGS            12
 
@@ -2545,4 +2544,12 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
 int bpf_dynptr_check_size(u32 size);
 
+#ifdef CONFIG_BPF_LSM
+void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
+void bpf_cgroup_atype_put(int cgroup_atype);
+#else
+static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
+static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
+#endif /* CONFIG_BPF_LSM */
+
 #endif /* _LINUX_BPF_H */
index 61787a5..4bcf76a 100644 (file)
@@ -42,7 +42,6 @@ extern const struct bpf_func_proto bpf_inode_storage_get_proto;
 extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
 void bpf_inode_storage_free(struct inode *inode);
 
-int bpf_lsm_hook_idx(u32 btf_id);
 void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
 
 #else /* !CONFIG_BPF_LSM */
@@ -73,11 +72,6 @@ static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
 {
 }
 
-static inline int bpf_lsm_hook_idx(u32 btf_id)
-{
-       return -EINVAL;
-}
-
 #endif /* CONFIG_BPF_LSM */
 
 #endif /* _LINUX_BPF_LSM_H */
index 0f72020..83aa431 100644 (file)
@@ -69,11 +69,6 @@ void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
                *bpf_func = __cgroup_bpf_run_lsm_current;
 }
 
-int bpf_lsm_hook_idx(u32 btf_id)
-{
-       return btf_id_set_index(&bpf_lsm_hooks, btf_id);
-}
-
 int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
                        const struct bpf_prog *prog)
 {
index 7c1fe42..8d3c7ab 100644 (file)
@@ -6843,16 +6843,6 @@ static int btf_id_cmp_func(const void *a, const void *b)
        return *pa - *pb;
 }
 
-int btf_id_set_index(const struct btf_id_set *set, u32 id)
-{
-       const u32 *p;
-
-       p = bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func);
-       if (!p)
-               return -1;
-       return p - set->ids;
-}
-
 bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
 {
        return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
index 9cf41dd..169cbd0 100644 (file)
@@ -127,12 +127,57 @@ unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
 }
 
 #ifdef CONFIG_BPF_LSM
+struct cgroup_lsm_atype {
+       u32 attach_btf_id;
+       int refcnt;
+};
+
+static struct cgroup_lsm_atype cgroup_lsm_atype[CGROUP_LSM_NUM];
+
 static enum cgroup_bpf_attach_type
 bpf_cgroup_atype_find(enum bpf_attach_type attach_type, u32 attach_btf_id)
 {
+       int i;
+
+       lockdep_assert_held(&cgroup_mutex);
+
        if (attach_type != BPF_LSM_CGROUP)
                return to_cgroup_bpf_attach_type(attach_type);
-       return CGROUP_LSM_START + bpf_lsm_hook_idx(attach_btf_id);
+
+       for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
+               if (cgroup_lsm_atype[i].attach_btf_id == attach_btf_id)
+                       return CGROUP_LSM_START + i;
+
+       for (i = 0; i < ARRAY_SIZE(cgroup_lsm_atype); i++)
+               if (cgroup_lsm_atype[i].attach_btf_id == 0)
+                       return CGROUP_LSM_START + i;
+
+       return -E2BIG;
+
+}
+
+void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype)
+{
+       int i = cgroup_atype - CGROUP_LSM_START;
+
+       lockdep_assert_held(&cgroup_mutex);
+
+       WARN_ON_ONCE(cgroup_lsm_atype[i].attach_btf_id &&
+                    cgroup_lsm_atype[i].attach_btf_id != attach_btf_id);
+
+       cgroup_lsm_atype[i].attach_btf_id = attach_btf_id;
+       cgroup_lsm_atype[i].refcnt++;
+}
+
+void bpf_cgroup_atype_put(int cgroup_atype)
+{
+       int i = cgroup_atype - CGROUP_LSM_START;
+
+       mutex_lock(&cgroup_mutex);
+       if (--cgroup_lsm_atype[i].refcnt <= 0)
+               cgroup_lsm_atype[i].attach_btf_id = 0;
+       WARN_ON_ONCE(cgroup_lsm_atype[i].refcnt < 0);
+       mutex_unlock(&cgroup_mutex);
 }
 #else
 static enum cgroup_bpf_attach_type
index 4cc10b9..805c2ad 100644 (file)
@@ -107,6 +107,9 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
        fp->aux->prog = fp;
        fp->jit_requested = ebpf_jit_enabled();
        fp->blinding_requested = bpf_jit_blinding_enabled(fp);
+#ifdef CONFIG_CGROUP_BPF
+       aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
+#endif
 
        INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
        mutex_init(&fp->aux->used_maps_mutex);
@@ -2570,6 +2573,10 @@ static void bpf_prog_free_deferred(struct work_struct *work)
 #ifdef CONFIG_BPF_SYSCALL
        bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
 #endif
+#ifdef CONFIG_CGROUP_BPF
+       if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
+               bpf_cgroup_atype_put(aux->cgroup_atype);
+#endif
        bpf_free_used_maps(aux);
        bpf_free_used_btfs(aux);
        if (bpf_prog_is_dev_bound(aux))
index d7c251d..6cd2265 100644 (file)
@@ -555,6 +555,7 @@ static struct bpf_shim_tramp_link *cgroup_shim_alloc(const struct bpf_prog *prog
        bpf_prog_inc(p);
        bpf_link_init(&shim_link->link.link, BPF_LINK_TYPE_UNSPEC,
                      &bpf_shim_tramp_link_lops, p);
+       bpf_cgroup_atype_get(p->aux->attach_btf_id, cgroup_atype);
 
        return shim_link;
 }