bpf: Centralize permissions checks for all BPF map types
author Andrii Nakryiko <andrii@kernel.org>
Tue, 13 Jun 2023 22:35:32 +0000 (15:35 -0700)
committer Daniel Borkmann <daniel@iogearbox.net>
Mon, 19 Jun 2023 12:04:04 +0000 (14:04 +0200)
This allows to do more centralized decisions later on, and generally
makes it very explicit which maps are privileged and which are not
(e.g., LRU_HASH and LRU_PERCPU_HASH, which are privileged HASH variants,
as opposed to unprivileged HASH and PERCPU_HASH; now this is explicit
and easy to verify).

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/bpf/20230613223533.3689589-4-andrii@kernel.org
14 files changed:
kernel/bpf/bloom_filter.c
kernel/bpf/bpf_local_storage.c
kernel/bpf/bpf_struct_ops.c
kernel/bpf/cpumap.c
kernel/bpf/devmap.c
kernel/bpf/hashtab.c
kernel/bpf/lpm_trie.c
kernel/bpf/queue_stack_maps.c
kernel/bpf/reuseport_array.c
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
net/core/sock_map.c
net/xdp/xskmap.c
tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c

index 540331b..addf3dd 100644 (file)
@@ -86,9 +86,6 @@ static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
        int numa_node = bpf_map_attr_numa_node(attr);
        struct bpf_bloom_filter *bloom;
 
-       if (!bpf_capable())
-               return ERR_PTR(-EPERM);
-
        if (attr->key_size != 0 || attr->value_size == 0 ||
            attr->max_entries == 0 ||
            attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||
index 47d9948..b5149cf 100644 (file)
@@ -723,9 +723,6 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
            !attr->btf_key_type_id || !attr->btf_value_type_id)
                return -EINVAL;
 
-       if (!bpf_capable())
-               return -EPERM;
-
        if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
                return -E2BIG;
 
index d3f0a48..116a0ce 100644 (file)
@@ -655,9 +655,6 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
        const struct btf_type *t, *vt;
        struct bpf_map *map;
 
-       if (!bpf_capable())
-               return ERR_PTR(-EPERM);
-
        st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
        if (!st_ops)
                return ERR_PTR(-ENOTSUPP);
index 8ec18fa..8a33e87 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
-#include <linux/capability.h>
 #include <trace/events/xdp.h>
 #include <linux/btf_ids.h>
 
@@ -89,9 +88,6 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
        u32 value_size = attr->value_size;
        struct bpf_cpu_map *cmap;
 
-       if (!bpf_capable())
-               return ERR_PTR(-EPERM);
-
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
index 802692f..49cc0b5 100644 (file)
@@ -160,9 +160,6 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
        struct bpf_dtab *dtab;
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
-               return ERR_PTR(-EPERM);
-
        dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
        if (!dtab)
                return ERR_PTR(-ENOMEM);
index 9901efe..56d3da7 100644 (file)
@@ -422,12 +422,6 @@ static int htab_map_alloc_check(union bpf_attr *attr)
        BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
                     offsetof(struct htab_elem, hash_node.pprev));
 
-       if (lru && !bpf_capable())
-               /* LRU implementation is much complicated than other
-                * maps.  Hence, limit to CAP_BPF.
-                */
-               return -EPERM;
-
        if (zero_seed && !capable(CAP_SYS_ADMIN))
                /* Guard against local DoS, and discourage production use. */
                return -EPERM;
index e0d3ddf..17c7e77 100644 (file)
@@ -544,9 +544,6 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 {
        struct lpm_trie *trie;
 
-       if (!bpf_capable())
-               return ERR_PTR(-EPERM);
-
        /* check sanity of attributes */
        if (attr->max_entries == 0 ||
            !(attr->map_flags & BPF_F_NO_PREALLOC) ||
index 6016091..8d2ddcb 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/bpf.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/capability.h>
 #include <linux/btf_ids.h>
 #include "percpu_freelist.h"
 
@@ -46,9 +45,6 @@ static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
 /* Called from syscall */
 static int queue_stack_map_alloc_check(union bpf_attr *attr)
 {
-       if (!bpf_capable())
-               return -EPERM;
-
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 0 ||
            attr->value_size == 0 ||
index cbf2d8d..4b4f967 100644 (file)
@@ -151,9 +151,6 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
        int numa_node = bpf_map_attr_numa_node(attr);
        struct reuseport_array *array;
 
-       if (!bpf_capable())
-               return ERR_PTR(-EPERM);
-
        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(struct_size(array, ptrs, attr->max_entries), numa_node);
        if (!array)
index b25fce4..458bb80 100644 (file)
@@ -74,9 +74,6 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
        u64 cost, n_buckets;
        int err;
 
-       if (!bpf_capable())
-               return ERR_PTR(-EPERM);
-
        if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);
 
index 6ef3027..658d115 100644 (file)
@@ -1156,6 +1156,53 @@ static int map_create(union bpf_attr *attr)
        if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
                return -EPERM;
 
+       /* check privileged map type permissions */
+       switch (map_type) {
+       case BPF_MAP_TYPE_ARRAY:
+       case BPF_MAP_TYPE_PERCPU_ARRAY:
+       case BPF_MAP_TYPE_PROG_ARRAY:
+       case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+       case BPF_MAP_TYPE_CGROUP_ARRAY:
+       case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+       case BPF_MAP_TYPE_HASH:
+       case BPF_MAP_TYPE_PERCPU_HASH:
+       case BPF_MAP_TYPE_HASH_OF_MAPS:
+       case BPF_MAP_TYPE_RINGBUF:
+       case BPF_MAP_TYPE_USER_RINGBUF:
+       case BPF_MAP_TYPE_CGROUP_STORAGE:
+       case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
+               /* unprivileged */
+               break;
+       case BPF_MAP_TYPE_SK_STORAGE:
+       case BPF_MAP_TYPE_INODE_STORAGE:
+       case BPF_MAP_TYPE_TASK_STORAGE:
+       case BPF_MAP_TYPE_CGRP_STORAGE:
+       case BPF_MAP_TYPE_BLOOM_FILTER:
+       case BPF_MAP_TYPE_LPM_TRIE:
+       case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+       case BPF_MAP_TYPE_STACK_TRACE:
+       case BPF_MAP_TYPE_QUEUE:
+       case BPF_MAP_TYPE_STACK:
+       case BPF_MAP_TYPE_LRU_HASH:
+       case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+       case BPF_MAP_TYPE_STRUCT_OPS:
+       case BPF_MAP_TYPE_CPUMAP:
+               if (!bpf_capable())
+                       return -EPERM;
+               break;
+       case BPF_MAP_TYPE_SOCKMAP:
+       case BPF_MAP_TYPE_SOCKHASH:
+       case BPF_MAP_TYPE_DEVMAP:
+       case BPF_MAP_TYPE_DEVMAP_HASH:
+       case BPF_MAP_TYPE_XSKMAP:
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
+               break;
+       default:
+               WARN(1, "unsupported map type %d", map_type);
+               return -EPERM;
+       }
+
        map = ops->map_alloc(attr);
        if (IS_ERR(map))
                return PTR_ERR(map);
index 00afb66..19538d6 100644 (file)
@@ -32,8 +32,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
        struct bpf_stab *stab;
 
-       if (!capable(CAP_NET_ADMIN))
-               return ERR_PTR(-EPERM);
        if (attr->max_entries == 0 ||
            attr->key_size    != 4 ||
            (attr->value_size != sizeof(u32) &&
@@ -1085,8 +1083,6 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
        struct bpf_shtab *htab;
        int i, err;
 
-       if (!capable(CAP_NET_ADMIN))
-               return ERR_PTR(-EPERM);
        if (attr->max_entries == 0 ||
            attr->key_size    == 0 ||
            (attr->value_size != sizeof(u32) &&
index 2c14270..e1c526f 100644 (file)
@@ -5,7 +5,6 @@
 
 #include <linux/bpf.h>
 #include <linux/filter.h>
-#include <linux/capability.h>
 #include <net/xdp_sock.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
@@ -68,9 +67,6 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
        int numa_node;
        u64 size;
 
-       if (!capable(CAP_NET_ADMIN))
-               return ERR_PTR(-EPERM);
-
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size != 4 ||
            attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
index 8383a99..0adf8d9 100644 (file)
@@ -171,7 +171,11 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s
                                prog_insns, prog_insn_cnt, &load_opts),
                  -EPERM, "prog_load_fails");
 
-       for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++)
+       /* some map types require particular correct parameters which could be
+        * sanity-checked before enforcing -EPERM, so only validate that
+        * the simple ARRAY and HASH maps are failing with -EPERM
+        */
+       for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_ARRAY; i++)
                ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL),
                          -EPERM, "map_create_fails");