bpf: Centralize permissions checks for all BPF map types
author Andrii Nakryiko <andrii@kernel.org>
Tue, 13 Jun 2023 22:35:32 +0000 (15:35 -0700)
committer Daniel Borkmann <daniel@iogearbox.net>
Mon, 19 Jun 2023 12:04:04 +0000 (14:04 +0200)
This allows making more centralized decisions later on, and generally
makes it very explicit which maps are privileged and which are not
(e.g., LRU_HASH and LRU_PERCPU_HASH, which are privileged HASH variants,
as opposed to unprivileged HASH and PERCPU_HASH; now this is explicit
and easy to verify).
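
For illustration only (not part of the patch): a minimal user-space sketch of
the resulting split, assuming a kernel with this change, libbpf >= 1.0, and
kernel.unprivileged_bpf_disabled=0, run without CAP_BPF/CAP_SYS_ADMIN. Plain
HASH creation is expected to succeed, while the LRU_HASH variant should fail
with -EPERM.

  /* illustrative sketch, not part of the patch */
  #include <stdio.h>
  #include <unistd.h>
  #include <bpf/bpf.h>

  int main(void)
  {
          /* HASH remains creatable without CAP_BPF */
          int hash_fd = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
                                       sizeof(int), sizeof(int), 1, NULL);
          /* LRU_HASH is a privileged variant: expect a negative
           * return value (-EPERM) here when unprivileged
           */
          int lru_fd = bpf_map_create(BPF_MAP_TYPE_LRU_HASH, NULL,
                                      sizeof(int), sizeof(int), 1, NULL);

          printf("HASH: %d, LRU_HASH: %d\n", hash_fd, lru_fd);
          if (hash_fd >= 0)
                  close(hash_fd);
          return 0;
  }

Build with something like "cc sketch.c -lbpf", assuming libbpf development
headers are installed.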

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/bpf/20230613223533.3689589-4-andrii@kernel.org
14 files changed:
kernel/bpf/bloom_filter.c
kernel/bpf/bpf_local_storage.c
kernel/bpf/bpf_struct_ops.c
kernel/bpf/cpumap.c
kernel/bpf/devmap.c
kernel/bpf/hashtab.c
kernel/bpf/lpm_trie.c
kernel/bpf/queue_stack_maps.c
kernel/bpf/reuseport_array.c
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
net/core/sock_map.c
net/xdp/xskmap.c
tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c

index 540331b610a97fc73409cd127ef39cfc627a7351..addf3dd57b59b574653a8ad3a374d6ef0f27b51b 100644 (file)
@@ -86,9 +86,6 @@ static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
        int numa_node = bpf_map_attr_numa_node(attr);
        struct bpf_bloom_filter *bloom;
 
-       if (!bpf_capable())
-               return ERR_PTR(-EPERM);
-
        if (attr->key_size != 0 || attr->value_size == 0 ||
            attr->max_entries == 0 ||
            attr->map_flags & ~BLOOM_CREATE_FLAG_MASK ||
index 47d9948d768f0747dffce4751df34ee467017f83..b5149cfce7d4defaac057a095cb2b055f888384b 100644 (file)
@@ -723,9 +723,6 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
            !attr->btf_key_type_id || !attr->btf_value_type_id)
                return -EINVAL;
 
-       if (!bpf_capable())
-               return -EPERM;
-
        if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
                return -E2BIG;
 
index d3f0a4825fa6191395541598d6f6ff8e0741ac56..116a0ce378ecddae73892cb1fb8d616a0a5f4497 100644 (file)
@@ -655,9 +655,6 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
        const struct btf_type *t, *vt;
        struct bpf_map *map;
 
-       if (!bpf_capable())
-               return ERR_PTR(-EPERM);
-
        st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
        if (!st_ops)
                return ERR_PTR(-ENOTSUPP);
index 8ec18faa74ac3ef0f09a2daf21db9e78dbbed97b..8a33e8747a0e2c2841fa792de1edd05651e04345 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/kthread.h>
-#include <linux/capability.h>
 #include <trace/events/xdp.h>
 #include <linux/btf_ids.h>
 
@@ -89,9 +88,6 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
        u32 value_size = attr->value_size;
        struct bpf_cpu_map *cmap;
 
-       if (!bpf_capable())
-               return ERR_PTR(-EPERM);
-
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
index 802692fa3905cc97ef1fa83266d082d96d7a170d..49cc0b5671c619132c8f3bf974bf68861638e2ba 100644 (file)
@@ -160,9 +160,6 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
        struct bpf_dtab *dtab;
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
-               return ERR_PTR(-EPERM);
-
        dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
        if (!dtab)
                return ERR_PTR(-ENOMEM);
index 9901efee4339d8592f34c820a5a26a6c85e34de1..56d3da7d0bc66b6ffa9a6e89321d8887c36b2a8b 100644 (file)
@@ -422,12 +422,6 @@ static int htab_map_alloc_check(union bpf_attr *attr)
        BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
                     offsetof(struct htab_elem, hash_node.pprev));
 
-       if (lru && !bpf_capable())
-               /* LRU implementation is much complicated than other
-                * maps.  Hence, limit to CAP_BPF.
-                */
-               return -EPERM;
-
        if (zero_seed && !capable(CAP_SYS_ADMIN))
                /* Guard against local DoS, and discourage production use. */
                return -EPERM;
index e0d3ddf2037aba6e3b4ee60136acfec2bad48d16..17c7e7782a1f7fa2c7730594c3546e7e2febb520 100644 (file)
@@ -544,9 +544,6 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 {
        struct lpm_trie *trie;
 
-       if (!bpf_capable())
-               return ERR_PTR(-EPERM);
-
        /* check sanity of attributes */
        if (attr->max_entries == 0 ||
            !(attr->map_flags & BPF_F_NO_PREALLOC) ||
index 601609164ef341331940ae98d332ae103d9ee964..8d2ddcb7566b7fe262e7571700f1b1ac48dcc782 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/bpf.h>
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/capability.h>
 #include <linux/btf_ids.h>
 #include "percpu_freelist.h"
 
@@ -46,9 +45,6 @@ static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
 /* Called from syscall */
 static int queue_stack_map_alloc_check(union bpf_attr *attr)
 {
-       if (!bpf_capable())
-               return -EPERM;
-
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 0 ||
            attr->value_size == 0 ||
index cbf2d8d784b8943094c1f511cfe9a811f6ac50d3..4b4f9670f1a9a6b320c23f390f3e7f2db2cd7c0c 100644 (file)
@@ -151,9 +151,6 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
        int numa_node = bpf_map_attr_numa_node(attr);
        struct reuseport_array *array;
 
-       if (!bpf_capable())
-               return ERR_PTR(-EPERM);
-
        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(struct_size(array, ptrs, attr->max_entries), numa_node);
        if (!array)
index b25fce425b2c63f781985a65902770122e6d4468..458bb80b14d5744a829d5ace8ca0f8a7426851c1 100644 (file)
@@ -74,9 +74,6 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
        u64 cost, n_buckets;
        int err;
 
-       if (!bpf_capable())
-               return ERR_PTR(-EPERM);
-
        if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);
 
index 6ef302709ab03f0618b6ac25f49e26b0f68d11ff..658d1154f221692f0c53148fd0ff96031d97024e 100644 (file)
@@ -1156,6 +1156,53 @@ static int map_create(union bpf_attr *attr)
        if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
                return -EPERM;
 
+       /* check privileged map type permissions */
+       switch (map_type) {
+       case BPF_MAP_TYPE_ARRAY:
+       case BPF_MAP_TYPE_PERCPU_ARRAY:
+       case BPF_MAP_TYPE_PROG_ARRAY:
+       case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+       case BPF_MAP_TYPE_CGROUP_ARRAY:
+       case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+       case BPF_MAP_TYPE_HASH:
+       case BPF_MAP_TYPE_PERCPU_HASH:
+       case BPF_MAP_TYPE_HASH_OF_MAPS:
+       case BPF_MAP_TYPE_RINGBUF:
+       case BPF_MAP_TYPE_USER_RINGBUF:
+       case BPF_MAP_TYPE_CGROUP_STORAGE:
+       case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
+               /* unprivileged */
+               break;
+       case BPF_MAP_TYPE_SK_STORAGE:
+       case BPF_MAP_TYPE_INODE_STORAGE:
+       case BPF_MAP_TYPE_TASK_STORAGE:
+       case BPF_MAP_TYPE_CGRP_STORAGE:
+       case BPF_MAP_TYPE_BLOOM_FILTER:
+       case BPF_MAP_TYPE_LPM_TRIE:
+       case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
+       case BPF_MAP_TYPE_STACK_TRACE:
+       case BPF_MAP_TYPE_QUEUE:
+       case BPF_MAP_TYPE_STACK:
+       case BPF_MAP_TYPE_LRU_HASH:
+       case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+       case BPF_MAP_TYPE_STRUCT_OPS:
+       case BPF_MAP_TYPE_CPUMAP:
+               if (!bpf_capable())
+                       return -EPERM;
+               break;
+       case BPF_MAP_TYPE_SOCKMAP:
+       case BPF_MAP_TYPE_SOCKHASH:
+       case BPF_MAP_TYPE_DEVMAP:
+       case BPF_MAP_TYPE_DEVMAP_HASH:
+       case BPF_MAP_TYPE_XSKMAP:
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
+               break;
+       default:
+               WARN(1, "unsupported map type %d", map_type);
+               return -EPERM;
+       }
+
        map = ops->map_alloc(attr);
        if (IS_ERR(map))
                return PTR_ERR(map);
index 00afb66cd095010158c2d49a7bea5a8d88539973..19538d6287144afb7dc840e31932b49339651eae 100644 (file)
@@ -32,8 +32,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
        struct bpf_stab *stab;
 
-       if (!capable(CAP_NET_ADMIN))
-               return ERR_PTR(-EPERM);
        if (attr->max_entries == 0 ||
            attr->key_size    != 4 ||
            (attr->value_size != sizeof(u32) &&
@@ -1085,8 +1083,6 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
        struct bpf_shtab *htab;
        int i, err;
 
-       if (!capable(CAP_NET_ADMIN))
-               return ERR_PTR(-EPERM);
        if (attr->max_entries == 0 ||
            attr->key_size    == 0 ||
            (attr->value_size != sizeof(u32) &&
index 2c1427074a3bb9ec6aade6a43cd92c21a25acc26..e1c526f97ce31ffcbca743b70d7bc5c87dd45207 100644 (file)
@@ -5,7 +5,6 @@
 
 #include <linux/bpf.h>
 #include <linux/filter.h>
-#include <linux/capability.h>
 #include <net/xdp_sock.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
@@ -68,9 +67,6 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
        int numa_node;
        u64 size;
 
-       if (!capable(CAP_NET_ADMIN))
-               return ERR_PTR(-EPERM);
-
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size != 4 ||
            attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
index 8383a99f610fd0ac7ee4f207b59d2bd012e28b83..0adf8d9475cb2ab82fb153e8761e2abf0eb5979e 100644 (file)
@@ -171,7 +171,11 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s
                                prog_insns, prog_insn_cnt, &load_opts),
                  -EPERM, "prog_load_fails");
 
-       for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++)
+       /* some map types require particular correct parameters which could be
+        * sanity-checked before enforcing -EPERM, so only validate that
+        * the simple ARRAY and HASH maps are failing with -EPERM
+        */
+       for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_ARRAY; i++)
                ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL),
                          -EPERM, "map_create_fails");