/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}
/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}
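/* Worked example (not from the original source): with 4 KiB pages and a
 * common default RLIMIT_MEMLOCK of 64 KiB, memlock_limit above is
 * 65536 >> 12 = 16 pages, so charging a map with map->pages > 16 fails
 * with -EPERM unless userspace first raises the limit, e.g.:
 *
 *	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
 *	setrlimit(RLIMIT_MEMLOCK, &r);
 */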
static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}
void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\nkey_size:\t%u\nvalue_size:\t%u\n"
		   "max_entries:\t%u\nmap_flags:\t%#x\nmemlock:\t%llu\n",
		   map->map_type, map->key_size, map->value_size,
		   map->max_entries, map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n", owner_prog_type);
}
#endif
static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};
int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}
/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
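/* Illustrative expansion (a sketch, not part of the original source): for
 * BPF_MAP_CREATE, whose last used field is map_flags, CHECK_ATTR reduces to
 *
 *	memchr_inv((void *)&attr->map_flags + sizeof(attr->map_flags), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_flags) -
 *		   sizeof(attr->map_flags)) != NULL
 *
 * i.e. it is true iff any byte past the command's last used field is
 * non-zero, rejecting attrs written by a newer, unknown userspace ABI.
 */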
#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
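/* Illustrative userspace usage (a sketch, not part of the original
 * source; assumes the uapi <linux/bpf.h> of this kernel):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * On success map_fd refers to an anon inode with bpf_map_fops; closing
 * it drops the uref taken by map_create() above.
 */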
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
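/* Illustrative userspace counterpart (a sketch, not part of the original
 * source):
 *
 *	__u32 key = 7;
 *	__u64 value;
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	int err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *
 * For the per-cpu map types the buffer behind attr.value must hold
 * round_up(value_size, 8) * num_possible_cpus() bytes, matching the
 * value_size computation above.
 */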
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
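/* Illustrative userspace counterpart (sketch): same layout as lookup,
 * plus the uapi update flags BPF_ANY (create or replace), BPF_NOEXIST
 * (create only) and BPF_EXIST (replace only):
 *
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *		.flags  = BPF_NOEXIST,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */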
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
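/* Illustrative iteration from userspace (a sketch, not part of the
 * original source): feeding each returned key back in walks the whole
 * map; -ENOENT marks the end. An unlikely initial key starts the walk
 * from the beginning.
 *
 *	__u32 key = -1, next_key;
 *	union bpf_attr attr = { .map_fd = map_fd };
 *	attr.key = (__u64)(unsigned long)&key;
 *	attr.next_key = (__u64)(unsigned long)&next_key;
 *	while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr))) {
 *		... use next_key ...
 *		key = next_key;
 *	}
 */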
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}
void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}
/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when program has bpf_call instructions
			 * and it passed bpf_check(), means that
			 * ops->get_func_proto must have been supplied, check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_xdp_adjust_head)
				prog->xdp_adjust_head = 1;
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have prototype and verifier allowed
			 * programs to call them, must be real in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
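/* Illustrative effect (a sketch, not part of the original source): a
 * verified helper call such as
 *
 *	insn->code == (BPF_JMP | BPF_CALL), insn->imm == BPF_FUNC_map_lookup_elem
 *
 * leaves here with insn->imm == bpf_map_lookup_elem - __bpf_call_base,
 * so the interpreter can dispatch through __bpf_call_base + insn->imm
 * without a table lookup, while a tail call keeps imm == 0 and gets the
 * distinct BPF_JMP | BPF_CALL | BPF_X opcode set above.
 */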
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}
int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}
void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}
static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}
static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}
void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_digest[sizeof(prog->digest) * 2 + 1] = { };

	bin2hex(prog_digest, prog->digest, sizeof(prog->digest));
	seq_printf(m,
		   "prog_type:\t%u\nprog_jited:\t%u\nprog_digest:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type, prog->jited, prog_digest,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif
static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};
int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}
static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);
void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	return __bpf_prog_get(ufd, &type);
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
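/* Minimal load from userspace (an illustrative sketch, not part of the
 * original source; uses only uapi <linux/bpf.h> definitions). The two
 * instructions are "r0 = 0; exit":
 *
 *	struct bpf_insn insns[] = {
 *		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *		  .dst_reg = BPF_REG_0, .imm = 0 },
 *		{ .code = BPF_JMP | BPF_EXIT },
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insn_cnt  = 2,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * Per the checks above, BPF_PROG_TYPE_SOCKET_FILTER is the only type
 * loadable without CAP_SYS_ADMIN.
 */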
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}
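/* Illustrative pin/get round trip (a sketch, not part of the original
 * source; assumes a bpffs mount at /sys/fs/bpf):
 *
 *	union bpf_attr attr = {
 *		.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_map",
 *		.bpf_fd   = map_fd,
 *	};
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	attr.bpf_fd = 0;	(must be zero for BPF_OBJ_GET, see above)
 *	int fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */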
#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_type

static int bpf_prog_attach(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	enum bpf_prog_type ptype;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	cgroup_bpf_update(cgrp, prog, attr->attach_type);
	cgroup_put(cgrp);

	return 0;
}
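/* Illustrative attach from userspace (a sketch, not part of the original
 * source; assumes cgroup v2 mounted at /sys/fs/cgroup and a loaded
 * BPF_PROG_TYPE_CGROUP_SKB program):
 *
 *	int cg_fd = open("/sys/fs/cgroup/my_group", O_RDONLY);
 *	union bpf_attr attr = {
 *		.target_fd	= cg_fd,
 *		.attach_bpf_fd	= prog_fd,
 *		.attach_type	= BPF_CGROUP_INET_EGRESS,
 *	};
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */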
#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		cgroup_bpf_update(cgrp, NULL, attr->attach_type);
		cgroup_put(cgrp);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
#endif /* CONFIG_CGROUP_BPF */
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
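/* Compatibility note (not part of the original source): because attr is
 * zero-initialized before the copy, an older binary passing a smaller
 * struct leaves all newer fields at zero, and a newer binary may pass a
 * larger struct as long as every byte beyond sizeof(attr) is zero -
 * exactly what the loop above enforces. E.g. an old caller's
 *
 *	syscall(__NR_bpf, BPF_MAP_CREATE, &old_attr, sizeof(old_attr));
 *
 * (old_attr being a hypothetical earlier, shorter union bpf_attr)
 * still works unchanged on this kernel.
 */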