// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/btf_ids.h>

enum bpf_struct_ops_state {
	BPF_STRUCT_OPS_STATE_INIT,
	BPF_STRUCT_OPS_STATE_INUSE,
	BPF_STRUCT_OPS_STATE_TOBEFREE,
};

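/* State transitions are driven by the map operations further down:
 * update_elem() moves INIT to INUSE and delete_elem() moves INUSE to
 * TOBEFREE.  There is no transition out of TOBEFREE; the map is freed
 * once the last reference is put.
 */
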
#define BPF_STRUCT_OPS_COMMON_VALUE			\
	refcount_t refcnt;				\
	enum bpf_struct_ops_state state

struct bpf_struct_ops_value {
	BPF_STRUCT_OPS_COMMON_VALUE;
	char data[] ____cacheline_aligned_in_smp;
};

struct bpf_struct_ops_map {
	struct bpf_map map;
	struct rcu_head rcu;
	const struct bpf_struct_ops *st_ops;
	/* protect map_update */
	struct mutex lock;
	/* links holds all the bpf_links that are populated
	 * to the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_link **links;
	/* image is a page that has all the trampolines
	 * that store the func args before calling the bpf_prog.
	 * A PAGE_SIZE "image" is enough to store all trampolines for
	 * "links[]".
	 */
	void *image;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) that is more useful
	 * to userspace than the kvalue.  For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};

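/* Illustrative layout, assuming tcp_congestion_ops is the registered
 * type: kvalue.data holds a struct tcp_congestion_ops whose func ptrs
 * point at trampolines inside "image", while uvalue->data holds the
 * same struct with bpf_prog ids in the func ptr slots.
 */
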
#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
 * the map's value exposed to userspace, and its btf-type-id is
 * stored in map->btf_vmlinux_value_type_id.
 */
#define BPF_STRUCT_OPS_TYPE(_name)				\
extern struct bpf_struct_ops bpf_##_name;			\
								\
struct bpf_struct_ops_##_name {					\
	BPF_STRUCT_OPS_COMMON_VALUE;				\
	struct _name data ____cacheline_aligned_in_smp;		\
};
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

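/* As an example, if bpf_struct_ops_types.h lists tcp_congestion_ops,
 * the macro above expands to:
 *
 *	extern struct bpf_struct_ops bpf_tcp_congestion_ops;
 *
 *	struct bpf_struct_ops_tcp_congestion_ops {
 *		refcount_t refcnt;
 *		enum bpf_struct_ops_state state;
 *		struct tcp_congestion_ops data ____cacheline_aligned_in_smp;
 *	};
 */
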
enum {
#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
	__NR_BPF_STRUCT_OPS_TYPE,
};

static struct bpf_struct_ops * const bpf_struct_ops[] = {
#define BPF_STRUCT_OPS_TYPE(_name)				\
	[BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
};

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_struct_ops_test_run,
#endif
};

static const struct btf_type *module_type;

void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
	s32 type_id, value_id, module_id;
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	const struct btf_type *t;
	char value_name[128];
	const char *mname;
	u32 i, j;

	/* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

	module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
	if (module_id < 0) {
		pr_warn("Cannot find struct module in btf_vmlinux\n");
		return;
	}
	module_type = btf_type_by_id(btf, module_id);

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		st_ops = bpf_struct_ops[i];

		if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
		    sizeof(value_name)) {
			pr_warn("struct_ops name %s is too long\n",
				st_ops->name);
			continue;
		}
		sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

		value_id = btf_find_by_name_kind(btf, value_name,
						 BTF_KIND_STRUCT);
		if (value_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				value_name);
			continue;
		}

		type_id = btf_find_by_name_kind(btf, st_ops->name,
						BTF_KIND_STRUCT);
		if (type_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				st_ops->name);
			continue;
		}
		t = btf_type_by_id(btf, type_id);
		if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
			pr_warn("Cannot support #%u members in struct %s\n",
				btf_type_vlen(t), st_ops->name);
			continue;
		}

		for_each_member(j, t, member) {
			const struct btf_type *func_proto;

			mname = btf_name_by_offset(btf, member->name_off);
			if (!*mname) {
				pr_warn("anon member in struct %s is not supported\n",
					st_ops->name);
				break;
			}

			if (__btf_member_bitfield_size(t, member)) {
				pr_warn("bit field member %s in struct %s is not supported\n",
					mname, st_ops->name);
				break;
			}

			func_proto = btf_type_resolve_func_ptr(btf,
							       member->type,
							       NULL);
			if (func_proto &&
			    btf_distill_func_proto(log, btf,
						   func_proto, mname,
						   &st_ops->func_models[j])) {
				pr_warn("Error in parsing func ptr %s in struct %s\n",
					mname, st_ops->name);
				break;
			}
		}

		if (j == btf_type_vlen(t)) {
			if (st_ops->init(btf)) {
				pr_warn("Error in init bpf_struct_ops %s\n",
					st_ops->name);
			} else {
				st_ops->type_id = type_id;
				st_ops->type = t;
				st_ops->value_id = value_id;
				st_ops->value_type = btf_type_by_id(btf,
								    value_id);
			}
		}
	}
}

extern struct btf *btf_vmlinux;

static const struct bpf_struct_ops *
bpf_struct_ops_find_value(u32 value_id)
{
	unsigned int i;

	if (!value_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->value_id == value_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	unsigned int i;

	if (!type_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->type_id == type_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed.  state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->state = state;
	refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));

	return 0;
}

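/* Userspace reads (e.g. the BPF_MAP_LOOKUP_ELEM syscall) go through the
 * lockless snapshot above; bpf_struct_ops_map_lookup_elem() below
 * rejects in-kernel lookups, so the live kvalue is never exposed
 * directly.
 */
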
static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	const struct btf_type *t = st_map->st_ops->type;
	u32 i;

	for (i = 0; i < btf_type_vlen(t); i++) {
		if (st_map->links[i]) {
			bpf_link_put(st_map->links[i]);
			st_map->links[i] = NULL;
		}
	}
}

static int check_zero_holes(const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = __btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf_vmlinux, member->type);
		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}

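/* Example (illustrative): on a 64-bit kernel,
 *
 *	struct foo { u8 a; u64 b; };
 *
 * has a 7-byte padding hole after 'a'.  check_zero_holes() fails the
 * update if userspace left any of those bytes non-zero, so unchecked
 * bytes never reach the kernel's copy of the struct.
 */
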
static void bpf_struct_ops_link_release(struct bpf_link *link)
{
}

static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
{
	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);

	kfree(tlink);
}

const struct bpf_link_ops bpf_struct_ops_link_lops = {
	.release = bpf_struct_ops_link_release,
	.dealloc = bpf_struct_ops_link_dealloc,
};

int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *image, void *image_end)
{
	u32 flags;

	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;
	/* BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
	 * and it must be used alone.
	 */
	flags = model->ret_size > 0 ? BPF_TRAMP_F_RET_FENTRY_RET : 0;
	return arch_prepare_bpf_trampoline(NULL, image, image_end,
					   model, flags, tlinks, NULL);
}

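/* Rough shape of the generated code (arch specific, sketch only): the
 * trampoline at 'image' saves the native args per 'model', calls the
 * bpf_prog fentry-style and, with BPF_TRAMP_F_RET_FENTRY_RET, hands the
 * bpf_prog's return value back to the native caller.
 */
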
static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops *st_ops = st_map->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_member *member;
	const struct btf_type *t = st_ops->type;
	struct bpf_tramp_links *tlinks = NULL;
	void *udata, *kdata;
	int prog_fd, err = 0;
	void *image, *image_end;
	u32 i;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_ops->value_type, value);
	if (err)
		return err;

	uvalue = value;
	err = check_zero_holes(t, uvalue->data);
	if (err)
		return err;

	if (uvalue->state || refcount_read(&uvalue->refcnt))
		return -EINVAL;

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;
	image = st_map->image;
	image_end = st_map->image + PAGE_SIZE;

	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		struct bpf_tramp_link *link;
		u32 moff;

		moff = __btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff))
				goto reset_unlock;
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zero-ed members
		 * here.  Reject everything else.
		 */

		/* All non func ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(btf_vmlinux, member->type);
			mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops->type_id ||
		    prog->expected_attach_type != i) {
			bpf_prog_put(prog);
			err = -EINVAL;
			goto reset_unlock;
		}

		link = kzalloc(sizeof(*link), GFP_USER);
		if (!link) {
			bpf_prog_put(prog);
			err = -ENOMEM;
			goto reset_unlock;
		}
		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
			      &bpf_struct_ops_link_lops, prog);
		st_map->links[i] = &link->link;

		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
							&st_ops->func_models[i],
							image, image_end);
		if (err < 0)
			goto reset_unlock;

		*(void **)(kdata + moff) = image;
		image += err;

		/* put prog_id to udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;
	}

	refcount_set(&kvalue->refcnt, 1);
	bpf_map_inc(map);

	set_memory_ro((long)st_map->image, 1);
	set_memory_x((long)st_map->image, 1);
	err = st_ops->reg(kdata);
	if (likely(!err)) {
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg().  Can happen if this struct_ops needs
	 * to be verified as a whole, after all init_member() calls.  Can
	 * also happen if there was a race in registering the struct_ops
	 * (under the same name) to a sub-system through different
	 * struct_ops's maps.
	 */
	set_memory_nx((long)st_map->image, 1);
	set_memory_rw((long)st_map->image, 1);
	bpf_map_put(map);

reset_unlock:
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tlinks);
	mutex_unlock(&st_map->lock);
	return err;
}

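/* For reference, the usual producer of this update is a libbpf-managed
 * struct_ops map.  A sketch modeled on the bpf_dctcp selftest (not part
 * of this file):
 *
 *	SEC("struct_ops/dctcp_init")
 *	void BPF_PROG(dctcp_init, struct sock *sk) { ... }
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops dctcp = {
 *		.init = (void *)dctcp_init,
 *		.name = "bpf_dctcp",
 *	};
 *
 * libbpf fills each func ptr slot with a prog fd and calls
 * bpf_map_update_elem(), which lands in the function above.
 */
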
static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	prev_state = cmpxchg(&st_map->kvalue.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops->unreg(&st_map->kvalue.data);
		if (refcount_dec_and_test(&st_map->kvalue.refcnt))
			bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen.  Treat it as not found. */
		return -ENOENT;
	}
}

static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
				  value, m);
		seq_puts(m, "\n");
	}

	kfree(value);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->links)
		bpf_struct_ops_map_put_progs(st_map);
	bpf_map_area_free(st_map->links);
	bpf_jit_free_exec(st_map->image);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    attr->map_flags || !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops *st_ops;
	size_t st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct bpf_map *map;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
	if (!st_ops)
		return ERR_PTR(-ENOTSUPP);

	vt = st_ops->value_type;
	if (attr->value_size != vt->size)
		return ERR_PTR(-EINVAL);

	t = st_ops->type;

	st_map_size = sizeof(*st_map) +
		/* kvalue stores the
		 * struct bpf_struct_ops_tcp_congestion_ops
		 */
		(vt->size - sizeof(struct bpf_struct_ops_value));

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map)
		return ERR_PTR(-ENOMEM);

	st_map->st_ops = st_ops;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->links =
		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_link *),
				   NUMA_NO_NODE);
	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!st_map->uvalue || !st_map->links || !st_map->image) {
		bpf_struct_ops_map_free(map);
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&st_map->lock);
	set_vm_flush_reset_perms(st_map->image);
	bpf_map_init_from_attr(map, attr);

	return map;
}

BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
};

658 /* "const void *" because some subsystem is
659 * passing a const (e.g. const struct tcp_congestion_ops *)
661 bool bpf_struct_ops_get(const void *kdata)
663 struct bpf_struct_ops_value *kvalue;
665 kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
667 return refcount_inc_not_zero(&kvalue->refcnt);
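/* A subsystem pairs this with bpf_struct_ops_put().  For example, tcp's
 * bpf_try_module_get()/bpf_module_put() helpers route here when the
 * ops' owner is BPF_MODULE_OWNER rather than a real module.
 */
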
static void bpf_struct_ops_put_rcu(struct rcu_head *head)
{
	struct bpf_struct_ops_map *st_map;

	st_map = container_of(head, struct bpf_struct_ops_map, rcu);
	bpf_map_put(&st_map->map);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	if (refcount_dec_and_test(&kvalue->refcnt)) {
		struct bpf_struct_ops_map *st_map;

		st_map = container_of(kvalue, struct bpf_struct_ops_map,
				      kvalue);
		/* A struct_ops function may switch to another struct_ops.
		 *
		 * For example, bpf_tcp_cc_x->init() may switch to
		 * another tcp_cc_y by calling
		 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
		 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
		 * and its map->refcnt may reach 0, which would then free its
		 * trampoline image while tcp_cc_x is still running.
		 *
		 * Thus, an rcu grace period is needed here.
		 */
		call_rcu(&st_map->rcu, bpf_struct_ops_put_rcu);
	}
}