// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/page_pool.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <linux/netfilter.h>
#include <net/xdp.h>
#include <net/netfilter/nf_bpf_link.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();

	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
				    u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i += iterations;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

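/* The runners below all use these helpers in the same pattern:
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		<run the program once, or once per batch>
 *	} while (bpf_test_timer_continue(&t, iters, repeat, &ret, &duration));
 *	bpf_test_timer_leave(&t);
 *
 * bpf_test_timer_continue() accumulates the elapsed time, aborts on a
 * pending signal, reschedules between runs when needed, and once 'repeat'
 * iterations are done reports the mean per-iteration time via 'duration'.
 */
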
/* We put this struct at the head of each page with a context and frame
 * initialised when the page is allocated, so we don't have to do this on each
 * repetition of the test run.
 */
struct xdp_page_head {
	struct xdp_buff orig_ctx;
	struct xdp_buff ctx;
	union {
		/* ::data_hard_start starts here */
		DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
		DECLARE_FLEX_ARRAY(u8, data);
	};
};

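/* Resulting layout of each test page (the union overlays the xdp_frame
 * with the start of the packet data area):
 *
 *	+------------------------+ <- page start
 *	| orig_ctx (xdp_buff)    |  pristine copy, used by reset_ctx()
 *	| ctx (xdp_buff)         |  live context passed to the program
 *	+------------------------+ <- ctx.data_hard_start
 *	| frame / data           |  TEST_XDP_FRAME_SIZE bytes
 *	+------------------------+ <- page end
 */
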
struct xdp_test_data {
	struct xdp_buff *orig_ctx;
	struct xdp_rxq_info rxq;
	struct net_device *dev;
	struct page_pool *pp;
	struct xdp_frame **frames;
	struct sk_buff **skbs;
	struct xdp_mem_info mem;
	u32 batch_size;
	u32 frame_cnt;
};

/* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
 * must be updated accordingly if this gets changed, otherwise BPF selftests
 * will fail.
 */
#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256

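/* I.e. every test page carries exactly one frame: whatever is left of the
 * page after the two xdp_buff contexts above is headroom, packet data and
 * tailroom for that frame.
 */
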
static void xdp_test_run_init_page(struct page *page, void *arg)
{
	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
	struct xdp_buff *new_ctx, *orig_ctx;
	u32 headroom = XDP_PACKET_HEADROOM;
	struct xdp_test_data *xdp = arg;
	size_t frm_len, meta_len;
	struct xdp_frame *frm;
	void *data;

	orig_ctx = xdp->orig_ctx;
	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
	meta_len = orig_ctx->data - orig_ctx->data_meta;
	headroom -= meta_len;

	new_ctx = &head->ctx;
	frm = head->frame;
	data = head->data;
	memcpy(data + headroom, orig_ctx->data_meta, frm_len);

	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
	new_ctx->data = new_ctx->data_meta + meta_len;

	xdp_update_frame_from_buff(new_ctx, frm);
	frm->mem = new_ctx->rxq->mem;

	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}

static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
	struct page_pool *pp;
	int err = -ENOMEM;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = xdp->batch_size,
		.nid = NUMA_NO_NODE,
		.init_callback = xdp_test_run_init_page,
		.init_arg = xdp,
	};

	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->frames)
		return -ENOMEM;

	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->skbs)
		goto err_skbs;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp)) {
		err = PTR_ERR(pp);
		goto err_pp;
	}

	/* will copy 'mem.id' into pp->xdp_mem_id */
	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_mmodel;

	xdp->pp = pp;

	/* We create a 'fake' RXQ referencing the original dev, but with an
	 * xdp_mem_info pointing to our page_pool
	 */
	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
	xdp->rxq.mem.id = pp->xdp_mem_id;
	xdp->dev = orig_ctx->rxq->dev;
	xdp->orig_ctx = orig_ctx;

	return 0;

err_mmodel:
	page_pool_destroy(pp);
err_pp:
	kvfree(xdp->skbs);
err_skbs:
	kvfree(xdp->frames);
	return err;
}

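/* The page_pool's init_callback is the trick that makes repeated runs
 * cheap: each page is populated with a ready-made context and frame once,
 * when the pool first initialises it, so per-iteration work is reduced to
 * reset_ctx() instead of rebuilding the xdp_buff from scratch.
 */
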
static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
	xdp_unreg_mem_model(&xdp->mem);
	page_pool_destroy(xdp->pp);
	kvfree(xdp->frames);
	kvfree(xdp->skbs);
}

static bool frame_was_changed(const struct xdp_page_head *head)
{
	/* xdp_scrub_frame() zeroes the data pointer, flags is the last field,
	 * i.e. has the highest chances to be overwritten. If those two are
	 * untouched, it's most likely safe to skip the context reset.
	 */
	return head->frame->data != head->orig_ctx.data ||
	       head->frame->flags != head->orig_ctx.flags;
}

static bool ctx_was_changed(struct xdp_page_head *head)
{
	return head->orig_ctx.data != head->ctx.data ||
	       head->orig_ctx.data_meta != head->ctx.data_meta ||
	       head->orig_ctx.data_end != head->ctx.data_end;
}

static void reset_ctx(struct xdp_page_head *head)
{
	if (likely(!frame_was_changed(head) && !ctx_was_changed(head)))
		return;

	head->ctx.data = head->orig_ctx.data;
	head->ctx.data_meta = head->orig_ctx.data_meta;
	head->ctx.data_end = head->orig_ctx.data_end;
	xdp_update_frame_from_buff(&head->ctx, head->frame);
}

static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
			   struct sk_buff **skbs,
			   struct net_device *dev)
{
	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
	int i, n;
	LIST_HEAD(list);

	n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
	if (unlikely(n == 0)) {
		for (i = 0; i < nframes; i++)
			xdp_return_frame(frames[i]);
		return -ENOMEM;
	}

	for (i = 0; i < nframes; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
		if (!skb) {
			xdp_return_frame(xdpf);
			continue;
		}

		list_add_tail(&skb->list, &list);
	}
	netif_receive_skb_list(&list);

	return 0;
}

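/* kmem_cache_alloc_bulk() is all-or-nothing: a return of 0 means no skbs
 * were allocated, so every pending frame is returned to the pool above.
 * On success the skbs are only shells; __xdp_build_skb_from_frame() fills
 * each one from its xdp_frame before the whole batch is handed to the
 * stack in one netif_receive_skb_list() call.
 */
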
static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
			      u32 repeat)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	int err = 0, act, ret, i, nframes = 0, batch_sz;
	struct xdp_frame **frames = xdp->frames;
	struct xdp_page_head *head;
	struct xdp_frame *frm;
	bool redirect = false;
	struct xdp_buff *ctx;
	struct page *page;

	batch_sz = min_t(u32, repeat, xdp->batch_size);

	local_bh_disable();
	xdp_set_return_frame_no_direct();

	for (i = 0; i < batch_sz; i++) {
		page = page_pool_dev_alloc_pages(xdp->pp);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}

		head = phys_to_virt(page_to_phys(page));
		reset_ctx(head);
		ctx = &head->ctx;
		frm = head->frame;
		xdp->frame_cnt++;

		act = bpf_prog_run_xdp(prog, ctx);

		/* if program changed pkt bounds we need to update the xdp_frame */
		if (unlikely(ctx_was_changed(head))) {
			ret = xdp_update_frame_from_buff(ctx, frm);
			if (ret) {
				xdp_return_buff(ctx);
				continue;
			}
		}

		switch (act) {
		case XDP_TX:
			/* we can't do a real XDP_TX since we're not in the
			 * driver, so turn it into a REDIRECT back to the same
			 * index
			 */
			ri->tgt_index = xdp->dev->ifindex;
			ri->map_id = INT_MAX;
			ri->map_type = BPF_MAP_TYPE_UNSPEC;
			fallthrough;
		case XDP_REDIRECT:
			redirect = true;
			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
			if (ret)
				xdp_return_buff(ctx);
			break;
		case XDP_PASS:
			frames[nframes++] = frm;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_buff(ctx);
			break;
		}
	}

out:
	if (redirect)
		xdp_do_flush();
	if (nframes) {
		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
		if (ret)
			err = ret;
	}

	xdp_clear_return_frame_no_direct();
	local_bh_enable();
	return err;
}

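/* Note the xdp_set_return_frame_no_direct() bracket around the batch:
 * frames freed while it is set take the page_pool's non-direct return
 * path, since this code runs outside the pool's own NAPI context where
 * direct recycling would be unsafe.
 */
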
static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
				 u32 repeat, u32 batch_size, u32 *time)

{
	struct xdp_test_data xdp = { .batch_size = batch_size };
	struct bpf_test_timer t = { .mode = NO_MIGRATE };
	int ret;

	if (!repeat)
		repeat = 1;

	ret = xdp_test_run_setup(&xdp, ctx);
	if (ret)
		return ret;

	bpf_test_timer_enter(&t);
	do {
		xdp.frame_cnt = 0;
		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
		if (unlikely(ret < 0))
			break;
	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	xdp_test_run_teardown(&xdp);
	return ret;
}

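/* Illustrative userspace counterpart (libbpf; a minimal sketch with error
 * handling elided, 'prog_fd' and the packet buffer 'pkt' are assumed to
 * exist):
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.data_in = pkt,
 *		.data_size_in = sizeof(pkt),
 *		.repeat = 1000000,
 *		.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
 *		.batch_size = 64,
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *
 * In live-frames mode XDP_TX/XDP_REDIRECT really transmit, so data_out and
 * ctx_out are rejected and only the measured duration is reported back.
 */
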
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		local_bh_disable();
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
		local_bh_enable();
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   struct skb_shared_info *sinfo, u32 size,
			   u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}

	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

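/* Note the -ENOSPC convention: a user-supplied data_size_out smaller than
 * the produced output does not abort the run. The copy is truncated to the
 * hint, the real size is still written back, and -ENOSPC tells the caller
 * that truncation happened.
 */
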
/* Integer types of various sizes and pointer combinations cover variety of
 * architecture dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");
__bpf_kfunc int bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

__bpf_kfunc u32 bpf_fentry_test9(u32 *a)
{
	return *a;
}

void noinline bpf_fentry_test_sinfo(struct skb_shared_info *sinfo)
{
}

__bpf_kfunc int bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

__bpf_kfunc int bpf_modify_return_test2(int a, int *b, short c, int d,
					void *e, char f, int g)
{
	*b += 1;
	return a + *b + c + d + (long)e + f + g;
}

int noinline bpf_fentry_shadow_test(int a)
{
	return a + 1;
}

struct prog_test_member1 {
	int a;
};

struct prog_test_member {
	struct prog_test_member1 m;
	int c;
};

struct prog_test_ref_kfunc {
	int a;
	int b;
	struct prog_test_member memb;
	struct prog_test_ref_kfunc *next;
	refcount_t cnt;
};

__bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
	refcount_dec(&p->cnt);
}

__bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}

__diag_pop();

BTF_SET8_START(bpf_test_modify_return_ids)
BTF_ID_FLAGS(func, bpf_modify_return_test)
BTF_ID_FLAGS(func, bpf_modify_return_test2)
BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
BTF_SET8_END(bpf_test_modify_return_ids)

static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_test_modify_return_ids,
};

BTF_SET8_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
BTF_SET8_END(test_sk_check_kfunc_ids)

static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
			   u32 size, u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	size = SKB_DATA_ALIGN(size);
	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

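/* The returned buffer is laid out as | headroom | data | tailroom |, with
 * only 'user_size' bytes copied in from userspace. The data area is
 * SKB_DATA_ALIGN()ed so the very same allocation can later back either an
 * skb or an xdp_buff without reallocating.
 */
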
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0 ||
		    bpf_fentry_test9(&retval) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect++;
		b = 2;
		ret += bpf_modify_return_test2(1, &b, 3, 4, (void *)5, 6, 7);
		if (b != 2)
			side_effect++;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;
	info.retval = -1;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}

static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from,to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

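/* The converters below use this to whitelist context fields. The pattern
 *
 *	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
 *			   offsetof(struct __sk_buff, priority)))
 *		return -EINVAL;
 *
 * enforces that everything between the end of one supported field and the
 * start of the next is zero; the "X is allowed" comments mark the fields
 * that are deliberately skipped over and copied into the real skb.
 */
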
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

static struct proto bpf_dummy_proto = {
	.name   = "bpf_dummy",
	.owner  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = slab_build_skb(data);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

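/* The dummy socket attached above exists so that programs dereferencing
 * skb->sk see a minimally initialised socket, with sk_family and the
 * receive/destination addresses mirroring the test packet's IP header,
 * instead of a NULL pointer; it is allocated and freed on every run.
 */
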
static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 batch_size = kattr->test.batch_size;
	u32 retval = 0, duration, max_data_sz;
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct skb_shared_info *sinfo;
	struct xdp_buff xdp = {};
	int i, ret = -EINVAL;
	struct xdp_md *ctx;
	void *data;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
		return -EINVAL;

	if (bpf_prog_is_dev_bound(prog->aux))
		return -EINVAL;

	if (do_live) {
		if (!batch_size)
			batch_size = NAPI_POLL_WEIGHT;
		else if (batch_size > TEST_XDP_MAX_BATCH)
			return -E2BIG;

		headroom += sizeof(struct xdp_page_head);
	} else if (batch_size) {
		return -EINVAL;
	}

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = 4096 - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
			goto free_ctx;
		size = max_data_sz;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	if (unlikely(kattr->test.data_size_in > size)) {
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

		while (size < kattr->test.data_size_in) {
			struct page *page;
			skb_frag_t *frag;
			u32 data_len;

			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
				ret = -ENOMEM;
				goto out;
			}

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto out;
			}

			frag = &sinfo->frags[sinfo->nr_frags++];

			data_len = min_t(u32, kattr->test.data_size_in - size,
					 PAGE_SIZE);
			skb_frag_fill_page_desc(frag, page, 0, data_len);

			if (copy_from_user(page_address(page), data_in + size,
					   data_len)) {
				ret = -EFAULT;
				goto out;
			}
			sinfo->xdp_frags_size += data_len;
			size += data_len;
		}
		xdp_buff_set_frags_flag(&xdp);
	}

	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);

	if (do_live)
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (ctx && !ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

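/* The frag loop above is what makes multi-buffer XDP testable: input
 * beyond the linear area is copied into freshly allocated pages attached
 * as shared-info frags, and xdp_buff_set_frags_flag() marks the buff so
 * the program sees a genuine multi-buffer packet.
 */
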
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
			      sizeof(flow_keys), retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, or repeat or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags ||
	    kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}

static int verify_and_copy_hook_state(struct nf_hook_state *state,
				      const struct nf_hook_state *user,
				      struct net_device *dev)
{
	if (user->in || user->out)
		return -EINVAL;

	if (user->net || user->sk || user->okfn)
		return -EINVAL;

	switch (user->pf) {
	case NFPROTO_IPV4:
	case NFPROTO_IPV6:
		switch (state->hook) {
		case NF_INET_PRE_ROUTING:
			state->in = dev;
			break;
		case NF_INET_LOCAL_IN:
			state->in = dev;
			break;
		case NF_INET_FORWARD:
			state->in = dev;
			state->out = dev;
			break;
		case NF_INET_LOCAL_OUT:
			state->out = dev;
			break;
		case NF_INET_POST_ROUTING:
			state->out = dev;
			break;
		}

		break;
	default:
		return -EINVAL;
	}

	state->pf = user->pf;
	state->hook = user->hook;

	return 0;
}

static __be16 nfproto_eth(int nfproto)
{
	switch (nfproto) {
	case NFPROTO_IPV4:
		return htons(ETH_P_IP);
	case NFPROTO_IPV6:
		break;
	}

	return htons(ETH_P_IPV6);
}

int bpf_prog_test_run_nf(struct bpf_prog *prog,
			 const union bpf_attr *kattr,
			 union bpf_attr __user *uattr)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	struct nf_hook_state *user_ctx, hook_state = {
		.pf = NFPROTO_IPV4,
		.hook = NF_INET_LOCAL_OUT,
	};
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct bpf_nf_ctx ctx = {
		.state = &hook_state,
	};
	struct sk_buff *skb = NULL;
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < sizeof(struct iphdr))
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size,
			     NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct nf_hook_state));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}

	if (user_ctx) {
		ret = verify_and_copy_hook_state(&hook_state, user_ctx, dev);
		if (ret)
			goto out;
	}

	skb = slab_build_skb(data);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	data = NULL; /* data released via kfree_skb */

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);

	ret = -EINVAL;

	if (hook_state.hook != NF_INET_LOCAL_OUT) {
		if (size < ETH_HLEN + sizeof(struct iphdr))
			goto out;

		skb->protocol = eth_type_trans(skb, dev);
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			if (hook_state.pf == NFPROTO_IPV4)
				break;
			goto out;
		case htons(ETH_P_IPV6):
			if (size < ETH_HLEN + sizeof(struct ipv6hdr))
				goto out;
			if (hook_state.pf == NFPROTO_IPV6)
				break;
			goto out;
		default:
			ret = -EPROTO;
			goto out;
		}

		skb_reset_network_header(skb);
	} else {
		skb->protocol = nfproto_eth(hook_state.pf);
	}

	ctx.skb = skb;

	ret = bpf_test_run(prog, &ctx, repeat, &retval, &duration, false);
	if (ret)
		goto out;

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);

out:
	kfree(user_ctx);
	kfree_skb(skb);
	kfree(data);
	return ret;
}

static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &test_sk_check_kfunc_ids,
};

BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
BTF_ID(struct, prog_test_ref_kfunc)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(struct, prog_test_member)
BTF_ID(func, bpf_kfunc_call_memb_release)

static int __init bpf_prog_test_run_init(void)
{
	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
		{
			.btf_id       = bpf_prog_test_dtor_kfunc_ids[0],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
		},
		{
			.btf_id       = bpf_prog_test_dtor_kfunc_ids[2],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
		},
	};
	int ret;

	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
						  THIS_MODULE);
}
late_initcall(bpf_prog_test_run_init);