/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>
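
/* Run @prog against @ctx @repeat times under RCU with preemption disabled,
 * returning the program's last return value in *retval and the average
 * per-iteration runtime in nanoseconds in *time.
 */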
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	preempt_disable();
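
	/* Time the run loop as a whole; the clock is stopped and restarted
	 * around any reschedule so sleeping time is not billed to the program.
	 */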
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);
		*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();
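
	/* Report the mean runtime per iteration, clamped to the u32 that
	 * userspace receives as the duration.
	 */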
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
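
/* Copy the output packet, its size, the program's return value, and the
 * measured duration back out through the bpf_attr the syscall was given.
 */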
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
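
/* Allocate a kernel buffer with the requested head- and tailroom and fill
 * it with the test input packet supplied by userspace.
 */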
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}
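
/* Allocate and populate the optional user-provided context object
 * (e.g. struct __sk_buff). Any user bytes past max_size must be zero,
 * which bpf_check_uarg_tail_zero() enforces below.
 */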
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}
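
/* Copy the (possibly modified) context object back to userspace,
 * truncating with -ENOSPC if the user buffer is too small.
 */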
static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
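
/* Validate a user-supplied __sk_buff and apply it to the real skb.
 * Only priority and cb may be set; every other field must be zero.
 */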
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
			   FIELD_SIZEOF(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) +
			   FIELD_SIZEOF(struct __sk_buff, cb),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->priority = __skb->priority;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	return 0;
}
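
/* Mirror the writable fields back into the __sk_buff so userspace can
 * observe what the program changed.
 */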
static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->priority = skb->priority;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
}
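
/* BPF_PROG_TEST_RUN entry point for skb-based program types: builds a
 * real skb around the test data, runs the program, and copies the
 * resulting packet and context back to userspace.
 */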
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}
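
	/* SCHED_CLS/SCHED_ACT programs see a full L2 frame and get direct
	 * packet access; the LWT types get direct packet access only.
	 */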
	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}
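
/* BPF_PROG_TEST_RUN entry point for XDP programs: runs the program on a
 * fabricated xdp_buff backed by the loopback device's first RX queue.
 */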
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);
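
	/* Lay out the buffer the way a driver would: reserved headroom for
	 * bpf_xdp_adjust_head(), then the frame itself, with an initially
	 * empty metadata area (data_meta == data).
	 */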
	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	kfree(data);
	return ret;
}
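
/* BPF_PROG_TEST_RUN entry point for flow dissector programs: dissects the
 * test packet repeatedly with its own open-coded timing loop, since
 * bpf_flow_dissect() takes proto/nhoff arguments that the generic
 * bpf_test_run() helper cannot supply, and returns the resulting flow keys.
 */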
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);

out:
	kfree(data);
	return ret;
}