bpf: Support specifying ingress via xdp_md context in BPF_PROG_TEST_RUN
author Zvi Effron <zeffron@riotgames.com>
Wed, 7 Jul 2021 22:16:56 +0000 (22:16 +0000)
committer Alexei Starovoitov <ast@kernel.org>
Thu, 8 Jul 2021 02:51:13 +0000 (19:51 -0700)
Support specifying the ingress_ifindex and rx_queue_index of xdp_md
contexts for BPF_PROG_TEST_RUN.

The intended use case is to allow testing XDP programs that make decisions
based on the ingress interface or RX queue.
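
For illustration only (not part of this patch), an XDP program of the
kind this makes testable might look like the sketch below; the expected
ifindex (7) and queue (0) are placeholder values:

/* Hypothetical example: pass traffic only if it arrived on a specific
 * interface and RX queue, which BPF_PROG_TEST_RUN can now exercise by
 * setting ingress_ifindex and rx_queue_index in the xdp_md context.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_check_ingress(struct xdp_md *ctx)
{
	/* 7 and 0 are placeholders for the expected ingress device
	 * and RX queue.
	 */
	if (ctx->ingress_ifindex != 7 || ctx->rx_queue_index != 0)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";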

If ingress_ifindex is specified, look up the device by the provided index
in the current net namespace and use its xdp_rxq for the xdp_buff. If the
rx_queue_index is out of range, refers to a queue whose xdp_rxq_info is
not registered, or is non-zero when the ingress_ifindex is 0, return
-EINVAL.
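
From userspace, the new fields could be driven roughly as in the sketch
below via libbpf's bpf_prog_test_run_opts(); the helper name, prog_fd,
ifindex and packet size are illustrative assumptions, not part of this
patch:

/* Hypothetical usage sketch. prog_fd is the fd of a loaded XDP program
 * and ifindex names a device in the caller's net namespace (e.g. from
 * if_nametoindex()).
 */
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int test_run_on_ifindex(int prog_fd, __u32 ifindex)
{
	char pkt[64] = {};			/* dummy packet payload */
	struct xdp_md ctx = {
		.data_end = sizeof(pkt),	/* expected to match data_size_in */
		.ingress_ifindex = ifindex,	/* looked up in current netns */
		.rx_queue_index = 0,		/* must be < real_num_rx_queues */
	};
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = pkt,
		.data_size_in = sizeof(pkt),
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
	);

	if (bpf_prog_test_run_opts(prog_fd, &opts))
		return -1;

	return opts.retval;			/* XDP verdict, e.g. XDP_PASS */
}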

Co-developed-by: Cody Haas <chaas@riotgames.com>
Co-developed-by: Lisa Watanabe <lwatanabe@riotgames.com>
Signed-off-by: Cody Haas <chaas@riotgames.com>
Signed-off-by: Lisa Watanabe <lwatanabe@riotgames.com>
Signed-off-by: Zvi Effron <zeffron@riotgames.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20210707221657.3985075-4-zeffron@riotgames.com
net/bpf/test_run.c

index 229c5de..cda8375 100644
@@ -690,18 +690,60 @@ out:
 
 static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
 {
+       unsigned int ingress_ifindex, rx_queue_index;
+       struct netdev_rx_queue *rxqueue;
+       struct net_device *device;
+
        if (!xdp_md)
                return 0;
 
        if (xdp_md->egress_ifindex != 0)
                return -EINVAL;
 
-       if (xdp_md->ingress_ifindex != 0 || xdp_md->rx_queue_index != 0)
+       ingress_ifindex = xdp_md->ingress_ifindex;
+       rx_queue_index = xdp_md->rx_queue_index;
+
+       if (!ingress_ifindex && rx_queue_index)
                return -EINVAL;
 
-       xdp->data = xdp->data_meta + xdp_md->data;
+       if (ingress_ifindex) {
+               device = dev_get_by_index(current->nsproxy->net_ns,
+                                         ingress_ifindex);
+               if (!device)
+                       return -ENODEV;
+
+               if (rx_queue_index >= device->real_num_rx_queues)
+                       goto free_dev;
+
+               rxqueue = __netif_get_rx_queue(device, rx_queue_index);
 
+               if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
+                       goto free_dev;
+
+               xdp->rxq = &rxqueue->xdp_rxq;
+               /* The device is now tracked in the xdp->rxq for later
+                * dev_put()
+                */
+       }
+
+       xdp->data = xdp->data_meta + xdp_md->data;
        return 0;
+
+free_dev:
+       dev_put(device);
+       return -EINVAL;
+}
+
+static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
+{
+       if (!xdp_md)
+               return;
+
+       xdp_md->data = xdp->data - xdp->data_meta;
+       xdp_md->data_end = xdp->data_end - xdp->data_meta;
+
+       if (xdp_md->ingress_ifindex)
+               dev_put(xdp->rxq->dev);
 }
 
 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
@@ -753,6 +795,11 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 
        bpf_prog_change_xdp(NULL, prog);
        ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
+       /* We convert the xdp_buff back to an xdp_md before checking the return
+        * code so the reference count of any held netdevice will be decremented
+        * even if the test run failed.
+        */
+       xdp_convert_buff_to_md(&xdp, ctx);
        if (ret)
                goto out;
 
@@ -760,11 +807,6 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
            xdp.data_end != xdp.data_meta + size)
                size = xdp.data_end - xdp.data_meta;
 
-       if (ctx) {
-               ctx->data = xdp.data - xdp.data_meta;
-               ctx->data_end = xdp.data_end - xdp.data_meta;
-       }
-
        ret = bpf_test_finish(kattr, uattr, xdp.data_meta, size, retval,
                              duration);
        if (!ret)