net: core: Split out code to run generic XDP prog
authorKumar Kartikeya Dwivedi <memxor@gmail.com>
Fri, 2 Jul 2021 11:18:21 +0000 (16:48 +0530)
committerAlexei Starovoitov <ast@kernel.org>
Thu, 8 Jul 2021 03:01:45 +0000 (20:01 -0700)
This helper can later be utilized in code that runs cpumap and devmap
programs in generic redirect mode and adjust skb based on changes made
to xdp_buff.

When returning XDP_REDIRECT/XDP_TX, the helper invokes __skb_push, so
whenever a generic redirect path invokes a devmap/cpumap prog (if one is
set), it must __skb_pull again afterwards, as callers expect the mac
header to be pulled.

It also drops the skb_reset_mac_len call after do_xdp_generic, as the
mac_header and network_header are advanced by the same offset, so the
difference (mac_len) remains constant.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20210702111825.491065-2-memxor@gmail.com
include/linux/netdevice.h
net/core/dev.c

index eaf5bb0..42f6f86 100644 (file)
@@ -3984,6 +3984,8 @@ static inline void dev_consume_skb_any(struct sk_buff *skb)
        __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
 }
 
+u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
+                            struct bpf_prog *xdp_prog);
 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
 int netif_rx(struct sk_buff *skb);
index c253c2a..93e80c3 100644 (file)
@@ -4744,45 +4744,18 @@ static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
        return rxqueue;
 }
 
-static u32 netif_receive_generic_xdp(struct sk_buff *skb,
-                                    struct xdp_buff *xdp,
-                                    struct bpf_prog *xdp_prog)
+u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
+                            struct bpf_prog *xdp_prog)
 {
        void *orig_data, *orig_data_end, *hard_start;
        struct netdev_rx_queue *rxqueue;
-       u32 metalen, act = XDP_DROP;
        bool orig_bcast, orig_host;
        u32 mac_len, frame_sz;
        __be16 orig_eth_type;
        struct ethhdr *eth;
+       u32 metalen, act;
        int off;
 
-       /* Reinjected packets coming from act_mirred or similar should
-        * not get XDP generic processing.
-        */
-       if (skb_is_redirected(skb))
-               return XDP_PASS;
-
-       /* XDP packets must be linear and must have sufficient headroom
-        * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
-        * native XDP provides, thus we need to do it here as well.
-        */
-       if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
-           skb_headroom(skb) < XDP_PACKET_HEADROOM) {
-               int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
-               int troom = skb->tail + skb->data_len - skb->end;
-
-               /* In case we have to go down the path and also linearize,
-                * then lets do the pskb_expand_head() work just once here.
-                */
-               if (pskb_expand_head(skb,
-                                    hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
-                                    troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
-                       goto do_drop;
-               if (skb_linearize(skb))
-                       goto do_drop;
-       }
-
        /* The XDP program wants to see the packet starting at the MAC
         * header.
         */
@@ -4837,6 +4810,13 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
                skb->protocol = eth_type_trans(skb, skb->dev);
        }
 
+       /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
+        * before calling us again on redirect path. We do not call do_redirect
+        * as we leave that up to the caller.
+        *
+        * Caller is responsible for managing lifetime of skb (i.e. calling
+        * kfree_skb in response to actions it cannot handle/XDP_DROP).
+        */
        switch (act) {
        case XDP_REDIRECT:
        case XDP_TX:
@@ -4847,6 +4827,49 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
                if (metalen)
                        skb_metadata_set(skb, metalen);
                break;
+       }
+
+       return act;
+}
+
+static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+                                    struct xdp_buff *xdp,
+                                    struct bpf_prog *xdp_prog)
+{
+       u32 act = XDP_DROP;
+
+       /* Reinjected packets coming from act_mirred or similar should
+        * not get XDP generic processing.
+        */
+       if (skb_is_redirected(skb))
+               return XDP_PASS;
+
+       /* XDP packets must be linear and must have sufficient headroom
+        * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
+        * native XDP provides, thus we need to do it here as well.
+        */
+       if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
+           skb_headroom(skb) < XDP_PACKET_HEADROOM) {
+               int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
+               int troom = skb->tail + skb->data_len - skb->end;
+
+               /* In case we have to go down the path and also linearize,
+                * then lets do the pskb_expand_head() work just once here.
+                */
+               if (pskb_expand_head(skb,
+                                    hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
+                                    troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
+                       goto do_drop;
+               if (skb_linearize(skb))
+                       goto do_drop;
+       }
+
+       act = bpf_prog_run_generic_xdp(skb, xdp, xdp_prog);
+       switch (act) {
+       case XDP_REDIRECT:
+       case XDP_TX:
+       case XDP_PASS:
+               break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
@@ -5312,7 +5335,6 @@ another_round:
                        ret = NET_RX_DROP;
                        goto out;
                }
-               skb_reset_mac_len(skb);
        }
 
        if (eth_type_vlan(skb->protocol)) {