struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
+int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb);
int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp);
void bpf_warn_invalid_xdp_action(u32 act);
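/* Illustrative sketch, not part of this patch: a minimal XDP program that
 * exercises the redirect entry points above. bpf_redirect() records the
 * target ifindex in the per-CPU redirect_info and returns XDP_REDIRECT;
 * the kernel then completes the forward via xdp_do_redirect() on the
 * native path or xdp_do_generic_redirect() on the skb/generic path.
 * The program name and target ifindex (3) are arbitrary placeholders.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_redirect_example(struct xdp_md *ctx)
{
	return bpf_redirect(3 /* placeholder target ifindex */, 0);
}

char _license[] SEC("license") = "GPL";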
__skb_push(skb, -off);
switch (act) {
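+ /* Like XDP_TX, XDP_REDIRECT hands the frame back to a device, so
+ * skb->data is pushed back to the MAC header (the receive path had
+ * already advanced it past the link-layer header) before the skb
+ * leaves this function.
+ */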
+ case XDP_REDIRECT:
case XDP_TX:
__skb_push(skb, mac_len);
/* fall through */
if (xdp_prog) {
u32 act = netif_receive_generic_xdp(skb, xdp_prog);
+ int err;
if (act != XDP_PASS) {
- if (act == XDP_TX)
+ switch (act) {
+ case XDP_REDIRECT:
+ err = xdp_do_generic_redirect(skb->dev, skb);
+ if (err)
+ goto out_redir;
+ /* fallthru to submit skb */
+ case XDP_TX:
generic_xdp_tx(skb, xdp_prog);
+ break;
+ }
return XDP_DROP;
}
}
return XDP_PASS;
+out_redir:
+ trace_xdp_exception(skb->dev, xdp_prog, XDP_REDIRECT);
+ kfree_skb(skb);
+ return XDP_DROP;
}
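/* Illustrative sketch, not part of this patch: attaching an XDP program in
 * generic (skb) mode so that do_xdp_generic() above runs it on received
 * packets. Uses libbpf's bpf_set_link_xdp_fd(); ifindex and prog_fd are
 * placeholders supplied by the caller.
 */
#include <linux/if_link.h>
#include <bpf/libbpf.h>

static int attach_generic_xdp(int ifindex, int prog_fd)
{
	/* XDP_FLAGS_SKB_MODE selects the generic hook in the core receive
	 * path instead of a native driver hook.
	 */
	return bpf_set_link_xdp_fd(ifindex, prog_fd, XDP_FLAGS_SKB_MODE);
}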
static int netif_rx_internal(struct sk_buff *skb)
if (static_key_false(&generic_xdp_needed)) {
int ret = do_xdp_generic(skb);
+ /* Consider XDP consuming the packet a success from
+ * the netdev point of view; we do not want to count
+ * this as an error.
+ */
if (ret != XDP_PASS)
- return NET_RX_DROP;
+ return NET_RX_SUCCESS;
}
#ifdef CONFIG_RPS
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);
+int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb)
+{
+ struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ u32 index = ri->ifindex;
+ unsigned int len;
+
+ dev = dev_get_by_index_rcu(dev_net(dev), index);
+ ri->ifindex = 0;
+ if (unlikely(!dev)) {
+ bpf_warn_invalid_xdp_redirect(index);
+ goto err;
+ }
+
+ if (unlikely(!(dev->flags & IFF_UP)))
+ goto err;
+
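+ /* Reject frames that would not fit on the target device: its MTU plus
+ * the link-layer header plus room for one VLAN tag (for Ethernet,
+ * 1500 + 14 + 4 = 1518 bytes).
+ */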
+ len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
+ if (skb->len > len)
+ goto err;
+
+ skb->dev = dev;
+ return 0;
+err:
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
+
BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
struct redirect_info *ri = this_cpu_ptr(&redirect_info);