Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3ba5f48..365953e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -317,6 +317,19 @@ bool bond_sk_check(struct bonding *bond)
        }
 }
 
+static bool bond_xdp_check(struct bonding *bond)
+{
+       switch (BOND_MODE(bond)) {
+       case BOND_MODE_ROUNDROBIN:
+       case BOND_MODE_ACTIVEBACKUP:
+       case BOND_MODE_8023AD:
+       case BOND_MODE_XOR:
+               return true;
+       default:
+               return false;
+       }
+}
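
Modes absent from this switch (broadcast, balance-tlb, balance-alb) keep
returning false here; bond_xdp_set() further down relies on this check and
rejects XDP attachment on those modes with -EOPNOTSUPP.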
+
 /*---------------------------------- VLAN -----------------------------------*/
 
 /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
@@ -2133,6 +2146,41 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
                bond_update_slave_arr(bond, NULL);
 
 
+       if (!slave_dev->netdev_ops->ndo_bpf ||
+           !slave_dev->netdev_ops->ndo_xdp_xmit) {
+               if (bond->xdp_prog) {
+                       NL_SET_ERR_MSG(extack, "Slave does not support XDP");
+                       slave_err(bond_dev, slave_dev, "Slave does not support XDP\n");
+                       res = -EOPNOTSUPP;
+                       goto err_sysfs_del;
+               }
+       } else {
+               struct netdev_bpf xdp = {
+                       .command = XDP_SETUP_PROG,
+                       .flags   = 0,
+                       .prog    = bond->xdp_prog,
+                       .extack  = extack,
+               };
+
+               if (dev_xdp_prog_count(slave_dev) > 0) {
+                       NL_SET_ERR_MSG(extack,
+                                      "Slave has XDP program loaded, please unload before enslaving");
+                       slave_err(bond_dev, slave_dev,
+                                 "Slave has XDP program loaded, please unload before enslaving\n");
+                       res = -EOPNOTSUPP;
+                       goto err_sysfs_del;
+               }
+
+               res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+               if (res < 0) {
+                       /* ndo_bpf() sets extack error message */
+                       slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
+                       goto err_sysfs_del;
+               }
+               if (bond->xdp_prog)
+                       bpf_prog_inc(bond->xdp_prog);
+       }
+
        slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
                   bond_is_active_slave(new_slave) ? "an active" : "a backup",
                   new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
@@ -2252,6 +2300,17 @@ static int __bond_release_one(struct net_device *bond_dev,
        /* recompute stats just before removing the slave */
        bond_get_stats(bond->dev, &bond->bond_stats);
 
+       if (bond->xdp_prog) {
+               struct netdev_bpf xdp = {
+                       .command = XDP_SETUP_PROG,
+                       .flags   = 0,
+                       .prog    = NULL,
+                       .extack  = NULL,
+               };
+               if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
+                       slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
+       }
+
        /* unregister rx_handler early so bond_handle_frame wouldn't be called
         * for this slave anymore.
         */
@@ -3614,55 +3673,80 @@ static struct notifier_block bond_netdev_notifier = {
 
 /*---------------------------- Hashing Policies -----------------------------*/
 
+/* Helper to access data in a packet, with or without a backing skb.
+ * If an skb is given, the data is linearized if necessary via
+ * pskb_may_pull().
+ */
+static inline const void *bond_pull_data(struct sk_buff *skb,
+                                        const void *data, int hlen, int n)
+{
+       if (likely(n <= hlen))
+               return data;
+       else if (skb && likely(pskb_may_pull(skb, n)))
+               return skb->data;
+
+       return NULL;
+}
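
A minimal caller sketch (illustrative, not part of the patch; the helper
name demo_ip_proto is hypothetical): parsing a header at a given offset
through bond_pull_data(), valid whether the bytes come from an skb or from
an already-linear xdp_buff.

	static int demo_ip_proto(struct sk_buff *skb, const void *data,
				 int hlen, int nhoff)
	{
		const struct iphdr *iph;

		/* Ensure nhoff + sizeof(*iph) bytes are linear. For an skb this
		 * may call pskb_may_pull() and reallocate the head, which is why
		 * the pointer must be re-read from the return value rather than
		 * reused.
		 */
		data = bond_pull_data(skb, data, hlen, nhoff + sizeof(*iph));
		if (!data)
			return -1;

		iph = (const struct iphdr *)(data + nhoff);
		return iph->protocol;
	}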
+
 /* L2 hash helper */
-static inline u32 bond_eth_hash(struct sk_buff *skb)
+static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
 {
-       struct ethhdr *ep, hdr_tmp;
+       struct ethhdr *ep;
 
-       ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
-       if (ep)
-               return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto;
-       return 0;
+       data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
+       if (!data)
+               return 0;
+
+       ep = (struct ethhdr *)(data + mhoff);
+       return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
 }
 
-static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk,
-                        int *noff, int *proto, bool l34)
+static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
+                        int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
 {
        const struct ipv6hdr *iph6;
        const struct iphdr *iph;
 
-       if (skb->protocol == htons(ETH_P_IP)) {
-               if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph))))
+       if (l2_proto == htons(ETH_P_IP)) {
+               data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph));
+               if (!data)
                        return false;
-               iph = (const struct iphdr *)(skb->data + *noff);
+
+               iph = (const struct iphdr *)(data + *nhoff);
                iph_to_flow_copy_v4addrs(fk, iph);
-               *noff += iph->ihl << 2;
+               *nhoff += iph->ihl << 2;
                if (!ip_is_fragment(iph))
-                       *proto = iph->protocol;
-       } else if (skb->protocol == htons(ETH_P_IPV6)) {
-               if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph6))))
+                       *ip_proto = iph->protocol;
+       } else if (l2_proto == htons(ETH_P_IPV6)) {
+               data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6));
+               if (!data)
                        return false;
-               iph6 = (const struct ipv6hdr *)(skb->data + *noff);
+
+               iph6 = (const struct ipv6hdr *)(data + *nhoff);
                iph_to_flow_copy_v6addrs(fk, iph6);
-               *noff += sizeof(*iph6);
-               *proto = iph6->nexthdr;
+               *nhoff += sizeof(*iph6);
+               *ip_proto = iph6->nexthdr;
        } else {
                return false;
        }
 
-       if (l34 && *proto >= 0)
-               fk->ports.ports = skb_flow_get_ports(skb, *noff, *proto);
+       if (l34 && *ip_proto >= 0)
+               fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
 
        return true;
 }
 
-static u32 bond_vlan_srcmac_hash(struct sk_buff *skb)
+static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
 {
-       struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
+       struct ethhdr *mac_hdr;
        u32 srcmac_vendor = 0, srcmac_dev = 0;
        u16 vlan;
        int i;
 
+       data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
+       if (!data)
+               return 0;
+       mac_hdr = (struct ethhdr *)(data + mhoff);
+
        for (i = 0; i < 3; i++)
                srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
 
@@ -3678,26 +3762,25 @@ static u32 bond_vlan_srcmac_hash(struct sk_buff *skb)
 }
 
 /* Extract the appropriate headers based on bond's xmit policy */
-static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
-                             struct flow_keys *fk)
+static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data,
+                             __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk)
 {
        bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
-       int noff, proto = -1;
+       int ip_proto = -1;
 
        switch (bond->params.xmit_policy) {
        case BOND_XMIT_POLICY_ENCAP23:
        case BOND_XMIT_POLICY_ENCAP34:
                memset(fk, 0, sizeof(*fk));
                return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
-                                         fk, NULL, 0, 0, 0, 0);
+                                         fk, data, l2_proto, nhoff, hlen, 0);
        default:
                break;
        }
 
        fk->ports.ports = 0;
        memset(&fk->icmp, 0, sizeof(fk->icmp));
-       noff = skb_network_offset(skb);
-       if (!bond_flow_ip(skb, fk, &noff, &proto, l34))
+       if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34))
                return false;
 
        /* ICMP error packets contain at least 8 bytes of the header
@@ -3705,22 +3788,20 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
         * to correlate ICMP error packets within the same flow which
         * generated the error.
         */
-       if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
-               skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data,
-                                     skb_transport_offset(skb),
-                                     skb_headlen(skb));
-               if (proto == IPPROTO_ICMP) {
+       if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) {
+               skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen);
+               if (ip_proto == IPPROTO_ICMP) {
                        if (!icmp_is_err(fk->icmp.type))
                                return true;
 
-                       noff += sizeof(struct icmphdr);
-               } else if (proto == IPPROTO_ICMPV6) {
+                       nhoff += sizeof(struct icmphdr);
+               } else if (ip_proto == IPPROTO_ICMPV6) {
                        if (!icmpv6_is_err(fk->icmp.type))
                                return true;
 
-                       noff += sizeof(struct icmp6hdr);
+                       nhoff += sizeof(struct icmp6hdr);
                }
-               return bond_flow_ip(skb, fk, &noff, &proto, l34);
+               return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34);
        }
 
        return true;
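
To make the correlation step concrete, this is the layout the branch above
walks for the IPv4 case (annotation, not patch code):

	/* [outer IPv4 hdr][ICMP error hdr][embedded IPv4 hdr][>= 8 bytes of L4]
	 *                 ^ nhoff after the first bond_flow_ip()
	 *                                 ^ nhoff after += sizeof(struct icmphdr)
	 *
	 * The second bond_flow_ip() call re-parses the embedded (original)
	 * header, so an ICMP error hashes to the same slave as the flow that
	 * triggered it.
	 */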
@@ -3736,33 +3817,26 @@ static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
        return hash >> 1;
 }
 
-/**
- * bond_xmit_hash - generate a hash value based on the xmit policy
- * @bond: bonding device
- * @skb: buffer to use for headers
- *
- * This function will extract the necessary headers from the skb buffer and use
- * them to generate a hash based on the xmit_policy set in the bonding device
+/* Generate a hash based on the xmit policy. If @skb is given, it is used to
+ * linearize the data as required, but the function can also be used without
+ * it if the data is known to be linear (e.g. with an xdp_buff).
  */
-u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
+static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data,
+                           __be16 l2_proto, int mhoff, int nhoff, int hlen)
 {
        struct flow_keys flow;
        u32 hash;
 
-       if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
-           skb->l4_hash)
-               return skb->hash;
-
        if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
-               return bond_vlan_srcmac_hash(skb);
+               return bond_vlan_srcmac_hash(skb, data, mhoff, hlen);
 
        if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
-           !bond_flow_dissect(bond, skb, &flow))
-               return bond_eth_hash(skb);
+           !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow))
+               return bond_eth_hash(skb, data, mhoff, hlen);
 
        if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
            bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
-               hash = bond_eth_hash(skb);
+               hash = bond_eth_hash(skb, data, mhoff, hlen);
        } else {
                if (flow.icmp.id)
                        memcpy(&hash, &flow.icmp, sizeof(hash));
@@ -3773,6 +3847,45 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
        return bond_ip_hash(hash, &flow);
 }
 
+/**
+ * bond_xmit_hash - generate a hash value based on the xmit policy
+ * @bond: bonding device
+ * @skb: buffer to use for headers
+ *
+ * This function will extract the necessary headers from the skb buffer and use
+ * them to generate a hash based on the xmit_policy set in the bonding device
+ */
+u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
+{
+       if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
+           skb->l4_hash)
+               return skb->hash;
+
+       return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
+                               skb_mac_offset(skb), skb_network_offset(skb),
+                               skb_headlen(skb));
+}
+
+/**
+ * bond_xmit_hash_xdp - generate a hash value based on the xmit policy
+ * @bond: bonding device
+ * @xdp: buffer to use for headers
+ *
+ * The XDP variant of bond_xmit_hash.
+ */
+static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp)
+{
+       struct ethhdr *eth;
+
+       if (xdp->data + sizeof(struct ethhdr) > xdp->data_end)
+               return 0;
+
+       eth = (struct ethhdr *)xdp->data;
+
+       return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0,
+                               sizeof(struct ethhdr), xdp->data_end - xdp->data);
+}
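
The argument mapping above is worth a note (annotation, not patch code): the
xdp_buff data used here is linear and xdp->data points at the Ethernet
header, so the call passes skb = NULL (bond_pull_data() therefore never needs
to pull), mhoff = 0, nhoff = sizeof(struct ethhdr), and
hlen = xdp->data_end - xdp->data, while l2_proto is read straight from the
frame since there is no VLAN-accelerated skb->protocol at this stage.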
+
 /*-------------------------- Device entry points ----------------------------*/
 
 void bond_work_init_all(struct bonding *bond)
@@ -4421,6 +4534,47 @@ non_igmp:
        return NULL;
 }
 
+static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond,
+                                                       struct xdp_buff *xdp)
+{
+       struct slave *slave;
+       int slave_cnt;
+       u32 slave_id;
+       const struct ethhdr *eth;
+       void *data = xdp->data;
+
+       if (data + sizeof(struct ethhdr) > xdp->data_end)
+               goto non_igmp;
+
+       eth = (struct ethhdr *)data;
+       data += sizeof(struct ethhdr);
+
+       /* See comment on IGMP in bond_xmit_roundrobin_slave_get(): IGMP
+        * reports must always use the active slave so that multicast
+        * group membership stays on one consistent interface across
+        * failover.
+        */
+       if (eth->h_proto == htons(ETH_P_IP)) {
+               const struct iphdr *iph;
+
+               if (data + sizeof(struct iphdr) > xdp->data_end)
+                       goto non_igmp;
+
+               iph = (struct iphdr *)data;
+
+               if (iph->protocol == IPPROTO_IGMP) {
+                       slave = rcu_dereference(bond->curr_active_slave);
+                       if (slave)
+                               return slave;
+                       return bond_get_slave_by_id(bond, 0);
+               }
+       }
+
+non_igmp:
+       slave_cnt = READ_ONCE(bond->slave_cnt);
+       if (likely(slave_cnt)) {
+               slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
+               return bond_get_slave_by_id(bond, slave_id);
+       }
+       return NULL;
+}
+
 static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
                                        struct net_device *bond_dev)
 {
@@ -4434,8 +4588,7 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
        return bond_tx_drop(bond_dev, skb);
 }
 
-static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond,
-                                                     struct sk_buff *skb)
+static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond)
 {
        return rcu_dereference(bond->curr_active_slave);
 }
@@ -4449,7 +4602,7 @@ static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave;
 
-       slave = bond_xmit_activebackup_slave_get(bond, skb);
+       slave = bond_xmit_activebackup_slave_get(bond);
        if (slave)
                return bond_dev_queue_xmit(bond, skb, slave->dev);
 
@@ -4637,6 +4790,22 @@ static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
        return slave;
 }
 
+static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
+                                                    struct xdp_buff *xdp)
+{
+       struct bond_up_slave *slaves;
+       unsigned int count;
+       u32 hash;
+
+       hash = bond_xmit_hash_xdp(bond, xdp);
+       slaves = rcu_dereference(bond->usable_slaves);
+       count = slaves ? READ_ONCE(slaves->count) : 0;
+       if (unlikely(!count))
+               return NULL;
+
+       return slaves->arr[hash % count];
+}
+
 /* Use this Xmit function for 3AD as well as XOR modes. The current
  * usable slave array is formed in the control path. The xmit function
  * just calculates hash and sends the packet out.
@@ -4747,7 +4916,7 @@ static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
                slave = bond_xmit_roundrobin_slave_get(bond, skb);
                break;
        case BOND_MODE_ACTIVEBACKUP:
-               slave = bond_xmit_activebackup_slave_get(bond, skb);
+               slave = bond_xmit_activebackup_slave_get(bond);
                break;
        case BOND_MODE_8023AD:
        case BOND_MODE_XOR:
@@ -4921,6 +5090,174 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return ret;
 }
 
+static struct net_device *
+bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
+{
+       struct bonding *bond = netdev_priv(bond_dev);
+       struct slave *slave;
+
+       /* Caller needs to hold rcu_read_lock() */
+
+       switch (BOND_MODE(bond)) {
+       case BOND_MODE_ROUNDROBIN:
+               slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp);
+               break;
+
+       case BOND_MODE_ACTIVEBACKUP:
+               slave = bond_xmit_activebackup_slave_get(bond);
+               break;
+
+       case BOND_MODE_8023AD:
+       case BOND_MODE_XOR:
+               slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp);
+               break;
+
+       default:
+               /* Should never happen. Mode guarded by bond_xdp_check() */
+               netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
+               WARN_ON_ONCE(1);
+               return NULL;
+       }
+
+       if (slave)
+               return slave->dev;
+
+       return NULL;
+}
+
+static int bond_xdp_xmit(struct net_device *bond_dev,
+                        int n, struct xdp_frame **frames, u32 flags)
+{
+       int nxmit, err = -ENXIO;
+
+       rcu_read_lock();
+
+       for (nxmit = 0; nxmit < n; nxmit++) {
+               struct xdp_frame *frame = frames[nxmit];
+               struct xdp_frame *frames1[] = {frame};
+               struct net_device *slave_dev;
+               struct xdp_buff xdp;
+
+               xdp_convert_frame_to_buff(frame, &xdp);
+
+               slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp);
+               if (!slave_dev) {
+                       err = -ENXIO;
+                       break;
+               }
+
+               err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags);
+               if (err < 1)
+                       break;
+       }
+
+       rcu_read_unlock();
+
+       /* If an error happened on the first frame, pass the error up;
+        * otherwise report the number of frames that were transmitted.
+        */
+       if (err < 0)
+               return (nxmit == 0 ? err : nxmit);
+
+       return nxmit;
+}
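
For context, a sketch of how a generic caller consumes this return value
(illustrative only; demo_flush is hypothetical, but freeing what was not
sent follows the ndo_xdp_xmit convention):

	static void demo_flush(struct net_device *dev, struct xdp_frame **frames,
			       int n, u32 flags)
	{
		int i, sent;

		sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, flags);
		if (sent < 0)
			sent = 0;	/* error on the first frame: nothing went out */

		/* Frames the driver did not accept stay owned by the caller and
		 * must be returned to their memory pool.
		 */
		for (i = sent; i < n; i++)
			xdp_return_frame_rx_napi(frames[i]);
	}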
+
+static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+                       struct netlink_ext_ack *extack)
+{
+       struct bonding *bond = netdev_priv(dev);
+       struct list_head *iter;
+       struct slave *slave, *rollback_slave;
+       struct bpf_prog *old_prog;
+       struct netdev_bpf xdp = {
+               .command = XDP_SETUP_PROG,
+               .flags   = 0,
+               .prog    = prog,
+               .extack  = extack,
+       };
+       int err;
+
+       ASSERT_RTNL();
+
+       if (!bond_xdp_check(bond))
+               return -EOPNOTSUPP;
+
+       old_prog = bond->xdp_prog;
+       bond->xdp_prog = prog;
+
+       bond_for_each_slave(bond, slave, iter) {
+               struct net_device *slave_dev = slave->dev;
+
+               if (!slave_dev->netdev_ops->ndo_bpf ||
+                   !slave_dev->netdev_ops->ndo_xdp_xmit) {
+                       NL_SET_ERR_MSG(extack, "Slave device does not support XDP");
+                       slave_err(dev, slave_dev, "Slave does not support XDP\n");
+                       err = -EOPNOTSUPP;
+                       goto err;
+               }
+
+               if (dev_xdp_prog_count(slave_dev) > 0) {
+                       NL_SET_ERR_MSG(extack,
+                                      "Slave has XDP program loaded, please unload before attaching XDP to the bond");
+                       slave_err(dev, slave_dev,
+                                 "Slave has XDP program loaded, please unload before attaching XDP to the bond\n");
+                       err = -EOPNOTSUPP;
+                       goto err;
+               }
+
+               err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+               if (err < 0) {
+                       /* ndo_bpf() sets extack error message */
+                       slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
+                       goto err;
+               }
+               if (prog)
+                       bpf_prog_inc(prog);
+       }
+
+       if (old_prog)
+               bpf_prog_put(old_prog);
+
+       if (prog)
+               static_branch_inc(&bpf_master_redirect_enabled_key);
+       else
+               static_branch_dec(&bpf_master_redirect_enabled_key);
+
+       return 0;
+
+err:
+       /* unwind the program changes */
+       bond->xdp_prog = old_prog;
+       xdp.prog = old_prog;
+       xdp.extack = NULL; /* do not overwrite original error */
+
+       bond_for_each_slave(bond, rollback_slave, iter) {
+               struct net_device *slave_dev = rollback_slave->dev;
+               int err_unwind;
+
+               if (slave == rollback_slave)
+                       break;
+
+               err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
+               if (err_unwind < 0)
+                       slave_err(dev, slave_dev,
+                                 "Error %d when unwinding XDP program change\n", err_unwind);
+               else if (xdp.prog)
+                       bpf_prog_inc(xdp.prog);
+       }
+       return err;
+}
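
For reference, the program that bond_xdp_set() propagates to every slave can
be as simple as this hypothetical per-CPU packet counter (BPF C, built with
clang -target bpf; all names are illustrative):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u64);
	} pkt_cnt SEC(".maps");

	SEC("xdp")
	int xdp_bond_count(struct xdp_md *ctx)
	{
		__u32 key = 0;
		__u64 *val = bpf_map_lookup_elem(&pkt_cnt, &key);

		if (val)
			(*val)++;
		return XDP_PASS;	/* let the bond rx path continue */
	}

	char _license[] SEC("license") = "GPL";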
+
+static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+       switch (xdp->command) {
+       case XDP_SETUP_PROG:
+               return bond_xdp_set(dev, xdp->prog, xdp->extack);
+       default:
+               return -EINVAL;
+       }
+}
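
A user-space sketch of attaching through this ndo via libbpf (illustrative;
the device and file names are hypothetical, and bpf_xdp_attach() needs
libbpf v0.8 or later):

	#include <bpf/libbpf.h>
	#include <net/if.h>

	static int attach_to_bond(const char *obj_path)
	{
		struct bpf_object *obj;
		struct bpf_program *prog;
		int ifindex, err;

		ifindex = if_nametoindex("bond0");	/* hypothetical bond device */
		if (!ifindex)
			return -1;

		obj = bpf_object__open_file(obj_path, NULL);
		if (!obj)
			return -1;

		err = bpf_object__load(obj);
		if (err)
			return err;

		prog = bpf_object__next_program(obj, NULL);
		if (!prog)
			return -1;

		/* bond_xdp_set() runs under rtnl and pushes the program to every
		 * current slave before this call returns; it fails with
		 * -EOPNOTSUPP if the bond mode fails bond_xdp_check() or a slave
		 * lacks ndo_bpf/ndo_xdp_xmit.
		 */
		return bpf_xdp_attach(ifindex, bpf_program__fd(prog), 0, NULL);
	}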
+
 static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
 {
        if (speed == 0 || speed == SPEED_UNKNOWN)
@@ -5009,6 +5346,9 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_features_check     = passthru_features_check,
        .ndo_get_xmit_slave     = bond_xmit_get_slave,
        .ndo_sk_get_lower_dev   = bond_sk_get_lower_dev,
+       .ndo_bpf                = bond_xdp,
+       .ndo_xdp_xmit           = bond_xdp_xmit,
+       .ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave,
 };
 
 static const struct device_type bond_type = {