 	int ret;
 	const struct macvlan_dev *vlan = netdev_priv(dev);
 
-	ret = macvlan_queue_xmit(skb, dev);
+	if (vlan->fwd_priv) {
+		skb->dev = vlan->lowerdev;
+		ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);
+	} else {
+		ret = macvlan_queue_xmit(skb, dev);
+	}
+
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
 		struct macvlan_pcpu_stats *pcpu_stats;
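
A note on the shared success test above: macvlan_queue_xmit() returns NET_XMIT_* codes while dev_hard_start_xmit() returns NETDEV_TX_* codes, so the likely() test mixes two namespaces. It still holds on the accelerated branch because the success values coincide. An abridged reference of the relevant constants (values from this era of include/linux/netdevice.h):

	#define NET_XMIT_SUCCESS	0x00
	#define NET_XMIT_DROP		0x01	/* skb dropped			*/
	#define NET_XMIT_CN		0x02	/* congestion notification	*/

	enum netdev_tx {
		NETDEV_TX_OK	 = 0x00,	/* driver took care of packet */
		NETDEV_TX_BUSY	 = 0x10,	/* driver tx path was busy    */
	};
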
 		goto hash_add;
 	}
 
+	if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+		vlan->fwd_priv =
+		      lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
+
+		/* If we get a NULL pointer back, or if we get an error
+		 * then we should just fall through to the non-accelerated path
+		 */
+		if (IS_ERR_OR_NULL(vlan->fwd_priv)) {
+			vlan->fwd_priv = NULL;
+		} else {
+			dev->features &= ~NETIF_F_LLTX;
+			return 0;
+		}
+	}
+
 	err = -EBUSY;
 	if (macvlan_addr_busy(vlan->port, dev->dev_addr))
 		goto out;
 del_unicast:
 	dev_uc_del(lowerdev, dev->dev_addr);
 out:
+	if (vlan->fwd_priv) {
+		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
+							   vlan->fwd_priv);
+		vlan->fwd_priv = NULL;
+	}
 	return err;
 }
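
None of the offload setup above can fire unless the lower device's driver opts in: it must implement the new ndo_dfwd_* hooks and advertise NETIF_F_HW_L2FW_DOFFLOAD. A hypothetical driver-side fragment (the fooeth_* names are illustrative placeholders, not part of this patch):

	/* Hypothetical probe-time setup: opt a NIC in to L2 forwarding
	 * offload. Putting the bit in hw_features as well keeps it
	 * toggleable from ethtool.
	 */
	static int fooeth_setup_offload(struct net_device *dev)
	{
		dev->netdev_ops   = &fooeth_netdev_ops;	/* provides ndo_dfwd_* */
		dev->features    |= NETIF_F_HW_L2FW_DOFFLOAD;
		dev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
		return 0;
	}

Also worth noting: once a station is claimed, the open path clears NETIF_F_LLTX on the macvlan, so transmits are serialized by the core's tx lock before they reach the lower device's ring.
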
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;
 
+	if (vlan->fwd_priv) {
+		lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
+							   vlan->fwd_priv);
+		vlan->fwd_priv = NULL;
+		return 0;
+	}
+
 	dev_uc_unsync(lowerdev, dev);
 	dev_mc_unsync(lowerdev, dev);
 	if (err < 0)
 		goto destroy_port;
 
+	dev->priv_flags |= IFF_MACVLAN;
 	err = netdev_upper_dev_link(lowerdev, dev);
 	if (err)
 		goto destroy_port;
 	struct hlist_node	hlist;
 	struct macvlan_port	*port;
 	struct net_device	*lowerdev;
+	void			*fwd_priv;
 	struct macvlan_pcpu_stats __percpu *pcpu_stats;
 
 	DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ);
 	NETIF_F_HW_VLAN_STAG_TX_BIT,	/* Transmit VLAN STAG HW acceleration */
 	NETIF_F_HW_VLAN_STAG_RX_BIT,	/* Receive VLAN STAG HW acceleration */
 	NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
+	NETIF_F_HW_L2FW_DOFFLOAD_BIT,	/* Allow L2 Forwarding in Hardware */
 
 	/*
 	 * Add your fresh new feature above and remember to update
 #define NETIF_F_HW_VLAN_STAG_FILTER	__NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX	__NETIF_F(HW_VLAN_STAG_RX)
 #define NETIF_F_HW_VLAN_STAG_TX	__NETIF_F(HW_VLAN_STAG_TX)
+#define NETIF_F_HW_L2FW_DOFFLOAD	__NETIF_F(HW_L2FW_DOFFLOAD)
 
 /* Features valid for ethtool to change */
 /* = all defined minus driver/device-class-related */
  *	Called by vxlan to notify the driver about a UDP port and socket
  *	address family that vxlan is not listening to anymore. The operation
  *	is protected by the vxlan_net->sock_lock.
+ *
+ * void* (*ndo_dfwd_add_station)(struct net_device *pdev,
+ *				 struct net_device *dev)
+ *	Called by upper layer devices to accelerate switching or other
+ *	station functionality into hardware. 'pdev' is the lowerdev
+ *	to use for the offload and 'dev' is the net device that will
+ *	back the offload. Returns a pointer to the private structure
+ *	the upper layer will maintain.
+ * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv)
+ *	Called by upper layer device to delete the station created
+ *	by 'ndo_dfwd_add_station'. 'pdev' is the net device backing
+ *	the station and 'priv' is the structure returned by the add
+ *	operation.
+ * netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff *skb,
+ *				      struct net_device *dev,
+ *				      void *priv);
+ *	Callback to use for xmit over the accelerated station. This
+ *	is used in place of ndo_start_xmit on accelerated net
+ *	devices.
  */
 struct net_device_ops {
 	int			(*ndo_init)(struct net_device *dev);
 	void			(*ndo_del_vxlan_port)(struct net_device *dev,
 						      sa_family_t sa_family,
 						      __be16 port);
+
+	void*			(*ndo_dfwd_add_station)(struct net_device *pdev,
+							struct net_device *dev);
+	void			(*ndo_dfwd_del_station)(struct net_device *pdev,
+							void *priv);
+
+	netdev_tx_t		(*ndo_dfwd_start_xmit)(struct sk_buff *skb,
+						       struct net_device *dev,
+						       void *priv);
 };
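
For reference, a hedged sketch of what a lower-device driver might do behind these hooks; the fooeth_* names and the fooeth_fwd cookie are assumptions for illustration, not part of this patch. The add hook returns an opaque cookie that the upper device stores (macvlan keeps it in vlan->fwd_priv) and passes back on every accelerated transmit:

	#include <linux/netdevice.h>
	#include <linux/slab.h>
	#include <linux/err.h>

	/* Illustrative per-station cookie. */
	struct fooeth_fwd {
		struct net_device *upper;	/* macvlan backing the station  */
		unsigned int ring;		/* hw queue/pool assigned to it */
	};

	static void *fooeth_dfwd_add_station(struct net_device *pdev,
					     struct net_device *dev)
	{
		struct fooeth_fwd *fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);

		if (!fwd)
			return ERR_PTR(-ENOMEM);

		fwd->upper = dev;
		/* allocate fwd->ring and program dev->dev_addr as its
		 * hardware MAC filter here */
		return fwd;
	}

	static void fooeth_dfwd_del_station(struct net_device *pdev, void *priv)
	{
		struct fooeth_fwd *fwd = priv;

		/* remove the MAC filter and release fwd->ring, then: */
		kfree(fwd);
	}

	static netdev_tx_t fooeth_dfwd_start_xmit(struct sk_buff *skb,
						  struct net_device *dev,
						  void *priv)
	{
		struct fooeth_fwd *fwd = priv;

		/* steer the skb to the station's dedicated ring instead of
		 * the queue the stack would have picked for 'dev' */
		skb_set_queue_mapping(skb, fwd->ring);
		return NETDEV_TX_OK;
	}
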
 /* Management operations */
 	const struct net_device_ops *netdev_ops;
 	const struct ethtool_ops *ethtool_ops;
+	const struct forwarding_accel_ops *fwd_ops;
 
 /* Hardware header description */
 	const struct header_ops *header_ops;
 int dev_get_phys_port_id(struct net_device *dev,
 			 struct netdev_phys_port_id *ppid);
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq);
+			struct netdev_queue *txq, void *accel_priv);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 
 extern int netdev_budget;
 	dev->gso_max_size = size;
 }
 
+static inline bool netif_is_macvlan(struct net_device *dev)
+{
+	return dev->priv_flags & IFF_MACVLAN;
+}
+
 static inline bool netif_is_bond_master(struct net_device *dev)
 {
 	return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
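
The new helper gives lower-device drivers a cheap check that the upper device requesting a station really is a macvlan, before any hardware resources are committed. A plausible guard at the top of an ndo_dfwd_add_station() implementation (sketch):

	/* IFF_MACVLAN is set in macvlan_common_newlink() (see the
	 * priv_flags hunk above), so it is already in place by the time
	 * macvlan_open() can call ndo_dfwd_add_station().
	 */
	if (!netif_is_macvlan(dev))
		return ERR_PTR(-EOPNOTSUPP);
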
 #define IFF_SUPP_NOFCS	0x80000		/* device supports sending custom FCS */
 #define IFF_LIVE_ADDR_CHANGE 0x100000	/* device supports hardware address
 					 * change when it's running */
+#define IFF_MACVLAN	0x200000	/* Macvlan device */
 
 #define IF_GET_IFACE	0x0001		/* for querying only */
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq)
+			struct netdev_queue *txq, void *accel_priv)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
 	int rc = NETDEV_TX_OK;
 
 			dev_queue_xmit_nit(skb, dev);
 
 		skb_len = skb->len;
-		rc = ops->ndo_start_xmit(skb, dev);
+		if (accel_priv)
+			rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
+		else
+			rc = ops->ndo_start_xmit(skb, dev);
+
 		trace_net_dev_xmit(skb, rc, dev, skb_len);
-		if (rc == NETDEV_TX_OK)
+		if (rc == NETDEV_TX_OK && txq)
 			txq_trans_update(txq);
 		return rc;
 	}
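
Two details of this hunk are easy to miss. First, ndo_dfwd_start_xmit is only dereferenced when accel_priv is non-NULL, and accel_priv can only be non-NULL if ndo_dfwd_add_station() succeeded earlier, which implies the driver supplied all three hooks. Second, macvlan's accelerated path never selected a queue on the lower device and so passes txq == NULL, which is what the new "&& txq" guard before txq_trans_update() protects. The resulting call contract, informally:

	/* Callers after this patch: accel_priv and a real txq are
	 * mutually exclusive in practice.
	 */
	rc = dev_hard_start_xmit(skb, dev, txq, NULL);			/* qdisc/core paths  */
	rc = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv);	/* macvlan fast path */
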
 			dev_queue_xmit_nit(nskb, dev);
 
 		skb_len = nskb->len;
-		rc = ops->ndo_start_xmit(nskb, dev);
+		if (accel_priv)
+			rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
+		else
+			rc = ops->ndo_start_xmit(nskb, dev);
 		trace_net_dev_xmit(nskb, rc, dev, skb_len);
 		if (unlikely(rc != NETDEV_TX_OK)) {
 			if (rc & ~NETDEV_TX_MASK)
 
 out:
 	return rc;
 }
+EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
 
 			if (!netif_xmit_stopped(txq)) {
 				__this_cpu_inc(xmit_recursion);
-				rc = dev_hard_start_xmit(skb, dev, txq);
+				rc = dev_hard_start_xmit(skb, dev, txq, NULL);
 				__this_cpu_dec(xmit_recursion);
 				if (dev_xmit_complete(rc)) {
 					HARD_TX_UNLOCK(dev, txq);
 	[NETIF_F_LOOPBACK_BIT] =         "loopback",
 	[NETIF_F_RXFCS_BIT] =            "rx-fcs",
 	[NETIF_F_RXALL_BIT] =            "rx-all",
+	[NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
 };
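
With the string registered, the offload appears in "ethtool -k <dev>" output as l2-fwd-offload and, where the driver also sets the bit in hw_features, can be toggled with "ethtool -K <dev> l2-fwd-offload on|off". The name-to-bit mapping is positional, which is why the new entry must use its feature bit as the designated index. An illustrative lookup, not the exact in-tree helper:

	/* Illustrative only: the table index is the feature bit number. */
	static int feature_bit_by_name(const char *name)
	{
		int i;

		for (i = 0; i < NETDEV_FEATURE_COUNT; i++)
			if (!strcmp(netdev_features_strings[i], name))
				return i;	/* e.g. NETIF_F_HW_L2FW_DOFFLOAD_BIT */
		return -1;
	}
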
 
 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_xmit_frozen_or_stopped(txq))
-		ret = dev_hard_start_xmit(skb, dev, txq);
+		ret = dev_hard_start_xmit(skb, dev, txq, NULL);
 	HARD_TX_UNLOCK(dev, txq);