u8 port_id;
u8 tx_chan;
u8 lport; /* associated offload logical port */
- u8 rx_offload; /* CSO, etc */
u8 nqsets; /* # of qsets */
u8 first_qset; /* index of first qset */
u8 rss_mode;
u16 *rss;
};
-/* port_info.rx_offload flags */
-enum {
- RX_CSO = 1 << 0,
-};
-
struct dentry;
struct work_struct;
return 0;
}
-static u32 get_rx_csum(struct net_device *dev)
-{
- struct port_info *p = netdev_priv(dev);
-
- return p->rx_offload & RX_CSO;
-}
-
-static int set_rx_csum(struct net_device *dev, u32 data)
-{
- struct port_info *p = netdev_priv(dev);
-
- if (data)
- p->rx_offload |= RX_CSO;
- else
- p->rx_offload &= ~RX_CSO;
- return 0;
-}
-
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
const struct port_info *pi = netdev_priv(dev);
return err;
}
-#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-
-static int set_tso(struct net_device *dev, u32 value)
-{
- if (value)
- dev->features |= TSO_FLAGS;
- else
- dev->features &= ~TSO_FLAGS;
- return 0;
-}
-
-static int set_flags(struct net_device *dev, u32 flags)
+static int cxgb_set_features(struct net_device *dev, u32 features)
{
+ const struct port_info *pi = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
int err;
- unsigned long old_feat = dev->features;
-
- err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
- ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
- if (err)
- return err;
- if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
- const struct port_info *pi = netdev_priv(dev);
+ if (!(changed & NETIF_F_HW_VLAN_RX))
+ return 0;
- err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
- -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
- true);
- if (err)
- dev->features = old_feat;
- }
+ err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+ -1, -1, -1,
+ !!(features & NETIF_F_HW_VLAN_RX), true);
+ if (unlikely(err))
+ dev->features = features ^ NETIF_F_HW_VLAN_RX;
return err;
}
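Illustrative aside, not taken from the cxgb4/cxgb4vf sources: under the u32-based feature API this conversion targets (later kernels switched to netdev_features_t), an ethtool toggle is first filtered through the driver's optional ndo_fix_features() callback and only then committed, with ndo_set_features() called for bits that need hardware programming. The sketch below assumes a hypothetical "foo" driver; foo_hw_set_vlan_strip() and the RXCSUM-implies-VLAN-stripping constraint are invented for the example.

#include <linux/netdevice.h>

/* Placeholder for whatever firmware/register write the device would need. */
static int foo_hw_set_vlan_strip(struct net_device *dev, bool enable)
{
	return 0;
}

static u32 foo_fix_features(struct net_device *dev, u32 features)
{
	/* Assumed constraint for illustration: VLAN tag stripping only
	 * works while RX checksum offload is enabled. */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_HW_VLAN_RX;
	return features;
}

static int foo_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	/* Only VLAN RX stripping needs device state here; SG, TSO and the
	 * checksum/RXHASH bits are consulted per packet via dev->features. */
	if (changed & NETIF_F_HW_VLAN_RX)
		return foo_hw_set_vlan_strip(dev,
				!!(features & NETIF_F_HW_VLAN_RX));
	return 0;
}

These would be hooked up through .ndo_fix_features and .ndo_set_features in the driver's net_device_ops, with the toggleable bits advertised in netdev->hw_features at probe time, which is the same pattern cxgb_set_features() above follows for NETIF_F_HW_VLAN_RX.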
.set_eeprom = set_eeprom,
.get_pauseparam = get_pauseparam,
.set_pauseparam = set_pauseparam,
- .get_rx_csum = get_rx_csum,
- .set_rx_csum = set_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
- .set_sg = ethtool_op_set_sg,
.get_link = ethtool_op_get_link,
.get_strings = get_strings,
.set_phys_id = identify_port,
.get_regs = get_regs,
.get_wol = get_wol,
.set_wol = set_wol,
- .set_tso = set_tso,
- .set_flags = set_flags,
.get_rxnfc = get_rxnfc,
.get_rxfh_indir = get_rss_table,
.set_rxfh_indir = set_rss_table,
.ndo_get_stats64 = cxgb_get_stats,
.ndo_set_rx_mode = cxgb_set_rxmode,
.ndo_set_mac_address = cxgb_set_mac_addr,
+ .ndo_set_features = cxgb_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
t4_fw_bye(adapter, adapter->fn);
}
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
pi = netdev_priv(netdev);
pi->adapter = adapter;
pi->xact_addr_filt = -1;
- pi->rx_offload = RX_CSO;
pi->port_id = i;
netdev->irq = pdev->irq;
- netdev->features |= NETIF_F_SG | TSO_FLAGS;
- netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
- netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->features |= netdev->hw_features | highdma;
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->netdev_ops = &cxgb4_netdev_ops;
{
bool csum_ok;
struct sk_buff *skb;
- struct port_info *pi;
const struct cpl_rx_pkt *pkt;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
if (skb->dev->features & NETIF_F_RXHASH)
skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
- pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
- if (csum_ok && (pi->rx_offload & RX_CSO) &&
+ if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
(pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
u16 rss_size; /* size of VI's RSS table slice */
u8 pidx; /* index into adapter port[] */
u8 port_id; /* physical port ID */
- u8 rx_offload; /* CSO, etc. */
u8 nqsets; /* # of "Queue Sets" */
u8 first_qset; /* index of first "Queue Set" */
struct link_config link_cfg; /* physical port configuration */
};
-/* port_info.rx_offload flags */
-enum {
- RX_CSO = 1 << 0,
-};
-
/*
* Scatter Gather Engine resources for the "adapter". Our ingress and egress
* queues are organized into "Queue Sets" with one ingress and one egress
}
/*
- * Return whether RX Checksum Offloading is currently enabled for the device.
- */
-static u32 cxgb4vf_get_rx_csum(struct net_device *dev)
-{
- struct port_info *pi = netdev_priv(dev);
-
- return (pi->rx_offload & RX_CSO) != 0;
-}
-
-/*
- * Turn RX Checksum Offloading on or off for the device.
- */
-static int cxgb4vf_set_rx_csum(struct net_device *dev, u32 csum)
-{
- struct port_info *pi = netdev_priv(dev);
-
- if (csum)
- pi->rx_offload |= RX_CSO;
- else
- pi->rx_offload &= ~RX_CSO;
- return 0;
-}
-
-/*
* Identify the port by blinking the port's LED.
*/
static int cxgb4vf_phys_id(struct net_device *dev,
*/
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-/*
- * Set TCP Segmentation Offloading feature capabilities.
- */
-static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
-{
- if (tso)
- dev->features |= TSO_FLAGS;
- else
- dev->features &= ~TSO_FLAGS;
- return 0;
-}
-
static struct ethtool_ops cxgb4vf_ethtool_ops = {
.get_settings = cxgb4vf_get_settings,
.get_drvinfo = cxgb4vf_get_drvinfo,
.get_coalesce = cxgb4vf_get_coalesce,
.set_coalesce = cxgb4vf_set_coalesce,
.get_pauseparam = cxgb4vf_get_pauseparam,
- .get_rx_csum = cxgb4vf_get_rx_csum,
- .set_rx_csum = cxgb4vf_set_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
- .set_sg = ethtool_op_set_sg,
.get_link = ethtool_op_get_link,
.get_strings = cxgb4vf_get_strings,
.set_phys_id = cxgb4vf_phys_id,
.get_regs_len = cxgb4vf_get_regs_len,
.get_regs = cxgb4vf_get_regs,
.get_wol = cxgb4vf_get_wol,
- .set_tso = cxgb4vf_set_tso,
};
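Illustrative aside, not taken from the driver sources: with set_rx_csum/set_tx_csum/set_sg/set_tso removed, those toggles are handled by the core feature framework, and the driver only has to honour the resulting dev->features at run time, which is what the sge.c hunks of this patch do for NETIF_F_RXCSUM. A minimal sketch of that per-packet pattern follows; foo_deliver() and its hw_csum_ok flag are hypothetical stand-ins for the real completion handling.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* hw_csum_ok would come from the RX descriptor, like csum_ok in the
 * cxgb4/cxgb4vf ingress handlers. */
static void foo_deliver(struct net_device *dev, struct sk_buff *skb,
			bool hw_csum_ok)
{
	if (hw_csum_ok && (dev->features & NETIF_F_RXCSUM))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
	netif_receive_skb(skb);
}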
/*
* it.
*/
pi->xact_addr_filt = -1;
- pi->rx_offload = RX_CSO;
netif_carrier_off(netdev);
netdev->irq = pdev->irq;
- netdev->features = (NETIF_F_SG | TSO_FLAGS |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
- NETIF_F_GRO);
+ netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
+ netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features |
+ NETIF_F_HW_VLAN_RX;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features =
- (netdev->features &
- ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX));
#ifdef HAVE_NET_DEVICE_OPS
netdev->netdev_ops = &cxgb4vf_netdev_ops;
pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
- if (csum_ok && (pi->rx_offload & RX_CSO) && !pkt->err_vec &&
- (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
+ !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
if (!pkt->ip_frag)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else {