static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Network interface message level setting");
+
+#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
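
For reference, netif_msg_init() treats out-of-range values as a request for the driver default, so leaving the new parameter at debug = -1 selects DEFAULT_MSG_LEVEL. A minimal sketch (illustrative, not part of the patch):

    u32 msg_enable = netif_msg_init(-1, DEFAULT_MSG_LEVEL);
    /* msg_enable == NETIF_MSG_PROBE | NETIF_MSG_LINK |
     *               NETIF_MSG_IFDOWN | NETIF_MSG_IFUP
     */
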
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
{
struct hns3_enet_tqp_vector *tqp_vector = vector;
- napi_schedule(&tqp_vector->napi);
+ napi_schedule_irqoff(&tqp_vector->napi);
return IRQ_HANDLED;
}
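
Note that napi_schedule_irqoff() is equivalent to napi_schedule() minus the local_irq_save()/local_irq_restore() pair; it is safe here because a hardirq handler already runs with interrupts disabled, so the save/restore would be pure overhead.
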
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
-#define IANA_VXLAN_PORT 4789
union l4_hdr_info l4;
l4.hdr = skb_transport_header(skb);
- if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
+ if (skb->encapsulation ||
+ l4.udp->dest != htons(IANA_VXLAN_UDP_PORT))
return false;
skb_checksum_help(skb);
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13
+ struct hnae3_handle *handle = tx_ring->tqp->handle;
+
+ /* Due to a HW limitation, if port based insert VLAN is enabled, only one
+ * VLAN header is allowed in the skb; otherwise it will cause a RAS error.
+ */
+ if (unlikely(skb_vlan_tagged_multi(skb) &&
+ handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_ENABLE))
+ return -EINVAL;
+
if (skb->protocol == htons(ETH_P_8021Q) &&
!(tx_ring->tqp->handle->kinfo.netdev->features &
NETIF_F_HW_VLAN_CTAG_TX)) {
* and use inner_vtag in one tag case.
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
- hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
- *out_vtag = vlan_tag;
+ if (handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE) {
+ hns3_set_field(*out_vlan_flag,
+ HNS3_TXD_OVLAN_B, 1);
+ *out_vtag = vlan_tag;
+ } else {
+ hns3_set_field(*inner_vlan_flag,
+ HNS3_TXD_VLAN_B, 1);
+ *inner_vtag = vlan_tag;
+ }
} else {
hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
- u16 bdtp_fe_sc_vld_ra_ri = 0;
struct skb_frag_struct *frag;
unsigned int frag_buf_num;
int k, sizeoflast;
desc_cb->length = size;
+ if (likely(size <= HNS3_MAX_BD_SIZE)) {
+ u16 bdtp_fe_sc_vld_ra_ri = 0;
+
+ desc_cb->priv = priv;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16(size);
+ hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
+ desc->tx.bdtp_fe_sc_vld_ra_ri =
+ cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+
+ ring_ptr_move_fw(ring, next_to_use);
+ return 0;
+ }
+
frag_buf_num = hns3_tx_bd_count(size);
sizeoflast = size & HNS3_TX_LAST_SIZE_M;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
/* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
+ u16 bdtp_fe_sc_vld_ra_ri = 0;
+
/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
return 0;
}
-static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
- struct hns3_enet_ring *ring)
+static int hns3_nic_bd_num(struct sk_buff *skb)
{
- struct sk_buff *skb = *out_skb;
- struct sk_buff *new_skb = NULL;
- struct skb_frag_struct *frag;
- int bdnum_for_frag;
- int frag_num;
- int buf_num;
- int size;
- int i;
+ int size = skb_headlen(skb);
+ int i, bd_num;
- size = skb_headlen(skb);
- buf_num = hns3_tx_bd_count(size);
+ /* if the total len is within the max BD limit, one BD per buffer suffices */
+ if (likely(skb->len <= HNS3_MAX_BD_SIZE))
+ return skb_shinfo(skb)->nr_frags + 1;
+
+ bd_num = hns3_tx_bd_count(size);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ int frag_bd_num;
- frag_num = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < frag_num; i++) {
- frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
- bdnum_for_frag = hns3_tx_bd_count(size);
- if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
+ frag_bd_num = hns3_tx_bd_count(size);
+
+ if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
return -ENOMEM;
- buf_num += bdnum_for_frag;
+ bd_num += frag_bd_num;
}
- if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = hns3_tx_bd_count(skb->len);
- if (ring_space(ring) < buf_num)
- return -EBUSY;
- /* manual split the send packet */
- new_skb = skb_copy(skb, GFP_ATOMIC);
- if (!new_skb)
- return -ENOMEM;
- dev_kfree_skb_any(skb);
- *out_skb = new_skb;
- }
+ return bd_num;
+}
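
hns3_tx_bd_count() itself is not shown in this hunk; the arithmetic above assumes it is a ceiling division of the buffer size by the per-BD limit, roughly as follows (a sketch under that assumption, not the exact driver code):

    static int hns3_tx_bd_count(unsigned int size)
    {
    	/* number of BDs needed when one BD carries at most
    	 * HNS3_MAX_BD_SIZE bytes
    	 */
    	return (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
    }
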
- if (unlikely(ring_space(ring) < buf_num))
- return -EBUSY;
+static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
+{
+ if (!skb->encapsulation)
+ return skb_transport_offset(skb) + tcp_hdrlen(skb);
- *bnum = buf_num;
- return 0;
+ return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}
-static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
- struct hns3_enet_ring *ring)
+/* HW requires the data in every window of 8 continuous buffers to be
+ * larger than MSS. We simplify this by ensuring that skb_headlen plus the
+ * first 7 continuous frags is larger than GSO header len + MSS, and that
+ * every following window of 7 continuous frags is larger than MSS, except
+ * for the last 7 frags.
+ */
+static bool hns3_skb_need_linearized(struct sk_buff *skb)
+{
+ int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
+ unsigned int tot_len = 0;
+ int i;
+
+ for (i = 0; i < bd_limit; i++)
+ tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ /* ensure that headlen + the first 7 frags is greater than MSS + header
+ * len, and that the first 7 frags alone are greater than MSS.
+ */
+ if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
+ hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
+ return true;
+
+ /* ensure each remaining window of 7 continuous frags is greater than MSS */
+ for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
+ tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
+
+ if (tot_len < skb_shinfo(skb)->gso_size)
+ return true;
+ }
+
+ return false;
+}
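
A hypothetical walk-through of the sliding-window check, assuming HNS3_MAX_BD_PER_FRAG is 8 (so bd_limit is 7) and taking gso_size = 2000, a GSO header of 54 bytes, headlen = 100 and nine frags (all numbers illustrative only):

    int frag_size[9] = { 300, 300, 300, 300, 300, 300, 300, 100, 2000 };

    /* First window: tot_len = 7 * 300 = 2100; 2100 + 100 >= 2000 + 54
     * and 2100 >= 2000, so the first check passes. Slide once:
     * tot_len = 2100 - frag_size[0] + frag_size[7] = 1900 < 2000, so
     * hns3_skb_need_linearized() returns true and the skb is copied
     * into a linear buffer. The final window (frags 2..8) is never
     * examined, matching the "except the last 7 frags" note above.
     */
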
+
+static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ struct sk_buff **out_skb)
{
struct sk_buff *skb = *out_skb;
- struct sk_buff *new_skb = NULL;
- int buf_num;
+ int bd_num;
- /* No. of segments (plus a header) */
- buf_num = skb_shinfo(skb)->nr_frags + 1;
+ bd_num = hns3_nic_bd_num(skb);
+ if (bd_num < 0)
+ return bd_num;
+
+ if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
+ struct sk_buff *new_skb;
+
+ if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
+ goto out;
- if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = hns3_tx_bd_count(skb->len);
- if (ring_space(ring) < buf_num)
+ bd_num = hns3_tx_bd_count(skb->len);
+ if (unlikely(ring_space(ring) < bd_num))
return -EBUSY;
/* manual split the send packet */
new_skb = skb_copy(skb, GFP_ATOMIC);
return -ENOMEM;
dev_kfree_skb_any(skb);
*out_skb = new_skb;
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_copy++;
+ u64_stats_update_end(&ring->syncp);
}
- if (unlikely(ring_space(ring) < buf_num))
+out:
+ if (unlikely(ring_space(ring) < bd_num))
return -EBUSY;
- *bnum = buf_num;
-
- return 0;
+ return bd_num;
}
static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
/* Prefetch the data used later */
prefetch(skb->data);
- switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
- case -EBUSY:
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_busy++;
- u64_stats_update_end(&ring->syncp);
+ buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
+ if (unlikely(buf_num <= 0)) {
+ if (buf_num == -EBUSY) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_busy++;
+ u64_stats_update_end(&ring->syncp);
+ goto out_net_tx_busy;
+ } else if (buf_num == -ENOMEM) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ }
- goto out_net_tx_busy;
- case -ENOMEM:
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
- netdev_err(netdev, "no memory to xmit!\n");
+ if (net_ratelimit())
+ netdev_err(netdev, "xmit error: %d!\n", buf_num);
goto out_err_tx_ok;
- default:
- break;
}
/* No. of segments (plus a header) */
bool enable;
int ret;
- if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
- if (features & (NETIF_F_TSO | NETIF_F_TSO6))
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- else
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
- }
-
if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
enable = !!(features & NETIF_F_GRO_HW);
ret = h->ae_algo->ops->set_gro_en(h, enable);
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring = NULL;
+ struct napi_struct *napi;
int timeout_queue = 0;
int hw_head, hw_tail;
+ int fbd_num, fbd_oft;
+ int ebd_num, ebd_oft;
+ int bd_num, bd_err;
+ int ring_en, tc;
int i;
/* Find the stopped queue the same way the stack does */
- for (i = 0; i < ndev->real_num_tx_queues; i++) {
+ for (i = 0; i < ndev->num_tx_queues; i++) {
struct netdev_queue *q;
unsigned long trans_start;
return false;
}
+ priv->tx_timeout_count++;
+
tx_ring = priv->ring_data[timeout_queue].ring;
+ napi = &tx_ring->tqp_vector->napi;
+
+ netdev_info(ndev,
+ "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
+ priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
+ tx_ring->next_to_clean, napi->state);
+
+ netdev_info(ndev,
+ "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
+ tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
+ tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
+
+ netdev_info(ndev,
+ "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
+ tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
+ tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+
+ /* When the MAC receives many continuous pause frames, it is unable to
+ * send packets, which may cause a TX timeout
+ */
+ if (h->ae_algo->ops->update_stats &&
+ h->ae_algo->ops->get_mac_pause_stats) {
+ u64 tx_pause_cnt, rx_pause_cnt;
+
+ h->ae_algo->ops->update_stats(h, &ndev->stats);
+ h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
+ &rx_pause_cnt);
+ netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
+ tx_pause_cnt, rx_pause_cnt);
+ }
hw_head = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG);
hw_tail = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG);
+ fbd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_FBDNUM_REG);
+ fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_OFFSET_REG);
+ ebd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_EBDNUM_REG);
+ ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_EBD_OFFSET_REG);
+ bd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_NUM_REG);
+ bd_err = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_ERR_REG);
+ ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
+ tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
+
netdev_info(ndev,
- "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
- priv->tx_timeout_count,
- timeout_queue,
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- hw_head,
- hw_tail,
+ "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
+ bd_num, hw_head, hw_tail, bd_err,
readl(tx_ring->tqp_vector->mask_addr));
+ netdev_info(ndev,
+ "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
+ ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
return true;
}
if (!hns3_get_tx_timeo_queue_info(ndev))
return;
- priv->tx_timeout_count++;
-
/* request the reset, and let the hclge to determine
* which reset level should be done
*/
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
};
-static bool hns3_is_phys_func(struct pci_dev *pdev)
+bool hns3_is_phys_func(struct pci_dev *pdev)
{
u32 dev_id = pdev->device;
static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
int *pkts)
{
- struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+ int ntc = ring->next_to_clean;
+ struct hns3_desc_cb *desc_cb;
+ desc_cb = &ring->desc_cb[ntc];
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
(*bytes) += desc_cb->length;
/* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
- hns3_free_buffer_detach(ring, ring->next_to_clean);
+ hns3_free_buffer_detach(ring, ntc);
- ring_ptr_move_fw(ring, next_to_clean);
+ if (++ntc == ring->desc_num)
+ ntc = 0;
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * ring_space called by hns3_nic_net_xmit.
+ */
+ smp_store_release(&ring->next_to_clean, ntc);
}
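
The release/acquire comment above refers to the producer-side space check; under that pairing, ring_space() is assumed to look roughly like this (a sketch, not necessarily the exact driver code):

    static int ring_space(struct hns3_enet_ring *ring)
    {
    	/* Pairs with the smp_store_release() in
    	 * hns3_nic_reclaim_one_desc(): the acquire guarantees the
    	 * freed buffers are visible before the space they free up.
    	 */
    	int begin = smp_load_acquire(&ring->next_to_clean);
    	int end = READ_ONCE(ring->next_to_use);

    	return ((end >= begin) ? (ring->desc_num - end + begin) :
    			(begin - end)) - 1;
    }
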
static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
break;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.non_reuse_pg++;
+ u64_stats_update_end(&ring->syncp);
}
ring_ptr_move_fw(ring, next_to_use);
}
}
+static int hns3_gro_complete(struct sk_buff *skb)
+{
+ __be16 type = skb->protocol;
+ struct tcphdr *th;
+ int depth = 0;
+
+ while (type == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vh;
+
+ if ((depth + VLAN_HLEN) > skb_headlen(skb))
+ return -EFAULT;
+
+ vh = (struct vlan_hdr *)(skb->data + depth);
+ type = vh->h_vlan_encapsulated_proto;
+ depth += VLAN_HLEN;
+ }
+
+ if (type == htons(ETH_P_IP)) {
+ depth += sizeof(struct iphdr);
+ } else if (type == htons(ETH_P_IPV6)) {
+ depth += sizeof(struct ipv6hdr);
+ } else {
+ netdev_err(skb->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+ be16_to_cpu(type), depth);
+ return -EFAULT;
+ }
+
+ th = (struct tcphdr *)(skb->data + depth);
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ if (th->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return 0;
+}
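
This open-coded completion differs from the generic tcp_gro_complete() in one important way: tcp_gro_complete() marks the skb CHECKSUM_PARTIAL and leaves the checksum to be finished by the stack, whereas here the hardware has already validated it, so CHECKSUM_UNNECESSARY is set directly (which is also why the separate RX checksum path below is skipped for GRO frames).
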
+
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
- struct hns3_desc *desc)
+ u32 l234info, u32 bd_base_info)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int l3_type, l4_type;
- u32 bd_base_info;
int ol4_type;
- u32 l234info;
-
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- l234info = le32_to_cpu(desc->rx.l234_info);
skb->ip_summed = CHECKSUM_NONE;
if (!(netdev->features & NETIF_F_RXCSUM))
return;
- /* We MUST enable hardware checksum before enabling hardware GRO */
- if (skb_shinfo(skb)->gso_size) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return;
- }
-
/* check if hardware has done checksum */
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
return;
struct hns3_desc *desc, u32 l234info,
u16 *vlan_tag)
{
+ struct hnae3_handle *handle = ring->tqp->handle;
struct pci_dev *pdev = ring->tqp->handle->pdev;
if (pdev->revision == 0x20) {
#define HNS3_STRP_OUTER_VLAN 0x1
#define HNS3_STRP_INNER_VLAN 0x2
+#define HNS3_STRP_BOTH 0x3
+ /* Hardware always inserts the VLAN tag into the RX descriptor when
+ * stripping it from the packet; the driver needs to determine
+ * which tag to report to the stack.
+ */
switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
+ if (handle->port_base_vlan_state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return false;
+
*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
return true;
case HNS3_STRP_INNER_VLAN:
+ if (handle->port_base_vlan_state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return false;
+
*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
return true;
+ case HNS3_STRP_BOTH:
+ if (handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ else
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
+ return true;
default:
return false;
}
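
For reference, the reporting decision encoded by the switch above can be summarized as:

    /*
     *   stripped tag(s)   port VLAN disabled    port VLAN enabled
     *   OUTER             report ot_vlan_tag    report nothing
     *   INNER             report vlan_tag       report nothing
     *   BOTH              report ot_vlan_tag    report vlan_tag
     */
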
ring->stats.seg_pkt_cnt++;
u64_stats_update_end(&ring->syncp);
- ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+ ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
__skb_put(skb, ring->pull_len);
hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
desc_cb);
return 0;
}
-static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
- u32 bd_base_info)
+static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, u32 l234info,
+ u32 bd_base_info)
{
u16 gro_count;
u32 l3_type;
gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S);
/* if there is no HW GRO, do not set gro params */
- if (!gro_count)
- return;
+ if (!gro_count) {
+ hns3_rx_checksum(ring, skb, l234info, bd_base_info);
+ return 0;
+ }
- /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
- * to skb_shinfo(skb)->gso_segs
- */
NAPI_GRO_CB(skb)->count = gro_count;
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
else if (l3_type == HNS3_L3_TYPE_IPV6)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
else
- return;
+ return -EFAULT;
skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
HNS3_RXD_GRO_SIZE_M,
HNS3_RXD_GRO_SIZE_S);
- if (skb_shinfo(skb)->gso_size)
- tcp_gro_complete(skb);
+
+ return hns3_gro_complete(skb);
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 rss_hash)
{
struct hnae3_handle *handle = ring->tqp->handle;
enum pkt_hash_types rss_type;
- struct hns3_desc *desc;
- int last_bd;
-
- /* When driver handle the rss type, ring->next_to_clean indicates the
- * first descriptor of next packet, need -1 here.
- */
- last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num;
- desc = &ring->desc[last_bd];
- if (le32_to_cpu(desc->rx.rss_hash))
+ if (rss_hash)
rss_type = handle->kinfo.rss_type;
else
rss_type = PKT_HASH_TYPE_NONE;
- skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
+ skb_set_hash(skb, rss_hash, rss_type);
}
-static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb)
+static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
enum hns3_pkt_l2t_type l2_frame_type;
+ u32 bd_base_info, l234info;
+ struct hns3_desc *desc;
+ unsigned int len;
+ int pre_ntc, ret;
+
+ /* The bdinfo handled below is only valid on the last BD of the
+ * current packet, and ring->next_to_clean already points to the first
+ * descriptor of the next packet, so step back one BD here (e.g. with
+ * desc_num = 512 and next_to_clean = 0, the last BD is 511).
+ */
+ pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
+ (ring->desc_num - 1);
+ desc = &ring->desc[pre_ntc];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ l234info = le32_to_cpu(desc->rx.l234_info);
+
+ /* Based on the hardware strategy, the offloaded tag is stored in
+ * ot_vlan_tag in the two-layer-tag case, and in vlan_tag in the
+ * one-layer-tag case.
+ */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+
+ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.non_vld_descs++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -EINVAL;
+ }
+
+ if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
+ BIT(HNS3_RXD_L2E_B))))) {
+ u64_stats_update_begin(&ring->syncp);
+ if (l234info & BIT(HNS3_RXD_L2E_B))
+ ring->stats.l2_err++;
+ else
+ ring->stats.err_pkt_len++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -EFAULT;
+ }
+
+ len = skb->len;
+
+ /* Set skb->protocol before handing the packet up to the IP stack */
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* This is needed in order to enable forwarding support */
+ ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.rx_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
+ HNS3_RXD_DMAC_S);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.rx_pkts++;
+ ring->stats.rx_bytes += len;
+
+ if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
+ ring->stats.rx_multicast++;
+
+ u64_stats_update_end(&ring->syncp);
+
+ ring->tqp_vector->rx_group.total_bytes += len;
+
+ hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
+ return 0;
+}
+
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
+ struct sk_buff **out_skb)
+{
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
u32 bd_base_info;
- u32 l234info;
int length;
int ret;
ALIGN(ring->pull_len, sizeof(long)));
}
- l234info = le32_to_cpu(desc->rx.l234_info);
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-
- /* Based on hw strategy, the tag offloaded will be stored at
- * ot_vlan_tag in two layer tag case, and stored at vlan_tag
- * in one layer tag case.
- */
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- u16 vlan_tag;
-
- if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
- }
-
- if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.non_vld_descs++;
- u64_stats_update_end(&ring->syncp);
-
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-
- if (unlikely((!desc->rx.pkt_len) ||
- (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
- BIT(HNS3_RXD_L2E_B))))) {
- u64_stats_update_begin(&ring->syncp);
- if (l234info & BIT(HNS3_RXD_L2E_B))
- ring->stats.l2_err++;
- else
- ring->stats.err_pkt_len++;
- u64_stats_update_end(&ring->syncp);
-
+ ret = hns3_handle_bdinfo(ring, skb);
+ if (unlikely(ret)) {
dev_kfree_skb_any(skb);
- return -EFAULT;
+ return ret;
}
-
- l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
- HNS3_RXD_DMAC_S);
- u64_stats_update_begin(&ring->syncp);
- if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
- ring->stats.rx_multicast++;
-
- ring->stats.rx_pkts++;
- ring->stats.rx_bytes += skb->len;
- u64_stats_update_end(&ring->syncp);
-
- ring->tqp_vector->rx_group.total_bytes += skb->len;
-
- /* This is needed in order to enable forwarding support */
- hns3_set_gro_param(skb, l234info, bd_base_info);
-
- hns3_rx_checksum(ring, skb, desc);
*out_skb = skb;
- hns3_set_rx_skb_rss_type(ring, skb);
return 0;
}
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int recv_pkts, recv_bds, clean_count, err;
- int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+ int unused_count = hns3_desc_unused(ring);
struct sk_buff *skb = ring->skb;
int num;
recv_pkts = 0, recv_bds = 0, clean_count = 0;
num -= unused_count;
+ unused_count -= ring->pending_buf;
while (recv_pkts < budget && recv_bds < num) {
/* Reuse or realloc buffers */
continue;
}
- /* Do update ip stack process */
- skb->protocol = eth_type_trans(skb, netdev);
rx_fn(ring, skb);
recv_bds += ring->pending_buf;
clean_count += ring->pending_buf;
struct hns3_enet_tqp_vector *tqp_vector =
container_of(napi, struct hns3_enet_tqp_vector, napi);
bool clean_complete = true;
- int rx_budget;
+ int rx_budget = budget;
if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
napi_complete(napi);
hns3_clean_tx_ring(ring);
/* make sure rx ring budget not smaller than 1 */
- rx_budget = max(budget / tqp_vector->num_tqps, 1);
+ if (tqp_vector->num_tqps > 1)
+ rx_budget = max(budget / tqp_vector->num_tqps, 1);
hns3_for_each_ring(ring, tqp_vector->rx_group) {
int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
}
devm_kfree(&pdev->dev, priv->ring_data);
+ priv->ring_data = NULL;
return ret;
}
struct hnae3_handle *h = priv->ae_handle;
int i;
+ if (!priv->ring_data)
+ return;
+
for (i = 0; i < h->kinfo.num_tqps; i++) {
devm_kfree(priv->dev, priv->ring_data[i].ring);
devm_kfree(priv->dev,
priv->ring_data[i + h->kinfo.num_tqps].ring);
}
devm_kfree(priv->dev, priv->ring_data);
+ priv->ring_data = NULL;
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}
-static void hns3_nic_set_priv_ops(struct net_device *netdev)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
-
- if ((netdev->features & NETIF_F_TSO) ||
- (netdev->features & NETIF_F_TSO6))
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- else
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
-}
-
static int hns3_client_start(struct hnae3_handle *handle)
{
if (!handle->ae_algo->ops->client_start)
handle->ae_algo->ops->client_stop(handle);
}
+static void hns3_info_show(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+
+ dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
+ dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
+ dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
+ dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
priv->tx_timeout_count = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
+
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
netdev->netdev_ops = &hns3_nic_netdev_ops;
SET_NETDEV_DEV(netdev, &pdev->dev);
hns3_ethtool_set_ops(netdev);
- hns3_nic_set_priv_ops(netdev);
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+ if (netif_msg_drv(handle))
+ hns3_info_show(priv);
+
return ret;
out_client_start:
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- hns3_client_stop(handle);
-
hns3_remove_hw_addr(netdev);
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ hns3_client_stop(handle);
+
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
goto out_netdev_free;
hns3_dbg_uninit(handle);
- priv->ring_data = NULL;
-
out_netdev_free:
free_netdev(netdev);
}
if (linkup) {
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
- netdev_info(netdev, "link up\n");
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link up\n");
} else {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
- netdev_info(netdev, "link down\n");
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link down\n");
}
}
struct netdev_hw_addr *ha, *tmp;
int ret = 0;
+ netif_addr_lock_bh(ndev);
/* go through and sync uc_addr entries to the device */
list = &ndev->uc;
list_for_each_entry_safe(ha, tmp, &list->list, list) {
ret = hns3_nic_uc_sync(ndev, ha->addr);
if (ret)
- return ret;
+ goto out;
}
/* go through and sync mc_addr entries to the device */
list_for_each_entry_safe(ha, tmp, &list->list, list) {
ret = hns3_nic_mc_sync(ndev, ha->addr);
if (ret)
- return ret;
+ goto out;
}
+out:
+ netif_addr_unlock_bh(ndev);
return ret;
}
hns3_nic_uc_unsync(netdev, netdev->dev_addr);
+ netif_addr_lock_bh(netdev);
/* go through and unsync uc_addr entries to the device */
list = &netdev->uc;
list_for_each_entry_safe(ha, tmp, &list->list, list)
list_for_each_entry_safe(ha, tmp, &list->list, list)
if (ha->refcount > 1)
hns3_nic_mc_unsync(netdev, ha->addr);
+
+ netif_addr_unlock_bh(netdev);
}
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
ring_ptr_move_fw(ring, next_to_use);
}
+ /* Free the pending skb in rx ring */
+ if (ring->skb) {
+ dev_kfree_skb_any(ring->skb);
+ ring->skb = NULL;
+ ring->pending_buf = 0;
+ }
+
return 0;
}
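
Freeing the pending skb matters during teardown and reset: the RX ring may be cleared while a multi-BD packet is only partially assembled in ring->skb, and without this block the half-built skb (and the BDs counted in ring->pending_buf) would leak across the ring clear.
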
if (ret)
goto err_uninit_vector;
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto err_uninit_ring;
+ }
+
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
+err_uninit_ring:
+ hns3_uninit_all_ring(priv);
err_uninit_vector:
hns3_nic_uninit_vector_data(priv);
- priv->ring_data = NULL;
err_dealloc_vector:
hns3_nic_dealloc_vector_data(priv);
err_put_ring:
hns3_put_ring_config(priv);
- priv->ring_data = NULL;
return ret;
}
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
return 0;
}
netdev_err(netdev, "uninit ring error\n");
hns3_put_ring_config(priv);
- priv->ring_data = NULL;
-
- clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
}