// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"
#define TX_IRQ_NO_PENDING               0
#define TX_IRQ_NO_COALESC               0
#define TX_IRQ_NO_LLI_TIMER             0
#define TX_IRQ_NO_CREDIT                0
#define TX_IRQ_NO_RESEND_TIMER          0

#define CI_UPDATE_NO_PENDING            0
#define CI_UPDATE_NO_COALESC            0
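
/* The HW writes the SQ consumer index back to hw_ci_addr in big-endian;
 * HW_CONS_IDX() converts it to CPU order so the completion path can tell
 * how far the HW has consumed the queue.
 */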
#define HW_CONS_IDX(sq)                 be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN                     32

#define MAX_PAYLOAD_OFFSET              221
#define TRANSPORT_OFFSET(l4_hdr, skb)   ((u32)((l4_hdr) - (skb)->data))
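
/* TRANSPORT_OFFSET() is the byte offset of the L4 header from the start of
 * the frame data; hinic_tx_offload() rejects frames whose resulting payload
 * offset exceeds MAX_PAYLOAD_OFFSET.
 */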
union hinic_l3 {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

union hinic_l4 {
	struct tcphdr *tcp;
	struct udphdr *udp;
	unsigned char *hdr;
};

enum hinic_offload_type {
	TX_OFFLOAD_TSO     = BIT(0),
	TX_OFFLOAD_CSUM    = BIT(1),
	TX_OFFLOAD_VLAN    = BIT(2),
	TX_OFFLOAD_INVALID = BIT(3),
};

/**
 * hinic_txq_clean_stats - Clean the statistics of specific queue
 * @txq: Logical Tx Queue
 **/
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_update_begin(&txq_stats->syncp);
	txq_stats->pkts    = 0;
	txq_stats->bytes   = 0;
	txq_stats->tx_busy = 0;
	txq_stats->tx_wake = 0;
	txq_stats->tx_dropped = 0;
	txq_stats->big_frags_pkts = 0;
	u64_stats_update_end(&txq_stats->syncp);
}

/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;
	unsigned int start;
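
	/* Snapshot the counters consistently; retry if a writer updated them
	 * while we were reading (u64_stats seqcount).
	 */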
	do {
		start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
		stats->pkts    = txq_stats->pkts;
		stats->bytes   = txq_stats->bytes;
		stats->tx_busy = txq_stats->tx_busy;
		stats->tx_wake = txq_stats->tx_wake;
		stats->tx_dropped = txq_stats->tx_dropped;
		stats->big_frags_pkts = txq_stats->big_frags_pkts;
	} while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
}

/**
 * txq_stats_init - Initialize the statistics of specific queue
 * @txq: Logical Tx Queue
 **/
static void txq_stats_init(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_init(&txq_stats->syncp);
	hinic_txq_clean_stats(txq);
}

/**
 * tx_map_skb - dma mapping for skb and return sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
		      struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	skb_frag_t *frag;
	dma_addr_t dma_addr;
	int i, j;

	dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_addr)) {
		dev_err(&pdev->dev, "Failed to map Tx skb data\n");
		return -EFAULT;
	}

	hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));
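
	/* sges[0] describes the linear part of the skb; each paged fragment
	 * below gets its own sge right after it.
	 */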
	for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, dma_addr)) {
			dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
			goto err_tx_map;
		}

		hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
	}

	return 0;

err_tx_map:
	for (j = 0; j < i; j++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
			       sges[j + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
	return -EFAULT;
}

/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			 struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
			       sges[i + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
}

static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
				 union hinic_l4 *l4,
				 enum hinic_offload_type offload_type,
				 enum hinic_l3_offload_type *l3_type,
				 u8 *l4_proto)
{
	u8 *exthdr;

	if (ip->v4->version == 4) {
		*l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
			   IPV4_PKT_NO_CHKSUM_OFFLOAD :
			   IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		*l4_proto = ip->v4->protocol;
	} else if (ip->v4->version == 6) {
		*l3_type = IPV6_PKT;
		exthdr = ip->hdr + sizeof(*ip->v6);
		*l4_proto = ip->v6->nexthdr;
		if (exthdr != l4->hdr) {
			int start = exthdr - skb->data;
			__be16 frag_off;

			ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
		}
	} else {
		*l3_type = L3TYPE_UNKNOWN;
		*l4_proto = 0;
	}
}

static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
			      enum hinic_offload_type offload_type, u8 l4_proto,
			      enum hinic_l4_offload_type *l4_offload,
			      u32 *l4_len, u32 *offset)
{
	*l4_offload = OFFLOAD_DISABLE;
	*offset = 0;
	*l4_len = 0;

	switch (l4_proto) {
	case IPPROTO_TCP:
		*l4_offload = TCP_OFFLOAD_ENABLE;
		/* doff in unit of 4B */
		*l4_len = l4->tcp->doff * 4;
		*offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
		break;
	case IPPROTO_UDP:
		*l4_offload = UDP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct udphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;
	case IPPROTO_SCTP:
		/* only csum offload supports sctp */
		if (offload_type != TX_OFFLOAD_CSUM)
			break;

		*l4_offload = SCTP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct sctphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;
	default:
		break;
	}
}
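
/* csum_magic() computes the pseudo-header checksum with a zero length; the
 * callers store its complement in the TCP/UDP checksum field so the HW can
 * finish the checksum over the payload during TSO/csum offload.
 */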
static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
{
	return (ip->v4->version == 4) ?
	       csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
	       csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}

static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
		       struct sk_buff *skb)
{
	u32 offset, l4_len, ip_identify, network_hdr_len;
	enum hinic_l3_offload_type l3_offload;
	enum hinic_l4_offload_type l4_offload;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_cow_head(skb, 0) < 0)
		return -EPROTONOSUPPORT;

	if (skb->encapsulation) {
		u32 gso_type = skb_shinfo(skb)->gso_type;
		u32 tunnel_type = 0;
		u32 l4_tunnel_len;

		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_inner_network_header_len(skb);

		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		} else if (ip.v4->version == 6) {
			l3_offload = IPV6_PKT;
		} else {
			l3_offload = 0;
		}

		hinic_task_set_outter_l3(task, l3_offload,
					 skb_network_header_len(skb));

		if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
			l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
			tunnel_type = TUNNEL_UDP_CSUM;
		} else if (gso_type & SKB_GSO_UDP_TUNNEL) {
			tunnel_type = TUNNEL_UDP_NO_CSUM;
		}

		l4_tunnel_len = skb_inner_network_offset(skb) -
				skb_transport_offset(skb);
		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
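
		/* The outer headers are programmed; from here on ip/l4 point
		 * at the inner headers for the rest of the TSO setup.
		 */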
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	/* initialize inner IP header fields */
	if (ip.v4->version == 4)
		ip.v4->tot_len = 0;
	else
		ip.v6->payload_len = 0;

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);

	ip_identify = 0;
	if (l4_proto == IPPROTO_TCP)
		l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
			       ip_identify, skb_shinfo(skb)->gso_size);

	return 1;
}

static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
			struct sk_buff *skb)
{
	enum hinic_l4_offload_type l4_offload;
	u32 offset, l4_len, network_hdr_len;
	enum hinic_l3_offload_type l3_type;
	u32 tunnel_type = NOT_TUNNEL;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->encapsulation) {
		u32 l4_tunnel_len;

		tunnel_type = TUNNEL_UDP_NO_CSUM;
		ip.hdr = skb_network_header(skb);

		if (ip.v4->version == 4) {
			l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
			l4_proto = ip.v4->protocol;
		} else if (ip.v4->version == 6) {
			unsigned char *exthdr;
			__be16 frag_off;

			l3_type = IPV6_PKT;
			tunnel_type = TUNNEL_UDP_CSUM;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			l4.hdr = skb_transport_header(skb);
			if (l4.hdr != exthdr)
				ipv6_skip_exthdr(skb, exthdr - skb->data,
						 &l4_proto, &frag_off);
		} else {
			l3_type = L3TYPE_UNKNOWN;
			l4_proto = IPPROTO_RAW;
		}

		hinic_task_set_outter_l3(task, l3_type,
					 skb_network_header_len(skb));
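
		/* Decide which headers the csum offload applies to: UDP
		 * tunnels offload the inner headers, IPIP/IPV6 tunnels are
		 * handled as non-tunnelled traffic, and unknown tunnel
		 * protocols fall back to software checksumming.
		 */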
		switch (l4_proto) {
		case IPPROTO_UDP:
			l4_tunnel_len = skb_inner_network_offset(skb) -
					skb_transport_offset(skb);
			ip.hdr = skb_inner_network_header(skb);
			l4.hdr = skb_inner_transport_header(skb);
			network_hdr_len = skb_inner_network_header_len(skb);
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			tunnel_type = NOT_TUNNEL;
			l4_tunnel_len = 0;

			ip.hdr = skb_inner_network_header(skb);
			l4.hdr = skb_transport_header(skb);
			network_hdr_len = skb_network_header_len(skb);
			break;
		default:
			/* Unsupported tunnel packet, disable csum offload */
			skb_checksum_help(skb);
			return 0;
		}

		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_type, network_hdr_len);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);

	return 1;
}

static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
			 u16 vlan_tag, u16 vlan_pri)
{
	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
			   HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);

	*queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI);
}

static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
			    u32 *queue_info)
{
	enum hinic_offload_type offload = 0;
	u16 vlan_tag;
	int enabled;

	enabled = offload_tso(task, queue_info, skb);
	if (enabled > 0) {
		offload |= TX_OFFLOAD_TSO;
	} else if (enabled == 0) {
		enabled = offload_csum(task, queue_info, skb);
		if (enabled)
			offload |= TX_OFFLOAD_CSUM;
	} else {
		return -EPROTONOSUPPORT;
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		vlan_tag = skb_vlan_tag_get(skb);
		offload_vlan(task, queue_info, vlan_tag,
			     vlan_tag >> VLAN_PRIO_SHIFT);
		offload |= TX_OFFLOAD_VLAN;
	}

	if (offload)
		hinic_task_set_l2hdr(task, skb_network_offset(skb));

	/* payload offset must not exceed MAX_PAYLOAD_OFFSET (221) */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
	    MAX_PAYLOAD_OFFSET) {
		return -EPROTONOSUPPORT;
	}

	/* mss must not be less than HINIC_MSS_MIN (80) */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
		*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
		*queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
	}

	return 0;
}
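
/* Simplified transmit path used while the ethtool loopback self-test is
 * running; unlike hinic_xmit_frame() it does not pad short frames or set up
 * checksum/TSO offload.
 */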
netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 prod_idx, q_id = skb->queue_mapping;
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;

	txq = &nic_dev->txqs[q_id];
	qp = container_of(txq->sq, struct hinic_qp, sq);
	nr_sges = skb_shinfo(skb)->nr_frags + 1;

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		wqe_size = 0;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, q_id);
	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

skb_error:
	dev_kfree_skb_any(skb);
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);

	return NETDEV_TX_OK;
}

netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 prod_idx, q_id = skb->queue_mapping;
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;

	txq = &nic_dev->txqs[q_id];
	qp = container_of(txq->sq, struct hinic_qp, sq);
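
	/* The frame is padded to at least MIN_SKB_LEN bytes before it is
	 * DMA-mapped; if padding fails the frame is dropped and counted in
	 * tx_dropped.
	 */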
	if (skb->len < MIN_SKB_LEN) {
		if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
			netdev_err(netdev, "Failed to pad skb\n");
			goto update_error_stats;
		}

		skb->len = MIN_SKB_LEN;
	}

	nr_sges = skb_shinfo(skb)->nr_frags + 1;
	if (nr_sges > 17) {
		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.big_frags_pkts++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	if (nr_sges > txq->max_sges) {
		netdev_err(netdev, "Too many Tx sges\n");
		goto skb_error;
	}

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		/* Check for the case free_tx_poll is called in another cpu
		 * and we stopped the subqueue after free_tx_poll check.
		 */
		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		wqe_size = 0;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

	err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
	if (err)
		goto offload_error;

	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
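
	/* Ring the doorbell only for the last frame of an xmit_more batch, or
	 * when the queue has been stopped, so that doorbell writes are
	 * batched across frames.
	 */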
flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, q_id);
	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

offload_error:
	hinic_sq_return_wqe(txq->sq, wqe_size);
	tx_unmap_skb(nic_dev, skb, txq->sges);

skb_error:
	dev_kfree_skb_any(skb);

update_error_stats:
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);

	return NETDEV_TX_OK;
}

/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			struct hinic_sge *sges)
{
	tx_unmap_skb(nic_dev, skb, sges);

	dev_kfree_skb_any(skb);
}

/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_sq *sq = txq->sq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct sk_buff *skb;
	int nr_sges;
	u16 ci;
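
	/* Drain whatever WQEs are still posted on the SQ (queue teardown) and
	 * release their skbs and DMA mappings.
	 */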
	while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
		sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
		if (!sq_wqe)
			break;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	}
}

/**
 * free_tx_poll - free finished tx skbs in tx queue that connected to napi
 * @napi: napi
 * @budget: number of tx
 *
 * Return 0 - Success, negative - Failure
 **/
static int free_tx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
	struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct netdev_queue *netdev_txq;
	struct hinic_sq *sq = txq->sq;
	struct hinic_wq *wq = sq->wq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	int nr_sges, pkts = 0;
	struct sk_buff *skb;
	u64 tx_bytes = 0;
	u16 hw_ci, sw_ci;
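
	/* Reap completed sends up to the NAPI budget: hw_ci is the consumer
	 * index the HW wrote back; stop as soon as the WQE at sw_ci has not
	 * been fully consumed by the HW yet.
	 */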
	do {
		hw_ci = HW_CONS_IDX(sq) & wq->mask;

		dma_rmb();

		/* Reading a WQEBB to get real WQE size and consumer index. */
		sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
		if (!sq_wqe ||
		    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
			break;

		/* If this WQE has multiple WQEBBs, read it again to get the
		 * full-size WQE.
		 */
		if (wqe_size > wq->wqebb_size) {
			sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
			if (unlikely(!sq_wqe))
				break;
		}

		tx_bytes += skb->len;
		pkts++;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	} while (pkts < budget);
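
	/* If the subqueue was stopped because the SQ was full, wake it once
	 * enough WQEBBs have been freed. Take the TX queue lock to avoid
	 * racing with the xmit path, and leave the queue stopped while a
	 * self-test is running.
	 */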
	if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
	    hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
		netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

		__netif_tx_lock(netdev_txq, smp_processor_id());
		if (!netif_testing(nic_dev->netdev))
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);

		__netif_tx_unlock(netdev_txq);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_wake++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.bytes += tx_bytes;
	txq->txq_stats.pkts += pkts;
	u64_stats_update_end(&txq->txq_stats.syncp);
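
	/* Budget not exhausted: all pending completions were handled, so
	 * finish NAPI and re-enable the SQ MSI-X vector (VFs do not touch
	 * the MSI-X mask state directly).
	 */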
	if (pkts < budget) {
		napi_complete(napi);
		if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
			hinic_hwdev_set_msix_state(nic_dev->hwdev,
						   sq->msix_entry,
						   HINIC_MSIX_ENABLE);
		return pkts;
	}

	return budget;
}

static irqreturn_t tx_irq(int irq, void *data)
{
	struct hinic_txq *txq = data;
	struct hinic_dev *nic_dev;

	nic_dev = netdev_priv(txq->netdev);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		/* Disable the interrupt until napi is complete */
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   txq->sq->msix_entry,
					   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

	napi_schedule(&txq->napi);
	return IRQ_HANDLED;
}

static int tx_request_irq(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_msix_config interrupt_info = {0};
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_sq *sq = txq->sq;
	struct hinic_qp *qp;
	int err;

	qp = container_of(sq, struct hinic_qp, sq);

	netif_napi_add_weight(txq->netdev, &txq->napi, free_tx_poll,
			      nic_dev->tx_weight);

	hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
			     TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
			     TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
			     TX_IRQ_NO_RESEND_TIMER);
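
	/* Apply the per-queue interrupt coalescing parameters kept in
	 * tx_intr_coalesce (as configured through ethtool) to this SQ's
	 * MSI-X entry.
	 */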
	intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id];
	interrupt_info.msix_index = sq->msix_entry;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
	if (err) {
		netif_err(nic_dev, drv, txq->netdev,
			  "Failed to set TX interrupt coalescing attribute\n");
		netif_napi_del(&txq->napi);
		return err;
	}

	err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
	if (err) {
		dev_err(&pdev->dev, "Failed to request Tx irq\n");
		netif_napi_del(&txq->napi);
		return err;
	}

	return 0;
}

static void tx_free_irq(struct hinic_txq *txq)
{
	struct hinic_sq *sq = txq->sq;

	free_irq(sq->irq, txq);
	netif_napi_del(&txq->napi);
}

/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	int err, irqname_len;

	txq->netdev = netdev;
	txq->sq = sq;

	txq_stats_init(txq);

	txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

	txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
				 sizeof(*txq->sges), GFP_KERNEL);
	if (!txq->sges)
		return -ENOMEM;

	txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
				      sizeof(*txq->free_sges), GFP_KERNEL);
	if (!txq->free_sges) {
		err = -ENOMEM;
		goto err_alloc_free_sges;
	}

	irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
	txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
	if (!txq->irq_name) {
		err = -ENOMEM;
		goto err_alloc_irqname;
	}

	sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);
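
	/* Tell the HW where to write back the SQ consumer index (no pending
	 * or coalescing thresholds) before the completion IRQ and NAPI are
	 * set up.
	 */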
	err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
					 CI_UPDATE_NO_COALESC);
	if (err)
		goto err_hw_ci;

	err = tx_request_irq(txq);
	if (err) {
		netdev_err(netdev, "Failed to request Tx irq\n");
		goto err_req_tx_irq;
	}

	return 0;

err_req_tx_irq:
err_hw_ci:
	devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
	devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
	devm_kfree(&netdev->dev, txq->sges);
	return err;
}

/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/
void hinic_clean_txq(struct hinic_txq *txq)
{
	struct net_device *netdev = txq->netdev;

	tx_free_irq(txq);

	free_all_tx_skbs(txq);

	devm_kfree(&netdev->dev, txq->irq_name);
	devm_kfree(&netdev->dev, txq->free_sges);
	devm_kfree(&netdev->dev, txq->sges);
}