// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"
#define TX_IRQ_NO_PENDING       0
#define TX_IRQ_NO_COALESC       0
#define TX_IRQ_NO_LLI_TIMER     0
#define TX_IRQ_NO_CREDIT        0
#define TX_IRQ_NO_RESEND_TIMER  0

#define CI_UPDATE_NO_PENDING    0
#define CI_UPDATE_NO_COALESC    0

#define HW_CONS_IDX(sq)         be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN             32

#define MAX_PAYLOAD_OFFSET      221
#define TRANSPORT_OFFSET(l4_hdr, skb)   ((u32)((l4_hdr) - (skb)->data))

union hinic_l3 {
        struct iphdr *v4;
        struct ipv6hdr *v6;
        unsigned char *hdr;
};

union hinic_l4 {
        struct tcphdr *tcp;
        struct udphdr *udp;
        unsigned char *hdr;
};

enum hinic_offload_type {
        TX_OFFLOAD_TSO     = BIT(0),
        TX_OFFLOAD_CSUM    = BIT(1),
        TX_OFFLOAD_VLAN    = BIT(2),
        TX_OFFLOAD_INVALID = BIT(3),
};
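
/* Per-queue statistics are protected by a u64_stats_sync seqcount so the
 * 64-bit counters can be read consistently on 32-bit architectures; readers
 * retry the snapshot loop in hinic_txq_get_stats() if a writer raced with
 * them.
 */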
/**
 * hinic_txq_clean_stats - Clean the statistics of specific queue
 * @txq: Logical Tx Queue
 **/
void hinic_txq_clean_stats(struct hinic_txq *txq)
{
        struct hinic_txq_stats *txq_stats = &txq->txq_stats;

        u64_stats_update_begin(&txq_stats->syncp);
        txq_stats->pkts    = 0;
        txq_stats->bytes   = 0;
        txq_stats->tx_busy = 0;
        txq_stats->tx_wake = 0;
        txq_stats->tx_dropped = 0;
        txq_stats->big_frags_pkts = 0;
        u64_stats_update_end(&txq_stats->syncp);
}
/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
        struct hinic_txq_stats *txq_stats = &txq->txq_stats;
        unsigned int start;

        u64_stats_update_begin(&stats->syncp);
        do {
                start = u64_stats_fetch_begin(&txq_stats->syncp);
                stats->pkts = txq_stats->pkts;
                stats->bytes = txq_stats->bytes;
                stats->tx_busy = txq_stats->tx_busy;
                stats->tx_wake = txq_stats->tx_wake;
                stats->tx_dropped = txq_stats->tx_dropped;
                stats->big_frags_pkts = txq_stats->big_frags_pkts;
        } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
        u64_stats_update_end(&stats->syncp);
}
/**
 * txq_stats_init - Initialize the statistics of specific queue
 * @txq: Logical Tx Queue
 **/
static void txq_stats_init(struct hinic_txq *txq)
{
        struct hinic_txq_stats *txq_stats = &txq->txq_stats;

        u64_stats_init(&txq_stats->syncp);
        hinic_txq_clean_stats(txq);
}
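
/* DMA mapping convention used below: sges[0] always maps the linear part of
 * the skb (skb_headlen), and sges[i + 1] maps paged fragment i. The error
 * path unwinds every mapping created so far before returning.
 */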
/**
 * tx_map_skb - dma mapping for skb and return sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                      struct hinic_sge *sges)
{
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        skb_frag_t *frag;
        dma_addr_t dma_addr;
        int i, j;

        dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(&pdev->dev, dma_addr)) {
                dev_err(&pdev->dev, "Failed to map Tx skb data\n");
                return -EFAULT;
        }

        hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

        for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];

                dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
                                            skb_frag_size(frag),
                                            DMA_TO_DEVICE);
                if (dma_mapping_error(&pdev->dev, dma_addr)) {
                        dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
                        goto err_tx_map;
                }

                hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
        }

        return 0;

err_tx_map:
        for (j = 0; j < i; j++)
                dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
                               sges[j + 1].len, DMA_TO_DEVICE);

        dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
                         DMA_TO_DEVICE);
        return -EFAULT;
}
/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                         struct hinic_sge *sges)
{
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++)
                dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
                               sges[i + 1].len, DMA_TO_DEVICE);

        dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
                         DMA_TO_DEVICE);
}
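
/* The helpers below parse the (inner) L3/L4 headers once and translate them
 * into the l3/l4 type, header length and payload offset values that both the
 * checksum and the TSO offload paths program into the SQ task descriptor.
 */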
static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
                                 union hinic_l4 *l4,
                                 enum hinic_offload_type offload_type,
                                 enum hinic_l3_offload_type *l3_type,
                                 u8 *l4_proto)
{
        u8 *exthdr;

        if (ip->v4->version == 4) {
                *l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
                           IPV4_PKT_NO_CHKSUM_OFFLOAD :
                           IPV4_PKT_WITH_CHKSUM_OFFLOAD;
                *l4_proto = ip->v4->protocol;
        } else if (ip->v4->version == 6) {
                *l3_type = IPV6_PKT;
                exthdr = ip->hdr + sizeof(*ip->v6);
                *l4_proto = ip->v6->nexthdr;
                if (exthdr != l4->hdr) {
                        int start = exthdr - skb->data;
                        __be16 frag_off;

                        ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
                }
        } else {
                *l3_type = L3TYPE_UNKNOWN;
                *l4_proto = 0;
        }
}
static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
                              enum hinic_offload_type offload_type, u8 l4_proto,
                              enum hinic_l4_offload_type *l4_offload,
                              u32 *l4_len, u32 *offset)
{
        *l4_offload = OFFLOAD_DISABLE;
        *offset = 0;
        *l4_len = 0;

        switch (l4_proto) {
        case IPPROTO_TCP:
                *l4_offload = TCP_OFFLOAD_ENABLE;
                /* doff is in units of 4B */
                *l4_len = l4->tcp->doff * 4;
                *offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
                break;
        case IPPROTO_UDP:
                *l4_offload = UDP_OFFLOAD_ENABLE;
                *l4_len = sizeof(struct udphdr);
                *offset = TRANSPORT_OFFSET(l4->hdr, skb);
                break;
        case IPPROTO_SCTP:
                /* only csum offload supports sctp */
                if (offload_type != TX_OFFLOAD_CSUM)
                        break;

                *l4_offload = SCTP_OFFLOAD_ENABLE;
                *l4_len = sizeof(struct sctphdr);
                *offset = TRANSPORT_OFFSET(l4->hdr, skb);
                break;
        default:
                break;
        }
}
static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
{
        return (ip->v4->version == 4) ?
               csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
               csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}
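
/* csum_magic() computes the pseudo-header checksum seed the hardware expects
 * to find in the TCP/UDP checksum field before it fills in the real checksum;
 * the TSO path below stores its complement there.
 */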
static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
                       struct sk_buff *skb)
{
        u32 offset, l4_len, ip_identify, network_hdr_len;
        enum hinic_l3_offload_type l3_offload;
        enum hinic_l4_offload_type l4_offload;
        union hinic_l3 ip;
        union hinic_l4 l4;
        u8 l4_proto;

        if (!skb_is_gso(skb))
                return 0;

        if (skb_cow_head(skb, 0) < 0)
                return -EPROTONOSUPPORT;

        if (skb->encapsulation) {
                u32 gso_type = skb_shinfo(skb)->gso_type;
                u32 tunnel_type = 0;
                u32 l4_tunnel_len;

                ip.hdr = skb_network_header(skb);
                l4.hdr = skb_transport_header(skb);
                network_hdr_len = skb_inner_network_header_len(skb);

                if (ip.v4->version == 4) {
                        ip.v4->tot_len = 0;
                        l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
                } else if (ip.v4->version == 6) {
                        l3_offload = IPV6_PKT;
                } else {
                        l3_offload = 0;
                }

                hinic_task_set_outter_l3(task, l3_offload,
                                         skb_network_header_len(skb));

                if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
                        l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
                        tunnel_type = TUNNEL_UDP_CSUM;
                } else if (gso_type & SKB_GSO_UDP_TUNNEL) {
                        tunnel_type = TUNNEL_UDP_NO_CSUM;
                }

                l4_tunnel_len = skb_inner_network_offset(skb) -
                                skb_transport_offset(skb);
                hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);

                ip.hdr = skb_inner_network_header(skb);
                l4.hdr = skb_inner_transport_header(skb);
        } else {
                ip.hdr = skb_network_header(skb);
                l4.hdr = skb_transport_header(skb);
                network_hdr_len = skb_network_header_len(skb);
        }

        /* initialize inner IP header fields */
        if (ip.v4->version == 4)
                ip.v4->tot_len = 0;
        else
                ip.v6->payload_len = 0;

        get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
                             &l4_proto);

        hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);

        ip_identify = 0;
        if (l4_proto == IPPROTO_TCP)
                l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);

        get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
                          &l4_len, &offset);

        hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
                               ip_identify, skb_shinfo(skb)->gso_size);

        return 1;
}
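
/* offload_tso() above returns 1 when TSO fields were programmed, 0 when the
 * skb is not GSO, and a negative errno on failure; offload_csum() follows
 * the same convention for checksum offload and falls back to
 * skb_checksum_help() for tunnel types the hardware cannot parse.
 */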
static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
                        struct sk_buff *skb)
{
        enum hinic_l4_offload_type l4_offload;
        u32 offset, l4_len, network_hdr_len;
        enum hinic_l3_offload_type l3_type;
        u32 tunnel_type = NOT_TUNNEL;
        union hinic_l3 ip;
        union hinic_l4 l4;
        u8 l4_proto;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (skb->encapsulation) {
                u32 l4_tunnel_len;

                tunnel_type = TUNNEL_UDP_NO_CSUM;
                ip.hdr = skb_network_header(skb);

                if (ip.v4->version == 4) {
                        l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
                        l4_proto = ip.v4->protocol;
                } else if (ip.v4->version == 6) {
                        unsigned char *exthdr;
                        __be16 frag_off;

                        l3_type = IPV6_PKT;
                        tunnel_type = TUNNEL_UDP_CSUM;
                        exthdr = ip.hdr + sizeof(*ip.v6);
                        l4_proto = ip.v6->nexthdr;
                        l4.hdr = skb_transport_header(skb);
                        if (l4.hdr != exthdr)
                                ipv6_skip_exthdr(skb, exthdr - skb->data,
                                                 &l4_proto, &frag_off);
                } else {
                        l3_type = L3TYPE_UNKNOWN;
                        l4_proto = IPPROTO_RAW;
                }

                hinic_task_set_outter_l3(task, l3_type,
                                         skb_network_header_len(skb));

                switch (l4_proto) {
                case IPPROTO_UDP:
                        l4_tunnel_len = skb_inner_network_offset(skb) -
                                        skb_transport_offset(skb);
                        ip.hdr = skb_inner_network_header(skb);
                        l4.hdr = skb_inner_transport_header(skb);
                        network_hdr_len = skb_inner_network_header_len(skb);
                        break;
                case IPPROTO_IPIP:
                case IPPROTO_IPV6:
                        tunnel_type = NOT_TUNNEL;
                        l4_tunnel_len = 0;

                        ip.hdr = skb_inner_network_header(skb);
                        l4.hdr = skb_transport_header(skb);
                        network_hdr_len = skb_network_header_len(skb);
                        break;
                default:
                        /* Unsupported tunnel packet, disable csum offload */
                        skb_checksum_help(skb);
                        return 0;
                }

                hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
        } else {
                ip.hdr = skb_network_header(skb);
                l4.hdr = skb_transport_header(skb);
                network_hdr_len = skb_network_header_len(skb);
        }

        get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
                             &l4_proto);

        hinic_task_set_inner_l3(task, l3_type, network_hdr_len);

        get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
                          &l4_len, &offset);

        hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);

        return 1;
}
static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
                         u16 vlan_tag, u16 vlan_pri)
{
        task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
                           HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);

        *queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI);
}
static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
                            u32 *queue_info)
{
        enum hinic_offload_type offload = 0;
        u16 vlan_tag;
        int enabled;

        enabled = offload_tso(task, queue_info, skb);
        if (enabled > 0) {
                offload |= TX_OFFLOAD_TSO;
        } else if (enabled == 0) {
                enabled = offload_csum(task, queue_info, skb);
                if (enabled)
                        offload |= TX_OFFLOAD_CSUM;
        } else {
                return -EPROTONOSUPPORT;
        }

        if (unlikely(skb_vlan_tag_present(skb))) {
                vlan_tag = skb_vlan_tag_get(skb);
                offload_vlan(task, queue_info, vlan_tag,
                             vlan_tag >> VLAN_PRIO_SHIFT);
                offload |= TX_OFFLOAD_VLAN;
        }

        if (offload)
                hinic_task_set_l2hdr(task, skb_network_offset(skb));

        /* payload offset must not exceed 221 */
        if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
            MAX_PAYLOAD_OFFSET) {
                return -EPROTONOSUPPORT;
        }

        /* mss must not be less than 80 */
        if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
                *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
                *queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
        }

        return 0;
}
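
/* hinic_lb_xmit_frame() is the offload-free transmit routine (used for
 * loopback testing); it mirrors hinic_xmit_frame() below but skips the
 * length/sge sanity checks and the checksum/TSO/VLAN offload processing.
 * The doorbell in both paths is only rung when netdev_xmit_more() says no
 * further skbs are pending or the queue has been stopped.
 */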
netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct hinic_dev *nic_dev = netdev_priv(netdev);
        u16 prod_idx, q_id = skb->queue_mapping;
        struct netdev_queue *netdev_txq;
        int nr_sges, err = NETDEV_TX_OK;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        struct hinic_txq *txq;
        struct hinic_qp *qp;

        txq = &nic_dev->txqs[q_id];
        qp = container_of(txq->sq, struct hinic_qp, sq);
        nr_sges = skb_shinfo(skb)->nr_frags + 1;

        err = tx_map_skb(nic_dev, skb, txq->sges);
        if (err)
                goto skb_error;

        wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

        sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
        if (!sq_wqe) {
                netif_stop_subqueue(netdev, qp->q_id);

                sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
                if (sq_wqe) {
                        netif_wake_subqueue(nic_dev->netdev, qp->q_id);
                        goto process_sq_wqe;
                }

                tx_unmap_skb(nic_dev, skb, txq->sges);

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_busy++;
                u64_stats_update_end(&txq->txq_stats.syncp);
                err = NETDEV_TX_BUSY;
                wqe_size = 0;
                goto flush_skbs;
        }

process_sq_wqe:
        hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
        hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
        netdev_txq = netdev_get_tx_queue(netdev, q_id);
        if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
                hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

        return err;

skb_error:
        dev_kfree_skb_any(skb);
        u64_stats_update_begin(&txq->txq_stats.syncp);
        txq->txq_stats.tx_dropped++;
        u64_stats_update_end(&txq->txq_stats.syncp);

        return NETDEV_TX_OK;
}
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct hinic_dev *nic_dev = netdev_priv(netdev);
        u16 prod_idx, q_id = skb->queue_mapping;
        struct netdev_queue *netdev_txq;
        int nr_sges, err = NETDEV_TX_OK;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        struct hinic_txq *txq;
        struct hinic_qp *qp;

        txq = &nic_dev->txqs[q_id];
        qp = container_of(txq->sq, struct hinic_qp, sq);

        if (skb->len < MIN_SKB_LEN) {
                if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
                        netdev_err(netdev, "Failed to pad skb\n");
                        goto update_error_stats;
                }

                skb->len = MIN_SKB_LEN;
        }

        nr_sges = skb_shinfo(skb)->nr_frags + 1;
        if (nr_sges > 17) {
                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.big_frags_pkts++;
                u64_stats_update_end(&txq->txq_stats.syncp);
        }

        if (nr_sges > txq->max_sges) {
                netdev_err(netdev, "Too many Tx sges\n");
                goto skb_error;
        }

        err = tx_map_skb(nic_dev, skb, txq->sges);
        if (err)
                goto skb_error;

        wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

        sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
        if (!sq_wqe) {
                netif_stop_subqueue(netdev, qp->q_id);

                /* Check for the case where free_tx_poll ran on another CPU
                 * and freed space after we stopped the subqueue.
                 */
                sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
                if (sq_wqe) {
                        netif_wake_subqueue(nic_dev->netdev, qp->q_id);
                        goto process_sq_wqe;
                }

                tx_unmap_skb(nic_dev, skb, txq->sges);

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_busy++;
                u64_stats_update_end(&txq->txq_stats.syncp);
                err = NETDEV_TX_BUSY;
                wqe_size = 0;
                goto flush_skbs;
        }

process_sq_wqe:
        hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);

        err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
        if (err)
                goto offload_error;

        hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
        netdev_txq = netdev_get_tx_queue(netdev, q_id);
        if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
                hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

        return err;

offload_error:
        hinic_sq_return_wqe(txq->sq, wqe_size);
        tx_unmap_skb(nic_dev, skb, txq->sges);

skb_error:
        dev_kfree_skb_any(skb);

update_error_stats:
        u64_stats_update_begin(&txq->txq_stats.syncp);
        txq->txq_stats.tx_dropped++;
        u64_stats_update_end(&txq->txq_stats.syncp);

        return NETDEV_TX_OK;
}
/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                        struct hinic_sge *sges)
{
        tx_unmap_skb(nic_dev, skb, sges);

        dev_kfree_skb_any(skb);
}
/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct hinic_sq *sq = txq->sq;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        struct sk_buff *skb;
        int nr_sges;
        u16 ci;

        while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
                sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
                if (!sq_wqe)
                        break;

                nr_sges = skb_shinfo(skb)->nr_frags + 1;
                hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
                hinic_sq_put_wqe(sq, wqe_size);
                tx_free_skb(nic_dev, skb, txq->free_sges);
        }
}
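
/* Completion handling: the hardware posts its consumer index (big endian) at
 * sq->hw_ci_addr, read via HW_CONS_IDX(). free_tx_poll() advances the
 * software consumer index up to that point, unmapping and freeing the
 * finished skbs, and re-wakes a stopped subqueue once enough WQEBBs are free.
 */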
/**
 * free_tx_poll - free finished tx skbs in tx queue that is connected to napi
 * @napi: napi
 * @budget: number of tx completions to process
 *
 * Return number of Tx completions processed, at most @budget
 **/
static int free_tx_poll(struct napi_struct *napi, int budget)
{
        struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
        struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct netdev_queue *netdev_txq;
        struct hinic_sq *sq = txq->sq;
        struct hinic_wq *wq = sq->wq;
        struct hinic_sq_wqe *sq_wqe;
        unsigned int wqe_size;
        int nr_sges, pkts = 0;
        struct sk_buff *skb;
        u64 tx_bytes = 0;
        u16 hw_ci, sw_ci;

        do {
                hw_ci = HW_CONS_IDX(sq) & wq->mask;

                dma_rmb();

                /* Reading a WQEBB to get real WQE size and consumer index. */
                sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
                if (!sq_wqe ||
                    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
                        break;

                /* If this WQE has multiple WQEBBs, read again to get the
                 * full-size WQE.
                 */
                if (wqe_size > wq->wqebb_size) {
                        sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
                        if (unlikely(!sq_wqe))
                                break;
                }

                tx_bytes += skb->len;
                pkts++;

                nr_sges = skb_shinfo(skb)->nr_frags + 1;
                hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);
                hinic_sq_put_wqe(sq, wqe_size);
                tx_free_skb(nic_dev, skb, txq->free_sges);
        } while (pkts < budget);

        if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
            hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
                netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

                __netif_tx_lock(netdev_txq, smp_processor_id());
                if (!netif_testing(nic_dev->netdev))
                        netif_wake_subqueue(nic_dev->netdev, qp->q_id);
                __netif_tx_unlock(netdev_txq);

                u64_stats_update_begin(&txq->txq_stats.syncp);
                txq->txq_stats.tx_wake++;
                u64_stats_update_end(&txq->txq_stats.syncp);
        }

        u64_stats_update_begin(&txq->txq_stats.syncp);
        txq->txq_stats.bytes += tx_bytes;
        txq->txq_stats.pkts += pkts;
        u64_stats_update_end(&txq->txq_stats.syncp);

        if (pkts < budget) {
                napi_complete(napi);
                if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
                        hinic_hwdev_set_msix_state(nic_dev->hwdev,
                                                   sq->msix_entry,
                                                   HINIC_MSIX_ENABLE);

                return pkts;
        }

        return budget;
}
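
/* tx_irq() only masks the queue's MSI-X vector (on the PF) and schedules
 * NAPI; the vector is re-enabled by free_tx_poll() when it completes below
 * budget.
 */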
static irqreturn_t tx_irq(int irq, void *data)
{
        struct hinic_txq *txq = data;
        struct hinic_dev *nic_dev;

        nic_dev = netdev_priv(txq->netdev);

        if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
                /* Disable the interrupt until napi completes */
                hinic_hwdev_set_msix_state(nic_dev->hwdev,
                                           txq->sq->msix_entry,
                                           HINIC_MSIX_DISABLE);

        hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

        napi_schedule(&txq->napi);
        return IRQ_HANDLED;
}
static int tx_request_irq(struct hinic_txq *txq)
{
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
        struct hinic_msix_config interrupt_info = {0};
        struct hinic_intr_coal_info *intr_coal = NULL;
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_sq *sq = txq->sq;
        struct hinic_qp *qp;
        int err;

        qp = container_of(sq, struct hinic_qp, sq);

        netif_napi_add_weight(txq->netdev, &txq->napi, free_tx_poll,
                              nic_dev->tx_weight);

        hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
                             TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
                             TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
                             TX_IRQ_NO_RESEND_TIMER);

        intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id];
        interrupt_info.msix_index = sq->msix_entry;
        interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
        interrupt_info.pending_cnt = intr_coal->pending_limt;
        interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

        err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
        if (err) {
                netif_err(nic_dev, drv, txq->netdev,
                          "Failed to set TX interrupt coalescing attribute\n");
                netif_napi_del(&txq->napi);
                return err;
        }

        err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
        if (err) {
                dev_err(&pdev->dev, "Failed to request Tx irq\n");
                netif_napi_del(&txq->napi);
                return err;
        }

        return 0;
}
static void tx_free_irq(struct hinic_txq *txq)
{
        struct hinic_sq *sq = txq->sq;

        free_irq(sq->irq, txq);
        netif_napi_del(&txq->napi);
}
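
/* Queue setup order below: statistics, sge arrays, IRQ name, hardware
 * consumer-index address, then the IRQ/NAPI registration; the error labels
 * release the resources in the reverse order of allocation.
 */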
/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
                   struct net_device *netdev)
{
        struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
        struct hinic_dev *nic_dev = netdev_priv(netdev);
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        int err, irqname_len;

        txq->netdev = netdev;
        txq->sq = sq;

        txq_stats_init(txq);

        txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

        txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
                                 sizeof(*txq->sges), GFP_KERNEL);
        if (!txq->sges)
                return -ENOMEM;

        txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
                                      sizeof(*txq->free_sges), GFP_KERNEL);
        if (!txq->free_sges) {
                err = -ENOMEM;
                goto err_alloc_free_sges;
        }

        irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
        txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
        if (!txq->irq_name) {
                err = -ENOMEM;
                goto err_alloc_irqname;
        }

        sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);

        err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
                                         CI_UPDATE_NO_COALESC);
        if (err)
                goto err_hw_ci;

        err = tx_request_irq(txq);
        if (err) {
                netdev_err(netdev, "Failed to request Tx irq\n");
                goto err_req_tx_irq;
        }

        return 0;

err_req_tx_irq:
err_hw_ci:
        devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
        devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
        devm_kfree(&netdev->dev, txq->sges);
        return err;
}
/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/
void hinic_clean_txq(struct hinic_txq *txq)
{
        struct net_device *netdev = txq->netdev;

        tx_free_irq(txq);

        free_all_tx_skbs(txq);

        devm_kfree(&netdev->dev, txq->irq_name);
        devm_kfree(&netdev->dev, txq->free_sges);
        devm_kfree(&netdev->dev, txq->sges);
}