// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg);

static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);

static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q);
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

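/* Allocate an skb for a received frame: a napi frags skb when the data
 * will be attached as page fragments, or a small linear skb otherwise.
 */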
static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
					  unsigned int len, bool frags)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	netdev = lif->netdev;
	stats = &q->lif->rxqstats[q->index];

	if (frags)
		skb = napi_get_frags(&q_to_qcq(q)->napi);
	else
		skb = netdev_alloc_skb_ip_align(netdev, len);

	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	return skb;
}

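/* Build a frag-based skb from the pages posted in this descriptor,
 * unmapping each page and attaching it to the skb as we go.
 */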
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	prefetch(page_address(page_info->page) + NET_IP_ALIGN);

	skb = ionic_rx_skb_alloc(q, len, true);
	if (unlikely(!skb))
		return NULL;

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!page_info->page)) {
			struct napi_struct *napi = &q_to_qcq(q)->napi;

			napi->skb = NULL;
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min_t(u16, len, PAGE_SIZE);
		len -= frag_len;

		dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
			       PAGE_SIZE, DMA_FROM_DEVICE);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page_info->page, 0, frag_len, PAGE_SIZE);
		page_info->page = NULL;
		page_info++;
		i--;
	} while (i > 0);

	return skb;
}

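/* For small frames, copy the data out of the posted page into a fresh
 * linear skb so the receive buffer can be recycled in place.
 */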
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	skb = ionic_rx_skb_alloc(q, len, false);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(!page_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(page_info->page), len);
	dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

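/* Per-completion receive handler: build the skb, fill in RSS hash,
 * checksum and VLAN metadata from the completion, then hand the skb
 * to GRO.
 */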
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	stats = q_to_rx_stats(q);
	netdev = q->lif->netdev;

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, cq_info);
	else
		skb = ionic_rx_frags(q, desc_info, cq_info);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM)) {
		if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
			stats->csum_complete++;
		}
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per qc completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

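/* Allocate and DMA-map one receive buffer page for the given slot. */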
static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_page_info *page_info)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!page_info)) {
		net_err_ratelimited("%s: %s invalid page_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	page_info->page = dev_alloc_page();
	if (unlikely(!page_info->page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}

	page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE,
					   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) {
		put_page(page_info->page);
		page_info->dma_addr = 0;
		page_info->page = NULL;
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_page_info *page_info)
{
	struct ionic_lif *lif = q->lif;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;

	if (unlikely(!page_info)) {
		net_err_ratelimited("%s: %s invalid page_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (unlikely(!page_info->page)) {
		net_err_ratelimited("%s: %s invalid page in free\n",
				    netdev->name, q->name);
		return;
	}

	dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	put_page(page_info->page);
	page_info->dma_addr = 0;
	page_info->page = NULL;
}

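/* Post receive buffers for every free descriptor slot.  For example,
 * with 4 KB pages a 1500-byte MTU fits in a single page, while a
 * 9000-byte MTU needs three page fragments per descriptor.
 */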
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_page_info *page_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int seg_len;
	unsigned int nfrags;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN;
	nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;

	for (i = ionic_q_space_avail(q); i; i--) {
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		sg_desc = desc_info->sg_desc;
		page_info = &desc_info->pages[0];

		if (page_info->page) { /* recycle the buffer */
			ionic_rxq_post(q, false, ionic_rx_clean, NULL);
			continue;
		}

		/* fill main descriptor - pages[0] */
		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->npages = nfrags;
		if (unlikely(ionic_rx_page_alloc(q, page_info))) {
			desc->addr = 0;
			desc->len = 0;
			return;
		}
		desc->addr = cpu_to_le64(page_info->dma_addr);
		seg_len = min_t(unsigned int, PAGE_SIZE, len);
		desc->len = cpu_to_le16(seg_len);
		remain_len -= seg_len;
		page_info++;

		/* fill sg descriptors - pages[1..n] */
		for (j = 0; j < nfrags - 1; j++) {
			if (page_info->page) /* recycle the sg buffer */
				continue;

			sg_elem = &sg_desc->elems[j];
			if (unlikely(ionic_rx_page_alloc(q, page_info))) {
				sg_elem->addr = 0;
				sg_elem->len = 0;
				return;
			}
			sg_elem->addr = cpu_to_le64(page_info->dma_addr);
			seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
			sg_elem->len = cpu_to_le16(seg_len);
			remain_len -= seg_len;
			page_info++;
		}

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);
}

static void ionic_rx_fill_cb(void *arg)
{
	ionic_rx_fill(arg);
}

void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_page_info *page_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->info[i];
		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
			page_info = &desc_info->pages[j];
			if (page_info->page)
				ionic_rx_page_free(q, page_info);
		}

		desc_info->npages = 0;
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}
}

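/* Apply the hardware coalescing value chosen by the DIM worker, then
 * feed fresh packet and byte counts into net_dim() for the next round
 * of dynamic interrupt moderation.
 */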
static void ionic_dim_update(struct ionic_qcq *qcq)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
			     lif->rxqcqs[qi]->intr.index,
			     qcq->intr.dim_coal_hw);

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  lif->txqstats[qi].pkts,
			  lif->txqstats[qi].bytes,
			  &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	if (work_done)
		ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

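/* Combined napi poll for queue pairs that share one interrupt:
 * service the tx completions first, then rx, and credit the interrupt
 * with the sum of both.
 */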
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);
	if (rx_work_done)
		ionic_rx_fill_cb(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(qcq);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);

	return rx_work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

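/* Unmap the buffers behind a completed tx descriptor and, if an skb
 * was attached, free it and wake the subqueue if it had been stopped.
 */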
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = desc_info->desc;
	struct device *dev = q->lif->ionic->dev;
	u8 opcode, flags, nsge;
	u16 queue_index;
	unsigned int i;
	u64 addr;

	decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
			    &opcode, &flags, &nsge, &addr);

	/* use unmap_single only if either this is not TSO,
	 * or this is first descriptor of a TSO
	 */
	if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
	    flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
		dma_unmap_single(dev, (dma_addr_t)addr,
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, (dma_addr_t)addr,
			       le16_to_cpu(desc->len), DMA_TO_DEVICE);

	for (i = 0; i < nsge; i++, elem++)
		dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
			       le16_to_cpu(elem->len), DMA_TO_DEVICE);

	if (cb_arg) {
		struct sk_buff *skb = cb_arg;
		u32 len = skb->len;

		queue_index = skb_get_queue_mapping(skb);
		if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
						      queue_index))) {
			netif_wake_subqueue(q->lif->netdev, queue_index);
			q->wake++;
		}
		dev_kfree_skb_any(skb);
		stats->clean++;
		netdev_tx_completed_queue(q_to_ndq(q), 1, len);
	}
}

static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_txq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	u16 index;

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}
}

static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (done) {
		skb_tx_timestamp(skb);
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, false, ionic_tx_clean, NULL);
	}
}

static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
						struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;

	*elem = sg_desc->elems;
	return desc;
}

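/* Segment a GSO skb across as many tx descriptors as needed, one
 * descriptor per resulting wire packet, mapping header and payload
 * pieces and posting each segment via ionic_tx_tso_post().
 */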
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *rewind_desc_info;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int frag_left = 0;
	unsigned int offset = 0;
	u16 abort = q->head_idx;
	unsigned int len_left;
	dma_addr_t desc_addr;
	unsigned int hdrlen, mss, seglen, nfrags;
	unsigned int left, len;
	u64 total_bytes = 0;
	u64 total_pkts = 0;
	u16 rewind = abort;
	skb_frag_t *frag;
	bool start, done, outer_csum, has_vlan, encap;
	dma_addr_t addr;
	u16 desc_len, vlan_tci;
	u8 desc_nsge;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	nfrags = skb_shinfo(skb)->nr_frags;
	len_left = skb->len - skb_headlen(skb);
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */
	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	seglen = hdrlen + mss;
	left = skb_headlen(skb);

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop skb->data up into desc segments */
	while (left > 0) {
		len = min(seglen, left);
		frag_left = seglen - len;
		desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
		if (dma_mapping_error(dev, desc_addr))
			goto err_out_abort;
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (nfrags > 0 && frag_left > 0)
			continue;
		done = (nfrags == 0 && left == 0);
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss, outer_csum,
				  vlan_tci, has_vlan,
				  start, done);
		total_pkts++;
		total_bytes += start ? len : len + hdrlen;
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop skb frags into desc segments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		offset = 0;
		left = skb_frag_size(frag);
		len_left -= left;
		nfrags--;
		stats->frags++;

		while (left > 0) {
			if (frag_left > 0) {
				len = min(frag_left, left);
				frag_left -= len;
				addr = ionic_tx_map_frag(q, frag, offset, len);
				if (dma_mapping_error(dev, addr))
					goto err_out_abort;
				elem->addr = cpu_to_le64(addr);
				elem->len = cpu_to_le16(len);
				elem++;
				desc_nsge++;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			} else {
				len = min(mss, left);
				frag_left = mss - len;
				desc_addr = ionic_tx_map_frag(q, frag,
							      offset, len);
				if (dma_mapping_error(dev, desc_addr))
					goto err_out_abort;
				desc_len = len;
				desc_nsge = 0;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			}
		}
	}

	stats->pkts += total_pkts;
	stats->bytes += total_bytes;
	stats->tso++;
	stats->tso_bytes += total_bytes;

	return 0;

err_out_abort:
	while (rewind != q->head_idx) {
		rewind_desc_info = &q->info[rewind];
		ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
		rewind = (rewind + 1) & (q->num_descs - 1);
	}
	q->head_idx = abort;

	return -ENOMEM;
}

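/* Build the first descriptor for an skb that wants partial checksum
 * offload, pointing the hardware at the checksum start and offset.
 */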
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	if (skb->csum_not_inet)
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	stats->csum_none++;

	return 0;
}

static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
	unsigned int len_left = skb->len - skb_headlen(skb);
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	skb_frag_t *frag;
	u16 len;

	for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
		len = skb_frag_size(frag);
		elem->len = cpu_to_le16(len);
		dma_addr = ionic_tx_map_frag(q, frag, 0, len);
		if (dma_mapping_error(dev, dma_addr))
			return -ENOMEM;
		elem->addr = cpu_to_le64(dma_addr);
		len_left -= len;
		stats->frags++;
	}

	return 0;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb);
	else
		err = ionic_tx_calc_no_csum(q, skb);
	if (err)
		return err;

	/* add frags */
	err = ionic_tx_skb_frags(q, skb);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

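/* Estimate how many descriptors an skb will consume.  For example, a
 * 64 KB GSO skb with an MSS of 1448 needs roughly 64 KB / 1448 + 1 = 46
 * descriptors; a non-TSO skb needs a single descriptor as long as its
 * frags fit in the sg list, otherwise it is linearized first.
 */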
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* If TSO, need roundup(skb->len/mss) descs */
	if (skb_is_gso(skb))
		return (skb->len / skb_shinfo(skb)->gso_size) + 1;

	/* If non-TSO, just need 1 desc and nr_frags sg elems */
	if (skb_shinfo(skb)->nr_frags <= sg_elems)
		return 1;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	/* Need 1 desc and zero sg elems */
	return 1;
}

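/* Stop the subqueue if there isn't room for ndescs more descriptors,
 * then re-check in case ionic_tx_clean freed space in the meantime.
 */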
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}