1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
4 /* The driver transmit and receive code */
6 #include <linux/prefetch.h>
8 #include <linux/bpf_trace.h>
10 #include "ice_txrx_lib.h"
13 #include "ice_trace.h"
14 #include "ice_dcb_lib.h"
17 #define ICE_RX_HDR_SIZE 256
19 #define FDIR_DESC_RXDID 0x40
20 #define ICE_FDIR_CLEAN_DELAY 10
23 * ice_prgm_fdir_fltr - Program a Flow Director filter
24 * @vsi: VSI to send dummy packet
25 * @fdir_desc: flow director descriptor
26 * @raw_packet: allocated buffer for flow director
29 ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
32 struct ice_tx_buf *tx_buf, *first;
33 struct ice_fltr_desc *f_desc;
34 struct ice_tx_desc *tx_desc;
35 struct ice_ring *tx_ring;
44 tx_ring = vsi->tx_rings[0];
45 if (!tx_ring || !tx_ring->desc)
49 /* we are using two descriptors to add/del a filter and we can wait */
50 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
53 msleep_interruptible(1);
56 dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
59 if (dma_mapping_error(dev, dma))
62 /* grab the next descriptor */
63 i = tx_ring->next_to_use;
64 first = &tx_ring->tx_buf[i];
65 f_desc = ICE_TX_FDIRDESC(tx_ring, i);
66 memcpy(f_desc, fdir_desc, sizeof(*f_desc));
69 i = (i < tx_ring->count) ? i : 0;
70 tx_desc = ICE_TX_DESC(tx_ring, i);
71 tx_buf = &tx_ring->tx_buf[i];
74 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
76 memset(tx_buf, 0, sizeof(*tx_buf));
77 dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
78 dma_unmap_addr_set(tx_buf, dma, dma);
80 tx_desc->buf_addr = cpu_to_le64(dma);
81 td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
84 tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
85 tx_buf->raw_buf = raw_packet;
87 tx_desc->cmd_type_offset_bsz =
88 ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
90 /* Force memory write to complete before letting h/w know
91 * there are new descriptors to fetch.
95 /* mark the data descriptor to be watched */
96 first->next_to_watch = tx_desc;
98 writel(tx_ring->next_to_use, tx_ring->tail);
104 * ice_unmap_and_free_tx_buf - Release a Tx buffer
105 * @ring: the ring that owns the buffer
106 * @tx_buf: the buffer to free
109 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
112 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
113 devm_kfree(ring->dev, tx_buf->raw_buf);
114 else if (ice_ring_is_xdp(ring))
115 page_frag_free(tx_buf->raw_buf);
117 dev_kfree_skb_any(tx_buf->skb);
118 if (dma_unmap_len(tx_buf, len))
119 dma_unmap_single(ring->dev,
120 dma_unmap_addr(tx_buf, dma),
121 dma_unmap_len(tx_buf, len),
123 } else if (dma_unmap_len(tx_buf, len)) {
124 dma_unmap_page(ring->dev,
125 dma_unmap_addr(tx_buf, dma),
126 dma_unmap_len(tx_buf, len),
130 tx_buf->next_to_watch = NULL;
132 dma_unmap_len_set(tx_buf, len, 0);
133 /* tx_buf must be completely set up in the transmit path */
136 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
138 return netdev_get_tx_queue(ring->netdev, ring->q_index);
142 * ice_clean_tx_ring - Free all Tx software buffers in a ring
143 * @tx_ring: ring to be cleaned
145 void ice_clean_tx_ring(struct ice_ring *tx_ring)
149 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
150 ice_xsk_clean_xdp_ring(tx_ring);
154 /* ring already cleared, nothing to do */
155 if (!tx_ring->tx_buf)
158 /* Free all the Tx ring sk_buffs */
159 for (i = 0; i < tx_ring->count; i++)
160 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
163 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
165 /* Zero out the descriptor ring */
166 memset(tx_ring->desc, 0, tx_ring->size);
168 tx_ring->next_to_use = 0;
169 tx_ring->next_to_clean = 0;
171 if (!tx_ring->netdev)
174 /* cleanup Tx queue statistics */
175 netdev_tx_reset_queue(txring_txq(tx_ring));
179 * ice_free_tx_ring - Free Tx resources per queue
180 * @tx_ring: Tx descriptor ring for a specific queue
182 * Free all transmit software resources
184 void ice_free_tx_ring(struct ice_ring *tx_ring)
186 ice_clean_tx_ring(tx_ring);
187 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
188 tx_ring->tx_buf = NULL;
191 dmam_free_coherent(tx_ring->dev, tx_ring->size,
192 tx_ring->desc, tx_ring->dma);
193 tx_ring->desc = NULL;
198 * ice_clean_tx_irq - Reclaim resources after transmit completes
199 * @tx_ring: Tx ring to clean
200 * @napi_budget: Used to determine if we are in netpoll
202 * Returns true if there's any budget left (i.e. the clean is finished)
204 static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
206 unsigned int total_bytes = 0, total_pkts = 0;
207 unsigned int budget = ICE_DFLT_IRQ_WORK;
208 struct ice_vsi *vsi = tx_ring->vsi;
209 s16 i = tx_ring->next_to_clean;
210 struct ice_tx_desc *tx_desc;
211 struct ice_tx_buf *tx_buf;
213 tx_buf = &tx_ring->tx_buf[i];
214 tx_desc = ICE_TX_DESC(tx_ring, i);
217 prefetch(&vsi->state);
220 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
222 /* if next_to_watch is not set then there is no work pending */
226 smp_rmb(); /* prevent any other reads prior to eop_desc */
228 ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
229 /* if the descriptor isn't done, no work yet to do */
230 if (!(eop_desc->cmd_type_offset_bsz &
231 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
234 /* clear next_to_watch to prevent false hangs */
235 tx_buf->next_to_watch = NULL;
237 /* update the statistics for this packet */
238 total_bytes += tx_buf->bytecount;
239 total_pkts += tx_buf->gso_segs;
241 if (ice_ring_is_xdp(tx_ring))
242 page_frag_free(tx_buf->raw_buf);
245 napi_consume_skb(tx_buf->skb, napi_budget);
247 /* unmap skb header data */
248 dma_unmap_single(tx_ring->dev,
249 dma_unmap_addr(tx_buf, dma),
250 dma_unmap_len(tx_buf, len),
253 /* clear tx_buf data */
255 dma_unmap_len_set(tx_buf, len, 0);
257 /* unmap remaining buffers */
258 while (tx_desc != eop_desc) {
259 ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
265 tx_buf = tx_ring->tx_buf;
266 tx_desc = ICE_TX_DESC(tx_ring, 0);
269 /* unmap any remaining paged data */
270 if (dma_unmap_len(tx_buf, len)) {
271 dma_unmap_page(tx_ring->dev,
272 dma_unmap_addr(tx_buf, dma),
273 dma_unmap_len(tx_buf, len),
275 dma_unmap_len_set(tx_buf, len, 0);
278 ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
280 /* move us one more past the eop_desc for start of next pkt */
286 tx_buf = tx_ring->tx_buf;
287 tx_desc = ICE_TX_DESC(tx_ring, 0);
292 /* update budget accounting */
294 } while (likely(budget));
297 tx_ring->next_to_clean = i;
299 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
301 if (ice_ring_is_xdp(tx_ring))
304 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
307 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
308 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
309 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
310 /* Make sure that anybody stopping the queue after this
311 * sees the new next_to_clean.
314 if (__netif_subqueue_stopped(tx_ring->netdev,
316 !test_bit(ICE_VSI_DOWN, vsi->state)) {
317 netif_wake_subqueue(tx_ring->netdev,
319 ++tx_ring->tx_stats.restart_q;
327 * ice_setup_tx_ring - Allocate the Tx descriptors
328 * @tx_ring: the Tx ring to set up
330 * Return 0 on success, negative on error
332 int ice_setup_tx_ring(struct ice_ring *tx_ring)
334 struct device *dev = tx_ring->dev;
339 /* warn if we are about to overwrite the pointer */
340 WARN_ON(tx_ring->tx_buf);
342 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
344 if (!tx_ring->tx_buf)
347 /* round up to nearest page */
348 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
350 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
352 if (!tx_ring->desc) {
353 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
358 tx_ring->next_to_use = 0;
359 tx_ring->next_to_clean = 0;
360 tx_ring->tx_stats.prev_pkt = -1;
364 devm_kfree(dev, tx_ring->tx_buf);
365 tx_ring->tx_buf = NULL;
370 * ice_clean_rx_ring - Free Rx buffers
371 * @rx_ring: ring to be cleaned
373 void ice_clean_rx_ring(struct ice_ring *rx_ring)
375 struct device *dev = rx_ring->dev;
378 /* ring already cleared, nothing to do */
379 if (!rx_ring->rx_buf)
383 dev_kfree_skb(rx_ring->skb);
387 if (rx_ring->xsk_pool) {
388 ice_xsk_clean_rx_ring(rx_ring);
392 /* Free all the Rx ring sk_buffs */
393 for (i = 0; i < rx_ring->count; i++) {
394 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
399 /* Invalidate cache lines that may have been written to by
400 * device so that we avoid corrupting memory.
402 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
407 /* free resources associated with mapping */
408 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
409 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
410 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
413 rx_buf->page_offset = 0;
417 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
419 /* Zero out the descriptor ring */
420 memset(rx_ring->desc, 0, rx_ring->size);
422 rx_ring->next_to_alloc = 0;
423 rx_ring->next_to_clean = 0;
424 rx_ring->next_to_use = 0;
428 * ice_free_rx_ring - Free Rx resources
429 * @rx_ring: ring to clean the resources from
431 * Free all receive software resources
433 void ice_free_rx_ring(struct ice_ring *rx_ring)
435 ice_clean_rx_ring(rx_ring);
436 if (rx_ring->vsi->type == ICE_VSI_PF)
437 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
438 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
439 rx_ring->xdp_prog = NULL;
440 devm_kfree(rx_ring->dev, rx_ring->rx_buf);
441 rx_ring->rx_buf = NULL;
444 dmam_free_coherent(rx_ring->dev, rx_ring->size,
445 rx_ring->desc, rx_ring->dma);
446 rx_ring->desc = NULL;
451 * ice_setup_rx_ring - Allocate the Rx descriptors
452 * @rx_ring: the Rx ring to set up
454 * Return 0 on success, negative on error
456 int ice_setup_rx_ring(struct ice_ring *rx_ring)
458 struct device *dev = rx_ring->dev;
463 /* warn if we are about to overwrite the pointer */
464 WARN_ON(rx_ring->rx_buf);
466 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
468 if (!rx_ring->rx_buf)
471 /* round up to nearest page */
472 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
474 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
476 if (!rx_ring->desc) {
477 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
482 rx_ring->next_to_use = 0;
483 rx_ring->next_to_clean = 0;
485 if (ice_is_xdp_ena_vsi(rx_ring->vsi))
486 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
488 if (rx_ring->vsi->type == ICE_VSI_PF &&
489 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
490 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
491 rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
496 devm_kfree(dev, rx_ring->rx_buf);
497 rx_ring->rx_buf = NULL;
502 ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
504 unsigned int truesize;
506 #if (PAGE_SIZE < 8192)
507 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
509 truesize = rx_ring->rx_offset ?
510 SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
511 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
512 SKB_DATA_ALIGN(size);
518 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
520 * @xdp: xdp_buff used as input to the XDP program
521 * @xdp_prog: XDP program to run
523 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
526 ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
527 struct bpf_prog *xdp_prog)
529 struct ice_ring *xdp_ring;
533 act = bpf_prog_run_xdp(xdp_prog, xdp);
538 xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
539 result = ice_xmit_xdp_buff(xdp, xdp_ring);
540 if (result == ICE_XDP_CONSUMED)
544 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
547 return ICE_XDP_REDIR;
549 bpf_warn_invalid_xdp_action(act);
553 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
556 return ICE_XDP_CONSUMED;
561 * ice_xdp_xmit - submit packets to XDP ring for transmission
563 * @n: number of XDP frames to be transmitted
564 * @frames: XDP frames to be transmitted
565 * @flags: transmit flags
567 * Returns the number of frames successfully sent. Failed frames
568 * will be freed by the XDP core.
569 * For error cases, a negative errno code is returned and no frames
570 * are transmitted (the caller must handle freeing frames).
573 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
576 struct ice_netdev_priv *np = netdev_priv(dev);
577 unsigned int queue_index = smp_processor_id();
578 struct ice_vsi *vsi = np->vsi;
579 struct ice_ring *xdp_ring;
582 if (test_bit(ICE_VSI_DOWN, vsi->state))
585 if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
588 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
591 xdp_ring = vsi->xdp_rings[queue_index];
592 for (i = 0; i < n; i++) {
593 struct xdp_frame *xdpf = frames[i];
596 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
597 if (err != ICE_XDP_TX)
602 if (unlikely(flags & XDP_XMIT_FLUSH))
603 ice_xdp_ring_update_tail(xdp_ring);
609 * ice_alloc_mapped_page - recycle or make a new page
610 * @rx_ring: ring to use
611 * @bi: rx_buf struct to modify
613 * Returns true if the page was successfully allocated or reused
617 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
619 struct page *page = bi->page;
622 /* since we are recycling buffers we should seldom need to alloc */
626 /* alloc new page for storage */
627 page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
628 if (unlikely(!page)) {
629 rx_ring->rx_stats.alloc_page_failed++;
633 /* map page for use */
634 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
635 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
637 /* if mapping failed free memory back to system since
638 * there isn't much point in holding memory we can't use
640 if (dma_mapping_error(rx_ring->dev, dma)) {
641 __free_pages(page, ice_rx_pg_order(rx_ring));
642 rx_ring->rx_stats.alloc_page_failed++;
648 bi->page_offset = rx_ring->rx_offset;
649 page_ref_add(page, USHRT_MAX - 1);
650 bi->pagecnt_bias = USHRT_MAX;
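/* Refcount bookkeeping note: the page refcount is inflated once by
 * USHRT_MAX - 1 and the driver's share is tracked in pagecnt_bias, so
 * recycling a buffer only touches the local bias instead of the atomic
 * page refcount; ice_can_reuse_rx_page() restocks both once the bias
 * has been drained.
 */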
656 * ice_alloc_rx_bufs - Replace used receive buffers
657 * @rx_ring: ring to place buffers on
658 * @cleaned_count: number of buffers to replace
660 * Returns false if all allocations were successful, true if any fail. Returning
661 * true signals to the caller that we didn't replace cleaned_count buffers and
662 * there is more work to do.
664 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
665 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
666 * multiple tail writes per call.
668 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
670 union ice_32b_rx_flex_desc *rx_desc;
671 u16 ntu = rx_ring->next_to_use;
672 struct ice_rx_buf *bi;
674 /* do nothing if no valid netdev defined */
675 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
679 /* get the Rx descriptor and buffer based on next_to_use */
680 rx_desc = ICE_RX_DESC(rx_ring, ntu);
681 bi = &rx_ring->rx_buf[ntu];
684 /* if we fail here, we have work remaining */
685 if (!ice_alloc_mapped_page(rx_ring, bi))
688 /* sync the buffer for use by the device */
689 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
694 /* Refresh the desc even if buffer_addrs didn't change
695 * because each write-back erases this info.
697 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
702 if (unlikely(ntu == rx_ring->count)) {
703 rx_desc = ICE_RX_DESC(rx_ring, 0);
704 bi = rx_ring->rx_buf;
708 /* clear the status bits for the next_to_use descriptor */
709 rx_desc->wb.status_error0 = 0;
712 } while (cleaned_count);
714 if (rx_ring->next_to_use != ntu)
715 ice_release_rx_desc(rx_ring, ntu);
717 return !!cleaned_count;
721 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
722 * @rx_buf: Rx buffer to adjust
723 * @size: Size of adjustment
725 * Update the offset within page so that Rx buf will be ready to be reused.
726 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
727 * so the second half of the page assigned to the Rx buffer will be used;
728 * otherwise the offset is moved by "size" bytes.
731 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
733 #if (PAGE_SIZE < 8192)
734 /* flip page offset to other buffer */
735 rx_buf->page_offset ^= size;
737 /* move offset up to the next cache line */
738 rx_buf->page_offset += size;
743 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
744 * @rx_buf: buffer containing the page
745 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
747 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
748 * which will assign the current buffer to the buffer that next_to_alloc is
749 * pointing to; otherwise, the DMA mapping needs to be destroyed and the page freed.
753 ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
755 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
756 struct page *page = rx_buf->page;
758 /* avoid re-using remote and pfmemalloc pages */
759 if (!dev_page_is_reusable(page))
762 #if (PAGE_SIZE < 8192)
763 /* if we are only owner of page we can reuse it */
764 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
767 #define ICE_LAST_OFFSET \
768 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
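/* ICE_LAST_OFFSET is the highest page_offset at which another ICE_RXBUF_2048
 * chunk still fits within SKB_WITH_OVERHEAD(PAGE_SIZE); past that point the
 * page cannot be flipped for reuse and must go back to the allocator.
 */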
769 if (rx_buf->page_offset > ICE_LAST_OFFSET)
771 #endif /* PAGE_SIZE < 8192) */
773 /* If we have drained the page fragment pool we need to update
774 * the pagecnt_bias and page count so that we fully restock the
775 * number of references the driver holds.
777 if (unlikely(pagecnt_bias == 1)) {
778 page_ref_add(page, USHRT_MAX - 1);
779 rx_buf->pagecnt_bias = USHRT_MAX;
786 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
787 * @rx_ring: Rx descriptor ring to transact packets on
788 * @rx_buf: buffer containing page to add
789 * @skb: sk_buff to place the data into
790 * @size: packet length from rx_desc
792 * This function will add the data contained in rx_buf->page to the skb.
793 * It will just attach the page as a frag to the skb.
794 * The function will then update the page offset.
797 ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
798 struct sk_buff *skb, unsigned int size)
800 #if (PAGE_SIZE >= 8192)
801 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
803 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
808 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
809 rx_buf->page_offset, size, truesize);
811 /* page is being used so we must update the page offset */
812 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
816 * ice_reuse_rx_page - page flip buffer and store it back on the ring
817 * @rx_ring: Rx descriptor ring to store buffers on
818 * @old_buf: donor buffer to have page reused
820 * Synchronizes page for reuse by the adapter
823 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
825 u16 nta = rx_ring->next_to_alloc;
826 struct ice_rx_buf *new_buf;
828 new_buf = &rx_ring->rx_buf[nta];
830 /* update, and store next to alloc */
832 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
834 /* Transfer page from old buffer to new buffer.
835 * Move each member individually to avoid possible store
836 * forwarding stalls and unnecessary copy of skb.
838 new_buf->dma = old_buf->dma;
839 new_buf->page = old_buf->page;
840 new_buf->page_offset = old_buf->page_offset;
841 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
845 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
846 * @rx_ring: Rx descriptor ring to transact packets on
847 * @size: size of buffer to add to skb
848 * @rx_buf_pgcnt: rx_buf page refcount
850 * This function will pull an Rx buffer from the ring and synchronize it
851 * for use by the CPU.
853 static struct ice_rx_buf *
854 ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
857 struct ice_rx_buf *rx_buf;
859 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
861 #if (PAGE_SIZE < 8192)
862 page_count(rx_buf->page);
866 prefetchw(rx_buf->page);
870 /* we are reusing so sync this buffer for CPU use */
871 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
872 rx_buf->page_offset, size,
875 /* We have pulled a buffer for use, so decrement pagecnt_bias */
876 rx_buf->pagecnt_bias--;
882 * ice_build_skb - Build skb around an existing buffer
883 * @rx_ring: Rx descriptor ring to transact packets on
884 * @rx_buf: Rx buffer to pull data from
885 * @xdp: xdp_buff pointing to the data
887 * This function builds an skb around an existing Rx buffer, taking care
888 * to set up the skb correctly and avoid any memcpy overhead.
890 static struct sk_buff *
891 ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
892 struct xdp_buff *xdp)
894 u8 metasize = xdp->data - xdp->data_meta;
895 #if (PAGE_SIZE < 8192)
896 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
898 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
899 SKB_DATA_ALIGN(xdp->data_end -
900 xdp->data_hard_start);
904 /* Prefetch first cache line of first page. If xdp->data_meta
905 * is unused, this points exactly at xdp->data, otherwise we
906 * likely have a consumer accessing first few bytes of meta
907 * data, and then actual data.
909 net_prefetch(xdp->data_meta);
910 /* build an skb around the page buffer */
911 skb = build_skb(xdp->data_hard_start, truesize);
915 /* must record the Rx queue, otherwise OS features such as
916 * symmetric queues won't work
918 skb_record_rx_queue(skb, rx_ring->q_index);
920 /* update pointers within the skb to store the data */
921 skb_reserve(skb, xdp->data - xdp->data_hard_start);
922 __skb_put(skb, xdp->data_end - xdp->data);
924 skb_metadata_set(skb, metasize);
926 /* buffer is used by skb, update page_offset */
927 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
933 * ice_construct_skb - Allocate skb and populate it
934 * @rx_ring: Rx descriptor ring to transact packets on
935 * @rx_buf: Rx buffer to pull data from
936 * @xdp: xdp_buff pointing to the data
938 * This function allocates an skb. It then populates it with the page
939 * data from the current receive descriptor, taking care to set up the skb correctly.
942 static struct sk_buff *
943 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
944 struct xdp_buff *xdp)
946 unsigned int size = xdp->data_end - xdp->data;
947 unsigned int headlen;
950 /* prefetch first cache line of first page */
951 net_prefetch(xdp->data);
953 /* allocate a skb to store the frags */
954 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
955 GFP_ATOMIC | __GFP_NOWARN);
959 skb_record_rx_queue(skb, rx_ring->q_index);
960 /* Determine available headroom for copy */
962 if (headlen > ICE_RX_HDR_SIZE)
963 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
965 /* align pull length to size of long to optimize memcpy performance */
966 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
969 /* if we exhaust the linear part then add what is left as a frag */
972 #if (PAGE_SIZE >= 8192)
973 unsigned int truesize = SKB_DATA_ALIGN(size);
975 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
977 skb_add_rx_frag(skb, 0, rx_buf->page,
978 rx_buf->page_offset + headlen, size, truesize);
979 /* buffer is used by skb, update page_offset */
980 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
982 /* buffer is unused, reset bias back to rx_buf; data was copied
983 * onto skb's linear part so there's no need for adjusting
984 * page offset and we can reuse this buffer as-is
986 rx_buf->pagecnt_bias++;
993 * ice_put_rx_buf - Clean up used buffer and either recycle or free
994 * @rx_ring: Rx descriptor ring to transact packets on
995 * @rx_buf: Rx buffer to pull data from
996 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
998 * This function will update next_to_clean and then clean up the contents
999 * of the rx_buf. It will either recycle the buffer or unmap it and free
1000 * the associated resources.
1003 ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
1006 u16 ntc = rx_ring->next_to_clean + 1;
1008 /* fetch, update, and store next to clean */
1009 ntc = (ntc < rx_ring->count) ? ntc : 0;
1010 rx_ring->next_to_clean = ntc;
1015 if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
1016 /* hand second half of page back to the ring */
1017 ice_reuse_rx_page(rx_ring, rx_buf);
1019 /* we are not reusing the buffer so unmap it */
1020 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1021 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1023 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1026 /* clear contents of buffer_info */
1027 rx_buf->page = NULL;
1031 * ice_is_non_eop - process handling of non-EOP buffers
1032 * @rx_ring: Rx ring being processed
1033 * @rx_desc: Rx descriptor for current buffer
1035 * If the buffer is an EOP buffer, this function exits returning false;
1036 * otherwise it returns true, indicating that this is in fact a non-EOP buffer.
1039 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
1041 /* if we are the last buffer then there is nothing else to do */
1042 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1043 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
1046 rx_ring->rx_stats.non_eop_descs++;
1052 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1053 * @rx_ring: Rx descriptor ring to transact packets on
1054 * @budget: Total limit on number of packets to process
1056 * This function provides a "bounce buffer" approach to Rx interrupt
1057 * processing. The advantage of this is that on systems that have
1058 * expensive overhead for IOMMU access this provides a means of avoiding
1059 * it by maintaining the mapping of the page to the system.
1061 * Returns amount of work completed
1063 int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
1065 unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
1066 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1067 unsigned int offset = rx_ring->rx_offset;
1068 unsigned int xdp_res, xdp_xmit = 0;
1069 struct sk_buff *skb = rx_ring->skb;
1070 struct bpf_prog *xdp_prog = NULL;
1071 struct xdp_buff xdp;
1074 /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1075 #if (PAGE_SIZE < 8192)
1076 frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1078 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
1080 /* start the loop to process Rx packets bounded by 'budget' */
1081 while (likely(total_rx_pkts < (unsigned int)budget)) {
1082 union ice_32b_rx_flex_desc *rx_desc;
1083 struct ice_rx_buf *rx_buf;
1084 unsigned char *hard_start;
1091 /* get the Rx desc from Rx ring based on 'next_to_clean' */
1092 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1094 /* status_error_len will always be zero for unused descriptors
1095 * because it's cleared in cleanup, and overlaps with hdr_addr
1096 * which is always zero because packet split isn't used. If the
1097 * hardware wrote DD then it will be non-zero.
1099 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1100 if (!ice_test_staterr(rx_desc, stat_err_bits))
1103 /* This memory barrier is needed to keep us from reading
1104 * any other fields out of the rx_desc until we know the DD bit is set.
1109 ice_trace(clean_rx_irq, rx_ring, rx_desc);
1110 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1111 struct ice_vsi *ctrl_vsi = rx_ring->vsi;
1113 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
1114 ctrl_vsi->vf_id != ICE_INVAL_VFID)
1115 ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
1116 ice_put_rx_buf(rx_ring, NULL, 0);
1121 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1122 ICE_RX_FLX_DESC_PKT_LEN_M;
1124 /* retrieve a buffer from the ring */
1125 rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
1129 xdp.data_end = NULL;
1130 xdp.data_hard_start = NULL;
1131 xdp.data_meta = NULL;
1135 hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1137 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1138 #if (PAGE_SIZE > 4096)
1139 /* At larger PAGE_SIZE, frame_sz depends on the frame length */
1140 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1143 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1147 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
1150 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1151 xdp_xmit |= xdp_res;
1152 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1154 rx_buf->pagecnt_bias++;
1156 total_rx_bytes += size;
1160 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1164 ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1165 } else if (likely(xdp.data)) {
1166 if (ice_ring_uses_build_skb(rx_ring))
1167 skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1169 skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1171 /* exit if we failed to retrieve a buffer */
1173 rx_ring->rx_stats.alloc_buf_failed++;
1175 rx_buf->pagecnt_bias++;
1179 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1182 /* if this is not the EOP descriptor, keep collecting buffers for the frame */
1183 if (ice_is_non_eop(rx_ring, rx_desc))
1186 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1187 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1188 dev_kfree_skb_any(skb);
1192 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1193 if (ice_test_staterr(rx_desc, stat_err_bits))
1194 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1196 /* pad the skb if needed, to make a valid ethernet frame */
1197 if (eth_skb_pad(skb)) {
1202 /* probably a little skewed due to removing CRC */
1203 total_rx_bytes += skb->len;
1205 /* populate checksum, VLAN, and protocol */
1206 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1207 ICE_RX_FLEX_DESC_PTYPE_M;
1209 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1211 ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
1212 /* send completed skb up the stack */
1213 ice_receive_skb(rx_ring, skb, vlan_tag);
1216 /* update budget accounting */
1220 /* return up to cleaned_count buffers to hardware */
1221 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1224 ice_finalize_xdp_rx(rx_ring, xdp_xmit);
1227 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1229 /* guarantee a trip back through this routine if there was a failure */
1230 return failure ? budget : (int)total_rx_pkts;
1234 * ice_net_dim - Update net DIM algorithm
1235 * @q_vector: the vector associated with the interrupt
1237 * Create a DIM sample and notify net_dim() so that it can possibly decide
1238 * a new ITR value based on incoming packets, bytes, and interrupts.
1240 * This function is a no-op if the ring is not configured to dynamic ITR.
1242 static void ice_net_dim(struct ice_q_vector *q_vector)
1244 struct ice_ring_container *tx = &q_vector->tx;
1245 struct ice_ring_container *rx = &q_vector->rx;
1247 if (ITR_IS_DYNAMIC(tx)) {
1248 struct dim_sample dim_sample = {};
1249 u64 packets = 0, bytes = 0;
1250 struct ice_ring *ring;
1252 ice_for_each_ring(ring, q_vector->tx) {
1253 packets += ring->stats.pkts;
1254 bytes += ring->stats.bytes;
1257 dim_update_sample(q_vector->total_events, packets, bytes,
1260 net_dim(&tx->dim, dim_sample);
1263 if (ITR_IS_DYNAMIC(rx)) {
1264 struct dim_sample dim_sample = {};
1265 u64 packets = 0, bytes = 0;
1266 struct ice_ring *ring;
1268 ice_for_each_ring(ring, q_vector->rx) {
1269 packets += ring->stats.pkts;
1270 bytes += ring->stats.bytes;
1273 dim_update_sample(q_vector->total_events, packets, bytes,
1276 net_dim(&rx->dim, dim_sample);
1281 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1282 * @itr_idx: interrupt throttling index
1283 * @itr: interrupt throttling value in usecs
1285 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1287 /* The ITR value is reported in microseconds, and the register value is
1288 * recorded in 2 microsecond units. For this reason we only need to
1289 * shift by GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1290 * granularity as a shift instead of division. The mask makes sure the
1291 * ITR value is never odd so we don't accidentally write into the field
1292 * prior to the ITR field.
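 * As an illustration (assuming the 2 microsecond granularity described
 * above), a 50 usec ITR stays even after the mask and lands in the
 * INTERVAL field as 25; the combined shift folds the divide-by-2 into
 * the field placement.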
1294 itr &= ICE_ITR_MASK;
1296 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1297 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1298 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1302 * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
1303 * @q_vector: the vector associated with the interrupt to enable
1305 * Update the net_dim() algorithm and re-enable the interrupt associated with this vector.
1308 * If the VSI is down, the interrupt will not be re-enabled.
1310 static void ice_update_ena_itr(struct ice_q_vector *q_vector)
1312 struct ice_vsi *vsi = q_vector->vsi;
1313 bool wb_en = q_vector->wb_on_itr;
1316 if (test_bit(ICE_DOWN, vsi->state))
1319 /* When exiting WB_ON_ITR, let ITR resume its normal
1320 * interrupts-enabled path.
1323 q_vector->wb_on_itr = false;
1325 /* This will do nothing if dynamic updates are not enabled. */
1326 ice_net_dim(q_vector);
1328 /* net_dim() updates ITR out-of-band using a work item */
1329 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1330 /* trigger an immediate software interrupt when exiting
1331 * busy poll, to make sure to catch any pending cleanups
1332 * that might have been missed due to interrupt state
1336 itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
1337 GLINT_DYN_CTL_SW_ITR_INDX_M |
1338 GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
1340 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1344 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1345 * @q_vector: q_vector to set WB_ON_ITR on
1347 * We need to tell hardware to write back completed descriptors even when
1348 * interrupts are disabled. Without WB_ON_ITR, descriptors are only written
1349 * back once a full cache line of them has completed, so a partially filled
1350 * cache line of completed descriptors might not be written back until
1351 * the next interrupt.
1353 * This sets the write-back frequency to whatever was set previously for the
1354 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
1355 * aren't meddling with the INTENA_M bit.
1357 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1359 struct ice_vsi *vsi = q_vector->vsi;
1361 /* already in wb_on_itr mode no need to change it */
1362 if (q_vector->wb_on_itr)
1365 /* use previously set ITR values for all of the ITR indices by
1366 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
1367 * be static in non-adaptive mode (user configured)
1369 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1370 ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
1371 GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
1372 GLINT_DYN_CTL_WB_ON_ITR_M);
1374 q_vector->wb_on_itr = true;
1378 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1379 * @napi: napi struct with our devices info in it
1380 * @budget: amount of work driver is allowed to do this pass, in packets
1382 * This function will clean all queues associated with a q_vector.
1384 * Returns the amount of work done
1386 int ice_napi_poll(struct napi_struct *napi, int budget)
1388 struct ice_q_vector *q_vector =
1389 container_of(napi, struct ice_q_vector, napi);
1390 bool clean_complete = true;
1391 struct ice_ring *ring;
1392 int budget_per_ring;
1395 /* Since the actual Tx work is minimal, we can give the Tx a larger
1396 * budget and be more aggressive about cleaning up the Tx descriptors.
1398 ice_for_each_ring(ring, q_vector->tx) {
1399 bool wd = ring->xsk_pool ?
1400 ice_clean_tx_irq_zc(ring, budget) :
1401 ice_clean_tx_irq(ring, budget);
1404 clean_complete = false;
1407 /* Handle case where we are called by netpoll with a budget of 0 */
1408 if (unlikely(budget <= 0))
1411 /* normally we have 1 Rx ring per q_vector */
1412 if (unlikely(q_vector->num_ring_rx > 1))
1413 /* We attempt to distribute budget to each Rx queue fairly, but
1414 * don't allow the budget to go below 1 because that would exit polling early.
1417 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1419 /* Max of 1 Rx ring in this q_vector so give it the budget */
1420 budget_per_ring = budget;
1422 ice_for_each_ring(ring, q_vector->rx) {
1425 /* A dedicated path for zero-copy allows making a single
1426 * comparison in the irq context instead of many inside the
1427 * ice_clean_rx_irq function and makes the codebase cleaner.
1429 cleaned = ring->xsk_pool ?
1430 ice_clean_rx_irq_zc(ring, budget_per_ring) :
1431 ice_clean_rx_irq(ring, budget_per_ring);
1432 work_done += cleaned;
1433 /* if we clean as many as budgeted, we must not be done */
1434 if (cleaned >= budget_per_ring)
1435 clean_complete = false;
1438 /* If work not completed, return budget and polling will return */
1439 if (!clean_complete) {
1440 /* Set the writeback on ITR so partial completions of
1441 * cache-lines will still continue even if we're polling.
1443 ice_set_wb_on_itr(q_vector);
1447 /* Exit the polling mode, but don't re-enable interrupts if stack might
1448 * poll us due to busy-polling
1450 if (likely(napi_complete_done(napi, work_done)))
1451 ice_update_ena_itr(q_vector);
1453 ice_set_wb_on_itr(q_vector);
1455 return min_t(int, work_done, budget - 1);
1459 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1460 * @tx_ring: the ring to be checked
1461 * @size: the size buffer we want to assure is available
1463 * Returns -EBUSY if a stop is needed, else 0
1465 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1467 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1468 /* Memory barrier before checking head and tail */
1471 /* Check again in case another CPU has just made room available. */
1472 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1475 /* A reprieve! - use start_subqueue because it doesn't call schedule */
1476 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1477 ++tx_ring->tx_stats.restart_q;
1482 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1483 * @tx_ring: the ring to be checked
1484 * @size: the size buffer we want to assure is available
1486 * Returns 0 if stop is not needed
1488 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1490 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1493 return __ice_maybe_stop_tx(tx_ring, size);
1497 * ice_tx_map - Build the Tx descriptor
1498 * @tx_ring: ring to send buffer on
1499 * @first: first buffer info buffer to use
1500 * @off: pointer to struct that holds offload parameters
1502 * This function loops over the skb data pointed to by *first
1503 * and gets a physical address for each memory location and programs
1504 * it and the length into the transmit descriptor.
1507 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1508 struct ice_tx_offload_params *off)
1510 u64 td_offset, td_tag, td_cmd;
1511 u16 i = tx_ring->next_to_use;
1512 unsigned int data_len, size;
1513 struct ice_tx_desc *tx_desc;
1514 struct ice_tx_buf *tx_buf;
1515 struct sk_buff *skb;
1519 td_tag = off->td_l2tag1;
1520 td_cmd = off->td_cmd;
1521 td_offset = off->td_offset;
1524 data_len = skb->data_len;
1525 size = skb_headlen(skb);
1527 tx_desc = ICE_TX_DESC(tx_ring, i);
1529 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1530 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1531 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1532 ICE_TX_FLAGS_VLAN_S;
1535 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1539 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1540 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1542 if (dma_mapping_error(tx_ring->dev, dma))
1545 /* record length, and DMA address */
1546 dma_unmap_len_set(tx_buf, len, size);
1547 dma_unmap_addr_set(tx_buf, dma, dma);
1549 /* align size to end of page */
1550 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
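/* The "-dma & (ICE_MAX_READ_REQ_SIZE - 1)" term is the distance from dma to
 * the next read-request-size boundary, so after this first, larger chunk
 * every subsequent ICE_MAX_DATA_PER_TXD_ALIGNED chunk starts on an aligned
 * address.
 */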
1551 tx_desc->buf_addr = cpu_to_le64(dma);
1553 /* account for data chunks larger than the hardware can handle */
1556 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1557 tx_desc->cmd_type_offset_bsz =
1558 ice_build_ctob(td_cmd, td_offset, max_data,
1564 if (i == tx_ring->count) {
1565 tx_desc = ICE_TX_DESC(tx_ring, 0);
1572 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1573 tx_desc->buf_addr = cpu_to_le64(dma);
1576 if (likely(!data_len))
1579 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1585 if (i == tx_ring->count) {
1586 tx_desc = ICE_TX_DESC(tx_ring, 0);
1590 size = skb_frag_size(frag);
1593 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1596 tx_buf = &tx_ring->tx_buf[i];
1599 /* record bytecount for BQL */
1600 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1602 /* record SW timestamp if HW timestamp is not available */
1603 skb_tx_timestamp(first->skb);
1606 if (i == tx_ring->count)
1609 /* write last descriptor with RS and EOP bits */
1610 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1611 tx_desc->cmd_type_offset_bsz =
1612 ice_build_ctob(td_cmd, td_offset, size, td_tag);
1614 /* Force memory writes to complete before letting h/w know there
1615 * are new descriptors to fetch.
1617 * We also use this memory barrier to make certain all of the
1618 * status bits have been updated before next_to_watch is written.
1622 /* set next_to_watch value indicating a packet is present */
1623 first->next_to_watch = tx_desc;
1625 tx_ring->next_to_use = i;
1627 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1629 /* notify HW of packet */
1630 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
1631 writel(i, tx_ring->tail);
1636 /* clear DMA mappings for failed tx_buf map */
1638 tx_buf = &tx_ring->tx_buf[i];
1639 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1640 if (tx_buf == first)
1647 tx_ring->next_to_use = i;
1651 * ice_tx_csum - Enable Tx checksum offloads
1652 * @first: pointer to the first descriptor
1653 * @off: pointer to struct that holds offload parameters
1655 * Returns 1 if HW checksum offload was set up, 0 if no HW offload was done, or a negative error code if the offload can't happen.
1658 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1660 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1661 struct sk_buff *skb = first->skb;
1671 __be16 frag_off, protocol;
1672 unsigned char *exthdr;
1673 u32 offset, cmd = 0;
1676 if (skb->ip_summed != CHECKSUM_PARTIAL)
1679 ip.hdr = skb_network_header(skb);
1680 l4.hdr = skb_transport_header(skb);
1682 /* compute outer L2 header size */
1683 l2_len = ip.hdr - skb->data;
1684 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
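/* MACLEN is programmed in 2-byte words (hence the divide by 2); e.g. a
 * plain 14-byte Ethernet header is encoded as 7.
 */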
1686 protocol = vlan_get_protocol(skb);
1688 if (protocol == htons(ETH_P_IP))
1689 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1690 else if (protocol == htons(ETH_P_IPV6))
1691 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1693 if (skb->encapsulation) {
1694 bool gso_ena = false;
1697 /* define outer network header type */
1698 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1699 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1700 ICE_TX_CTX_EIPT_IPV4 :
1701 ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1702 l4_proto = ip.v4->protocol;
1703 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1706 tunnel |= ICE_TX_CTX_EIPT_IPV6;
1707 exthdr = ip.hdr + sizeof(*ip.v6);
1708 l4_proto = ip.v6->nexthdr;
1709 ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1710 &l4_proto, &frag_off);
1715 /* define outer transport */
1718 tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1719 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1722 tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1723 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1727 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1728 l4.hdr = skb_inner_network_header(skb);
1731 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1734 skb_checksum_help(skb);
1738 /* compute outer L3 header size */
1739 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1740 ICE_TXD_CTX_QW0_EIPLEN_S;
1742 /* switch IP header pointer from outer to inner header */
1743 ip.hdr = skb_inner_network_header(skb);
1745 /* compute tunnel header size */
1746 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1747 ICE_TXD_CTX_QW0_NATLEN_S;
1749 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1750 /* indicate if we need to offload outer UDP header */
1751 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1752 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1753 tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1755 /* record tunnel offload values */
1756 off->cd_tunnel_params |= tunnel;
1758 /* set DTYP=1 to indicate that it's a Tx context descriptor
1759 * in IPsec tunnel mode with Tx offloads in Quad word 1
1761 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1763 /* switch L4 header pointer from outer to inner */
1764 l4.hdr = skb_inner_transport_header(skb);
1767 /* reset type as we transition from outer to inner headers */
1768 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1769 if (ip.v4->version == 4)
1770 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1771 if (ip.v6->version == 6)
1772 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1775 /* Enable IP checksum offloads */
1776 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1777 l4_proto = ip.v4->protocol;
1778 /* the stack computes the IP header already, the only time we
1779 * need the hardware to recompute it is in the case of TSO.
1781 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1782 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1784 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1786 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1787 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1788 exthdr = ip.hdr + sizeof(*ip.v6);
1789 l4_proto = ip.v6->nexthdr;
1790 if (l4.hdr != exthdr)
1791 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1797 /* compute inner L3 header size */
1798 l3_len = l4.hdr - ip.hdr;
1799 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
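/* IPLEN is programmed in 4-byte dwords; e.g. a 20-byte IPv4 header with no
 * options is encoded as 5.
 */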
1801 /* Enable L4 checksum offloads */
1804 /* enable checksum offloads */
1805 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1806 l4_len = l4.tcp->doff;
1807 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1810 /* enable UDP checksum offload */
1811 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1812 l4_len = (sizeof(struct udphdr) >> 2);
1813 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1816 /* enable SCTP checksum offload */
1817 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1818 l4_len = sizeof(struct sctphdr) >> 2;
1819 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1823 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1825 skb_checksum_help(skb);
1830 off->td_offset |= offset;
1835 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1836 * @tx_ring: ring to send buffer on
1837 * @first: pointer to struct ice_tx_buf
1839 * Checks the skb and sets up the corresponding generic transmit flags
1840 * related to VLAN tagging for the HW, such as VLAN and DCB.
1843 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1845 struct sk_buff *skb = first->skb;
1847 /* nothing left to do, software offloaded VLAN */
1848 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
1851 /* currently, we always assume 802.1Q for VLAN insertion as VLAN
1852 * insertion for 802.1AD is not supported
1854 if (skb_vlan_tag_present(skb)) {
1855 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1856 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1859 ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1863 * ice_tso - computes mss and TSO length to prepare for TSO
1864 * @first: pointer to struct ice_tx_buf
1865 * @off: pointer to struct that holds offload parameters
1867 * Returns 1 if TSO was set up, 0 if TSO is not needed, or a negative error code if TSO can't happen.
1870 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1872 struct sk_buff *skb = first->skb;
1883 u64 cd_mss, cd_tso_len;
1888 if (skb->ip_summed != CHECKSUM_PARTIAL)
1891 if (!skb_is_gso(skb))
1894 err = skb_cow_head(skb, 0);
1898 /* cppcheck-suppress unreadVariable */
1899 ip.hdr = skb_network_header(skb);
1900 l4.hdr = skb_transport_header(skb);
1902 /* initialize outer IP header fields */
1903 if (ip.v4->version == 4) {
1907 ip.v6->payload_len = 0;
1910 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1914 SKB_GSO_UDP_TUNNEL |
1915 SKB_GSO_UDP_TUNNEL_CSUM)) {
1916 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1917 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1920 /* determine offset of outer transport header */
1921 l4_start = (u8)(l4.hdr - skb->data);
1923 /* remove payload length from outer checksum */
1924 paylen = skb->len - l4_start;
1925 csum_replace_by_diff(&l4.udp->check,
1926 (__force __wsum)htonl(paylen));
1929 /* reset pointers to inner headers */
1931 /* cppcheck-suppress unreadVariable */
1932 ip.hdr = skb_inner_network_header(skb);
1933 l4.hdr = skb_inner_transport_header(skb);
1935 /* initialize inner IP header fields */
1936 if (ip.v4->version == 4) {
1940 ip.v6->payload_len = 0;
1944 /* determine offset of transport header */
1945 l4_start = (u8)(l4.hdr - skb->data);
1947 /* remove payload length from checksum */
1948 paylen = skb->len - l4_start;
1950 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1951 csum_replace_by_diff(&l4.udp->check,
1952 (__force __wsum)htonl(paylen));
1953 /* compute length of UDP segmentation header */
1954 off->header_len = (u8)sizeof(struct udphdr) + l4_start;
1956 csum_replace_by_diff(&l4.tcp->check,
1957 (__force __wsum)htonl(paylen));
1958 /* compute length of TCP segmentation header */
1959 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
1962 /* update gso_segs and bytecount */
1963 first->gso_segs = skb_shinfo(skb)->gso_segs;
1964 first->bytecount += (first->gso_segs - 1) * off->header_len;
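/* bytecount was seeded with skb->len; every additional segment on the wire
 * repeats the L2..L4 headers, so account for those extra header copies here.
 */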
1966 cd_tso_len = skb->len - off->header_len;
1967 cd_mss = skb_shinfo(skb)->gso_size;
1969 /* record cdesc_qw1 with TSO parameters */
1970 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
1971 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1972 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1973 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
1974 first->tx_flags |= ICE_TX_FLAGS_TSO;
1979 * ice_txd_use_count - estimate the number of descriptors needed for Tx
1980 * @size: transmit request size in bytes
1982 * Due to hardware alignment restrictions (4K alignment), we need to
1983 * assume that we can have no more than 12K of data per descriptor, even
1984 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1985 * Thus, we need to divide by 12K. But division is slow! Instead,
1986 * we decompose the operation into shifts and one relatively cheap
1987 * multiply operation.
1989 * To divide by 12K, we first divide by 4K, then divide by 3:
1990 * To divide by 4K, shift right by 12 bits
1991 * To divide by 3, multiply by 85, then divide by 256
1992 * (Divide by 256 is done by shifting right by 8 bits)
1993 * Finally, we add one to round up. Because 256 isn't an exact multiple of
1994 * 3, we'll underestimate near each multiple of 12K. This is actually more
1995 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1996 * segment. For our purposes this is accurate out to 1M which is orders of
1997 * magnitude greater than our largest possible GSO size.
1999 * This would then be implemented as:
2000 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2002 * Since multiplication and division are commutative, we can reorder
2004 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
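 * As a worked example, a 60000 byte request gives ((60000 * 85) >> 20) = 4,
 * plus ICE_DESCS_FOR_SKB_DATA_PTR, i.e. 5 descriptors, which matches
 * 60000 / 12288 rounded up.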
2006 static unsigned int ice_txd_use_count(unsigned int size)
2008 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2012 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2015 * Returns number of data descriptors needed for this skb.
2017 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2019 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2020 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2021 unsigned int count = 0, size = skb_headlen(skb);
2024 count += ice_txd_use_count(size);
2029 size = skb_frag_size(frag++);
2036 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2039 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2040 * and so we need to figure out the cases where we need to linearize the skb.
2042 * For TSO we need to count the TSO header and segment payload separately.
2043 * As such we need to check cases where we have 7 fragments or more as we
2044 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2045 * the segment payload in the first descriptor, and another 7 for the fragments.
2048 static bool __ice_chk_linearize(struct sk_buff *skb)
2050 const skb_frag_t *frag, *stale;
2053 /* no need to check if number of frags is less than 7 */
2054 nr_frags = skb_shinfo(skb)->nr_frags;
2055 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2058 /* We need to walk through the list and validate that each group
2059 * of 6 fragments totals at least gso_size.
2061 nr_frags -= ICE_MAX_BUF_TXD - 2;
2062 frag = &skb_shinfo(skb)->frags[0];
2064 /* Initialize size to the negative value of gso_size minus 1. We
2065 * use this as the worst case scenario in which the frag ahead
2066 * of us only provides one byte which is why we are limited to 6
2067 * descriptors for a single transmit as the header and previous
2068 * fragment are already consuming 2 descriptors.
2070 sum = 1 - skb_shinfo(skb)->gso_size;
2072 /* Add size of frags 0 through 4 to create our initial sum */
2073 sum += skb_frag_size(frag++);
2074 sum += skb_frag_size(frag++);
2075 sum += skb_frag_size(frag++);
2076 sum += skb_frag_size(frag++);
2077 sum += skb_frag_size(frag++);
2079 /* Walk through fragments adding latest fragment, testing it, and
2080 * then removing stale fragments from the sum.
2082 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2083 int stale_size = skb_frag_size(stale);
2085 sum += skb_frag_size(frag++);
2087 /* The stale fragment may present us with a smaller
2088 * descriptor than the actual fragment size. To account
2089 * for that we need to remove all the data on the front and
2090 * figure out what the remainder would be in the last
2091 * descriptor associated with the fragment.
2093 if (stale_size > ICE_MAX_DATA_PER_TXD) {
2094 int align_pad = -(skb_frag_off(stale)) &
2095 (ICE_MAX_READ_REQ_SIZE - 1);
2098 stale_size -= align_pad;
2101 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2102 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2103 } while (stale_size > ICE_MAX_DATA_PER_TXD);
2106 /* if sum is negative we failed to make sufficient progress */
2120 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2122 * @count: number of buffers used
2124 * Note: Our HW can't scatter-gather more than 8 fragments to build
2125 * a packet on the wire and so we need to figure out the cases where we
2126 * need to linearize the skb.
2128 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2130 /* Both TSO and single send will work if count is less than 8 */
2131 if (likely(count < ICE_MAX_BUF_TXD))
2134 if (skb_is_gso(skb))
2135 return __ice_chk_linearize(skb);
2137 /* we can support up to 8 data buffers for a single send */
2138 return count != ICE_MAX_BUF_TXD;
2142 * ice_tstamp - set up context descriptor for hardware timestamp
2143 * @tx_ring: pointer to the Tx ring to send buffer on
2144 * @skb: pointer to the SKB we're sending
2146 * @off: Tx offload parameters
2149 ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
2150 struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2154 /* only timestamp the outbound packet if the user has requested it */
2155 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2158 if (!tx_ring->ptp_tx)
2161 /* Tx timestamps cannot be sampled when doing TSO */
2162 if (first->tx_flags & ICE_TX_FLAGS_TSO)
2165 /* Grab an open timestamp slot */
2166 idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
2170 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2171 (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
2172 ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
2173 first->tx_flags |= ICE_TX_FLAGS_TSYN;
2177 * ice_xmit_frame_ring - Sends buffer on Tx ring
2179 * @tx_ring: ring to send buffer on
2181 * Returns NETDEV_TX_OK if sent, else an error code
2184 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2186 struct ice_tx_offload_params offload = { 0 };
2187 struct ice_vsi *vsi = tx_ring->vsi;
2188 struct ice_tx_buf *first;
2193 ice_trace(xmit_frame_ring, tx_ring, skb);
2195 count = ice_xmit_desc_count(skb);
2196 if (ice_chk_linearize(skb, count)) {
2197 if (__skb_linearize(skb))
2199 count = ice_txd_use_count(skb->len);
2200 tx_ring->tx_stats.tx_linearize++;
2203 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2204 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2205 * + 4 desc gap to avoid the cache line where head is,
2206 * + 1 desc for context descriptor,
2207 * otherwise try next time
2209 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2210 ICE_DESCS_FOR_CTX_DESC)) {
2211 tx_ring->tx_stats.tx_busy++;
2212 return NETDEV_TX_BUSY;
2215 offload.tx_ring = tx_ring;
2217 /* record the location of the first descriptor for this packet */
2218 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2220 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2221 first->gso_segs = 1;
2222 first->tx_flags = 0;
2224 /* prepare the VLAN tagging flags for Tx */
2225 ice_tx_prepare_vlan_flags(tx_ring, first);
2227 /* set up TSO offload */
2228 tso = ice_tso(first, &offload);
2232 /* always set up Tx checksum offload */
2233 csum = ice_tx_csum(first, &offload);
2237 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2238 eth = (struct ethhdr *)skb_mac_header(skb);
2239 if (unlikely((skb->priority == TC_PRIO_CONTROL ||
2240 eth->h_proto == htons(ETH_P_LLDP)) &&
2241 vsi->type == ICE_VSI_PF &&
2242 vsi->port_info->qos_cfg.is_sw_lldp))
2243 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2244 ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2245 ICE_TXD_CTX_QW1_CMD_S);
2247 ice_tstamp(tx_ring, skb, first, &offload);
2249 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2250 struct ice_tx_ctx_desc *cdesc;
2251 u16 i = tx_ring->next_to_use;
2253 /* grab the next descriptor */
2254 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2256 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2258 /* setup context descriptor */
2259 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2260 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2261 cdesc->rsvd = cpu_to_le16(0);
2262 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2265 ice_tx_map(tx_ring, first, &offload);
2266 return NETDEV_TX_OK;
2269 ice_trace(xmit_frame_ring_drop, tx_ring, skb);
2270 dev_kfree_skb_any(skb);
2271 return NETDEV_TX_OK;
2275 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2277 * @netdev: network interface device structure
2279 * Returns NETDEV_TX_OK if sent, else an error code
2281 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2283 struct ice_netdev_priv *np = netdev_priv(netdev);
2284 struct ice_vsi *vsi = np->vsi;
2285 struct ice_ring *tx_ring;
2287 tx_ring = vsi->tx_rings[skb->queue_mapping];
2289 /* hardware can't handle really short frames, hardware padding works beyond this point */
2292 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2293 return NETDEV_TX_OK;
2295 return ice_xmit_frame_ring(skb, tx_ring);
2299 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2300 * @tx_ring: tx_ring to clean
2302 void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
2304 struct ice_vsi *vsi = tx_ring->vsi;
2305 s16 i = tx_ring->next_to_clean;
2306 int budget = ICE_DFLT_IRQ_WORK;
2307 struct ice_tx_desc *tx_desc;
2308 struct ice_tx_buf *tx_buf;
2310 tx_buf = &tx_ring->tx_buf[i];
2311 tx_desc = ICE_TX_DESC(tx_ring, i);
2312 i -= tx_ring->count;
2315 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2317 /* if next_to_watch is not set then there is no pending work */
2321 /* prevent any other reads prior to eop_desc */
2324 /* if the descriptor isn't done, no work to do */
2325 if (!(eop_desc->cmd_type_offset_bsz &
2326 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2329 /* clear next_to_watch to prevent false hangs */
2330 tx_buf->next_to_watch = NULL;
2331 tx_desc->buf_addr = 0;
2332 tx_desc->cmd_type_offset_bsz = 0;
2334 /* move past filter desc */
2339 i -= tx_ring->count;
2340 tx_buf = tx_ring->tx_buf;
2341 tx_desc = ICE_TX_DESC(tx_ring, 0);
2344 /* unmap the data header */
2345 if (dma_unmap_len(tx_buf, len))
2346 dma_unmap_single(tx_ring->dev,
2347 dma_unmap_addr(tx_buf, dma),
2348 dma_unmap_len(tx_buf, len),
2350 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2351 devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2353 /* clear next_to_watch to prevent false hangs */
2354 tx_buf->raw_buf = NULL;
2355 tx_buf->tx_flags = 0;
2356 tx_buf->next_to_watch = NULL;
2357 dma_unmap_len_set(tx_buf, len, 0);
2358 tx_desc->buf_addr = 0;
2359 tx_desc->cmd_type_offset_bsz = 0;
2361 /* move past eop_desc for start of next FD desc */
2366 i -= tx_ring->count;
2367 tx_buf = tx_ring->tx_buf;
2368 tx_desc = ICE_TX_DESC(tx_ring, 0);
2372 } while (likely(budget));
2374 i += tx_ring->count;
2375 tx_ring->next_to_clean = i;
2377 /* re-enable interrupt if needed */
2378 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);