// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>
static inline void gve_tx_put_doorbell(struct gve_priv *priv,
				       struct gve_queue_resources *q_resources,
				       u32 val)
{
	iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
}
/* gvnic can only transmit from a Registered Segment.
 * We copy skb payloads into the registered segment before writing Tx
 * descriptors and ringing the Tx doorbell.
 *
 * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
 * free allocations in the order they were allocated.
 */
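/* Illustrative call flow (editor's sketch, not upstream code): the transmit
 * path checks space with gve_tx_fifo_can_alloc(), carves fragments out of
 * the segment with gve_tx_alloc_fifo(), and the completion path returns the
 * same bytes with gve_tx_free_fifo(), strictly in allocation order.
 */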
static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP, PAGE_KERNEL);
	if (unlikely(!fifo->base)) {
		netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n",
			  fifo->qpl->id);
		return -ENOMEM;
	}
	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
	atomic_set(&fifo->available, fifo->size);
	fifo->head = 0;
	return 0;
}
static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo)
{
	WARN(atomic_read(&fifo->available) != fifo->size,
	     "Releasing non-empty fifo");
	vunmap(fifo->base);
}
static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo, size_t bytes)
{
	return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
}
static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes)
{
	return (atomic_read(&fifo->available) <= bytes) ? false : true;
}
/* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
 * @fifo: FIFO to allocate from
 * @bytes: Allocation size
 * @iov: Scatter-gather elements to fill with allocation fragment base/len
 *
 * Returns number of valid elements in iov[] or negative on error.
 *
 * Allocations from a given FIFO must be externally synchronized but concurrent
 * allocations and frees are allowed.
 */
static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes,
			     struct gve_tx_iovec iov[2])
{
	size_t overflow, padding;
	u32 aligned_head;
	int nfrags = 0;

	if (!bytes)
		return 0;

	/* This check happens before we know how much padding is needed to
	 * align to a cacheline boundary for the payload, but that is fine,
	 * because the FIFO head always starts aligned, and the FIFO's boundaries
	 * are aligned, so if there is space for the data, there is space for
	 * the padding to the next alignment.
	 */
	WARN(!gve_tx_fifo_can_alloc(fifo, bytes),
	     "Reached %s when there's not enough space in the fifo", __func__);

	nfrags++;

	iov[0].iov_offset = fifo->head;
	iov[0].iov_len = bytes;
	fifo->head += bytes;

	if (fifo->head > fifo->size) {
		/* If the allocation did not fit in the tail fragment of the
		 * FIFO, also use the head fragment.
		 */
		nfrags++;
		overflow = fifo->head - fifo->size;
		iov[0].iov_len -= overflow;
		iov[1].iov_offset = 0;	/* Start of fifo */
		iov[1].iov_len = overflow;

		fifo->head = overflow;
	}

	/* Re-align to a cacheline boundary */
	aligned_head = L1_CACHE_ALIGN(fifo->head);
	padding = aligned_head - fifo->head;
	iov[nfrags - 1].iov_padding = padding;
	atomic_sub(bytes + padding, &fifo->available);
	fifo->head = aligned_head;

	if (fifo->head == fifo->size)
		fifo->head = 0;

	return nfrags;
}
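/* Worked example (illustrative): with fifo->size of four pages and
 * fifo->head 100 bytes short of the end, a 300-byte allocation returns two
 * fragments - iov[0] covers the last 100 bytes before fifo->size and
 * iov[1] covers the first 200 bytes of the FIFO - and the new head is the
 * L1_CACHE_ALIGN()ed end of that 200-byte overflow.
 */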
/* gve_tx_free_fifo - Return space to Tx FIFO
 * @fifo: FIFO to return fragments to
 * @bytes: Bytes to free
 */
static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
{
	atomic_add(bytes, &fifo->available);
}
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake);
static void gve_tx_free_ring(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	size_t bytes;
	u32 slots;

	gve_tx_remove_from_block(priv, idx);
	slots = tx->mask + 1;
	gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
	netdev_tx_reset_queue(tx->netdev_txq);

	dma_free_coherent(hdev, sizeof(*tx->q_resources),
			  tx->q_resources, tx->q_resources_bus);
	tx->q_resources = NULL;

	if (!tx->raw_addressing) {
		gve_tx_fifo_release(priv, &tx->tx_fifo);
		gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
		tx->tx_fifo.qpl = NULL;
	}

	bytes = sizeof(*tx->desc) * slots;
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;

	vfree(tx->info);
	tx->info = NULL;

	netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}
static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
{
	struct gve_tx_ring *tx = &priv->tx[idx];
	struct device *hdev = &priv->pdev->dev;
	u32 slots = priv->tx_desc_cnt;
	size_t bytes;

	/* Make sure everything is zeroed to start */
	memset(tx, 0, sizeof(*tx));
	spin_lock_init(&tx->clean_lock);
	tx->q_num = idx;

	tx->mask = slots - 1;

	/* alloc metadata */
	tx->info = vzalloc(sizeof(*tx->info) * slots);
	if (!tx->info)
		return -ENOMEM;

	/* alloc tx queue */
	bytes = sizeof(*tx->desc) * slots;
	tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
	if (!tx->desc)
		goto abort_with_info;

	tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
	tx->dev = &priv->pdev->dev;
	if (!tx->raw_addressing) {
		tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
		if (!tx->tx_fifo.qpl)
			goto abort_with_desc;
		/* map Tx FIFO */
		if (gve_tx_fifo_init(priv, &tx->tx_fifo))
			goto abort_with_qpl;
	}

	tx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*tx->q_resources),
				   &tx->q_resources_bus,
				   GFP_KERNEL);
	if (!tx->q_resources)
		goto abort_with_fifo;

	netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
		  (unsigned long)tx->bus);
	tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
	gve_tx_add_to_block(priv, idx);

	return 0;

abort_with_fifo:
	if (!tx->raw_addressing)
		gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
	if (!tx->raw_addressing)
		gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
abort_with_desc:
	dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
	tx->desc = NULL;
abort_with_info:
	vfree(tx->info);
	tx->info = NULL;
	return -ENOMEM;
}
int gve_tx_alloc_rings(struct gve_priv *priv)
{
	int err = 0;
	int i, j;

	for (i = 0; i < priv->tx_cfg.num_queues; i++) {
		err = gve_tx_alloc_ring(priv, i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc tx ring=%d: err=%d\n", i, err);
			break;
		}
	}
	/* Clean up any rings that were allocated before the error */
	if (err)
		for (j = 0; j < i; j++)
			gve_tx_free_ring(priv, j);
	return err;
}
void gve_tx_free_rings_gqi(struct gve_priv *priv)
{
	int i;

	for (i = 0; i < priv->tx_cfg.num_queues; i++)
		gve_tx_free_ring(priv, i);
}
/* gve_tx_avail - Calculates the number of slots available in the ring
 * @tx: tx ring to check
 *
 * Returns the number of slots available
 *
 * The capacity of the queue is mask + 1. We don't need to reserve an entry.
 */
static inline u32 gve_tx_avail(struct gve_tx_ring *tx)
{
	return tx->mask + 1 - (tx->req - tx->done);
}
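/* Example (illustrative): with mask = 255 (256 slots), req = 300 and
 * done = 260, 40 descriptors are still outstanding, so 256 - 40 = 216
 * slots are available. The unsigned subtraction handles req/done wrap.
 */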
static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
					      struct sk_buff *skb)
{
	int pad_bytes, align_hdr_pad;
	int bytes, hlen;

	hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
				 min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);

	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
	/* We need to take into account the header alignment padding. */
	align_hdr_pad = L1_CACHE_ALIGN(hlen) - hlen;
	bytes = align_hdr_pad + pad_bytes + skb->len;

	return bytes;
}
/* The most descriptors we could need is MAX_SKB_FRAGS + 4 :
 * 1 for each skb frag
 * 1 for the skb linear portion
 * 1 for when tcp hdr needs to be in separate descriptor
 * 1 if the payload wraps to the beginning of the FIFO
 * 1 for metadata descriptor
 */
#define MAX_TX_DESC_NEEDED	(MAX_SKB_FRAGS + 4)
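/* Illustrative arithmetic: with the common MAX_SKB_FRAGS value of 17 this
 * reserves room for up to 21 descriptors per skb, which is the worst case
 * that gve_can_tx() checks against below.
 */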
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
	if (info->skb) {
		dma_unmap_single(dev, dma_unmap_addr(info, dma),
				 dma_unmap_len(info, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(info, len, 0);
	} else {
		dma_unmap_page(dev, dma_unmap_addr(info, dma),
			       dma_unmap_len(info, len),
			       DMA_TO_DEVICE);
		dma_unmap_len_set(info, len, 0);
	}
}
/* Check if sufficient resources (descriptor ring space, FIFO space) are
 * available to transmit the given number of bytes.
 */
static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
{
	bool can_alloc = true;

	if (!tx->raw_addressing)
		can_alloc = gve_tx_fifo_can_alloc(&tx->tx_fifo, bytes_required);

	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
}
static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);
/* Stops the queue if the skb cannot be transmitted. */
static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
			     struct sk_buff *skb)
{
	int bytes_required = 0;
	u32 nic_done, to_do;
	int ret;

	if (!tx->raw_addressing)
		bytes_required = gve_skb_fifo_bytes_required(tx, skb);

	if (likely(gve_can_tx(tx, bytes_required)))
		return 0;

	ret = -EBUSY;
	spin_lock(&tx->clean_lock);
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = nic_done - tx->done;

	/* Only try to clean if there is hope for TX */
	if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
		if (to_do > 0) {
			to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
			gve_clean_tx_done(priv, tx, to_do, false);
		}
		if (likely(gve_can_tx(tx, bytes_required)))
			ret = 0;
	}
	if (ret) {
		/* No space, so stop the queue */
		tx->stop_queue++;
		netif_tx_stop_queue(tx->netdev_txq);
	}
	spin_unlock(&tx->clean_lock);
	return ret;
}
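/* In other words (summary of the flow above): the queue only stops after one
 * opportunistic cleaning pass, bounded by NAPI_POLL_WEIGHT, fails to free
 * enough ring and FIFO space for a worst-case skb.
 */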
static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
				 struct sk_buff *skb, bool is_gso,
				 int l4_hdr_offset, u32 desc_cnt,
				 u16 hlen, u64 addr)
{
	/* l4_hdr_offset and csum_offset are in units of 16-bit words */
	if (is_gso) {
		pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
		pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
		pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
	} else {
		pkt_desc->pkt.type_flags = GVE_TXD_STD;
		pkt_desc->pkt.l4_csum_offset = 0;
		pkt_desc->pkt.l4_hdr_offset = 0;
	}
	pkt_desc->pkt.desc_cnt = desc_cnt;
	pkt_desc->pkt.len = cpu_to_be16(skb->len);
	pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
	pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}
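/* Example of the 16-bit-word encoding (illustrative): for TCP,
 * skb->csum_offset is 16 bytes (offsetof(struct tcphdr, check)), so
 * l4_csum_offset is written as 16 >> 1 = 8; an L4 header starting at byte
 * 34 of the frame (Ethernet + IPv4) is encoded as l4_hdr_offset = 17.
 */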
static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
				 struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));

	mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
	mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
				   GVE_MTD_PATH_HASH_L4;
	mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
	mtd_desc->mtd.reserved0 = 0;
	mtd_desc->mtd.reserved1 = 0;
}
static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
				 struct sk_buff *skb, bool is_gso,
				 u16 len, u64 addr)
{
	seg_desc->seg.type_flags = GVE_TXD_SEG;
	if (is_gso) {
		if (skb_is_gso_v6(skb))
			seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
		seg_desc->seg.l3_offset = skb_network_offset(skb) >> 1;
		seg_desc->seg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
	}
	seg_desc->seg.seg_len = cpu_to_be16(len);
	seg_desc->seg.seg_addr = cpu_to_be64(addr);
}
static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
				    u64 iov_offset, u64 iov_len)
{
	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
	u64 first_page = iov_offset / PAGE_SIZE;
	u64 page;

	for (page = first_page; page <= last_page; page++)
		dma_sync_single_for_device(dev, page_buses[page], PAGE_SIZE, DMA_TO_DEVICE);
}
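/* Example (illustrative, 4 KiB pages): an iovec at offset 6144 with length
 * 4096 spans FIFO pages 1 and 2, so both of those QPL pages are synced for
 * device access before the descriptors are handed to the NIC.
 */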
static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, struct sk_buff *skb)
{
	int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	int mtd_desc_nr = !!skb->l4_hash;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	int payload_iov = 2;
	int copy_offset;
	u32 next_idx;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want the tcp header alone in the first segment
	 * otherwise we want the minimum required by the gVNIC spec.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
			min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);

	info->skb = skb;
	/* We don't want to split the header, so if necessary, pad to the end
	 * of the fifo and then put the header at the beginning of the fifo.
	 */
	pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, hlen);
	hdr_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, hlen + pad_bytes,
				       &info->iov[0]);
	WARN(!hdr_nfrags, "hdr_nfrags should never be 0!");
	payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
					   &info->iov[payload_iov]);

	gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
			     1 + mtd_desc_nr + payload_nfrags, hlen,
			     info->iov[hdr_nfrags - 1].iov_offset);

	skb_copy_bits(skb, 0,
		      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
		      hlen);
	gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
				info->iov[hdr_nfrags - 1].iov_offset,
				info->iov[hdr_nfrags - 1].iov_len);
	copy_offset = hlen;

	if (mtd_desc_nr) {
		next_idx = (tx->req + 1) & tx->mask;
		gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
	}

	for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
		next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
		seg_desc = &tx->desc[next_idx];

		gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
				     info->iov[i].iov_len,
				     info->iov[i].iov_offset);

		skb_copy_bits(skb, copy_offset,
			      tx->tx_fifo.base + info->iov[i].iov_offset,
			      info->iov[i].iov_len);
		gve_dma_sync_for_device(&priv->pdev->dev, tx->tx_fifo.qpl->page_buses,
					info->iov[i].iov_offset,
					info->iov[i].iov_len);
		copy_offset += info->iov[i].iov_len;
	}

	return 1 + mtd_desc_nr + payload_nfrags;
}
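/* Descriptor layout produced by the copy path (illustrative): one packet
 * descriptor covering the header bytes, an optional metadata descriptor
 * when the skb carries an L4 hash, then one segment descriptor per payload
 * FIFO fragment (two if the payload wrapped), which is exactly the
 * 1 + mtd_desc_nr + payload_nfrags value returned above.
 */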
static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
				  struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	int hlen, num_descriptors, l4_hdr_offset;
	union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
	struct gve_tx_buffer_state *info;
	int mtd_desc_nr = !!skb->l4_hash;
	bool is_gso = skb_is_gso(skb);
	u32 idx = tx->req & tx->mask;
	u64 addr;
	u32 len;
	int i;

	info = &tx->info[idx];
	pkt_desc = &tx->desc[idx];

	l4_hdr_offset = skb_checksum_start_offset(skb);
	/* If the skb is gso, then we want only up to the tcp header in the first segment
	 * to efficiently replicate on each segment otherwise we want the linear portion
	 * of the skb (which will contain the checksum because skb->csum_start and
	 * skb->csum_offset are given relative to skb->head) in the first segment.
	 */
	hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb);
	len = skb_headlen(skb);

	info->skb = skb;

	addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx->dev, addr))) {
		tx->dma_mapping_error++;
		goto drop;
	}
	dma_unmap_len_set(info, len, len);
	dma_unmap_addr_set(info, dma, addr);

	num_descriptors = 1 + shinfo->nr_frags;
	if (hlen < len)
		num_descriptors++;
	if (mtd_desc_nr)
		num_descriptors++;

	gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
			     num_descriptors, hlen, addr);

	if (mtd_desc_nr) {
		idx = (idx + 1) & tx->mask;
		mtd_desc = &tx->desc[idx];
		gve_tx_fill_mtd_desc(mtd_desc, skb);
	}

	if (hlen < len) {
		/* For gso the rest of the linear portion of the skb needs to
		 * be in its own descriptor.
		 */
		len -= hlen;
		addr += hlen;
		idx = (idx + 1) & tx->mask;
		seg_desc = &tx->desc[idx];
		gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
	}

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];

		idx = (idx + 1) & tx->mask;
		seg_desc = &tx->desc[idx];
		len = skb_frag_size(frag);
		addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(tx->dev, addr))) {
			tx->dma_mapping_error++;
			goto unmap_drop;
		}
		tx->info[idx].skb = NULL;
		dma_unmap_len_set(&tx->info[idx], len, len);
		dma_unmap_addr_set(&tx->info[idx], dma, addr);

		gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
	}

	return num_descriptors;

unmap_drop:
	i += num_descriptors - shinfo->nr_frags;
	while (i--) {
		/* Skip metadata descriptor, if set */
		if (i == 1 && mtd_desc_nr == 1)
			continue;
		idx--;
		gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
	}
drop:
	tx->dropped_pkt++;
	return 0;
}
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct gve_priv *priv = netdev_priv(dev);
	struct gve_tx_ring *tx;
	int nsegs;

	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
	     "skb queue index out of range");
	tx = &priv->tx[skb_get_queue_mapping(skb)];
	if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
		/* We need to ring the txq doorbell -- we have stopped the Tx
		 * queue for want of resources, but prior calls to gve_tx()
		 * may have added descriptors without ringing the doorbell.
		 */
		gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
		return NETDEV_TX_BUSY;
	}
	if (tx->raw_addressing)
		nsegs = gve_tx_add_skb_no_copy(priv, tx, skb);
	else
		nsegs = gve_tx_add_skb_copy(priv, tx, skb);

	/* If the packet is getting sent, we need to update the skb */
	if (nsegs) {
		netdev_tx_sent_queue(tx->netdev_txq, skb->len);
		skb_tx_timestamp(skb);
		tx->req += nsegs;
	} else {
		dev_kfree_skb_any(skb);
	}

	if (!netif_xmit_stopped(tx->netdev_txq) && netdev_xmit_more())
		return NETDEV_TX_OK;

	/* Give packets to NIC. Even if this packet failed to send the doorbell
	 * might need to be rung because of xmit_more.
	 */
	gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
	return NETDEV_TX_OK;
}
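/* Doorbell batching (illustrative): when the stack signals more packets are
 * coming via netdev_xmit_more(), gve_tx() returns without writing the
 * doorbell, so a burst of, say, 8 skbs advances tx->req eight times but
 * issues a single iowrite32be() on the last one.
 */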
#define GVE_TX_START_THRESH	PAGE_SIZE
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
			     u32 to_do, bool try_to_wake)
{
	struct gve_tx_buffer_state *info;
	u64 pkts = 0, bytes = 0;
	size_t space_freed = 0;
	struct sk_buff *skb;
	int i, j;
	u32 idx;

	for (j = 0; j < to_do; j++) {
		idx = tx->done & tx->mask;
		netif_info(priv, tx_done, priv->dev,
			   "[%d] %s: idx=%d (req=%u done=%u)\n",
			   tx->q_num, __func__, idx, tx->req, tx->done);
		info = &tx->info[idx];
		skb = info->skb;

		/* Unmap the buffer */
		if (tx->raw_addressing)
			gve_tx_unmap_buf(tx->dev, info);
		tx->done++;
		/* Mark as free */
		if (skb) {
			info->skb = NULL;
			bytes += skb->len;
			pkts++;
			dev_consume_skb_any(skb);
			if (tx->raw_addressing)
				continue;
			/* FIFO free */
			for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
				space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
				info->iov[i].iov_len = 0;
				info->iov[i].iov_padding = 0;
			}
		}
	}

	if (!tx->raw_addressing)
		gve_tx_free_fifo(&tx->tx_fifo, space_freed);
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += bytes;
	tx->pkt_done += pkts;
	u64_stats_update_end(&tx->statss);
	netdev_tx_completed_queue(tx->netdev_txq, pkts, bytes);

	/* start the queue if we've stopped it */
#ifndef CONFIG_BQL
	/* Make sure that the doorbells are synced */
	smp_mb();
#endif
	if (try_to_wake && netif_tx_queue_stopped(tx->netdev_txq) &&
	    likely(gve_can_tx(tx, GVE_TX_START_THRESH))) {
		tx->wake_queue++;
		netif_tx_wake_queue(tx->netdev_txq);
	}

	return pkts;
}
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx)
{
	u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
	__be32 counter = READ_ONCE(priv->counter_array[counter_index]);

	return be32_to_cpu(counter);
}
bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_priv *priv = block->priv;
	struct gve_tx_ring *tx = block->tx;
	u32 nic_done;
	u32 to_do;

	/* If budget is 0, do all the work */
	if (budget == 0)
		budget = INT_MAX;

	/* The xmit path may also try to clean completed packets in order to
	 * transmit; use spin_lock() to avoid a cleaning conflict - it yields
	 * better xmit/clean concurrency than netif's lock.
	 */
	spin_lock(&tx->clean_lock);
	/* Find out how much work there is to be done */
	nic_done = gve_tx_load_event_counter(priv, tx);
	to_do = min_t(u32, (nic_done - tx->done), budget);
	gve_clean_tx_done(priv, tx, to_do, true);
	spin_unlock(&tx->clean_lock);
	/* If we still have work we want to repoll */
	return nic_done != tx->done;
}
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
{
	u32 nic_done = gve_tx_load_event_counter(priv, tx);

	return nic_done != tx->done;
}