// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include <linux/bpf_trace.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include "funeth_txrx.h"
#include "funeth.h"
#include "fun_queue.h"

#define CREATE_TRACE_POINTS
#include "funeth_trace.h"

/* Given the device's max supported MTU and pages of at least 4KB a packet can
 * be scattered into at most 4 buffers.
 */
#define RX_MAX_FRAGS 4

/* Per packet headroom in non-XDP mode. Present only for 1-frag packets. */
#define FUN_RX_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)

/* We try to reuse pages for our buffers. To avoid frequent page ref writes we
 * take EXTRA_PAGE_REFS references at once and then hand them out one per packet
 * occupying the buffer.
 */
#define EXTRA_PAGE_REFS 1000000
#define MIN_PAGE_REFS 1000

/* XDP flush reasons recorded during a poll and acted on when it ends. */
enum {
	FUN_XDP_FLUSH_REDIR = 1, /* XDP_REDIRECT was used, call xdp_do_flush() */
	FUN_XDP_FLUSH_TX = 2,    /* XDP_TX was used, ring the XDP Tx doorbell */
};

/* See if a page is running low on refs we are holding and, if so, take more. */
static void refresh_refs(struct funeth_rxbuf *buf)
{
	if (unlikely(buf->pg_refs < MIN_PAGE_REFS)) {
		buf->pg_refs += EXTRA_PAGE_REFS;
		page_ref_add(buf->page, EXTRA_PAGE_REFS);
	}
}
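
/* In effect this amortizes page refcounting: a buffer page pays one atomic
 * page_ref_add() per EXTRA_PAGE_REFS packets instead of one per packet, while
 * pg_refs tracks the driver's remaining share in a cheap queue-private
 * counter.
 */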

/* Offer a buffer to the Rx buffer cache. The cache will hold the buffer if its
 * page is worth retaining and there's room for it. Otherwise the page is
 * unmapped and our references released.
 */
static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
{
	struct funeth_rx_cache *c = &q->cache;

	if (c->prod_cnt - c->cons_cnt <= c->mask && buf->node == numa_mem_id()) {
		c->bufs[c->prod_cnt & c->mask] = *buf;
		c->prod_cnt++;
	} else {
		dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		__page_frag_cache_drain(buf->page, buf->pg_refs);
	}
}

/* Get a page from the Rx buffer cache. We only consider the next available
 * page and return it if we own all its references.
 */
static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
{
	struct funeth_rx_cache *c = &q->cache;
	struct funeth_rxbuf *buf;

	if (c->prod_cnt == c->cons_cnt)
		return false; /* empty cache */

	buf = &c->bufs[c->cons_cnt & c->mask];
	if (page_ref_count(buf->page) == buf->pg_refs) {
		dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
					   PAGE_SIZE, DMA_FROM_DEVICE);
		*rb = *buf;
		buf->page = NULL;
		refresh_refs(rb);
		c->cons_cnt++;
		return true;
	}

	/* Page can't be reused. If the cache is full drop this page. */
	if (c->prod_cnt - c->cons_cnt > c->mask) {
		dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
				     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
		__page_frag_cache_drain(buf->page, buf->pg_refs);
		buf->page = NULL;
		c->cons_cnt++;
	}
	return false;
}
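
/* Note the head-of-line discipline above: only the oldest cache entry is ever
 * examined.  If the stack still holds references to its page the entry stays,
 * and is dropped outright only when the cache is full.  This bounds the
 * cache's memory while keeping the common reuse path a single refcount
 * comparison.
 */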

/* Allocate and DMA-map a page for receive. */
static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
			     int node, gfp_t gfp)
{
	struct page *p;

	if (cache_get(q, rb))
		return 0;

	p = __alloc_pages_node(node, gfp | __GFP_NOWARN, 0);
	if (unlikely(!p))
		return -ENOMEM;

	rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
				    DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) {
		FUN_QSTAT_INC(q, rx_map_err);
		__free_page(p);
		return -ENOMEM;
	}

	FUN_QSTAT_INC(q, rx_page_alloc);

	rb->page = p;
	rb->pg_refs = 1;
	refresh_refs(rb);
	rb->node = page_is_pfmemalloc(p) ? -1 : page_to_nid(p);
	return 0;
}

static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
{
	if (rb->page) {
		dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		__page_frag_cache_drain(rb->page, rb->pg_refs);
		rb->page = NULL;
	}
}

/* Run the XDP program assigned to an Rx queue.
 * Return %NULL if the buffer is consumed, or the virtual address of the packet
 * to turn into an skb.
 */
static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
			 int ref_ok, struct funeth_txq *xdp_q)
{
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 act;

	/* VA includes the headroom, frag size includes headroom + tailroom */
	xdp_init_buff(&xdp, ALIGN(skb_frag_size(frags), FUN_EPRQ_PKT_ALIGN),
		      &q->xdp_rxq);
	xdp_prepare_buff(&xdp, buf_va, FUN_XDP_HEADROOM, skb_frag_size(frags) -
			 (FUN_RX_TAILROOM + FUN_XDP_HEADROOM), false);

	xdp_prog = READ_ONCE(q->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, &xdp);

	switch (act) {
	case XDP_PASS:
		/* remove headroom, which may not be FUN_XDP_HEADROOM now */
		skb_frag_size_set(frags, xdp.data_end - xdp.data);
		skb_frag_off_add(frags, xdp.data - xdp.data_hard_start);
		goto pass;
	case XDP_TX:
		if (unlikely(!ref_ok))
			goto pass;
		if (!fun_xdp_tx(xdp_q, xdp.data, xdp.data_end - xdp.data))
			goto xdp_error;
		FUN_QSTAT_INC(q, xdp_tx);
		q->xdp_flush |= FUN_XDP_FLUSH_TX;
		break;
	case XDP_REDIRECT:
		if (unlikely(!ref_ok))
			goto pass;
		if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog)))
			goto xdp_error;
		FUN_QSTAT_INC(q, xdp_redir);
		q->xdp_flush |= FUN_XDP_FLUSH_REDIR;
		break;
	default:
		bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(q->netdev, xdp_prog, act);
xdp_error:
		q->cur_buf->pg_refs++; /* return frags' page reference */
		FUN_QSTAT_INC(q, xdp_err);
		break;
	case XDP_DROP:
		q->cur_buf->pg_refs++;
		FUN_QSTAT_INC(q, xdp_drops);
		break;
	}
	return NULL;

pass:
	return xdp.data;
}
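
/* Summary of the dispositions above: XDP_PASS returns the possibly adjusted
 * packet address so the caller can build an skb; XDP_TX and XDP_REDIRECT
 * consume the buffer and set a flag in q->xdp_flush that fun_process_cqes()
 * converts into one Tx doorbell write or xdp_do_flush() call after the poll
 * loop; everything else returns the page reference and counts an error or a
 * drop.
 */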

/* A CQE contains a fixed completion structure along with optional metadata and
 * even packet data. Given the start address of a CQE return the start of the
 * contained fixed structure, which lies at the end.
 */
static const void *cqe_to_info(const void *cqe)
{
	return cqe + FUNETH_CQE_INFO_OFFSET;
}

/* The inverse of cqe_to_info(). */
static const void *info_to_cqe(const void *cqe_info)
{
	return cqe_info - FUNETH_CQE_INFO_OFFSET;
}
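
/* Sketch of the CQE layout implied by the two helpers above:
 *
 *	cqe                         cqe + FUNETH_CQE_INFO_OFFSET
 *	|                           |
 *	v                           v
 *	+---------------------------+---------------------+
 *	| metadata / inline packet  | struct fun_cqe_info |
 *	+---------------------------+---------------------+
 *	|<-------------- FUNETH_CQE_SIZE ---------------->|
 */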

/* Return the type of hash provided by the device based on the L3 and L4
 * protocols it parsed for the packet.
 */
static enum pkt_hash_types cqe_to_pkt_hash_type(u16 pkt_parse)
{
	static const enum pkt_hash_types htype_map[] = {
		PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
		PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L4,
		PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
		PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3
	};
	u16 key;

	/* Build the key from the TCP/UDP and IP/IPv6 bits */
	key = ((pkt_parse >> FUN_ETH_RX_CV_OL4_PROT_S) & 6) |
	      ((pkt_parse >> (FUN_ETH_RX_CV_OL3_PROT_S + 1)) & 1);

	return htype_map[key];
}
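
/* Decoded, the key places the parsed L4 protocol bits in bits 2:1 and an L3
 * bit in bit 0, so htype_map reads: even keys (L3 not recognized) give no
 * hash type, key 3 (per the comment above, TCP/UDP over a recognized L3
 * header) gives an L4 hash, and the remaining odd keys fall back to an L3
 * hash.
 */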

/* Each received packet can be scattered across several Rx buffers or can
 * share a buffer with previously received packets depending on the buffer
 * and packet sizes and the room available in the most recently used buffer.
 *
 * The rules are:
 * - If the buffer at the head of an RQ has not been used it gets (part of) the
 *   next incoming packet.
 * - Otherwise, if the packet fully fits in the buffer's remaining space the
 *   packet is written there.
 * - Otherwise, the packet goes into the next Rx buffer.
 *
 * This function returns the Rx buffer for a packet or fragment thereof of the
 * given length. If it isn't @buf it either recycles or frees that buffer
 * before advancing the queue to the next buffer.
 *
 * If called repeatedly with the remaining length of a packet it will walk
 * through all the buffers containing the packet.
 */
static struct funeth_rxbuf *
get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
{
	if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset)
		return buf; /* @buf holds (part of) the packet */

	/* The packet occupies part of the next buffer. Move there after
	 * replenishing the current buffer slot either with the spare page or
	 * by reusing the slot's existing page. Note that if a spare page isn't
	 * available and the current packet occupies @buf it is a multi-frag
	 * packet that will be dropped leaving @buf available for reuse.
	 */
	if ((page_ref_count(buf->page) == buf->pg_refs &&
	     buf->node == numa_mem_id()) || !q->spare_buf.page) {
		dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
					   PAGE_SIZE, DMA_FROM_DEVICE);
		refresh_refs(buf);
	} else {
		cache_offer(q, buf);
		*buf = q->spare_buf;
		q->spare_buf.page = NULL;
		q->rqes[q->rq_cons & q->rq_mask] =
			FUN_EPRQ_RQBUF_INIT(buf->dma_addr);
	}
	q->buf_offset = 0;
	q->rq_cons++;
	return &q->bufs[q->rq_cons & q->rq_mask];
}
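
/* Illustrative walk with 4KB pages: a 2KB fragment arriving while
 * q->buf_offset is 3KB fails the fit test above, so the current slot is
 * either re-armed with its own page (when all its refs are ours and it is
 * local) or swapped for the spare page and offered to the cache, rq_cons
 * advances, and the fragment starts at offset 0 of the next buffer.
 */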

/* Gather the page fragments making up the first Rx packet on @q. Its total
 * length @tot_len includes optional head- and tail-rooms.
 *
 * Return 0 if the device retains ownership of at least some of the pages.
 * In this case the caller may only copy the packet.
 *
 * A non-zero return value gives the caller permission to use references to the
 * pages, e.g., attach them to skbs. Additionally, if the value is <0 at least
 * one of the pages is PF_MEMALLOC.
 *
 * Regardless of outcome the caller is granted a reference to each of the pages.
 */
static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
			  skb_frag_t *frags)
{
	struct funeth_rxbuf *buf = q->cur_buf;
	unsigned int frag_len;
	int ref_ok = 1;

	for (;;) {
		buf = get_buf(q, buf, tot_len);

		/* We always keep the RQ full of buffers so before we can give
		 * one of our pages to the stack we require that we can obtain
		 * a replacement page. If we can't the packet will either be
		 * copied or dropped so we can retain ownership of the page and
		 * reuse it.
		 */
		if (!q->spare_buf.page &&
		    funeth_alloc_page(q, &q->spare_buf, numa_mem_id(),
				      GFP_ATOMIC | __GFP_MEMALLOC))
			ref_ok = 0;

		frag_len = min_t(unsigned int, tot_len,
				 PAGE_SIZE - q->buf_offset);
		dma_sync_single_for_cpu(q->dma_dev,
					buf->dma_addr + q->buf_offset,
					frag_len, DMA_FROM_DEVICE);
		buf->pg_refs--;
		if (ref_ok)
			/* pfmemalloc pages have node == -1, making ref_ok < 0 */
			ref_ok |= buf->node;

		__skb_frag_set_page(frags, buf->page);
		skb_frag_off_set(frags, q->buf_offset);
		skb_frag_size_set(frags++, frag_len);

		tot_len -= frag_len;
		if (!tot_len)
			break;

		q->buf_offset = PAGE_SIZE;
	}
	q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN);
	q->cur_buf = buf;
	return ref_ok;
}

static bool rx_hwtstamp_enabled(const struct net_device *dev)
{
	const struct funeth_priv *d = netdev_priv(dev);

	return d->hwtstamp_cfg.rx_filter == HWTSTAMP_FILTER_ALL;
}

/* Advance the CQ pointers and phase tag to the next CQE. */
static void advance_cq(struct funeth_rxq *q)
{
	if (unlikely(q->cq_head == q->cq_mask)) {
		q->cq_head = 0;
		q->phase ^= 1;
		q->next_cqe_info = cqe_to_info(q->cqes);
	} else {
		q->cq_head++;
		q->next_cqe_info += FUNETH_CQE_SIZE;
	}
	prefetch(q->next_cqe_info);
}
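
/* Each wrap of the CQ flips q->phase, so CQEs left over from the previous lap
 * fail the phase check in cqe_phase_mismatch() and the driver never needs to
 * zero consumed entries; the prefetch warms the next descriptor before the
 * poll loop examines it.
 */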

/* Process the packet represented by the head CQE of @q. Gather the packet's
 * fragments, run it through the optional XDP program, and if needed construct
 * an skb and pass it to the stack.
 */
static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q)
{
	const struct fun_eth_cqe *rxreq = info_to_cqe(q->next_cqe_info);
	unsigned int i, tot_len, pkt_len = be32_to_cpu(rxreq->pkt_len);
	struct net_device *ndev = q->netdev;
	skb_frag_t frags[RX_MAX_FRAGS];
	struct skb_shared_info *si;
	unsigned int headroom;
	gro_result_t gro_res;
	struct sk_buff *skb;
	int ref_ok;
	void *va;
	u16 cv;

	u64_stats_update_begin(&q->syncp);
	q->stats.rx_pkts++;
	q->stats.rx_bytes += pkt_len;
	u64_stats_update_end(&q->syncp);

	advance_cq(q);

	/* account for head- and tail-room, present only for 1-buffer packets */
	tot_len = pkt_len;
	headroom = be16_to_cpu(rxreq->headroom);
	if (likely(headroom))
		tot_len += FUN_RX_TAILROOM + headroom;

	ref_ok = fun_gather_pkt(q, tot_len, frags);
	va = skb_frag_address(frags);
	if (xdp_q && headroom == FUN_XDP_HEADROOM) {
		va = fun_run_xdp(q, frags, va, ref_ok, xdp_q);
		if (!va)
			return;
		headroom = 0; /* XDP_PASS trims it */
	}
	if (unlikely(!ref_ok))
		goto no_mem;

	if (likely(headroom)) {
		/* headroom is either FUN_RX_HEADROOM or FUN_XDP_HEADROOM */
		prefetch(va + headroom);
		skb = napi_build_skb(va, ALIGN(tot_len, FUN_EPRQ_PKT_ALIGN));
		if (unlikely(!skb))
			goto no_mem;

		skb_reserve(skb, headroom);
		__skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
	} else {
		prefetch(va);
		skb = napi_get_frags(q->napi);
		if (unlikely(!skb))
			goto no_mem;

		if (ref_ok < 0)
			skb->pfmemalloc = 1;

		si = skb_shinfo(skb);
		si->nr_frags = rxreq->nsgl;
		for (i = 0; i < si->nr_frags; i++)
			si->frags[i] = frags[i];

		skb->len = pkt_len;
		skb->data_len = pkt_len;
		skb->truesize += round_up(pkt_len, FUN_EPRQ_PKT_ALIGN);
	}

	skb_record_rx_queue(skb, q->qidx);
	cv = be16_to_cpu(rxreq->pkt_cv);
	if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash))
		skb_set_hash(skb, be32_to_cpu(rxreq->hash),
			     cqe_to_pkt_hash_type(cv));
	if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) {
		FUN_QSTAT_INC(q, rx_cso);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = be16_to_cpu(rxreq->csum) - 1;
	}
	if (unlikely(rx_hwtstamp_enabled(q->netdev)))
		skb_hwtstamps(skb)->hwtstamp = be64_to_cpu(rxreq->timestamp);

	trace_funeth_rx(q, rxreq->nsgl, pkt_len, skb->hash, cv);

	gro_res = skb->data_len ? napi_gro_frags(q->napi) :
		  napi_gro_receive(q->napi, skb);
	if (gro_res == GRO_MERGED || gro_res == GRO_MERGED_FREE)
		FUN_QSTAT_INC(q, gro_merged);
	else if (gro_res == GRO_HELD)
		FUN_QSTAT_INC(q, gro_pkts);
	return;

no_mem:
	FUN_QSTAT_INC(q, rx_mem_drops);

	/* Release the references we've been granted for the frag pages.
	 * We return the ref of the last frag and free the rest.
	 */
	q->cur_buf->pg_refs++;
	for (i = 0; i < rxreq->nsgl - 1; i++)
		__free_page(skb_frag_page(frags + i));
}
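
/* Two skb construction paths are used above: packets with device-written
 * headroom (single-buffer, non-XDP_PASS) become skbs via napi_build_skb()
 * directly around the buffer, while the rest attach their page fragments to a
 * frag-only skb from napi_get_frags(), the form napi_gro_frags() expects.
 */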

/* Return 0 if the phase tag of the CQE at the CQ's head matches expectations,
 * indicating the CQE is new.
 */
static u16 cqe_phase_mismatch(const struct fun_cqe_info *ci, u16 phase)
{
	u16 sf_p = be16_to_cpu(ci->sf_p);

	return (sf_p & 1) ^ phase;
}
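
/* Worked example, assuming the device writes its first lap of CQEs with the
 * phase bit set (which the initial q->phase of 1 in fun_rxq_create_dev()
 * implies): lap-1 CQEs have (sf_p & 1) == 1, matching phase 1, so the
 * mismatch is 0 and they are consumed.  Once advance_cq() wraps the head,
 * q->phase becomes 0 and those same entries mismatch until the device
 * overwrites them with phase-0 CQEs on its next lap.
 */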

/* Walk through a CQ identifying and processing fresh CQEs up to the given
 * budget. Return the remaining budget.
 */
static int fun_process_cqes(struct funeth_rxq *q, int budget)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	struct funeth_txq **xdpqs, *xdp_q = NULL;

	xdpqs = rcu_dereference_bh(fp->xdpqs);
	if (xdpqs)
		xdp_q = xdpqs[smp_processor_id()];

	while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) {
		/* access other descriptor fields after the phase check */
		dma_rmb();

		fun_handle_cqe_pkt(q, xdp_q);
		budget--;
	}

	if (unlikely(q->xdp_flush)) {
		if (q->xdp_flush & FUN_XDP_FLUSH_TX)
			fun_txq_wr_db(xdp_q);
		if (q->xdp_flush & FUN_XDP_FLUSH_REDIR)
			xdp_do_flush();
		q->xdp_flush = 0;
	}

	return budget;
}

/* NAPI handler for Rx queues. Calls the CQE processing loop and writes RQ/CQ
 * doorbells as needed.
 */
int fun_rxq_napi_poll(struct napi_struct *napi, int budget)
{
	struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
	struct funeth_rxq *q = irq->rxq;
	int work_done = budget - fun_process_cqes(q, budget);
	u32 cq_db_val = q->cq_head;

	if (unlikely(work_done >= budget))
		FUN_QSTAT_INC(q, rx_budget);
	else if (napi_complete_done(napi, work_done))
		cq_db_val |= q->irq_db_val;

	/* check whether to post new Rx buffers */
	if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) {
		u64_stats_update_begin(&q->syncp);
		q->stats.rx_bufs += q->rq_cons - q->rq_cons_db;
		u64_stats_update_end(&q->syncp);
		q->rq_cons_db = q->rq_cons;
		writel((q->rq_cons - 1) & q->rq_mask, q->rq_db);
	}

	writel(cq_db_val, q->cq_db);
	return work_done;
}
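
/* Doorbell batching: the RQ doorbell is written only once at least
 * rq_db_thres (a quarter of the ring) buffers have been consumed, while the
 * CQ doorbell both acknowledges the processed CQEs and, when NAPI completed,
 * re-arms the interrupt via irq_db_val.
 */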

/* Free the Rx buffers of an Rx queue. */
static void fun_rxq_free_bufs(struct funeth_rxq *q)
{
	struct funeth_rxbuf *b = q->bufs;
	unsigned int i;

	for (i = 0; i <= q->rq_mask; i++, b++)
		funeth_free_page(q, b);

	funeth_free_page(q, &q->spare_buf);
	q->cur_buf = NULL;
}

/* Initially provision an Rx queue with Rx buffers. */
static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
{
	struct funeth_rxbuf *b = q->bufs;
	unsigned int i;

	for (i = 0; i <= q->rq_mask; i++, b++) {
		if (funeth_alloc_page(q, b, node, GFP_KERNEL)) {
			fun_rxq_free_bufs(q);
			return -ENOMEM;
		}
		q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr);
	}
	q->cur_buf = q->bufs;
	return 0;
}

/* Initialize a used-buffer cache of the given depth. */
static int fun_rxq_init_cache(struct funeth_rx_cache *c, unsigned int depth,
			      int node)
{
	c->mask = depth - 1;
	c->bufs = kvzalloc_node(depth * sizeof(*c->bufs), GFP_KERNEL, node);
	return c->bufs ? 0 : -ENOMEM;
}

/* Deallocate an Rx queue's used-buffer cache and its contents. */
static void fun_rxq_free_cache(struct funeth_rxq *q)
{
	struct funeth_rxbuf *b = q->cache.bufs;
	unsigned int i;

	for (i = 0; i <= q->cache.mask; i++, b++)
		funeth_free_page(q, b);

	kvfree(q->cache.bufs);
	q->cache.bufs = NULL;
}

int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	struct fun_admin_epcq_req cmd;
	u16 headroom;
	int err;

	headroom = prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
	if (headroom != q->headroom) {
		cmd.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
							sizeof(cmd));
		cmd.u.modify =
			FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(FUN_ADMIN_SUBOP_MODIFY,
						       0, q->hw_cqid, headroom);
		err = fun_submit_admin_sync_cmd(fp->fdev, &cmd.common, NULL, 0,
						0);
		if (err)
			return err;
		q->headroom = headroom;
	}

	WRITE_ONCE(q->xdp_prog, prog);
	return 0;
}
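
/* The headroom change is sent to the device because the device writes packet
 * data at the configured offset.  Publishing the program with WRITE_ONCE()
 * pairs with the READ_ONCE() in fun_run_xdp(), and the data path only runs
 * XDP on packets whose CQE reports FUN_XDP_HEADROOM, so packets received with
 * the old headroom are never fed to the program.
 */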

/* Create an Rx queue, allocating the host memory it needs. */
static struct funeth_rxq *fun_rxq_create_sw(struct net_device *dev,
					    unsigned int qidx,
					    unsigned int ncqe,
					    unsigned int nrqe,
					    struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(dev);
	struct funeth_rxq *q;
	int err = -ENOMEM;
	int numa_node;

	numa_node = fun_irq_node(irq);
	q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
	if (!q)
		goto err;

	q->qidx = qidx;
	q->netdev = dev;
	q->cq_mask = ncqe - 1;
	q->rq_mask = nrqe - 1;
	q->numa_node = numa_node;
	q->rq_db_thres = nrqe / 4;
	u64_stats_init(&q->syncp);
	q->dma_dev = &fp->pdev->dev;

	q->rqes = fun_alloc_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes),
				     sizeof(*q->bufs), false, numa_node,
				     &q->rq_dma_addr, (void **)&q->bufs, NULL);
	if (!q->rqes)
		goto free_q;

	q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0,
				     false, numa_node, &q->cq_dma_addr, NULL,
				     NULL);
	if (!q->cqes)
		goto free_rqes;

	err = fun_rxq_init_cache(&q->cache, nrqe, numa_node);
	if (err)
		goto free_cqes;

	err = fun_rxq_alloc_bufs(q, numa_node);
	if (err)
		goto free_cache;

	q->stats.rx_bufs = q->rq_mask;
	q->init_state = FUN_QSTATE_INIT_SW;
	return q;

free_cache:
	fun_rxq_free_cache(q);
free_cqes:
	dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes,
			  q->cq_dma_addr);
free_rqes:
	fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes,
			  q->rq_dma_addr, q->bufs);
free_q:
	kfree(q);
err:
	netdev_err(dev, "Unable to allocate memory for Rx queue %u\n", qidx);
	return ERR_PTR(err);
}

static void fun_rxq_free_sw(struct funeth_rxq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);

	fun_rxq_free_cache(q);
	fun_rxq_free_bufs(q);
	fun_free_ring_mem(q->dma_dev, q->rq_mask + 1, sizeof(*q->rqes), false,
			  q->rqes, q->rq_dma_addr, q->bufs);
	dma_free_coherent(q->dma_dev, (q->cq_mask + 1) * FUNETH_CQE_SIZE,
			  q->cqes, q->cq_dma_addr);

	/* Before freeing the queue, fold its key counters into the
	 * netdev-wide totals.
	 */
	fp->rx_packets += q->stats.rx_pkts;
	fp->rx_bytes += q->stats.rx_bytes;
	fp->rx_dropped += q->stats.rx_map_err + q->stats.rx_mem_drops;

	kfree(q);
}

/* Create an Rx queue's resources on the device. */
int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	unsigned int ncqe = q->cq_mask + 1;
	unsigned int nrqe = q->rq_mask + 1;
	int err;

	err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx,
			       irq->napi.napi_id);
	if (err)
		goto out;

	err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					 NULL);
	if (err)
		goto xdp_unreg;

	q->phase = 1;
	q->irq_cnt = 0;
	q->cq_head = 0;
	q->rq_cons = 0;
	q->rq_cons_db = 0;
	q->buf_offset = 0;
	q->napi = &irq->napi;
	q->irq_db_val = fp->cq_irq_db;
	q->next_cqe_info = cqe_to_info(q->cqes);

	q->xdp_prog = fp->xdp_prog;
	q->headroom = fp->xdp_prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;

	err = fun_sq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
			    FUN_ADMIN_EPSQ_CREATE_FLAG_RQ, 0,
			    FUN_HCI_ID_INVALID, 0, nrqe, q->rq_dma_addr, 0, 0,
			    0, 0, fp->fdev->kern_end_qid, PAGE_SHIFT,
			    &q->hw_sqid, &q->rq_db);
	if (err)
		goto xdp_unreg;

	err = fun_cq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
			    FUN_ADMIN_EPCQ_CREATE_FLAG_RQ, 0,
			    q->hw_sqid, ilog2(FUNETH_CQE_SIZE), ncqe,
			    q->cq_dma_addr, q->headroom, FUN_RX_TAILROOM, 0, 0,
			    irq->irq_idx, 0, fp->fdev->kern_end_qid,
			    &q->hw_cqid, &q->cq_db);
	if (err)
		goto free_rq;

	irq->rxq = q;
	writel(q->rq_mask, q->rq_db);
	q->init_state = FUN_QSTATE_INIT_FULL;

	netif_info(fp, ifup, q->netdev,
		   "Rx queue %u, depth %u/%u, HW qid %u/%u, IRQ idx %u, node %d, headroom %u\n",
		   q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx,
		   q->numa_node, q->headroom);
	return 0;

free_rq:
	fun_destroy_sq(fp->fdev, q->hw_sqid);
xdp_unreg:
	xdp_rxq_info_unreg(&q->xdp_rxq);
out:
	netdev_err(q->netdev,
		   "Failed to create Rx queue %u on device, error %d\n",
		   q->qidx, err);
	return err;
}

static void fun_rxq_free_dev(struct funeth_rxq *q)
{
	struct funeth_priv *fp = netdev_priv(q->netdev);
	struct fun_irq *irq;

	if (q->init_state < FUN_QSTATE_INIT_FULL)
		return;

	irq = container_of(q->napi, struct fun_irq, napi);
	netif_info(fp, ifdown, q->netdev,
		   "Freeing Rx queue %u (id %u/%u), IRQ %u\n",
		   q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx);

	irq->rxq = NULL;
	xdp_rxq_info_unreg(&q->xdp_rxq);
	fun_destroy_sq(fp->fdev, q->hw_sqid);
	fun_destroy_cq(fp->fdev, q->hw_cqid);
	q->init_state = FUN_QSTATE_INIT_SW;
}

/* Create or advance an Rx queue, allocating all the host and device resources
 * needed to reach the target state.
 */
int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
		      unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
		      int state, struct funeth_rxq **qp)
{
	struct funeth_rxq *q = *qp;
	int err;

	if (!q) {
		q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq);
		if (IS_ERR(q))
			return PTR_ERR(q);
	}

	if (q->init_state >= state)
		goto out;

	err = fun_rxq_create_dev(q, irq);
	if (err) {
		if (!*qp)
			fun_rxq_free_sw(q);
		return err;
	}

out:
	*qp = q;
	return 0;
}

/* Free Rx queue resources until it reaches the target state. */
struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state)
{
	if (state < FUN_QSTATE_INIT_FULL)
		fun_rxq_free_dev(q);

	if (state == FUN_QSTATE_DESTROYED) {
		fun_rxq_free_sw(q);
		q = NULL;
	}

	return q;
}
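
/* Queue state machine used above: FUN_QSTATE_DESTROYED (no resources) ->
 * FUN_QSTATE_INIT_SW (host memory only) -> FUN_QSTATE_INIT_FULL (device
 * resources live).  funeth_rxq_create() walks a queue up to the requested
 * state and funeth_rxq_free() walks it back down.
 */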