// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"
#include "aq_vec.h"
#include "aq_main.h"

#include <net/xdp.h>
#include <linux/filter.h>
#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

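/* Take extra page references before a buffer escapes the ring: once an XDP
 * verdict hands the frame to the stack or to another device, each fragment
 * page and the head page get one more reference, so the ring's own reference
 * can be recycled independently of the frame's lifetime.
 */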
static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
			       struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	int i;

	if (xdp_buff_has_frags(xdp)) {
		sinfo = xdp_get_shared_info_from_buff(xdp);

		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			page_ref_inc(skb_frag_page(frag));
		}
	}
	page_ref_inc(buff->rxdata.page);
}

static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	unsigned int len = PAGE_SIZE << rxpage->order;

	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}

static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)
{
	struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);
	unsigned int order = rx_ring->page_order;
	struct page *page;
	int ret = -ENOMEM;
	dma_addr_t daddr;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		goto err_exit;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(dev, daddr)))
		goto free_page;

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = rx_ring->page_offset;

	return 0;

free_page:
	__free_pages(page, order);

err_exit:
	return ret;
}

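/* Page-recycling policy used below: if the ring holds the only reference
 * (page_ref_count == 1), the page is reused in place from page_offset.
 * Otherwise the offset is advanced by one frame slot (a "flip"); e.g.,
 * assuming 4 KiB order-0 pages, a 2 KiB frame_max and no head/tail room,
 * a page holds two slots before it must be dropped (a "lost" page) and
 * reallocated.
 */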
static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
{
	unsigned int order = self->page_order;
	u16 page_offset = self->page_offset;
	u16 frame_max = self->frame_max;
	u16 tail_size = self->tail_size;
	int ret;

	if (rxbuf->rxdata.page) {
		/* One means ring is the only user and can reuse */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try reuse buffer */
			rxbuf->rxdata.pg_off += frame_max + page_offset +
						tail_size;
			if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
			    (PAGE_SIZE << order)) {
				u64_stats_update_begin(&self->stats.rx.syncp);
				self->stats.rx.pg_flips++;
				u64_stats_update_end(&self->stats.rx.syncp);
			} else {
				/* Buffer exhausted. We have other users and
				 * should release this page and realloc
				 */
				aq_free_rxpage(&rxbuf->rxdata,
					       aq_nic_get_dev(self->aq_nic));
				u64_stats_update_begin(&self->stats.rx.syncp);
				self->stats.rx.pg_losts++;
				u64_stats_update_end(&self->stats.rx.syncp);
			}
		} else {
			rxbuf->rxdata.pg_off = page_offset;
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.pg_reuses++;
			u64_stats_update_end(&self->stats.rx.syncp);
		}
	}

	if (!rxbuf->rxdata.page) {
		ret = aq_alloc_rxpages(&rxbuf->rxdata, self);
		if (ret) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.alloc_fails++;
			u64_stats_update_end(&self->stats.rx.syncp);
		}

		return ret;
	}

	return 0;
}

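/* Common allocator for both ring directions: buff_ring is the driver's
 * per-descriptor shadow state in regular kernel memory, while dx_ring is
 * the hardware descriptor array itself, which must live in coherent DMA
 * memory because the NIC reads and writes it directly.
 */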
static int aq_ring_alloc(struct aq_ring_s *self,
			 struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);

	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0)
		aq_ring_free(self);

	return err;
}

int aq_ring_tx_alloc(struct aq_ring_s *self,
		     struct aq_nic_s *aq_nic,
		     unsigned int idx,
		     struct aq_nic_cfg_s *aq_nic_cfg)
{
	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	return aq_ring_alloc(self, aq_nic);
}

int aq_ring_rx_alloc(struct aq_ring_s *self,
		     struct aq_nic_s *aq_nic,
		     unsigned int idx,
		     struct aq_nic_cfg_s *aq_nic_cfg)
{
	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
	self->xdp_prog = aq_nic->xdp_prog;
	self->frame_max = AQ_CFG_RX_FRAME_MAX;

	/* Only order-2 is allowed if XDP is enabled */
	if (READ_ONCE(self->xdp_prog)) {
		self->page_offset = AQ_XDP_HEADROOM;
		self->page_order = AQ_CFG_XDP_PAGEORDER;
		self->tail_size = AQ_XDP_TAILROOM;
	} else {
		self->page_offset = 0;
		self->page_order = fls(self->frame_max / PAGE_SIZE +
				       (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1;
		if (aq_nic_cfg->rxpageorder > self->page_order)
			self->page_order = aq_nic_cfg->rxpageorder;
		self->tail_size = 0;
	}

	return aq_ring_alloc(self, aq_nic);
}

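/* Separate allocator for the PTP hardware-timestamp ring; it carries raw
 * descriptors only, with no buff_ring shadow. The extra AQ_CFG_RXDS_DEF
 * bytes on top of size * dx_size appear to give the hardware slack past
 * the last descriptor.
 */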
int
aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
		      unsigned int idx, unsigned int size, unsigned int dx_size)
{
	struct device *dev = aq_nic_get_dev(aq_nic);
	size_t sz = size * dx_size + AQ_CFG_RXDS_DEF;

	memset(self, 0, sizeof(*self));

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = size;
	self->dx_size = dx_size;

	self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa,
					   GFP_KERNEL);
	if (!self->dx_ring) {
		aq_ring_free(self);
		return -ENOMEM;
	}

	return 0;
}

int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	self->ring_type = ring_type;

	if (self->ring_type == ATL_RING_RX)
		u64_stats_init(&self->stats.rx.syncp);
	else
		u64_stats_init(&self->stats.tx.syncp);

	return 0;
}

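/* Ring-index predicate: true when index i lies strictly between head h and
 * tail t, modulo wrap-around. For a 16-entry ring with h = 14 and t = 2,
 * the in-range indices are 15, 0 and 1; the (h < i) || (i < t) arm handles
 * that wrapped case.
 */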
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev,
				     AQ_NIC_RING2QMAP(ring->aq_nic,
						      ring->idx))) {
		netif_wake_subqueue(ndev,
				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
		u64_stats_update_begin(&ring->stats.tx.syncp);
		ring->stats.tx.queue_restarts++;
		u64_stats_update_end(&ring->stats.tx.syncp);
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev,
				      AQ_NIC_RING2QMAP(ring->aq_nic,
						       ring->idx)))
		netif_stop_subqueue(ndev,
				    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
}

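/* Tx completion: walk sw_head toward hw_head under a fixed budget, unmapping
 * each descriptor (dma_unmap_single() for the linear head, dma_unmap_page()
 * for fragments) and freeing the skb or XDP frame once the end-of-packet
 * descriptor is reached. A start-of-packet whose eop_index has not yet
 * passed hw_head means the packet is still in flight, so cleaning stops.
 */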
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget;

	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
	     budget && self->sw_head != self->hw_head; budget--) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    (!aq_ring_dx_in_range(self->sw_head,
							  buff->eop_index,
							  self->hw_head)))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (likely(!buff->is_eop))
			goto out;

		if (buff->skb) {
			u64_stats_update_begin(&self->stats.tx.syncp);
			++self->stats.tx.packets;
			self->stats.tx.bytes += buff->skb->len;
			u64_stats_update_end(&self->stats.tx.syncp);
			dev_kfree_skb_any(buff->skb);
		} else if (buff->xdpf) {
			u64_stats_update_begin(&self->stats.tx.syncp);
			++self->stats.tx.packets;
			self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
			u64_stats_update_end(&self->stats.tx.syncp);
			xdp_return_frame_rx_napi(buff->xdpf);
		}

out:
		buff->skb = NULL;
		buff->xdpf = NULL;
		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}

	return !!budget;
}

static void aq_rx_checksum(struct aq_ring_s *self,
			   struct aq_ring_buff_s *buff,
			   struct sk_buff *skb)
{
	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(buff->is_cso_err)) {
		u64_stats_update_begin(&self->stats.rx.syncp);
		++self->stats.rx.errors;
		u64_stats_update_end(&self->stats.rx.syncp);
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
	if (buff->is_ip_cso) {
		__skb_incr_checksum_unnecessary(skb);
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}

	if (buff->is_udp_cso || buff->is_tcp_cso)
		__skb_incr_checksum_unnecessary(skb);
}

int aq_xdp_xmit(struct net_device *dev, int num_frames,
		struct xdp_frame **frames, u32 flags)
{
	struct aq_nic_s *aq_nic = netdev_priv(dev);
	unsigned int vec, i, drop = 0;
	int cpu = smp_processor_id();
	struct aq_nic_cfg_s *aq_cfg;
	struct aq_ring_s *ring;

	aq_cfg = aq_nic_get_cfg(aq_nic);
	vec = cpu % aq_cfg->vecs;
	ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)];

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY)
			drop++;
	}

	return num_frames - drop;
}

static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
					struct net_device *dev,
					struct aq_ring_buff_s *buff)
{
	struct xdp_frame *xdpf;
	struct sk_buff *skb;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return NULL;

	skb = xdp_build_skb_from_frame(xdpf, dev);
	if (!skb)
		return NULL;

	aq_get_rxpages_xdp(buff, xdp);
	return skb;
}

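/* Run the attached XDP program and act on its verdict. Only XDP_PASS
 * produces an skb for the normal receive path; XDP_TX bounces the frame to
 * the paired Tx ring, XDP_REDIRECT hands it to another device, and all
 * other outcomes are counted and dropped. Non-pass outcomes are reported
 * through the ERR_PTR()-encoded return value, which the caller only tests
 * with IS_ERR() / NULL before moving on.
 */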
static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
				       struct xdp_buff *xdp,
				       struct aq_ring_s *rx_ring,
				       struct aq_ring_buff_s *buff)
{
	int result = NETDEV_TX_BUSY;
	struct aq_ring_s *tx_ring;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	u32 act = XDP_ABORTED;
	struct sk_buff *skb;

	u64_stats_update_begin(&rx_ring->stats.rx.syncp);
	++rx_ring->stats.rx.packets;
	rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);
	u64_stats_update_end(&rx_ring->stats.rx.syncp);

	prog = READ_ONCE(rx_ring->xdp_prog);
	if (!prog)
		return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);

	prefetchw(xdp->data_hard_start); /* xdp_frame write */

	/* single buffer XDP program, but packet is multi buffer, aborted */
	if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags)
		goto out_aborted;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
		if (!skb)
			goto out_aborted;
		u64_stats_update_begin(&rx_ring->stats.rx.syncp);
		++rx_ring->stats.rx.xdp_pass;
		u64_stats_update_end(&rx_ring->stats.rx.syncp);
		return skb;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto out_aborted;
		tx_ring = aq_nic->aq_ring_tx[rx_ring->idx];
		result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf);
		if (result == NETDEV_TX_BUSY)
			goto out_aborted;
		u64_stats_update_begin(&rx_ring->stats.rx.syncp);
		++rx_ring->stats.rx.xdp_tx;
		u64_stats_update_end(&rx_ring->stats.rx.syncp);
		aq_get_rxpages_xdp(buff, xdp);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0)
			goto out_aborted;

		u64_stats_update_begin(&rx_ring->stats.rx.syncp);
		++rx_ring->stats.rx.xdp_redirect;
		u64_stats_update_end(&rx_ring->stats.rx.syncp);
		aq_get_rxpages_xdp(buff, xdp);
		break;
	default:
		fallthrough;
	case XDP_ABORTED:
out_aborted:
		u64_stats_update_begin(&rx_ring->stats.rx.syncp);
		++rx_ring->stats.rx.xdp_aborted;
		u64_stats_update_end(&rx_ring->stats.rx.syncp);
		trace_xdp_exception(aq_nic->ndev, prog, act);
		bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act);
		break;
	case XDP_DROP:
		u64_stats_update_begin(&rx_ring->stats.rx.syncp);
		++rx_ring->stats.rx.xdp_drop;
		u64_stats_update_end(&rx_ring->stats.rx.syncp);
		break;
	}

	return ERR_PTR(-result);
}

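/* Gather the descriptors that follow a multi-buffer packet's head into the
 * xdp_buff's shared info as page fragments, merging checksum flags into the
 * head descriptor along the way. Returns true if the packet would need more
 * than MAX_SKB_FRAGS fragments, which the caller treats as an aborted frame.
 */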
static bool aq_add_rx_fragment(struct device *dev,
			       struct aq_ring_s *ring,
			       struct aq_ring_buff_s *buff,
			       struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	struct aq_ring_buff_s *buff_ = buff;

	memset(sinfo, 0, sizeof(*sinfo));
	do {
		skb_frag_t *frag;

		if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS))
			return true;

		frag = &sinfo->frags[sinfo->nr_frags++];
		buff_ = &ring->buff_ring[buff_->next];
		dma_sync_single_range_for_cpu(dev,
					      buff_->rxdata.daddr,
					      buff_->rxdata.pg_off,
					      buff_->len,
					      DMA_FROM_DEVICE);
		sinfo->xdp_frags_size += buff_->len;
		skb_frag_fill_page_desc(frag, buff_->rxdata.page,
					buff_->rxdata.pg_off,
					buff_->len);

		buff_->is_cleaned = 1;

		buff->is_ip_cso &= buff_->is_ip_cso;
		buff->is_udp_cso &= buff_->is_udp_cso;
		buff->is_tcp_cso &= buff_->is_tcp_cso;
		buff->is_cso_err |= buff_->is_cso_err;

		if (page_is_pfmemalloc(buff_->rxdata.page))
			xdp_buff_set_frag_pfmemalloc(xdp);

	} while (!buff_->is_eop);

	xdp_buff_set_frags_flag(xdp);

	return false;
}

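/* Non-XDP receive path. For each completed descriptor chain this pulls up
 * to AQ_CFG_RX_HDR_SIZE bytes of headers into the skb's linear area (sized
 * with eth_get_headlen()) and attaches the rest of the page as frags, so
 * large LRO/RSC aggregates avoid a full copy. Incomplete RSC chains are
 * left on the ring for a later poll.
 */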
static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
			      int *work_done, int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		u16 hdr_len;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			unsigned int frag_cnt = 0U;

			buff_ = buff;
			do {
				bool is_rsc_completed = true;

				if (buff_->next >= self->size) {
					err = -EIO;
					goto err_exit;
				}

				frag_cnt++;
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed) ||
				    frag_cnt > MAX_SKB_FRAGS) {
					err = 0;
					goto err_exit;
				}

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;
			} while (!buff_->is_eop);

			if (buff->is_error ||
			    (buff->is_lro && buff->is_cso_err)) {
				buff_ = buff;
				do {
					if (buff_->next >= self->size) {
						err = -EIO;
						goto err_exit;
					}
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				u64_stats_update_begin(&self->stats.rx.syncp);
				++self->stats.rx.errors;
				u64_stats_update_end(&self->stats.rx.syncp);
				continue;
			}
		}

		if (buff->is_error) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			++self->stats.rx.errors;
			u64_stats_update_end(&self->stats.rx.syncp);
			continue;
		}

		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);

		skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
		if (unlikely(!skb)) {
			u64_stats_update_begin(&self->stats.rx.syncp);
			self->stats.rx.skb_alloc_fails++;
			u64_stats_update_end(&self->stats.rx.syncp);
			err = -ENOMEM;
			goto err_exit;
		}
		if (is_ptp_ring)
			buff->len -=
				aq_ptp_extract_ts(self->aq_nic, skb_hwtstamps(skb),
						  aq_buf_vaddr(&buff->rxdata),
						  buff->len);

		hdr_len = buff->len;
		if (hdr_len > AQ_CFG_RX_HDR_SIZE)
			hdr_len = eth_get_headlen(skb->dev,
						  aq_buf_vaddr(&buff->rxdata),
						  AQ_CFG_RX_HDR_SIZE);

		memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
		       ALIGN(hdr_len, sizeof(long)));

		if (buff->len - hdr_len > 0) {
			skb_add_rx_frag(skb, i++, buff->rxdata.page,
					buff->rxdata.pg_off + hdr_len,
					buff->len - hdr_len,
					self->frame_max);
			page_ref_inc(buff->rxdata.page);
		}

		if (!buff->is_eop) {
			buff_ = buff;
			do {
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];

				dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
							      buff_->rxdata.daddr,
							      buff_->rxdata.pg_off,
							      buff_->len,
							      DMA_FROM_DEVICE);
				skb_add_rx_frag(skb, i++,
						buff_->rxdata.page,
						buff_->rxdata.pg_off,
						buff_->len,
						self->frame_max);
				page_ref_inc(buff_->rxdata.page);
				buff_->is_cleaned = 1;

				buff->is_ip_cso &= buff_->is_ip_cso;
				buff->is_udp_cso &= buff_->is_udp_cso;
				buff->is_tcp_cso &= buff_->is_tcp_cso;
				buff->is_cso_err |= buff_->is_cso_err;
			} while (!buff_->is_eop);
		}

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		skb->protocol = eth_type_trans(skb, ndev);

		aq_rx_checksum(self, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);
		/* Send all PTP traffic to 0 queue */
		skb_record_rx_queue(skb,
				    is_ptp_ring ? 0
						: AQ_NIC_RING2QMAP(self->aq_nic,
								   self->idx));

		u64_stats_update_begin(&self->stats.rx.syncp);
		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;
		u64_stats_update_end(&self->stats.rx.syncp);

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

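/* XDP receive path, mirroring __aq_ring_rx_clean() but building an xdp_buff
 * around the raw page instead of an skb: hard_start backs up by page_offset
 * to expose the XDP headroom, and multi-buffer chains go through
 * aq_add_rx_fragment() before the program runs.
 */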
static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
			       struct napi_struct *napi, int *work_done,
			       int budget)
{
	int frame_sz = rx_ring->page_offset + rx_ring->frame_max +
		       rx_ring->tail_size;
	struct aq_nic_s *aq_nic = rx_ring->aq_nic;
	bool is_rsc_completed = true;
	struct device *dev;
	int err = 0;

	dev = aq_nic_get_dev(aq_nic);
	for (; (rx_ring->sw_head != rx_ring->hw_head) && budget;
	     rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
		bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
		struct aq_ring_buff_s *buff_ = NULL;
		u16 ptp_hwtstamp_len = 0;
		struct skb_shared_hwtstamps shhwtstamps;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		struct xdp_buff xdp;
		void *hard_start;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			buff_ = buff;
			do {
				if (buff_->next >= rx_ring->size) {
					err = -EIO;
					goto err_exit;
				}
				next_ = buff_->next;
				buff_ = &rx_ring->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(rx_ring->sw_head,
							    next_,
							    rx_ring->hw_head);

				if (unlikely(!is_rsc_completed))
					break;

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;
			} while (!buff_->is_eop);

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
			if (buff->is_error ||
			    (buff->is_lro && buff->is_cso_err)) {
				buff_ = buff;
				do {
					if (buff_->next >= rx_ring->size) {
						err = -EIO;
						goto err_exit;
					}
					next_ = buff_->next;
					buff_ = &rx_ring->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				u64_stats_update_begin(&rx_ring->stats.rx.syncp);
				++rx_ring->stats.rx.errors;
				u64_stats_update_end(&rx_ring->stats.rx.syncp);
				continue;
			}
		}

		if (buff->is_error) {
			u64_stats_update_begin(&rx_ring->stats.rx.syncp);
			++rx_ring->stats.rx.errors;
			u64_stats_update_end(&rx_ring->stats.rx.syncp);
			continue;
		}

		dma_sync_single_range_for_cpu(dev,
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);
		hard_start = page_address(buff->rxdata.page) +
			     buff->rxdata.pg_off - rx_ring->page_offset;

		if (is_ptp_ring) {
			ptp_hwtstamp_len = aq_ptp_extract_ts(rx_ring->aq_nic, &shhwtstamps,
							     aq_buf_vaddr(&buff->rxdata),
							     buff->len);
			buff->len -= ptp_hwtstamp_len;
		}

		xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
		xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
				 buff->len, false);
		if (!buff->is_eop) {
			if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) {
				u64_stats_update_begin(&rx_ring->stats.rx.syncp);
				++rx_ring->stats.rx.packets;
				rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp);
				++rx_ring->stats.rx.xdp_aborted;
				u64_stats_update_end(&rx_ring->stats.rx.syncp);
				continue;
			}
		}

		skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);
		if (IS_ERR(skb) || !skb)
			continue;

		if (ptp_hwtstamp_len > 0)
			*skb_hwtstamps(skb) = shhwtstamps;

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		aq_rx_checksum(rx_ring, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);
		/* Send all PTP traffic to 0 queue */
		skb_record_rx_queue(skb,
				    is_ptp_ring ? 0
						: AQ_NIC_RING2QMAP(rx_ring->aq_nic,
								   rx_ring->idx));

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	if (static_branch_unlikely(&aq_xdp_locking_key))
		return __aq_ring_xdp_clean(self, napi, work_done, budget);

	return __aq_ring_rx_clean(self, napi, work_done, budget);
}

void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
	while (self->sw_head != self->hw_head) {
		u64 ns;

		aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
						self->dx_ring +
						(self->sw_head * self->dx_size),
						self->dx_size, &ns);
		aq_ptp_tx_hwtstamp(aq_nic, ns);

		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}
#endif
}

int aq_ring_rx_fill(struct aq_ring_s *self)
{
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
					   self->size / 2))
		return err;

	for (i = aq_ring_avail_dx(self); i--;
	     self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = self->frame_max;

		err = aq_get_rxpages(self, buff);
		if (err)
			goto err_exit;

		buff->pa = aq_buf_daddr(&buff->rxdata);
		buff = NULL;
	}

err_exit:
	return err;
}

void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		return;

	for (; self->sw_head != self->sw_tail;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
	}
}

void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	kfree(self->buff_ring);
	self->buff_ring = NULL;

	if (self->dx_ring) {
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);
		self->dx_ring = NULL;
	}
}

void aq_ring_hwts_rx_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	if (self->dx_ring) {
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size + AQ_CFG_RXDS_DEF,
				  self->dx_ring, self->dx_ring_pa);
		self->dx_ring = NULL;
	}
}

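/* Export per-ring counters for ethtool. The u64_stats_fetch_begin() /
 * u64_stats_fetch_retry() loop rereads the whole block if a writer updated
 * the counters mid-copy, and the filling order must stay in sync with
 * aq_ethtool_queue_rx_stat_names / aq_ethtool_queue_tx_stat_names.
 */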
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
{
	unsigned int count;
	unsigned int start;

	if (self->ring_type == ATL_RING_RX) {
		/* This data should mimic aq_ethtool_queue_rx_stat_names structure */
		do {
			count = 0;
			start = u64_stats_fetch_begin(&self->stats.rx.syncp);
			data[count] = self->stats.rx.packets;
			data[++count] = self->stats.rx.jumbo_packets;
			data[++count] = self->stats.rx.lro_packets;
			data[++count] = self->stats.rx.errors;
			data[++count] = self->stats.rx.alloc_fails;
			data[++count] = self->stats.rx.skb_alloc_fails;
			data[++count] = self->stats.rx.polls;
			data[++count] = self->stats.rx.pg_flips;
			data[++count] = self->stats.rx.pg_reuses;
			data[++count] = self->stats.rx.pg_losts;
			data[++count] = self->stats.rx.xdp_aborted;
			data[++count] = self->stats.rx.xdp_drop;
			data[++count] = self->stats.rx.xdp_pass;
			data[++count] = self->stats.rx.xdp_tx;
			data[++count] = self->stats.rx.xdp_invalid;
			data[++count] = self->stats.rx.xdp_redirect;
		} while (u64_stats_fetch_retry(&self->stats.rx.syncp, start));
	} else {
		/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
		do {
			count = 0;
			start = u64_stats_fetch_begin(&self->stats.tx.syncp);
			data[count] = self->stats.tx.packets;
			data[++count] = self->stats.tx.queue_restarts;
		} while (u64_stats_fetch_retry(&self->stats.tx.syncp, start));
	}

	return ++count;
}