1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /* Atlantic Network Driver
4 * Copyright (C) 2014-2019 aQuantia Corporation
5 * Copyright (C) 2019-2020 Marvell International Ltd.
8 /* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
13 #include "aq_common.h"
/* Headroom reserved in front of each RX buffer when XDP is in use:
 * the larger of NET_SKB_PAD and XDP_PACKET_HEADROOM, rounded up to 8 bytes.
 */
#define AQ_XDP_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
/* Tailroom reserved behind the data so an skb_shared_info can be placed there. */
#define AQ_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
30 * +----------+----------+----------+-----------
31 * 8bytes|len l3,l4 | pa | pa | pa
32 * +----------+----------+----------+-----------
33 * 4/8bytes|len pkt |len pkt | | skb
34 * +----------+----------+----------+-----------
35 * 4/8bytes|is_gso |len,flags |len |len,is_eop
36 * +----------+----------+----------+-----------
38 * This aq_ring_buff_s doesn't have endianness dependency.
39 * It is __packed for cache line optimizations.
/* Per-descriptor software context (one entry per HW descriptor).
 * __packed keeps the entry small; see the layout diagram above.
 */
struct __packed aq_ring_buff_s {
	/* Receive page backing this descriptor (RX rings). */
	struct aq_rxpage rxdata;
	/* XDP frame associated with this descriptor -- NOTE(review):
	 * presumably completed/freed once HW is done; verify in aq_ring.c.
	 */
	struct xdp_frame *xdpf;
/* Software-maintained RX counters for one ring. */
struct aq_ring_stats_rx_s {
	struct u64_stats_sync syncp;	/* must be first */
/* Software-maintained TX counters for one ring. */
struct aq_ring_stats_tx_s {
	struct u64_stats_sync syncp;	/* must be first */
/* A ring is either RX or TX, so the two counter blocks overlay each other. */
union aq_ring_stats_s {
	struct aq_ring_stats_rx_s rx;
	struct aq_ring_stats_tx_s tx;
	/* Per-descriptor SW state, parallel to the HW descriptor ring. */
	struct aq_ring_buff_s *buff_ring;
	u8 *dx_ring;		/* descriptors ring, dma shared mem */
	struct aq_nic_s *aq_nic;	/* owning NIC instance */
	unsigned int idx;	/* for HW layer registers operations */
	/* hw_head: head index mirrored from HW -- NOTE(review): exact update
	 * point is in aq_ring.c; verify there.
	 */
	unsigned int hw_head;
	/* sw_head/sw_tail: SW consumer/producer indices; free-space math is
	 * done in aq_ring_avail_dx() below.
	 */
	unsigned int sw_head;
	unsigned int sw_tail;
	unsigned int size; /* descriptors number */
	unsigned int dx_size; /* TX or RX descriptor size, */
			      /* stored here for faster math */
	union aq_ring_stats_s stats;	/* RX or TX counters per ring_type */
	dma_addr_t dx_ring_pa;	/* DMA address of dx_ring */
	struct bpf_prog *xdp_prog;	/* attached XDP program, if any */
	enum atl_ring_type ring_type;
	struct xdp_rxq_info xdp_rxq;
/* Parameters binding a ring to an interrupt vector and CPU set. */
struct aq_ring_param_s {
	unsigned int vec_idx;	/* interrupt vector index for this ring */
	/* NOTE(review): presumably the CPUs this ring's IRQ may run on;
	 * confirm against the vector setup code.
	 */
	cpumask_t affinity_mask;
163 static inline void *aq_buf_vaddr(struct aq_rxpage *rxpage)
165 return page_to_virt(rxpage->page) + rxpage->pg_off;
168 static inline dma_addr_t aq_buf_daddr(struct aq_rxpage *rxpage)
170 return rxpage->daddr + rxpage->pg_off;
173 static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
176 return (++dx >= self->size) ? 0U : dx;
179 static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
181 return (((self->sw_tail >= self->sw_head)) ?
182 (self->size - 1) - self->sw_tail + self->sw_head :
183 self->sw_head - self->sw_tail - 1);
/* Ring lifecycle: allocation, init, teardown (implemented in aq_ring.c). */
struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   struct aq_nic_cfg_s *aq_nic_cfg);
struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   struct aq_nic_cfg_s *aq_nic_cfg);
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);

/* TX queue flow control and completion processing. */
void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
bool aq_ring_tx_clean(struct aq_ring_s *self);

/* XDP transmit entry point (ndo_xdp_xmit-style signature). */
int aq_xdp_xmit(struct net_device *dev, int num_frames,
		struct xdp_frame **frames, u32 flags);

/* RX processing and refill. */
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
int aq_ring_rx_fill(struct aq_ring_s *self);

/* Dedicated HW-timestamp RX ring -- NOTE(review): presumably for PTP;
 * confirm against the callers.
 */
struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
					struct aq_nic_s *aq_nic, unsigned int idx,
					unsigned int size, unsigned int dx_size);
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);

/* Copy this ring's counters into @data; returns the number of u64s written
 * -- NOTE(review): verify return semantics in aq_ring.c.
 */
unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
217 #endif /* AQ_RING_H */