/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
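/* Common allocation path shared by the Tx and Rx constructors below:
 * a host-side array of aq_ring_buff_s entries tracks per-descriptor
 * state, while the descriptor ring itself lives in coherent DMA memory
 * so the NIC and the CPU share one consistent view of it.
 */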
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);
	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}
	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring)
		err = -ENOMEM;

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}
	return self;
}
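/* Bind a Tx ring to its owning NIC and queue index, size it from the
 * configured Tx descriptor count and the hardware's per-descriptor
 * size, then hand off to the common allocator above.
 */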
struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);
	return self;
}
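/* Rx counterpart of the constructor above; identical except that the
 * ring is sized from the Rx descriptor count and descriptor size.
 */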
struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;

	self = aq_ring_alloc(self, aq_nic);
	return self;
}
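/* Reset the software and hardware ring indices so producer and
 * consumer start in sync, and (re)initialize the ring lock.
 */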
int aq_ring_init(struct aq_ring_s *self)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;
	spin_lock_init(&self->header.lock);
	return 0;
}
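/* Reclaim completed Tx descriptors: walk from sw_head up to the
 * position the hardware has reported as done (hw_head), unmapping the
 * DMA for each fragment and freeing the skb once its last fragment
 * (EOP) is reached.
 */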
void aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);

	for (; self->sw_head != self->hw_head;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop))
				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
		}

		if (unlikely(buff->is_eop))
			dev_kfree_skb_any(buff->skb);
	}
}
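/* Check whether index i lies strictly between head h and tail t on a
 * circular ring, handling wrap-around. For example, on an 8-entry ring
 * with h = 6 and t = 2 (wrapped), i = 7, 0 or 1 is in range while
 * i = 3 is not.
 */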
static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
					       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}
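/* Tailroom that build_skb() requires at the end of the data buffer for
 * the struct skb_shared_info it places there.
 */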
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
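/* Rx cleanup, driven from the polling path: consume received
 * descriptors up to hw_head or until the budget is exhausted.
 * Single-descriptor packets are wrapped with build_skb(); multi-
 * descriptor (RSC-aggregated) packets get the Ethernet header copied
 * into a fresh skb and the payload attached as page fragments.
 */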
int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	int err = 0;
	bool is_rsc_completed = true;

	for (; (self->sw_head != self->hw_head) && budget;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int i = 0U;
		struct aq_ring_buff_s *buff_ = NULL;

		if (buff->is_error) {
			__free_pages(buff->page, 0);
			goto err_exit;
		}

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			/* Postpone the packet unless its whole RSC chain
			 * has already landed within hw_head range.
			 */
			for (next_ = buff->next,
			     buff_ = &self->buff_ring[next_]; true;
			     next_ = buff_->next,
			     buff_ = &self->buff_ring[next_]) {
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);

				if (unlikely(!is_rsc_completed)) {
					is_rsc_completed = false;
					break;
				}

				if (buff_->is_eop)
					break;
			}

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
		}

		/* for single fragment packets use build_skb() */
		if (buff->is_eop) {
			skb = build_skb(page_address(buff->page),
					buff->len + AQ_SKB_ALIGN);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			skb_put(skb, buff->len);
		} else {
			skb = netdev_alloc_skb(ndev, ETH_HLEN);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			skb_put(skb, ETH_HLEN);
			memcpy(skb->data, page_address(buff->page), ETH_HLEN);

			skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
					buff->len - ETH_HLEN,
					SKB_TRUESIZE(buff->len - ETH_HLEN));

			for (i = 1U, next_ = buff->next,
			     buff_ = &self->buff_ring[next_]; true;
			     next_ = buff_->next,
			     buff_ = &self->buff_ring[next_], ++i) {
				skb_add_rx_frag(skb, i, buff_->page, 0,
						buff_->len,
						SKB_TRUESIZE(buff->len -
							     ETH_HLEN));
				buff_->is_cleaned = 1;

				if (buff_->is_eop)
					break;
			}
		}

		skb->protocol = eth_type_trans(skb, ndev);
		if (unlikely(buff->is_cso_err)) {
			++self->stats.rx.errors;
			skb->ip_summed = CHECKSUM_NONE;
		} else {
			if (buff->is_ip_cso) {
				__skb_incr_checksum_unnecessary(skb);
				if (buff->is_udp_cso || buff->is_tcp_cso)
					__skb_incr_checksum_unnecessary(skb);
			} else {
				skb->ip_summed = CHECKSUM_NONE;
			}
		}

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);

		skb_record_rx_queue(skb, self->idx);

		netif_receive_skb(skb);

		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;
	}

err_exit:
	return err;
}
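/* Refill the Rx ring: for every free descriptor, allocate a page block
 * large enough for one maximum-size frame, DMA-map it for device
 * writes, and advance sw_tail so the hardware can use it.
 */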
int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
		(AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	for (i = aq_ring_avail_dx(self); i--;
	     self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
					 __GFP_COMP, pages_order);
		if (!buff->page) {
			err = -ENOMEM;
			goto err_exit;
		}

		buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
					buff->page, 0,
					AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
		if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
			err = -ENOMEM;
			goto err_exit;
		}

		buff = NULL;
	}

err_exit:
	if (err < 0) {
		if (buff && buff->page)
			__free_pages(buff->page, 0);
	}
	return err;
}
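/* Release every buffer still owned by the Rx ring: unmap its DMA
 * mapping and free the backing pages.
 */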
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		return;

	for (; self->sw_head != self->sw_tail;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
			       AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);

		__free_pages(buff->page, 0);
	}
}
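/* Free the host buffer-tracking array and, if it was allocated, the
 * coherent DMA descriptor ring. Safe to call on a partially
 * constructed ring, which is why the allocator uses it on failure.
 */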
void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);
}
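
/* A rough sketch of the expected call order from the rest of the
 * driver (an assumption based on the symmetry of the API above, not on
 * code shown in this file): aq_ring_tx_alloc()/aq_ring_rx_alloc() and
 * aq_ring_init() at setup, aq_ring_rx_fill() to arm the Rx ring,
 * aq_ring_rx_clean()/aq_ring_tx_clean() from the polling path, and
 * aq_ring_rx_deinit() followed by aq_ring_free() on teardown.
 */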