// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>

#include "octep_config.h"
#include "octep_main.h"
15 /* Reset various index of Tx queue data structure. */
16 static void octep_iq_reset_indices(struct octep_iq *iq)
19 iq->host_write_index = 0;
20 iq->octep_read_index = 0;
22 iq->pkts_processed = 0;
24 atomic_set(&iq->instr_pending, 0);
28 * octep_iq_process_completions() - Process Tx queue completions.
30 * @iq: Octeon Tx queue data structure.
31 * @budget: max number of completions to be processed in one invocation.
33 int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
35 u32 compl_pkts, compl_bytes, compl_sg;
36 struct octep_device *oct = iq->octep_dev;
37 struct octep_tx_buffer *tx_buffer;
38 struct skb_shared_info *shinfo;
39 u32 fi = iq->flush_index;
46 iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq);
48 while (likely(budget && (fi != iq->octep_read_index))) {
49 tx_buffer = iq->buff_info + fi;
53 if (unlikely(fi == iq->max_count))
55 compl_bytes += skb->len;
59 if (!tx_buffer->gather) {
60 dma_unmap_single(iq->dev, tx_buffer->dma,
61 tx_buffer->skb->len, DMA_TO_DEVICE);
62 dev_kfree_skb_any(skb);
67 shinfo = skb_shinfo(skb);
68 frags = shinfo->nr_frags;
71 dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
72 tx_buffer->sglist[0].len[0], DMA_TO_DEVICE);
74 i = 1; /* entry 0 is main skb, unmapped above */
76 dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
77 tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
81 dev_kfree_skb_any(skb);
84 iq->pkts_processed += compl_pkts;
85 atomic_sub(compl_pkts, &iq->instr_pending);
86 iq->stats.instr_completed += compl_pkts;
87 iq->stats.bytes_sent += compl_bytes;
88 iq->stats.sgentry_sent += compl_sg;
91 netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
93 if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
94 ((iq->max_count - atomic_read(&iq->instr_pending)) >
95 OCTEP_WAKE_QUEUE_THRESHOLD))
96 netif_wake_subqueue(iq->netdev, iq->q_no);
101 * octep_iq_free_pending() - Free Tx buffers for pending completions.
103 * @iq: Octeon Tx queue data structure.
105 static void octep_iq_free_pending(struct octep_iq *iq)
107 struct octep_tx_buffer *tx_buffer;
108 struct skb_shared_info *shinfo;
109 u32 fi = iq->flush_index;
113 while (fi != iq->host_write_index) {
114 tx_buffer = iq->buff_info + fi;
115 skb = tx_buffer->skb;
118 if (unlikely(fi == iq->max_count))
121 if (!tx_buffer->gather) {
122 dma_unmap_single(iq->dev, tx_buffer->dma,
123 tx_buffer->skb->len, DMA_TO_DEVICE);
124 dev_kfree_skb_any(skb);
129 shinfo = skb_shinfo(skb);
130 frags = shinfo->nr_frags;
132 dma_unmap_single(iq->dev,
133 tx_buffer->sglist[0].dma_ptr[0],
134 tx_buffer->sglist[0].len[0],
137 i = 1; /* entry 0 is main skb, unmapped above */
139 dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
140 tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
144 dev_kfree_skb_any(skb);
147 atomic_set(&iq->instr_pending, 0);
148 iq->flush_index = fi;
149 netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
153 * octep_clean_iqs() - Clean Tx queues to shutdown the device.
155 * @oct: Octeon device private data structure.
157 * Free the buffers in Tx queue descriptors pending completion and
158 * reset queue indices
160 void octep_clean_iqs(struct octep_device *oct)
164 for (i = 0; i < oct->num_iqs; i++) {
165 octep_iq_free_pending(oct->iq[i]);
166 octep_iq_reset_indices(oct->iq[i]);
171 * octep_setup_iq() - Setup a Tx queue.
173 * @oct: Octeon device private data structure.
174 * @q_no: Tx queue number to be setup.
176 * Allocate resources for a Tx queue.
178 static int octep_setup_iq(struct octep_device *oct, int q_no)
180 u32 desc_ring_size, buff_info_size, sglist_size;
184 iq = vzalloc(sizeof(*iq));
190 iq->netdev = oct->netdev;
191 iq->dev = &oct->pdev->dev;
193 iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
194 iq->ring_size_mask = iq->max_count - 1;
195 iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
196 iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
198 /* Allocate memory for hardware queue descriptors */
199 desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
200 iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
201 &iq->desc_ring_dma, GFP_KERNEL);
202 if (unlikely(!iq->desc_ring)) {
204 "Failed to allocate DMA memory for IQ-%d\n", q_no);
205 goto desc_dma_alloc_err;
208 /* Allocate memory for hardware SGLIST descriptors */
209 sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
210 CFG_GET_IQ_NUM_DESC(oct->conf);
211 iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
212 &iq->sglist_dma, GFP_KERNEL);
213 if (unlikely(!iq->sglist)) {
215 "Failed to allocate DMA memory for IQ-%d SGLIST\n",
217 goto sglist_alloc_err;
220 /* allocate memory to manage Tx packets pending completion */
221 buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count;
222 iq->buff_info = vzalloc(buff_info_size);
223 if (!iq->buff_info) {
225 "Failed to allocate buff info for IQ-%d\n", q_no);
229 /* Setup sglist addresses in tx_buffer entries */
230 for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
231 struct octep_tx_buffer *tx_buffer;
233 tx_buffer = &iq->buff_info[i];
235 &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT];
236 tx_buffer->sglist_dma =
237 iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT);
240 octep_iq_reset_indices(iq);
241 oct->hw_ops.setup_iq_regs(oct, q_no);
247 dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
249 dma_free_coherent(iq->dev, desc_ring_size,
250 iq->desc_ring, iq->desc_ring_dma);
253 oct->iq[q_no] = NULL;
259 * octep_free_iq() - Free Tx queue resources.
261 * @iq: Octeon Tx queue data structure.
263 * Free all the resources allocated for a Tx queue.
265 static void octep_free_iq(struct octep_iq *iq)
267 struct octep_device *oct = iq->octep_dev;
268 u64 desc_ring_size, sglist_size;
271 desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
273 vfree(iq->buff_info);
276 dma_free_coherent(iq->dev, desc_ring_size,
277 iq->desc_ring, iq->desc_ring_dma);
279 sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
280 CFG_GET_IQ_NUM_DESC(oct->conf);
282 dma_free_coherent(iq->dev, sglist_size,
283 iq->sglist, iq->sglist_dma);
286 oct->iq[q_no] = NULL;
291 * octep_setup_iqs() - setup resources for all Tx queues.
293 * @oct: Octeon device private data structure.
295 int octep_setup_iqs(struct octep_device *oct)
300 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
301 if (octep_setup_iq(oct, i)) {
302 dev_err(&oct->pdev->dev,
303 "Failed to setup IQ(TxQ)-%d.\n", i);
306 dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
314 octep_free_iq(oct->iq[i]);
320 * octep_free_iqs() - Free resources of all Tx queues.
322 * @oct: Octeon device private data structure.
324 void octep_free_iqs(struct octep_device *oct)
328 for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
329 octep_free_iq(oct->iq[i]);
330 dev_dbg(&oct->pdev->dev,
331 "Successfully destroyed IQ(TxQ)-%d.\n", i);