// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"
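
/*
 * txwi cache: every TX frame needs a hardware TX descriptor (txwi) that is
 * DMA-mapped once at allocation time and then recycled through
 * dev->txwi_cache instead of being remapped for each packet.
 */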
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}
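
/*
 * __mt76_get_txwi() pops a cached entry from dev->txwi_cache;
 * mt76_get_txwi() falls back to allocating a fresh one when the list is
 * empty, and mt76_put_txwi() returns an entry to the cache.
 */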
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
}
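
/*
 * Ring bookkeeping: mt76_dma_sync_idx() reprograms the descriptor base and
 * ring size registers and resynchronizes the software head/tail pointers
 * with the hardware DMA index; mt76_dma_queue_reset() additionally marks
 * every descriptor as done and zeroes both index registers.
 */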
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	writel(q->desc_dma, &q->regs->desc_base);
	writel(q->ndesc, &q->regs->ring_size);
	q->head = readl(&q->regs->dma_idx);
	q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	if (!q || !q->ndesc)
		return;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	writel(0, &q->regs->cpu_idx);
	writel(0, &q->regs->dma_idx);
	mt76_dma_sync_idx(dev, q);
}
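
/*
 * mt76_dma_alloc_queue() sets up one hardware ring: it maps the ring
 * registers, allocates the coherent descriptor array and the per-descriptor
 * software entry array, then resets the ring.
 */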
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	mt76_dma_queue_reset(dev, q);

	return 0;
}
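
/*
 * mt76_dma_add_buf() writes one or two scatter buffers per hardware
 * descriptor: SD_LEN0/SD_LEN1 carry the buffer lengths and LAST_SEC0/1 mark
 * the final segment of a frame. The matching queue entry records the DMA
 * addresses and lengths so they can be unmapped on completion.
 */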
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;

			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}
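
/*
 * mt76_dma_tx_cleanup_idx() unmaps the buffers of a completed TX descriptor
 * (honouring skip_buf0/skip_buf1) and hands a copy of the entry back to the
 * caller before clearing the slot for reuse.
 */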
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);
	if (!e->skip_buf1)
		dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;
	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	writel(q->head, &q->regs->cpu_idx);
}
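
/*
 * mt76_dma_tx_cleanup() reaps completed TX descriptors up to the hardware
 * DMA index, recycling each frame's txwi unless the driver keeps it
 * (MT_DRV_TXWI_NO_FREE). With flush set it drains the whole ring and
 * resynchronizes it afterwards.
 */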
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = readl(&q->regs->dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = readl(&q->regs->dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!flush)
		wake_up(&dev->tx_wait);
}
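
/*
 * RX side: mt76_dma_get_buf() unmaps and returns the buffer behind one RX
 * descriptor, extracting its length and the "more fragments" flag from the
 * ctrl word; mt76_dma_dequeue() only advances the tail once the hardware has
 * set MT_DMA_CTL_DMA_DONE (or unconditionally when flushing).
 */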
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = e->dma_addr[0];
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}
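
/*
 * mt76_dma_tx_queue_skb_raw() pushes a pre-built frame (typically an MCU
 * message) to a queue as a single buffer, without going through the driver's
 * tx_prepare_skb() path.
 */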
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}
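
/*
 * mt76_dma_tx_queue_skb() is the main TX path: it grabs a txwi, DMA-maps the
 * skb head and all fragments, lets the driver fill the txwi via
 * tx_prepare_skb(), and finally queues the buffer list. On failure the
 * mappings are undone and the frame is completed with an empty status.
 */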
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	ieee80211_tx_status_ext(hw, &status);

	return ret;
}
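
/*
 * mt76_dma_rx_fill() refills an RX ring with page-fragment buffers until it
 * is full (ndesc - 1 entries), then kicks the hardware if anything was added.
 */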
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		qbuf.skip_unmap = false;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}
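
/*
 * mt76_dma_rx_cleanup() drains all pending RX buffers and releases the
 * page-fragment cache backing the ring; mt76_dma_rx_reset() builds on it to
 * bring a ring back to a freshly-filled state.
 */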
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	if (!q->ndesc)
		return;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);

	if (!q->rx_head)
		return;

	dev_kfree_skb(q->rx_head);
	q->rx_head = NULL;
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		skb_free_frag(data);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	else
		dev_kfree_skb(skb);
}
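
/*
 * mt76_dma_rx_process() is the NAPI receive loop for one ring: complete
 * frames are wrapped in an skb with build_skb() and handed to the driver's
 * rx_skb() hook, while multi-buffer frames are accumulated in q->rx_head and
 * finished by mt76_add_fragment(). The ring is refilled before returning.
 */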
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0;
	struct sk_buff *skb;
	unsigned char *data;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *)skb->cb;
			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
		continue;

free_frag:
		skb_free_frag(data);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}
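
/*
 * mt76_dma_rx_poll() is the NAPI poll callback shared by the RX queues; the
 * queue id is derived from the position of the napi context in dev->napi[].
 */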
int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);
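
/*
 * mt76_dma_init() registers the RX NAPI contexts on a dummy netdev, fills
 * the RX rings, and enables polling.
 */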
static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}
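
/*
 * Queue operations exposed to the rest of mt76. Drivers do not call these
 * functions directly; they go through dev->queue_ops, typically via the
 * mt76_queue_* wrapper macros in mt76.h.
 */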
static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
		mt76_dma_tx_cleanup(dev, dev->phy.q_tx[i], true);
		if (dev->phy2)
			mt76_dma_tx_cleanup(dev, dev->phy2->q_tx[i], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}

	mt76_free_pending_txwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);