mediatek: mt76-6e-usb: Copied entire code from v5.18.y
platform/kernel/linux-rpi.git: drivers/net/wireless/mediatek/mt76-6e-usb/dma.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

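/*
 * TXWI cache: every TX frame needs a driver-owned TXWI buffer. The buffer
 * is allocated and DMA-mapped once, with the mt76_txwi_cache bookkeeping
 * struct placed directly behind the TXWI data in the same allocation, and
 * is then recycled through dev->txwi_cache instead of being reallocated
 * for each frame.
 */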
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;
        dma_addr_t addr;
        u8 *txwi;
        int size;

        size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
        txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
        if (!txwi)
                return NULL;

        addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
                              DMA_TO_DEVICE);
        t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
        t->dma_addr = addr;

        return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = NULL;

        spin_lock(&dev->lock);
        if (!list_empty(&dev->txwi_cache)) {
                t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
                                     list);
                list_del(&t->list);
        }
        spin_unlock(&dev->lock);

        return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

        if (t)
                return t;

        return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        if (!t)
                return;

        spin_lock(&dev->lock);
        list_add(&t->list, &dev->txwi_cache);
        spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        local_bh_disable();
        while ((t = __mt76_get_txwi(dev)) != NULL)
                dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
                                 DMA_TO_DEVICE);
        local_bh_enable();
}

static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
        writel(q->desc_dma, &q->regs->desc_base);
        writel(q->ndesc, &q->regs->ring_size);
        q->head = readl(&q->regs->dma_idx);
        q->tail = q->head;
}

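/*
 * Reset a queue: mark every descriptor as completed so the cleanup path
 * ignores it, rewind the CPU and DMA indices to zero and re-program the
 * ring base/size registers via mt76_dma_sync_idx().
 */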
static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
        int i;

        if (!q || !q->ndesc)
                return;

        /* clear descriptors */
        for (i = 0; i < q->ndesc; i++)
                q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

        writel(0, &q->regs->cpu_idx);
        writel(0, &q->regs->dma_idx);
        mt76_dma_sync_idx(dev, q);
}

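/*
 * Allocate the descriptor ring (coherent DMA memory) and the matching
 * software entry array for one hardware queue, then bring the ring into
 * a clean initial state.
 */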
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
                     int idx, int n_desc, int bufsize,
                     u32 ring_base)
{
        int size;

        spin_lock_init(&q->lock);
        spin_lock_init(&q->cleanup_lock);

        q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
        q->ndesc = n_desc;
        q->buf_size = bufsize;
        q->hw_idx = idx;

        size = q->ndesc * sizeof(struct mt76_desc);
        q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
        if (!q->desc)
                return -ENOMEM;

        size = q->ndesc * sizeof(*q->entry);
        q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        mt76_dma_queue_reset(dev, q);

        return 0;
}

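/*
 * Write a list of already DMA-mapped buffers into the descriptor ring.
 * Each hardware descriptor carries up to two buffers (buf0/buf1); the skb
 * and txwi pointers are attached to the last entry used, so the cleanup
 * path can complete the frame once the hardware has consumed it.
 */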
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
                 struct mt76_queue_buf *buf, int nbufs, u32 info,
                 struct sk_buff *skb, void *txwi)
{
        struct mt76_queue_entry *entry;
        struct mt76_desc *desc;
        u32 ctrl;
        int i, idx = -1;

        if (txwi) {
                q->entry[q->head].txwi = DMA_DUMMY_DATA;
                q->entry[q->head].skip_buf0 = true;
        }

        for (i = 0; i < nbufs; i += 2, buf += 2) {
                u32 buf0 = buf[0].addr, buf1 = 0;

                idx = q->head;
                q->head = (q->head + 1) % q->ndesc;

                desc = &q->desc[idx];
                entry = &q->entry[idx];

                if (buf[0].skip_unmap)
                        entry->skip_buf0 = true;
                entry->skip_buf1 = i == nbufs - 1;

                entry->dma_addr[0] = buf[0].addr;
                entry->dma_len[0] = buf[0].len;

                ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
                if (i < nbufs - 1) {
                        entry->dma_addr[1] = buf[1].addr;
                        entry->dma_len[1] = buf[1].len;
                        buf1 = buf[1].addr;
                        ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
                        if (buf[1].skip_unmap)
                                entry->skip_buf1 = true;
                }

                if (i == nbufs - 1)
                        ctrl |= MT_DMA_CTL_LAST_SEC0;
                else if (i == nbufs - 2)
                        ctrl |= MT_DMA_CTL_LAST_SEC1;

                WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
                WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
                WRITE_ONCE(desc->info, cpu_to_le32(info));
                WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

                q->queued++;
        }

        q->entry[idx].txwi = txwi;
        q->entry[idx].skb = skb;
        q->entry[idx].wcid = 0xffff;

        return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                        struct mt76_queue_entry *prev_e)
{
        struct mt76_queue_entry *e = &q->entry[idx];

        if (!e->skip_buf0)
                dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
                                 DMA_TO_DEVICE);

        if (!e->skip_buf1)
                dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
                                 DMA_TO_DEVICE);

        if (e->txwi == DMA_DUMMY_DATA)
                e->txwi = NULL;

        if (e->skb == DMA_DUMMY_DATA)
                e->skb = NULL;

        *prev_e = *e;
        memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
        wmb();
        writel(q->head, &q->regs->cpu_idx);
}

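/*
 * Reclaim completed TX entries up to the hardware DMA index (or the whole
 * ring when flushing): unmap their buffers, report completion status and
 * return the TXWI to the cache.
 */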
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
        struct mt76_queue_entry entry;
        int last;

        if (!q || !q->ndesc)
                return;

        spin_lock_bh(&q->cleanup_lock);
        if (flush)
                last = -1;
        else
                last = readl(&q->regs->dma_idx);

        while (q->queued > 0 && q->tail != last) {
                mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
                mt76_queue_tx_complete(dev, q, &entry);

                if (entry.txwi) {
                        if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
                                mt76_put_txwi(dev, entry.txwi);
                }

                if (!flush && q->tail == last)
                        last = readl(&q->regs->dma_idx);
        }
        spin_unlock_bh(&q->cleanup_lock);

        if (flush) {
                spin_lock_bh(&q->lock);
                mt76_dma_sync_idx(dev, q);
                mt76_dma_kick_queue(dev, q);
                spin_unlock_bh(&q->lock);
        }

        if (!q->queued)
                wake_up(&dev->tx_wait);
}

static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
                 int *len, u32 *info, bool *more)
{
        struct mt76_queue_entry *e = &q->entry[idx];
        struct mt76_desc *desc = &q->desc[idx];
        dma_addr_t buf_addr;
        void *buf = e->buf;
        int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

        buf_addr = e->dma_addr[0];
        if (len) {
                u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
                *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
                *more = !(ctl & MT_DMA_CTL_LAST_SEC0);
        }

        if (info)
                *info = le32_to_cpu(desc->info);

        dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
        e->buf = NULL;

        return buf;
}

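/*
 * Pop the buffer at the tail of an RX ring once the hardware has marked
 * its descriptor as done (or unconditionally when flushing) and hand back
 * the length, info word and "more fragments" flag.
 */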
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
                 int *len, u32 *info, bool *more)
{
        int idx = q->tail;

        *more = false;
        if (!q->queued)
                return NULL;

        if (flush)
                q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
        else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
                return NULL;

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;

        return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
                          struct sk_buff *skb, u32 tx_info)
{
        struct mt76_queue_buf buf = {};
        dma_addr_t addr;

        if (q->queued + 1 >= q->ndesc - 1)
                goto error;

        addr = dma_map_single(dev->dev, skb->data, skb->len,
                              DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev->dev, addr)))
                goto error;

        buf.addr = addr;
        buf.len = skb->len;

        spin_lock_bh(&q->lock);
        mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
        mt76_dma_kick_queue(dev, q);
        spin_unlock_bh(&q->lock);

        return 0;

error:
        dev_kfree_skb(skb);
        return -ENOMEM;
}

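/*
 * Queue a data frame: grab a TXWI from the cache, map the skb head and all
 * of its fragments, let the driver fill in the TXWI via tx_prepare_skb()
 * and finally push the buffer list onto the hardware ring. On failure all
 * mappings are undone and the frame is reported back to mac80211.
 */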
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
                      struct sk_buff *skb, struct mt76_wcid *wcid,
                      struct ieee80211_sta *sta)
{
        struct ieee80211_tx_status status = {
                .sta = sta,
        };
        struct mt76_tx_info tx_info = {
                .skb = skb,
        };
        struct ieee80211_hw *hw;
        int len, n = 0, ret = -ENOMEM;
        struct mt76_txwi_cache *t;
        struct sk_buff *iter;
        dma_addr_t addr;
        u8 *txwi;

        t = mt76_get_txwi(dev);
        if (!t)
                goto free_skb;

        txwi = mt76_get_txwi_ptr(dev, t);

        skb->prev = skb->next = NULL;
        if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
                mt76_insert_hdr_pad(skb);

        len = skb_headlen(skb);
        addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev->dev, addr)))
                goto free;

        tx_info.buf[n].addr = t->dma_addr;
        tx_info.buf[n++].len = dev->drv->txwi_size;
        tx_info.buf[n].addr = addr;
        tx_info.buf[n++].len = len;

        skb_walk_frags(skb, iter) {
                if (n == ARRAY_SIZE(tx_info.buf))
                        goto unmap;

                addr = dma_map_single(dev->dev, iter->data, iter->len,
                                      DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev->dev, addr)))
                        goto unmap;

                tx_info.buf[n].addr = addr;
                tx_info.buf[n++].len = iter->len;
        }
        tx_info.nbuf = n;

        if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
                ret = -ENOMEM;
                goto unmap;
        }

        dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
                                DMA_TO_DEVICE);
        ret = dev->drv->tx_prepare_skb(dev, txwi, q->qid, wcid, sta, &tx_info);
        dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
                                   DMA_TO_DEVICE);
        if (ret < 0)
                goto unmap;

        return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
                                tx_info.info, tx_info.skb, t);

unmap:
        for (n--; n > 0; n--)
                dma_unmap_single(dev->dev, tx_info.buf[n].addr,
                                 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
        /* fix tx_done accounting on queue overflow */
        if (mt76_is_testmode_skb(dev, skb, &hw)) {
                struct mt76_phy *phy = hw->priv;

                if (tx_info.skb == phy->test.tx_skb)
                        phy->test.tx_done--;
        }
#endif

        mt76_put_txwi(dev, t);

free_skb:
        status.skb = tx_info.skb;
        hw = mt76_tx_status_get_hw(dev, tx_info.skb);
        ieee80211_tx_status_ext(hw, &status);

        return ret;
}

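/*
 * Keep an RX ring fully populated: allocate page-fragment buffers, map
 * them for the device and post them as single-buffer descriptors, then
 * kick the queue if anything was added.
 */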
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
        dma_addr_t addr;
        void *buf;
        int frames = 0;
        int len = SKB_WITH_OVERHEAD(q->buf_size);
        int offset = q->buf_offset;

        if (!q->ndesc)
                return 0;

        spin_lock_bh(&q->lock);

        while (q->queued < q->ndesc - 1) {
                struct mt76_queue_buf qbuf;

                buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
                if (!buf)
                        break;

                addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(dev->dev, addr))) {
                        skb_free_frag(buf);
                        break;
                }

                qbuf.addr = addr + offset;
                qbuf.len = len - offset;
                qbuf.skip_unmap = false;
                mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
                frames++;
        }

        if (frames)
                mt76_dma_kick_queue(dev, q);

        spin_unlock_bh(&q->lock);

        return frames;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
        struct page *page;
        void *buf;
        bool more;

        if (!q->ndesc)
                return;

        spin_lock_bh(&q->lock);
        do {
                buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
                if (!buf)
                        break;

                skb_free_frag(buf);
        } while (1);
        spin_unlock_bh(&q->lock);

        if (!q->rx_page.va)
                return;

        page = virt_to_page(q->rx_page.va);
        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
        memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
        struct mt76_queue *q = &dev->q_rx[qid];
        int i;

        if (!q->ndesc)
                return;

        for (i = 0; i < q->ndesc; i++)
                q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

        mt76_dma_rx_cleanup(dev, q);
        mt76_dma_sync_idx(dev, q);
        mt76_dma_rx_fill(dev, q);

        if (!q->rx_head)
                return;

        dev_kfree_skb(q->rx_head);
        q->rx_head = NULL;
}

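/*
 * Attach one more RX buffer to the frame currently being reassembled in
 * q->rx_head. Once the last fragment has arrived, the completed skb is
 * handed to the driver's rx_skb() callback.
 */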
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
                  int len, bool more)
{
        struct sk_buff *skb = q->rx_head;
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;

        if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
                struct page *page = virt_to_head_page(data);
                int offset = data - page_address(page) + q->buf_offset;

                skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
        } else {
                skb_free_frag(data);
        }

        if (more)
                return;

        q->rx_head = NULL;
        if (nr_frags < ARRAY_SIZE(shinfo->frags))
                dev->drv->rx_skb(dev, q - dev->q_rx, skb);
        else
                dev_kfree_skb(skb);
}

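/*
 * Main RX path: pull completed buffers off the ring, build skbs around
 * them (collecting multi-buffer frames via q->rx_head), pass them to the
 * driver and refill the ring before returning the number of processed
 * frames to the NAPI poll loop.
 */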
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
        int len, data_len, done = 0;
        struct sk_buff *skb;
        unsigned char *data;
        bool more;

        while (done < budget) {
                u32 info;

                data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
                if (!data)
                        break;

                if (q->rx_head)
                        data_len = q->buf_size;
                else
                        data_len = SKB_WITH_OVERHEAD(q->buf_size);

                if (data_len < len + q->buf_offset) {
                        dev_kfree_skb(q->rx_head);
                        q->rx_head = NULL;
                        goto free_frag;
                }

                if (q->rx_head) {
                        mt76_add_fragment(dev, q, data, len, more);
                        continue;
                }

                if (!more && dev->drv->rx_check &&
                    !(dev->drv->rx_check(dev, data, len)))
                        goto free_frag;

                skb = build_skb(data, q->buf_size);
                if (!skb)
                        goto free_frag;

                skb_reserve(skb, q->buf_offset);

                if (q == &dev->q_rx[MT_RXQ_MCU]) {
                        u32 *rxfce = (u32 *)skb->cb;
                        *rxfce = info;
                }

                __skb_put(skb, len);
                done++;

                if (more) {
                        q->rx_head = skb;
                        continue;
                }

                dev->drv->rx_skb(dev, q - dev->q_rx, skb);
                continue;

free_frag:
                skb_free_frag(data);
        }

        mt76_dma_rx_fill(dev, q);
        return done;
}

int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
        struct mt76_dev *dev;
        int qid, done = 0, cur;

        dev = container_of(napi->dev, struct mt76_dev, napi_dev);
        qid = napi - dev->napi;

        rcu_read_lock();

        do {
                cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
                mt76_rx_poll_complete(dev, qid, napi);
                done += cur;
        } while (cur && done < budget);

        rcu_read_unlock();

        if (done < budget && napi_complete(napi))
                dev->drv->rx_poll_complete(dev, qid);

        return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

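/*
 * Register one NAPI instance per RX queue on a dummy netdev, pre-fill the
 * RX rings and enable polling. Exposed to the chip drivers through the
 * .init hook of mt76_dma_ops below.
 */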
static int
mt76_dma_init(struct mt76_dev *dev,
              int (*poll)(struct napi_struct *napi, int budget))
{
        int i;

        init_dummy_netdev(&dev->napi_dev);
        init_dummy_netdev(&dev->tx_napi_dev);
        snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
                 wiphy_name(dev->hw->wiphy));
        dev->napi_dev.threaded = 1;

        mt76_for_each_q_rx(dev, i) {
                netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64);
                mt76_dma_rx_fill(dev, &dev->q_rx[i]);
                napi_enable(&dev->napi[i]);
        }

        return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
        .init = mt76_dma_init,
        .alloc = mt76_dma_alloc_queue,
        .reset_q = mt76_dma_queue_reset,
        .tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
        .tx_queue_skb = mt76_dma_tx_queue_skb,
        .tx_cleanup = mt76_dma_tx_cleanup,
        .rx_cleanup = mt76_dma_rx_cleanup,
        .rx_reset = mt76_dma_rx_reset,
        .kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
        dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
        int i;

        mt76_worker_disable(&dev->tx_worker);
        netif_napi_del(&dev->tx_napi);

        for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
                mt76_dma_tx_cleanup(dev, dev->phy.q_tx[i], true);
                if (dev->phy2)
                        mt76_dma_tx_cleanup(dev, dev->phy2->q_tx[i], true);
        }

        for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
                mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

        mt76_for_each_q_rx(dev, i) {
                netif_napi_del(&dev->napi[i]);
                mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
        }

        mt76_free_pending_txwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);