// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

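/* Issue a synchronous vendor control request on endpoint 0, retrying up to
 * MT_VEND_REQ_MAX_RETRY times with a 5-10ms pause between attempts. A device
 * that returned -ENODEV is flagged MT76_REMOVED so later requests fail fast.
 * The caller must hold usb_ctrl_mtx.
 */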
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;
	int i, ret;

	lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->phy.state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(__mt76u_vendor_request);

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	int ret;

	ret = __mt76u_vendor_request(dev, req, req_type, addr >> 16,
				     addr, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}
EXPORT_SYMBOL_GPL(___mt76u_rr);

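/* Map the type bits of the register offset (MT_VEND_TYPE_MASK) to the
 * matching vendor command before issuing the read.
 */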
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}

	return ___mt76u_rr(dev, req, USB_DIR_IN | USB_TYPE_VENDOR,
			   addr & ~MT_VEND_TYPE_MASK);
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req, req_type, addr >> 16,
			       addr, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}
EXPORT_SYMBOL_GPL(___mt76u_wr);

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}

	___mt76u_wr(dev, req, USB_DIR_OUT | USB_TYPE_VENDOR,
		    addr & ~MT_VEND_TYPE_MASK, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

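/* Burst-write an arbitrary buffer through the shared usb->data bounce buffer,
 * at most usb->data_len bytes per vendor request.
 */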
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u8 *val = data;
	int ret;
	int current_batch_size;
	int i = 0;

	/* Ensure a multiple of 4 bytes is always copied,
	 * otherwise beacons can be corrupted.
	 * See: "mt76: round up length on mt76_wr_copy"
	 * Commit 850e8f6fbd5d0003b0
	 */
	len = round_up(len, 4);

	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		current_batch_size = min_t(int, usb->data_len, len - i);
		memcpy(usb->data, val + i, current_batch_size);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i, usb->data,
					     current_batch_size);
		if (ret < 0)
			break;

		i += current_batch_size;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	int i = 0, batch_len, ret;
	u8 *val = data;

	len = round_up(len, 4);
	mutex_lock(&usb->usb_ctrl_mtx);
	while (i < len) {
		batch_len = min_t(int, usb->data_len, len - i);
		ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
					     USB_DIR_IN | USB_TYPE_VENDOR,
					     (offset + i) >> 16, offset + i,
					     usb->data, batch_len);
		if (ret < 0)
			break;

		memcpy(val + i, usb->data, batch_len);
		i += batch_len;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_read_copy);

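/* Write a 32-bit value as two 16-bit vendor requests: the low half-word at
 * @offset, the high half-word at @offset + 2.
 */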
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data, int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		__mt76u_wr(dev, base + data->reg, data->value);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
	    const struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
	else
		return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
		int len)
{
	struct mt76_usb *usb = &dev->usb;

	mutex_lock(&usb->usb_ctrl_mtx);
	while (len > 0) {
		data->value = __mt76u_rr(dev, base + data->reg);
		len--;
		data++;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);

	return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
	    struct mt76_reg_pair *data, int n)
{
	if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
		return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
	else
		return mt76u_req_rd_rp(dev, base, data, n);
}

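/* Scatter-gather is usable only if it is not disabled by the module
 * parameter, the host controller supports it, and the controller either has
 * no constraint on SG element size or the link is wireless USB.
 */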
static bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);

	return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

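/* Walk the current altsetting and record the bulk IN/OUT endpoint numbers;
 * fail if the interface exposes fewer endpoints than the driver expects.
 */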
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

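/* Attach up to @nsgs page fragments from the rx page pool to the urb
 * scatterlist. On a partial allocation any leftover fragments beyond the
 * filled entries are released. Returns the number of filled entries, or
 * -ENOMEM if none could be allocated.
 */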
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
		 int nsgs, gfp_t gfp)
{
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, q->buf_size, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
		struct urb *urb, int nsgs, gfp_t gfp)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];

	if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
		return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

	urb->transfer_buffer_length = q->buf_size;
	urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

	return urb->transfer_buffer ? 0 : -ENOMEM;
}

static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
		int sg_max_size)
{
	unsigned int size = sizeof(struct urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		size += sg_max_size * sizeof(struct scatterlist);

	e->urb = kzalloc(size, GFP_KERNEL);
	if (!e->urb)
		return -ENOMEM;

	usb_init_urb(e->urb);

	if (dev->usb.sg_en && sg_max_size > 0)
		e->urb->sg = (struct scatterlist *)(e->urb + 1);

	return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
		   struct mt76_queue_entry *e)
{
	enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int err, sg_size;

	sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
	err = mt76u_urb_alloc(dev, e, sg_size);
	if (err)
		return err;

	return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
	int i;

	for (i = 0; i < urb->num_sgs; i++)
		skb_free_frag(sg_virt(&urb->sg[i]));

	if (urb->transfer_buffer)
		skb_free_frag(urb->transfer_buffer);

	usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
		    struct urb *urb, usb_complete_t complete_fn,
		    void *context)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	urb->dev = udev;
	urb->pipe = pipe;
	urb->complete = complete_fn;
	urb->context = context;
}

static struct urb *
mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct urb *urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		urb = q->entry[q->tail].urb;
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return urb;
}

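/* Extract the DMA length prepended by the hardware and sanity-check it
 * against the actual transfer length; returns -EINVAL on malformed input.
 */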
static int
mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
		       u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
		return dma_len;

	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
	if (data_len < min_len || !dma_len ||
	    dma_len + MT_DMA_HDR_LEN > data_len ||
	    (dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

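/* Build an skb around a received buffer: if the payload plus skb_shared_info
 * fits in the buffer, wrap it zero-copy with build_skb(); otherwise copy the
 * first MT_SKB_HEAD_LEN bytes and attach the remainder as a page fragment.
 */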
static struct sk_buff *
mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
		   int len, int buf_size)
{
	int head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
		struct page *page;

		/* slow path, not enough space for data and
		 * skb_shared_info
		 */
		skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
		if (!skb)
			return NULL;

		skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
		data += head_room + MT_SKB_HEAD_LEN;
		page = virt_to_head_page(data);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page, data - page_address(page),
				len - MT_SKB_HEAD_LEN, buf_size);

		return skb;
	}

	skb = build_skb(data, buf_size);
	if (!skb)
		return NULL;

	skb_reserve(skb, head_room);
	__skb_put(skb, len);

	return skb;
}

static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
		       int buf_size)
{
	u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
	int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
	int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
		return 0;

	len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
	if (len < 0)
		return 0;

	head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
	data_len = min_t(int, len, data_len - head_room);

	if (len == data_len &&
	    dev->drv->rx_check && !dev->drv->rx_check(dev, data, data_len))
		return 0;

	skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
	if (!skb)
		return 0;

	len -= data_len;
	while (len > 0 && nsgs < urb->num_sgs) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset, data_len,
				buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue *q = urb->context;
	unsigned long flags;

	trace_rx_urb(dev, urb);

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
	case -EPROTO:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
		goto out;

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;
	mt76_worker_schedule(&dev->usb.rx_worker);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
		    struct urb *urb)
{
	int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;

	mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
			    mt76u_complete_rx, &dev->q_rx[qid]);
	trace_submit_urb(dev, urb);

	return usb_submit_urb(urb, GFP_ATOMIC);
}

static void
mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	struct urb *urb;
	int err, count;

	while (true) {
		urb = mt76u_get_next_rx_entry(q);
		if (!urb)
			break;

		count = mt76u_process_rx_entry(dev, urb, q->buf_size);
		if (count > 0) {
			err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
			if (err < 0)
				break;
		}
		mt76u_submit_rx_buf(dev, qid, urb);
	}
	if (qid == MT_RXQ_MAIN) {
		/* flush */
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
	}
}

static void mt76u_rx_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, rx_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	int i;

	rcu_read_lock();
	mt76_for_each_q_rx(dev, i)
		mt76u_process_rx_queue(dev, &dev->q_rx[i]);
	rcu_read_unlock();
}

static int
mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}

static int
mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i, err;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->buf_size = PAGE_SIZE;

	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev, qid);
}

int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
{
	return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);

static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	int i;

	for (i = 0; i < q->ndesc; i++) {
		if (!q->entry[i].urb)
			continue;

		mt76u_urb_free(q->entry[i].urb);
		q->entry[i].urb = NULL;
	}

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i)
		mt76u_free_rx_queue(dev, &dev->q_rx[i]);
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->usb.rx_worker);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++)
			usb_poison_urb(q->entry[j].urb);
	}
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
	int i;

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int err, j;

		for (j = 0; j < q->ndesc; j++)
			usb_unpoison_urb(q->entry[j].urb);

		err = mt76u_submit_rx_buffers(dev, i);
		if (err < 0)
			return err;
	}

	mt76_worker_enable(&dev->usb.rx_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

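/* Reap completed tx urbs on the AC queues, wake tx_wait waiters and kick the
 * tx worker; defer the tx-status request to stat_work if the driver
 * implements tx_status_data.
 */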
static void mt76u_status_worker(struct mt76_worker *w)
{
	struct mt76_usb *usb = container_of(w, struct mt76_usb, status_worker);
	struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
	struct mt76_queue_entry entry;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		while (q->queued > 0) {
			if (!q->entry[q->tail].done)
				break;

			entry = q->entry[q->tail];
			q->entry[q->tail].done = false;

			mt76_queue_tx_complete(dev, q, &entry);
		}
	}

	wake_up(&dev->tx_wait);

	mt76_worker_schedule(&dev->tx_worker);

	if (dev->drv->tx_status_data &&
	    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
		queue_work(dev->wq, &dev->usb.stat_work);
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &usb->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
	struct mt76_queue_entry *e = urb->context;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	e->done = true;

	mt76_worker_schedule(&dev->usb.status_worker);
}

static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
		       struct urb *urb)
{
	urb->transfer_buffer_length = skb->len;

	if (!dev->usb.sg_en) {
		urb->transfer_buffer = skb->data;
		return 0;
	}

	sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
	urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
	if (!urb->num_sgs)
		return -ENOMEM;

	return urb->num_sgs;
}

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   enum mt76_txq_id qid, struct sk_buff *skb,
		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	u16 idx = q->head;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
	if (err < 0)
		return err;

	mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
			    q->entry[idx].urb, mt76u_complete_tx,
			    &q->entry[idx]);

	q->head = (q->head + 1) % q->ndesc;
	q->entry[idx].skb = tx_info.skb;
	q->entry[idx].wcid = 0xffff;
	q->queued++;

	return idx;
}

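/* Submit every urb queued between q->first and q->head; on -ENODEV the
 * device is flagged MT76_REMOVED and submission stops.
 */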
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct urb *urb;
	int err;

	while (q->first != q->head) {
		urb = q->entry[q->first].urb;

		trace_submit_urb(dev, urb);
		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->phy.state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

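/* mt7663 uses its own AC-to-LMAC queue mapping; all other chips use the
 * generic mt76_ac_to_hwq() mapping.
 */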
static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
{
	if (mt76_chip(dev) == 0x7663) {
		static const u8 lmac_queue_map[] = {
			/* ac to lmac mapping */
			[IEEE80211_AC_BK] = 0,
			[IEEE80211_AC_BE] = 1,
			[IEEE80211_AC_VI] = 2,
			[IEEE80211_AC_VO] = 4,
		};

		if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
			return 1; /* BE */

		return lmac_queue_map[ac];
	}

	return mt76_ac_to_hwq(ac);
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j, err;

	for (i = 0; i <= MT_TXQ_PSD; i++) {
		if (i >= IEEE80211_NUM_ACS) {
			dev->phy.q_tx[i] = dev->phy.q_tx[0];
			continue;
		}

		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = mt76u_ac_to_hwq(dev, i);

		dev->phy.q_tx[i] = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			err = mt76u_urb_alloc(dev, &q->entry[j],
					      MT_TX_SG_MAX_SIZE);
			if (err < 0)
				return err;
		}
	}

	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	int i;

	mt76_worker_teardown(&dev->usb.status_worker);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		struct mt76_queue *q;
		int j;

		q = dev->phy.q_tx[i];
		if (!q)
			continue;

		for (j = 0; j < q->ndesc; j++) {
			usb_free_urb(q->entry[j].urb);
			q->entry[j].urb = NULL;
		}
	}
}

void mt76u_stop_tx(struct mt76_dev *dev)
{
	int ret;

	mt76_worker_disable(&dev->usb.status_worker);

	ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
				 HZ / 5);
	if (!ret) {
		struct mt76_queue_entry entry;
		struct mt76_queue *q;
		int i, j;

		dev_err(dev->dev, "timed out waiting for pending tx\n");

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			for (j = 0; j < q->ndesc; j++)
				usb_kill_urb(q->entry[j].urb);
		}

		mt76_worker_disable(&dev->tx_worker);

		/* On device removal we might queue skbs, but mt76u_tx_kick()
		 * will fail to submit the urb, so clean up those skbs
		 * manually.
		 */
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			q = dev->phy.q_tx[i];
			if (!q)
				continue;

			while (q->queued > 0) {
				entry = q->entry[q->tail];
				q->entry[q->tail].done = false;
				mt76_queue_tx_complete(dev, q, &entry);
			}
		}

		mt76_worker_enable(&dev->tx_worker);
	}

	cancel_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_worker_enable(&dev->usb.status_worker);

	mt76_tx_status_check(dev, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

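/* Common USB initialization: allocate the control bounce buffer, install the
 * bus and queue ops, detect scatter-gather support, resolve the bulk
 * endpoints and start the rx and status workers.
 */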
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	int err;

	INIT_WORK(&usb->stat_work, mt76u_tx_status_data);

	usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0));
	if (usb->data_len < 32)
		usb->data_len = 32;

	usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
	if (!usb->data)
		return -ENOMEM;

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = ops;
	dev->queue_ops = &usb_queue_ops;

	dev_set_drvdata(&udev->dev, dev);

	usb->sg_en = mt76u_check_sg(dev);

	err = mt76u_set_endpoints(intf, usb);
	if (err < 0)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->rx_worker, mt76u_rx_worker,
				"usb-rx");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &usb->status_worker,
				mt76u_status_worker, "usb-status");
	if (err)
		return err;

	sched_set_fifo_low(usb->rx_worker.task);
	sched_set_fifo_low(usb->status_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(__mt76u_init);

int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf)
{
	static struct mt76_bus_ops bus_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.read_copy = mt76u_read_copy,
		.write_copy = mt76u_copy,
		.wr_rp = mt76u_wr_rp,
		.rd_rp = mt76u_rd_rp,
		.type = MT76_BUS_USB,
	};

	return __mt76u_init(dev, intf, &bus_ops);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");