// SPDX-License-Identifier: GPL-2.0
/*
 * mtu3_qmu.c - Queue Management Unit driver for device controller
 *
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
 */

/*
 * The Queue Management Unit (QMU) is designed to offload SW effort
 * when serving DMA interrupts.
 * SW prepares General Purpose Descriptors (GPD) and Buffer Descriptors
 * (BD) that link the data buffers, then triggers QMU to send data to
 * the host / receive data from the device.
 * Currently only GPD is supported.
 *
 * For more detailed information, please refer to the QMU Programming Guide.
 */
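/*
 * GPD layout note: a GPD is a 16-byte descriptor (see struct qmu_gpd
 * in mtu3.h). @dw0_info carries the HWO/BDP/BPS/ZLP/IOC flags,
 * @next_gpd and @buffer carry the low 32 bits of the next-GPD and
 * data-buffer DMA addresses, and @dw3_info carries the data/buffer
 * length; the high address bits go into the extension fields built by
 * GPD_EXT_NGP() and GPD_EXT_BUF() below. HWO ("HW own") marks
 * ownership: SW sets it once a GPD is ready for HW, and HW clears it
 * when the GPD has been consumed.
 */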
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "mtu3.h"
#include "mtu3_trace.h"
#define QMU_CHECKSUM_LEN	16

#define GPD_FLAGS_HWO	BIT(0)
#define GPD_FLAGS_BDP	BIT(1)
#define GPD_FLAGS_BPS	BIT(2)
#define GPD_FLAGS_ZLP	BIT(6)
#define GPD_FLAGS_IOC	BIT(7)
#define GET_GPD_HWO(gpd)	(le32_to_cpu((gpd)->dw0_info) & GPD_FLAGS_HWO)

#define GPD_RX_BUF_LEN_OG(x)	(((x) & 0xffff) << 16)
#define GPD_RX_BUF_LEN_EL(x)	(((x) & 0xfffff) << 12)
#define GPD_RX_BUF_LEN(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
})

#define GPD_DATA_LEN_OG(x)	((x) & 0xffff)
#define GPD_DATA_LEN_EL(x)	((x) & 0xfffff)
#define GPD_DATA_LEN(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
})

#define GPD_EXT_FLAG_ZLP	BIT(29)
#define GPD_EXT_NGP_OG(x)	(((x) & 0xf) << 20)
#define GPD_EXT_BUF_OG(x)	(((x) & 0xf) << 16)
#define GPD_EXT_NGP_EL(x)	(((x) & 0xf) << 28)
#define GPD_EXT_BUF_EL(x)	(((x) & 0xf) << 24)
#define GPD_EXT_NGP(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
})

#define GPD_EXT_BUF(mtu, x)	\
({				\
	typeof(x) x_ = (x);	\
	((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
})

#define HILO_GEN64(hi, lo) (((u64)(hi) << 32) + (lo))
#define HILO_DMA(hi, lo)	\
	((dma_addr_t)HILO_GEN64((le32_to_cpu(hi)), (le32_to_cpu(lo))))
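/*
 * Illustrative example (hypothetical values): HILO_DMA(0x1, 0x80000000)
 * evaluates to the 64-bit DMA address 0x180000000 when dma_addr_t is
 * 64-bit; with a 32-bit dma_addr_t the high word is truncated by the
 * cast.
 */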
static dma_addr_t read_txq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 txcpr;
	u32 txhiar;

	txcpr = mtu3_readl(mbase, USB_QMU_TQCPR(epnum));
	txhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(txhiar), txcpr);
}
static dma_addr_t read_rxq_cur_addr(void __iomem *mbase, u8 epnum)
{
	u32 rxcpr;
	u32 rxhiar;

	rxcpr = mtu3_readl(mbase, USB_QMU_RQCPR(epnum));
	rxhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));

	return HILO_DMA(QMU_CUR_GPD_ADDR_HI(rxhiar), rxcpr);
}
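/*
 * Note: queue addresses are split across two registers: TQSAR/RQSAR
 * (and TQCPR/RQCPR) hold the low 32 bits, while the HIAR registers
 * hold the high bits, so GPD rings may live above the 4GB boundary.
 */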
static void write_txq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 tqhiar;

	mtu3_writel(mbase, USB_QMU_TQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	tqhiar = mtu3_readl(mbase, USB_QMU_TQHIAR(epnum));
	tqhiar &= ~QMU_START_ADDR_HI_MSK;
	tqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_TQHIAR(epnum), tqhiar);
}
static void write_rxq_start_addr(void __iomem *mbase, u8 epnum, dma_addr_t dma)
{
	u32 rqhiar;

	mtu3_writel(mbase, USB_QMU_RQSAR(epnum),
		    cpu_to_le32(lower_32_bits(dma)));
	rqhiar = mtu3_readl(mbase, USB_QMU_RQHIAR(epnum));
	rqhiar &= ~QMU_START_ADDR_HI_MSK;
	rqhiar |= QMU_START_ADDR_HI(upper_32_bits(dma));
	mtu3_writel(mbase, USB_QMU_RQHIAR(epnum), rqhiar);
}
static struct qmu_gpd *gpd_dma_to_virt(struct mtu3_gpd_ring *ring,
		dma_addr_t dma_addr)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset = (dma_addr - dma_base) / sizeof(*gpd_head);

	if (offset >= MAX_GPD_NUM)
		return NULL;

	return gpd_head + offset;
}
static dma_addr_t gpd_virt_to_dma(struct mtu3_gpd_ring *ring,
		struct qmu_gpd *gpd)
{
	dma_addr_t dma_base = ring->dma;
	struct qmu_gpd *gpd_head = ring->start;
	u32 offset;

	offset = gpd - gpd_head;
	if (offset >= MAX_GPD_NUM)
		return 0;

	return dma_base + (offset * sizeof(*gpd));
}
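/*
 * Illustrative example (hypothetical addresses): with ring->dma ==
 * 0x1000 and 16-byte GPDs, DMA address 0x1040 maps to ring->start + 4,
 * and gpd_virt_to_dma(ring, ring->start + 4) maps back to 0x1040.
 */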
static void gpd_ring_init(struct mtu3_gpd_ring *ring, struct qmu_gpd *gpd)
{
	ring->start = gpd;
	ring->enqueue = gpd;
	ring->dequeue = gpd;
	ring->end = gpd + MAX_GPD_NUM - 1;
}
static void reset_gpd_list(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->start;

	if (gpd) {
		gpd->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
		gpd_ring_init(ring, gpd);
	}
}
int mtu3_gpd_ring_alloc(struct mtu3_ep *mep)
{
	struct qmu_gpd *gpd;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	/* software owns all GPDs by default */
	gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma);
	if (gpd == NULL)
		return -ENOMEM;

	gpd_ring_init(ring, gpd);

	return 0;
}
void mtu3_gpd_ring_free(struct mtu3_ep *mep)
{
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;

	dma_pool_free(mep->mtu->qmu_gpd_pool,
		      ring->start, ring->dma);
	memset(ring, 0, sizeof(*ring));
}
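/*
 * Kick a stopped/paused queue: write QMU_Q_RESUME, and if the queue
 * has not become active after the first write, write it once more.
 */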
void mtu3_qmu_resume(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 offset;

	offset = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	mtu3_writel(mbase, offset, QMU_Q_RESUME);
	if (!(mtu3_readl(mbase, offset) & QMU_Q_ACTIVE))
		mtu3_writel(mbase, offset, QMU_Q_RESUME);
}
static struct qmu_gpd *advance_enq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->enqueue < ring->end)
		ring->enqueue++;
	else
		ring->enqueue = ring->start;

	return ring->enqueue;
}
static struct qmu_gpd *advance_deq_gpd(struct mtu3_gpd_ring *ring)
{
	if (ring->dequeue < ring->end)
		ring->dequeue++;
	else
		ring->dequeue = ring->start;

	return ring->dequeue;
}
/* check if a ring is empty (i.e. no free GPD is left) */
static int gpd_ring_empty(struct mtu3_gpd_ring *ring)
{
	struct qmu_gpd *enq = ring->enqueue;
	struct qmu_gpd *next;

	if (ring->enqueue < ring->end)
		next = enq + 1;
	else
		next = ring->start;

	/* one gpd is reserved to simplify gpd preparation */
	return next == ring->dequeue;
}
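/*
 * Illustrative example (hypothetical ring size): with MAX_GPD_NUM == 4,
 * at most three GPDs can be in flight. Starting from enqueue ==
 * dequeue == start, after three mtu3_insert_gpd() calls enqueue sits
 * on end, so next wraps to start == dequeue and gpd_ring_empty()
 * reports that no free GPD is left.
 */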
int mtu3_prepare_transfer(struct mtu3_ep *mep)
{
	return gpd_ring_empty(&mep->gpd_ring);
}
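/*
 * Prepare a TX GPD: fill in the buffer address and length, link the
 * next (reserved) GPD with its HWO cleared, then hand the current GPD
 * over to HW by setting IOC | HWO last.
 */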
static int mtu3_prepare_tx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW own it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw3_info = cpu_to_le32(GPD_DATA_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "TX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw0_info = cpu_to_le32(ext_addr);

	if (req->zero) {
		if (mtu->gen2cp)
			gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_ZLP);
		else
			gpd->dw3_info |= cpu_to_le32(GPD_EXT_FLAG_ZLP);
	}

	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}
static int mtu3_prepare_rx_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	struct qmu_gpd *enq;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd = ring->enqueue;
	struct usb_request *req = &mreq->request;
	struct mtu3 *mtu = mep->mtu;
	dma_addr_t enq_dma;
	u32 ext_addr;

	gpd->dw0_info = 0;	/* SW own it */
	gpd->buffer = cpu_to_le32(lower_32_bits(req->dma));
	ext_addr = GPD_EXT_BUF(mtu, upper_32_bits(req->dma));
	gpd->dw0_info = cpu_to_le32(GPD_RX_BUF_LEN(mtu, req->length));

	/* get the next GPD */
	enq = advance_enq_gpd(ring);
	enq_dma = gpd_virt_to_dma(ring, enq);
	dev_dbg(mep->mtu->dev, "RX-EP%d queue gpd=%p, enq=%p, qdma=%pad\n",
		mep->epnum, gpd, enq, &enq_dma);

	enq->dw0_info &= cpu_to_le32(~GPD_FLAGS_HWO);
	gpd->next_gpd = cpu_to_le32(lower_32_bits(enq_dma));
	ext_addr |= GPD_EXT_NGP(mtu, upper_32_bits(enq_dma));
	gpd->dw3_info = cpu_to_le32(ext_addr);
	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	gpd->dw0_info |= cpu_to_le32(GPD_FLAGS_IOC | GPD_FLAGS_HWO);

	mreq->gpd = gpd;
	trace_mtu3_prepare_gpd(mep, gpd);

	return 0;
}
void mtu3_insert_gpd(struct mtu3_ep *mep, struct mtu3_request *mreq)
{
	if (mep->is_in)
		mtu3_prepare_tx_gpd(mep, mreq);
	else
		mtu3_prepare_rx_gpd(mep, mreq);
}
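/*
 * Typical gadget-side flow (sketch, see mtu3_gadget_queue() in
 * mtu3_gadget.c): check mtu3_prepare_transfer() for a free GPD, map
 * the request buffer, call mtu3_insert_gpd(), then kick the queue
 * with mtu3_qmu_resume().
 */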
int mtu3_qmu_start(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	u8 epnum = mep->epnum;

	if (mep->is_in) {
		/* set QMU start address */
		write_txq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_DMAREQEN);
		/* send zero length packet according to ZLP flag in GPD */
		mtu3_setbits(mbase, U3D_QCR1, QMU_TX_ZLP(epnum));
		mtu3_writel(mbase, U3D_TQERRIESR0,
			    QMU_TX_LEN_ERR(epnum) | QMU_TX_CS_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_TQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Tx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_TQCSR(epnum), QMU_Q_START);

	} else {
		write_rxq_start_addr(mbase, epnum, ring->dma);
		mtu3_setbits(mbase, MU3D_EP_RXCR0(epnum), RX_DMAREQEN);
		/* don't expect ZLP */
		mtu3_clrbits(mbase, U3D_QCR3, QMU_RX_ZLP(epnum));
		/* move to next GPD when a ZLP is received */
		mtu3_setbits(mbase, U3D_QCR3, QMU_RX_COZ(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR0,
			    QMU_RX_LEN_ERR(epnum) | QMU_RX_CS_ERR(epnum));
		mtu3_writel(mbase, U3D_RQERRIESR1, QMU_RX_ZLP_ERR(epnum));

		if (mtu3_readl(mbase, USB_QMU_RQCSR(epnum)) & QMU_Q_ACTIVE) {
			dev_warn(mtu->dev, "Rx %d Active Now!\n", epnum);
			return 0;
		}
		mtu3_writel(mbase, USB_QMU_RQCSR(epnum), QMU_Q_START);
	}

	return 0;
}
/* may be called in atomic context */
void mtu3_qmu_stop(struct mtu3_ep *mep)
{
	struct mtu3 *mtu = mep->mtu;
	void __iomem *mbase = mtu->mac_base;
	int epnum = mep->epnum;
	u32 value = 0;
	u32 qcsr;
	int ret;

	qcsr = mep->is_in ? USB_QMU_TQCSR(epnum) : USB_QMU_RQCSR(epnum);

	if (!(mtu3_readl(mbase, qcsr) & QMU_Q_ACTIVE)) {
		dev_dbg(mtu->dev, "%s's qmu is inactive now!\n", mep->name);
		return;
	}
	mtu3_writel(mbase, qcsr, QMU_Q_STOP);

	if (mep->is_in)
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_FLUSHFIFO);

	ret = readl_poll_timeout_atomic(mbase + qcsr, value,
					!(value & QMU_Q_ACTIVE), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "stop %s's qmu failed\n", mep->name);
		return;
	}

	/* flush fifo again to make sure the fifo is empty */
	if (mep->is_in)
		mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_FLUSHFIFO);

	dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}
void mtu3_qmu_flush(struct mtu3_ep *mep)
{
	dev_dbg(mep->mtu->dev, "%s flush QMU %s\n", __func__,
		((mep->is_in) ? "TX" : "RX"));

	/* stop QMU */
	mtu3_qmu_stop(mep);
	reset_gpd_list(mep);
}
/*
 * QMU can't transfer a zero-length packet (ZLP) directly (a hardware
 * limitation on old SoCs), so when a ZLP needs to be sent, we
 * intentionally trigger a length-error interrupt and send the ZLP via
 * the BMU from within the ISR.
 */
static void qmu_tx_zlp_error_handler(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd_current = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;
	u32 txcsr = 0;
	int ret;

	mreq = next_request(mep);
	if (mreq && mreq->request.length != 0)
		return;

	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	if (GPD_DATA_LEN(mtu, le32_to_cpu(gpd_current->dw3_info)) != 0) {
		dev_err(mtu->dev, "TX EP%d buffer length error(!=0)\n", epnum);
		return;
	}

	dev_dbg(mtu->dev, "%s send ZLP for req=%p\n", __func__, mreq);
	trace_mtu3_zlp_exp_gpd(mep, gpd_current);

	mtu3_clrbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);

	ret = readl_poll_timeout_atomic(mbase + MU3D_EP_TXCR0(mep->epnum),
					txcsr, !(txcsr & TX_FIFOFULL), 1, 1000);
	if (ret) {
		dev_err(mtu->dev, "%s wait for fifo empty fail\n", __func__);
		return;
	}
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_TXPKTRDY);
	/* prevent reorder, make sure GPD's HWO is set last */
	mb();
	/* bypass the current GPD */
	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);

	/* enable DMAREQEN, switch back to QMU mode */
	mtu3_setbits(mbase, MU3D_EP_TXCR0(mep->epnum), TX_DMAREQEN);
	mtu3_qmu_resume(mep);
}
/*
 * When an RX error occurs (except a ZLP error), QMU stops and RQCPR
 * holds the GPD that hit the error; a Done interrupt will be raised
 * after QMU is resumed.
 */
static void qmu_error_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	struct qmu_gpd *gpd_current = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = read_rxq_cur_addr(mtu->mac_base, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	mreq = next_request(mep);
	if (!mreq || mreq->gpd != gpd_current) {
		dev_err(mtu->dev, "no correct RX req is found\n");
		return;
	}

	mreq->request.status = -EAGAIN;

	/* bypass the current GPD */
	gpd_current->dw0_info |= cpu_to_le32(GPD_FLAGS_BPS | GPD_FLAGS_HWO);
	mtu3_qmu_resume(mep);

	dev_dbg(mtu->dev, "%s EP%d, current=%p, req=%p\n",
		__func__, epnum, gpd_current, mreq);
}
/*
 * NOTE: the request list may already be empty in the following case:
 * queue_tx --> qmu_interrupt (clear interrupt pending, schedule
 * tasklet) --> queue_tx --> process_tasklet (meanwhile, the second
 * request is transferred, and the tasklet processes both of them)
 * --> qmu_interrupt for the second one.
 * To avoid this case, qmu_done_tx() is called directly from the ISR.
 */
static void qmu_done_tx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->in_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *request = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	/* translate the physical GPD address from the QMU register to a virtual one */
	cur_gpd_dma = read_txq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {

		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct TX req is found\n");
			break;
		}

		request = &mreq->request;
		request->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, request, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}
static void qmu_done_rx(struct mtu3 *mtu, u8 epnum)
{
	struct mtu3_ep *mep = mtu->out_eps + epnum;
	struct mtu3_gpd_ring *ring = &mep->gpd_ring;
	void __iomem *mbase = mtu->mac_base;
	struct qmu_gpd *gpd = ring->dequeue;
	struct qmu_gpd *gpd_current = NULL;
	struct usb_request *req = NULL;
	struct mtu3_request *mreq;
	dma_addr_t cur_gpd_dma;

	cur_gpd_dma = read_rxq_cur_addr(mbase, epnum);
	gpd_current = gpd_dma_to_virt(ring, cur_gpd_dma);

	dev_dbg(mtu->dev, "%s EP%d, last=%p, current=%p, enq=%p\n",
		__func__, epnum, gpd, gpd_current, ring->enqueue);

	while (gpd != gpd_current && !GET_GPD_HWO(gpd)) {

		mreq = next_request(mep);

		if (mreq == NULL || mreq->gpd != gpd) {
			dev_err(mtu->dev, "no correct RX req is found\n");
			break;
		}
		req = &mreq->request;

		req->actual = GPD_DATA_LEN(mtu, le32_to_cpu(gpd->dw3_info));
		trace_mtu3_complete_gpd(mep, gpd);
		mtu3_req_complete(mep, req, 0);

		gpd = advance_deq_gpd(ring);
	}

	dev_dbg(mtu->dev, "%s EP%d, deq=%p, enq=%p, complete\n",
		__func__, epnum, ring->dequeue, ring->enqueue);
}
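/* EP0 is not serviced by QMU, so the endpoint scans below start from index 1 */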
static void qmu_done_isr(struct mtu3 *mtu, u32 done_status)
{
	int i;

	for (i = 1; i < mtu->num_eps; i++) {
		if (done_status & QMU_RX_DONE_INT(i))
			qmu_done_rx(mtu, i);
		if (done_status & QMU_TX_DONE_INT(i))
			qmu_done_tx(mtu, i);
	}
}
static void qmu_exception_isr(struct mtu3 *mtu, u32 qmu_status)
{
	void __iomem *mbase = mtu->mac_base;
	u32 errval;
	int i;

	if ((qmu_status & RXQ_CSERR_INT) || (qmu_status & RXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_RQERRIR0);
		mtu3_writel(mbase, U3D_RQERRIR0, errval);

		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_CS_ERR(i))
				dev_err(mtu->dev, "Rx %d CS error!\n", i);

			if (errval & QMU_RX_LEN_ERR(i))
				dev_err(mtu->dev, "RX %d Length error\n", i);

			if (errval & (QMU_RX_CS_ERR(i) | QMU_RX_LEN_ERR(i)))
				qmu_error_rx(mtu, i);
		}
	}

	if (qmu_status & RXQ_ZLPERR_INT) {
		errval = mtu3_readl(mbase, U3D_RQERRIR1);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_RX_ZLP_ERR(i))
				dev_dbg(mtu->dev, "RX EP%d Recv ZLP\n", i);
		}
		mtu3_writel(mbase, U3D_RQERRIR1, errval);
	}

	if ((qmu_status & TXQ_CSERR_INT) || (qmu_status & TXQ_LENERR_INT)) {
		errval = mtu3_readl(mbase, U3D_TQERRIR0);
		for (i = 1; i < mtu->num_eps; i++) {
			if (errval & QMU_TX_CS_ERR(i))
				dev_err(mtu->dev, "Tx %d checksum error!\n", i);

			if (errval & QMU_TX_LEN_ERR(i))
				qmu_tx_zlp_error_handler(mtu, i);
		}
		mtu3_writel(mbase, U3D_TQERRIR0, errval);
	}
}
irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu)
{
	void __iomem *mbase = mtu->mac_base;
	u32 qmu_status;
	u32 qmu_done_status;

	/* U3D_QISAR1 is read-update (no W1C needed) */
	qmu_status = mtu3_readl(mbase, U3D_QISAR1);
	qmu_status &= mtu3_readl(mbase, U3D_QIER1);

	qmu_done_status = mtu3_readl(mbase, U3D_QISAR0);
	qmu_done_status &= mtu3_readl(mbase, U3D_QIER0);
	mtu3_writel(mbase, U3D_QISAR0, qmu_done_status); /* W1C */
	dev_dbg(mtu->dev, "=== QMUdone[tx=%x, rx=%x] QMUexp[%x] ===\n",
		(qmu_done_status & 0xFFFF), qmu_done_status >> 16,
		qmu_status);
	trace_mtu3_qmu_isr(qmu_done_status, qmu_status);

	if (qmu_done_status)
		qmu_done_isr(mtu, qmu_done_status);

	if (qmu_status)
		qmu_exception_isr(mtu, qmu_status);

	return IRQ_HANDLED;
}
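/*
 * Note on pool sizing (assuming the definitions in mtu3_qmu.h):
 * QMU_GPD_RING_SIZE is MAX_GPD_NUM GPDs of QMU_GPD_SIZE (16 bytes)
 * each, so every mtu3_gpd_ring_alloc() call takes one whole ring,
 * aligned to QMU_GPD_SIZE, from this pool.
 */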
int mtu3_qmu_init(struct mtu3 *mtu)
{
	compiletime_assert(QMU_GPD_SIZE == 16, "QMU_GPD size SHOULD be 16B");

	mtu->qmu_gpd_pool = dma_pool_create("QMU_GPD", mtu->dev,
			QMU_GPD_RING_SIZE, QMU_GPD_SIZE, 0);

	if (!mtu->qmu_gpd_pool)
		return -ENOMEM;

	return 0;
}
void mtu3_qmu_exit(struct mtu3 *mtu)
{
	dma_pool_destroy(mtu->qmu_gpd_pool);
}