1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_intr.c $
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 * ========================================================================== */
34 #include "dwc_otg_pcd.h"
37 #include "dwc_otg_cfi.h"
40 #ifndef __maybe_unused
41 #define __maybe_unused __attribute__((unused))
44 //#define PRINT_CFI_DMA_DESCS
51 * This function updates OTG.
53 static void dwc_otg_pcd_update_otg(dwc_otg_pcd_t * pcd, const unsigned reset)
/* NOTE(review): this view of the file is line-sampled; braces and some
 * statements between the visible lines are not shown. Code left untouched. */
/* Clear every negotiated HNP capability so the next session starts clean.
 * Presumably only done when 'reset' is set — the guarding condition is not
 * visible here; confirm against the full source. */
57 pcd->b_hnp_enable = 0;
58 pcd->a_hnp_support = 0;
59 pcd->a_alt_hnp_support = 0;
/* Let the gadget driver observe the HNP state change, if it cares. */
62 if (pcd->fops->hnp_changed) {
63 pcd->fops->hnp_changed(pcd);
68 * This file contains the implementation of the PCD Interrupt handlers.
70 * The PCD handles the device interrupts. Many conditions can cause a
71 * device interrupt. When an interrupt occurs, the device interrupt
72 * service routine determines the cause of the interrupt and
73 * dispatches handling to the appropriate function. These interrupt
74 * handling functions are described below.
75 * All interrupt registers are processed from LSB to MSB.
79 * This function prints the ep0 state for debug purposes.
81 static inline void print_ep0_state(dwc_otg_pcd_t * pcd)
/* Translate pcd->ep0state into a readable name and emit it at DBG_ANY.
 * Debug aid only — has no effect on controller state. */
86 switch (pcd->ep0state) {
88 dwc_strcpy(str, "EP0_DISCONNECT");
91 dwc_strcpy(str, "EP0_IDLE");
93 case EP0_IN_DATA_PHASE:
94 dwc_strcpy(str, "EP0_IN_DATA_PHASE");
96 case EP0_OUT_DATA_PHASE:
97 dwc_strcpy(str, "EP0_OUT_DATA_PHASE");
99 case EP0_IN_STATUS_PHASE:
100 dwc_strcpy(str, "EP0_IN_STATUS_PHASE");
102 case EP0_OUT_STATUS_PHASE:
103 dwc_strcpy(str, "EP0_OUT_STATUS_PHASE");
106 dwc_strcpy(str, "EP0_STALL");
/* Fallback for any state value not covered above. */
109 dwc_strcpy(str, "EP0_INVALID");
112 DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state);
/* Dump one DMA descriptor (buffer address, byte count, short-packet, last,
 * status and buffer-status bits) for the named endpoint. Debug aid only. */
117 static inline void print_desc(struct dwc_otg_dma_desc *ddesc,
118 const uint8_t * epname, int descnum)
121 ("%s DMA_DESC(%d) buf=0x%08x bytes=0x%04x; sp=0x%x; l=0x%x; sts=0x%02x; bs=0x%02x\n",
122 epname, descnum, ddesc->buf, ddesc->status.b.bytes,
123 ddesc->status.b.sp, ddesc->status.b.l, ddesc->status.b.sts,
129 * This function returns pointer to in ep struct with number ep_num
/* Linear search of pcd->in_ep[] for the IN endpoint whose hardware EP number
 * matches ep_num. The not-found return path is outside this sampled view. */
131 static inline dwc_otg_pcd_ep_t *get_in_ep(dwc_otg_pcd_t * pcd, uint32_t ep_num)
134 int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
138 for (i = 0; i < num_in_eps; ++i) {
139 if (pcd->in_ep[i].dwc_ep.num == ep_num)
140 return &pcd->in_ep[i];
147 * This function returns pointer to out ep struct with number ep_num
/* Linear search of pcd->out_ep[] for the OUT endpoint whose hardware EP number
 * matches ep_num. Mirrors get_in_ep() for the OUT direction. */
149 static inline dwc_otg_pcd_ep_t *get_out_ep(dwc_otg_pcd_t * pcd, uint32_t ep_num)
152 int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
156 for (i = 0; i < num_out_eps; ++i) {
157 if (pcd->out_ep[i].dwc_ep.num == ep_num)
158 return &pcd->out_ep[i];
165 * This functions gets a pointer to an EP from the wIndex address
166 * value of the control request.
/* Resolve a control-request wIndex endpoint address to the matching PCD
 * endpoint structure. Direction bit selects in_ep[] vs out_ep[]; index is
 * ep_num - 1 because EP0 is stored separately. The ep_num == 0 branch is not
 * visible in this sampled view — presumably it returns &pcd->ep0; confirm. */
168 dwc_otg_pcd_ep_t *get_ep_by_addr(dwc_otg_pcd_t * pcd, u16 wIndex)
170 dwc_otg_pcd_ep_t *ep;
171 uint32_t ep_num = UE_GET_ADDR(wIndex);
175 } else if (UE_GET_DIR(wIndex) == UE_DIR_IN) { /* in ep */
176 ep = &pcd->in_ep[ep_num - 1];
178 ep = &pcd->out_ep[ep_num - 1];
185 * This function checks the EP request queue, if the queue is not
186 * empty the next request is started.
/* Pop the next queued request on 'ep' (if any) and program the endpoint for
 * its transfer. NOTE(review): sampled view — braces/else arms missing. */
188 void start_next_request(dwc_otg_pcd_ep_t * ep)
190 dwc_otg_pcd_request_t *req = 0;
191 uint32_t max_transfer =
192 GET_CORE_IF(ep->pcd)->core_params->max_transfer_size;
195 struct dwc_otg_pcd *pcd;
199 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
200 req = DWC_CIRCLEQ_FIRST(&ep->queue);
/* Non-standard buffer mode: let the CFI layer build the DMA descriptors.
 * NOTE(review): 'pcd' appears uninitialized at this visible line — the
 * assignment is presumably in a sampled-out line; verify in full source. */
203 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
204 ep->dwc_ep.cfi_req_len = req->length;
205 pcd->cfi->ops.build_descriptors(pcd->cfi, pcd, ep, req);
208 /* Setup and start the Transfer */
209 ep->dwc_ep.dma_addr = req->dma;
210 ep->dwc_ep.start_xfer_buff = req->buf;
211 ep->dwc_ep.xfer_buff = req->buf;
212 ep->dwc_ep.sent_zlp = 0;
213 ep->dwc_ep.total_len = req->length;
214 ep->dwc_ep.xfer_len = 0;
215 ep->dwc_ep.xfer_count = 0;
/* Clamp the per-transfer maximum; descriptor DMA has a hardware limit, and
 * the OUT limit is additionally rounded down to a 4-byte multiple. */
217 ep->dwc_ep.maxxfer = max_transfer;
218 if (GET_CORE_IF(ep->pcd)->dma_desc_enable) {
219 uint32_t out_max_xfer = DDMA_MAX_TRANSFER_SIZE
220 - (DDMA_MAX_TRANSFER_SIZE % 4);
221 if (ep->dwc_ep.is_in) {
222 if (ep->dwc_ep.maxxfer >
223 DDMA_MAX_TRANSFER_SIZE) {
225 DDMA_MAX_TRANSFER_SIZE;
228 if (ep->dwc_ep.maxxfer > out_max_xfer) {
/* If the request doesn't fit in one transfer, keep maxxfer a whole
 * number of max-packet-size packets so splits land on packet edges. */
234 if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
235 ep->dwc_ep.maxxfer -=
236 (ep->dwc_ep.maxxfer % ep->dwc_ep.maxpacket);
/* A non-empty transfer that is an exact multiple of maxpacket needs a
 * trailing zero-length packet to mark completion. */
239 if ((ep->dwc_ep.total_len %
240 ep->dwc_ep.maxpacket == 0)
241 && (ep->dwc_ep.total_len != 0)) {
242 ep->dwc_ep.sent_zlp = 1;
249 dwc_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->dwc_ep);
254 * This function handles the SOF Interrupts. At this time the SOF
255 * Interrupt is disabled.
/* Start-of-Frame interrupt handler: no work to do, just acknowledge it by
 * writing the sofintr bit back to GINTSTS (write-1-to-clear). */
257 int32_t dwc_otg_pcd_handle_sof_intr(dwc_otg_pcd_t * pcd)
259 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
261 gintsts_data_t gintsts;
263 DWC_DEBUGPL(DBG_PCD, "SOF\n");
265 /* Clear interrupt */
267 gintsts.b.sofintr = 1;
268 dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
274 * This function handles the Rx Status Queue Level Interrupt, which
275 * indicates that there is a least one packet in the Rx FIFO. The
276 * packets are moved from the FIFO to memory, where they will be
277 * processed when the Endpoint Interrupt Register indicates Transfer
278 * Complete or SETUP Phase Done.
280 * Repeat the following until the Rx Status Queue is empty:
281 * -# Read the Receive Status Pop Register (GRXSTSP) to get Packet
283 * -# If Receive FIFO is empty then skip to step Clear the interrupt
285 * -# If SETUP Packet call dwc_otg_read_setup_packet to copy the
286 * SETUP data to the buffer
287 * -# If OUT Data Packet call dwc_otg_read_packet to copy the data
288 * to the destination buffer
/* RxFIFO status-queue-level handler: pop one status entry from GRXSTSP and
 * dispatch on its packet-status field — copy OUT data or a SETUP packet out
 * of the FIFO, or just log completion markers. The interrupt is masked for
 * the duration of the pop and re-enabled before returning.
 * NOTE(review): sampled view — break statements/braces missing between
 * visible lines; code left untouched. */
290 int32_t dwc_otg_pcd_handle_rx_status_q_level_intr(dwc_otg_pcd_t * pcd)
292 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
293 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
294 gintmsk_data_t gintmask = {.d32 = 0 };
295 device_grxsts_data_t status;
296 dwc_otg_pcd_ep_t *ep;
297 gintsts_data_t gintsts;
/* Debug names for the 2-bit data PID field (hardware encodes D1 as 2). */
299 static char *dpid_str[] = { "D0", "D2", "D1", "MDATA" };
302 //DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd);
303 /* Disable the Rx Status Queue Level interrupt */
304 gintmask.b.rxstsqlvl = 1;
305 dwc_modify_reg32(&global_regs->gintmsk, gintmask.d32, 0);
307 /* Get the Status from the top of the FIFO */
/* Reading GRXSTSP pops the entry — read exactly once per entry. */
308 status.d32 = dwc_read_reg32(&global_regs->grxstsp);
310 DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s "
311 "pktsts:%x Frame:%d(0x%0x)\n",
312 status.b.epnum, status.b.bcnt,
313 dpid_str[status.b.dpid],
314 status.b.pktsts, status.b.fn, status.b.fn);
315 /* Get pointer to EP structure */
316 ep = get_out_ep(pcd, status.b.epnum);
318 switch (status.b.pktsts) {
319 case DWC_DSTS_GOUT_NAK:
320 DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n");
322 case DWC_STS_DATA_UPDT:
323 DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n");
/* Slave-mode OUT data: copy bcnt bytes from the FIFO into the current
 * transfer buffer and advance the bookkeeping. */
324 if (status.b.bcnt && ep->dwc_ep.xfer_buff) {
325 /** @todo NGS Check for buffer overflow? */
326 dwc_otg_read_packet(core_if,
327 ep->dwc_ep.xfer_buff,
329 ep->dwc_ep.xfer_count += status.b.bcnt;
330 ep->dwc_ep.xfer_buff += status.b.bcnt;
333 case DWC_STS_XFER_COMP:
334 DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n");
336 case DWC_DSTS_SETUP_COMP:
338 DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n");
341 case DWC_DSTS_SETUP_UPDT:
/* A SETUP packet arrived: copy the 8 bytes into pcd->setup_pkt. */
342 dwc_otg_read_setup_packet(core_if, pcd->setup_pkt->d32);
345 "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n",
346 pcd->setup_pkt->req.bmRequestType,
347 pcd->setup_pkt->req.bRequest,
348 UGETW(pcd->setup_pkt->req.wValue),
349 UGETW(pcd->setup_pkt->req.wIndex),
350 UGETW(pcd->setup_pkt->req.wLength));
352 ep->dwc_ep.xfer_count += status.b.bcnt;
/* Unknown pktsts encoding — log and fall through to cleanup. */
355 DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n",
360 /* Enable the Rx Status Queue Level interrupt */
361 dwc_modify_reg32(&global_regs->gintmsk, 0, gintmask.d32);
362 /* Clear interrupt */
364 gintsts.b.rxstsqlvl = 1;
365 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
367 //DWC_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__);
372 * This function examines the Device IN Token Learning Queue to
373 * determine the EP number of the last IN token received. This
374 * implementation is for the Mass Storage device where there are only
375 * 2 IN EPs (Control-IN and BULK-IN).
377 * The EP numbers for the first six IN Tokens are in DTKNQR1 and there
378 * are 8 EP Numbers in each of the other possible DTKNQ Registers.
380 * @param core_if Programming view of DWC_otg controller.
/* Walk the Device IN Token Learning Queue registers (DTKNQR1..) and return
 * the EP number of the most recently received IN token. The first register
 * holds 6 EP numbers (plus the write pointer); subsequent registers hold 8
 * each, 4 bits per entry. NOTE(review): sampled view — loop/branch braces
 * and some statements are not visible here. */
383 static inline int get_ep_of_last_in_token(dwc_otg_core_if_t * core_if)
385 dwc_otg_device_global_regs_t *dev_global_regs =
386 core_if->dev_if->dev_global_regs;
387 const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth;
388 /* Number of Token Queue Registers */
389 const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
390 dtknq1_data_t dtknqr1;
391 uint32_t in_tkn_epnums[4];
394 volatile uint32_t *addr = &dev_global_regs->dtknqr1;
397 //DWC_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH);
399 /* Read the DTKNQ Registers */
400 for (i = 0; i < DTKNQ_REG_CNT; i++) {
401 in_tkn_epnums[i] = dwc_read_reg32(addr);
402 DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1,
/* The DTKNQ registers are not fully contiguous in the register map:
 * skip over dvbusdis to reach dtknqr3/dthrctl. */
404 if (addr == &dev_global_regs->dvbusdis) {
405 addr = &dev_global_regs->dtknqr3_dthrctl;
412 /* Copy the DTKNQR1 data to the bit field. */
413 dtknqr1.d32 = in_tkn_epnums[0];
414 /* Get the EP numbers */
415 in_tkn_epnums[0] = dtknqr1.b.epnums0_5;
/* intknwptr points one past the newest entry; back up by one. */
416 ndx = dtknqr1.b.intknwptr - 1;
418 //DWC_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx);
420 /** @todo Find a simpler way to calculate the max
/* Wrap case (ndx went negative): the newest token is the last entry of
 * the last register; 'cnt' is its nibble index within that register. */
422 int cnt = TOKEN_Q_DEPTH;
423 if (TOKEN_Q_DEPTH <= 6) {
424 cnt = TOKEN_Q_DEPTH - 1;
425 } else if (TOKEN_Q_DEPTH <= 14) {
426 cnt = TOKEN_Q_DEPTH - 7;
427 } else if (TOKEN_Q_DEPTH <= 22) {
428 cnt = TOKEN_Q_DEPTH - 15;
430 cnt = TOKEN_Q_DEPTH - 23;
432 epnum = (in_tkn_epnums[DTKNQ_REG_CNT - 1] >> (cnt * 4)) & 0xF;
/* Normal case: pick the 4-bit EP number out of the register that holds
 * slot 'ndx' (6 slots in the first register, 8 in each of the rest). */
435 epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF;
436 } else if (ndx <= 13) {
438 epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF;
439 } else if (ndx <= 21) {
441 epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF;
442 } else if (ndx <= 29) {
444 epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF;
447 //DWC_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum);
452 * This interrupt occurs when the non-periodic Tx FIFO is half-empty.
453 * The active request is checked for the next packet to be loaded into
454 * the non-periodic Tx FIFO.
/* Non-periodic TxFIFO half-empty handler (slave mode): find the EP of the
 * last IN token, then write packets into the FIFO while there is request
 * queue space, FIFO space, and data left to send. */
456 int32_t dwc_otg_pcd_handle_np_tx_fifo_empty_intr(dwc_otg_pcd_t * pcd)
458 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
459 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
460 gnptxsts_data_t txstatus = {.d32 = 0 };
461 gintsts_data_t gintsts;
464 dwc_otg_pcd_ep_t *ep = 0;
468 /* Get the epnum from the IN Token Learning Queue. */
469 epnum = get_ep_of_last_in_token(core_if);
470 ep = get_in_ep(pcd, epnum);
472 DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %d \n", epnum);
/* Size of the next write: remaining bytes, capped at one max packet,
 * expressed in 32-bit words for the FIFO-space comparison. */
474 len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
475 if (len > ep->dwc_ep.maxpacket) {
476 len = ep->dwc_ep.maxpacket;
478 dwords = (len + 3) / 4;
480 /* While there is space in the queue and space in the FIFO and
481 * More data to tranfer, Write packets to the Tx FIFO */
482 txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts);
483 DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n", txstatus.d32);
485 while (txstatus.b.nptxqspcavail > 0 &&
486 txstatus.b.nptxfspcavail > dwords &&
487 ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) {
489 dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
/* Recompute next packet size and re-read FIFO status each iteration. */
490 len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
492 if (len > ep->dwc_ep.maxpacket) {
493 len = ep->dwc_ep.maxpacket;
496 dwords = (len + 3) / 4;
497 txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts);
498 DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", txstatus.d32);
501 DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n",
502 dwc_read_reg32(&global_regs->gnptxsts));
504 /* Clear interrupt */
506 gintsts.b.nptxfempty = 1;
507 dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
513 * This function is called when dedicated Tx FIFO Empty interrupt occurs.
514 * The active request is checked for the next packet to be loaded into
515 * apropriate Tx FIFO.
/* Dedicated TxFIFO empty handler for IN endpoint 'epnum': write packets into
 * that endpoint's own FIFO (DTXFSTS space check) until the transfer's data
 * is exhausted or the FIFO lacks room for the next packet. */
517 static int32_t write_empty_tx_fifo(dwc_otg_pcd_t * pcd, uint32_t epnum)
519 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
520 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
521 dtxfsts_data_t txstatus = {.d32 = 0 };
522 dwc_otg_pcd_ep_t *ep = 0;
526 ep = get_in_ep(pcd, epnum);
528 DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %d \n", epnum);
/* Next packet size: remaining bytes capped at maxpacket, in words. */
530 len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
532 if (len > ep->dwc_ep.maxpacket) {
533 len = ep->dwc_ep.maxpacket;
536 dwords = (len + 3) / 4;
538 /* While there is space in the queue and space in the FIFO and
539 * More data to tranfer, Write packets to the Tx FIFO */
540 txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts);
541 DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32);
543 while (txstatus.b.txfspcavail > dwords &&
544 ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len &&
545 ep->dwc_ep.xfer_len != 0) {
547 dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
549 len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
550 if (len > ep->dwc_ep.maxpacket) {
551 len = ep->dwc_ep.maxpacket;
554 dwords = (len + 3) / 4;
/* NOTE(review): a sampled-out line presumably assigns this read back
 * into txstatus.d32 — as shown, the read result is discarded; verify. */
556 dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts);
557 DWC_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", epnum,
561 DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum,
562 dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts));
568 * This function is called when the Device is disconnected. It stops
569 * any active requests and informs the Gadget driver of the
/* Quiesce the device on disconnect: flush FIFOs, nuke every queued request
 * on EP0 and all IN/OUT endpoints, then notify the gadget driver. Runs under
 * pcd->lock; the lock is dropped around the driver's disconnect callback so
 * the callback may re-enter the PCD. Idempotent via the EP0_DISCONNECT check. */
572 void dwc_otg_pcd_stop(dwc_otg_pcd_t * pcd)
574 int i, num_in_eps, num_out_eps;
575 dwc_otg_pcd_ep_t *ep;
578 gintmsk_data_t intr_mask = {.d32 = 0 };
580 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
582 num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
583 num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
585 DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__);
586 /* don't disconnect drivers more than once */
587 if (pcd->ep0state == EP0_DISCONNECT) {
588 DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__);
591 pcd->ep0state = EP0_DISCONNECT;
593 /* Reset the OTG state. */
594 dwc_otg_pcd_update_otg(pcd, 1);
596 /* Disable the NP Tx Fifo Empty Interrupt. */
597 intr_mask.b.nptxfempty = 1;
598 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
601 /* Flush the FIFOs */
602 /**@todo NGS Flush Periodic FIFOs */
/* 0x10 selects "flush all TxFIFOs" in GRSTCTL.TxFNum. */
603 dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0x10);
604 dwc_otg_flush_rx_fifo(GET_CORE_IF(pcd));
606 /* prevent new request submissions, kill any outstanding requests */
/* NOTE(review): 'ep' here is presumably set to &pcd->ep0 on a sampled-out
 * line just above; confirm against the full source. */
608 dwc_otg_request_nuke(ep);
609 /* prevent new request submissions, kill any outstanding requests */
610 for (i = 0; i < num_in_eps; i++) {
611 dwc_otg_pcd_ep_t *ep = &pcd->in_ep[i];
612 dwc_otg_request_nuke(ep);
614 /* prevent new request submissions, kill any outstanding requests */
615 for (i = 0; i < num_out_eps; i++) {
616 dwc_otg_pcd_ep_t *ep = &pcd->out_ep[i];
617 dwc_otg_request_nuke(ep);
620 /* report disconnect; the driver is already quiesced */
621 if (pcd->fops->disconnect) {
622 DWC_SPINUNLOCK(pcd->lock);
623 pcd->fops->disconnect(pcd);
624 DWC_SPINLOCK(pcd->lock);
626 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
630 * This interrupt indicates that ...
/* I2C interrupt: not implemented. Mask the interrupt so it cannot fire
 * again, then acknowledge the pending bit in GINTSTS. */
632 int32_t dwc_otg_pcd_handle_i2c_intr(dwc_otg_pcd_t * pcd)
634 gintmsk_data_t intr_mask = {.d32 = 0 };
635 gintsts_data_t gintsts;
637 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n", "i2cintr");
638 intr_mask.b.i2cintr = 1;
639 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
642 /* Clear interrupt */
644 gintsts.b.i2cintr = 1;
645 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
651 * This interrupt indicates that ...
/* Early-suspend interrupt: log it and acknowledge the pending bit. No
 * further action is taken here. */
653 int32_t dwc_otg_pcd_handle_early_suspend_intr(dwc_otg_pcd_t * pcd)
655 gintsts_data_t gintsts;
657 DWC_DEBUGPL(DBG_CIL,"Early Suspend Detected\n");
659 /* Clear interrupt */
661 gintsts.b.erlysuspend = 1;
662 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
668 * This function configures EPO to receive SETUP packets.
670 * @todo NGS: Update the comments from the HW FS.
672 * -# Program the following fields in the endpoint specific registers
673 * for Control OUT EP 0, in order to receive a setup packet
674 * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
676 * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
677 * to back setup packets)
678 * - In DMA mode, DOEPDMA0 Register with a memory address to
679 * store any setup packets received
681 * @param core_if Programming view of DWC_otg controller.
682 * @param pcd Programming view of the PCD.
/* Arm control OUT endpoint 0 to receive SETUP packets: program DOEPTSIZ0 for
 * up to 3 back-to-back setups (8*3 bytes), point DOEPDMA0 at the setup
 * buffer (buffer DMA) or a setup DMA descriptor (descriptor DMA), then write
 * DOEPCTL0. NOTE(review): sampled view — some branch braces/lines missing. */
684 static inline void ep0_out_start(dwc_otg_core_if_t * core_if,
687 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
688 deptsiz0_data_t doeptsize0 = {.d32 = 0 };
689 dwc_otg_dma_desc_t *dma_desc;
690 depctl_data_t doepctl = {.d32 = 0 };
693 DWC_DEBUGPL(DBG_PCDV, "%s() doepctl0=%0x\n", __func__,
694 dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
/* 3 setup packets, 1 OUT packet, 24-byte transfer size. */
697 doeptsize0.b.supcnt = 3;
698 doeptsize0.b.pktcnt = 1;
699 doeptsize0.b.xfersize = 8 * 3;
701 if (core_if->dma_enable) {
702 if (!core_if->dma_desc_enable) {
703 /** put here as for Hermes mode deptisz register should not be written */
704 dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz,
707 /** @todo dma needs to handle multiple setup packets (up to 3) */
708 dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma,
709 pcd->setup_pkt_dma_handle);
/* Descriptor DMA: ping-pong between the two preallocated setup
 * descriptors (index alternates 0/1). */
711 dev_if->setup_desc_index =
712 (dev_if->setup_desc_index + 1) & 1;
714 dev_if->setup_desc_addr[dev_if->setup_desc_index];
716 /** DMA Descriptor Setup */
/* Mark BUSY while filling the descriptor, READY only once complete,
 * so the hardware never consumes a half-written descriptor. */
717 dma_desc->status.b.bs = BS_HOST_BUSY;
718 dma_desc->status.b.l = 1;
719 dma_desc->status.b.ioc = 1;
720 dma_desc->status.b.bytes = pcd->ep0.dwc_ep.maxpacket;
721 dma_desc->buf = pcd->setup_pkt_dma_handle;
722 dma_desc->status.b.bs = BS_HOST_READY;
724 /** DOEPDMA0 Register write */
725 dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma,
726 dev_if->dma_setup_desc_addr[dev_if->
731 /** put here as for Hermes mode deptisz register should not be written */
732 dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz,
736 /** DOEPCTL0 Register write */
739 //doepctl.b.snak = 1;
740 dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
743 DWC_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
744 dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
745 DWC_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
746 dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
751 * This interrupt occurs when a USB Reset is detected. When the USB
752 * Reset Interrupt occurs the device state is set to DEFAULT and the
753 * EP0 state is set to IDLE.
754 * -# Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1)
755 * -# Unmask the following interrupt bits
756 * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint)
757 * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint)
758 * - DOEPMSK.SETUP = 1
759 * - DOEPMSK.XferCompl = 1
760 * - DIEPMSK.XferCompl = 1
761 * - DIEPMSK.TimeOut = 1
762 * -# Program the following fields in the endpoint specific registers
763 * for Control OUT EP 0, in order to receive a setup packet
764 * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
766 * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
767 * to back setup packets)
768 * - In DMA mode, DOEPDMA0 Register with a memory address to
769 * store any setup packets received
770 * At this point, all the required initialization, except for enabling
771 * the control 0 OUT endpoint is done, for receiving SETUP packets.
/* USB Reset interrupt: bring the device back to the DEFAULT state. Restarts
 * gated clocks, resets HNP state, clears remote wakeup, NAKs all OUT EPs,
 * resets data toggles, flushes FIFOs and the IN-token learning queue,
 * programs the device interrupt masks (per-EP masks when multiproc interrupt
 * support is enabled, shared DOEPMSK/DIEPMSK otherwise), clears the device
 * address, and re-arms EP0 for SETUP packets.
 * NOTE(review): sampled view — braces, some mask assignments, and `#ifdef
 * DWC_EN_ISOC` structure are partially missing; code left untouched. */
773 int32_t dwc_otg_pcd_handle_usb_reset_intr(dwc_otg_pcd_t * pcd)
775 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
776 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
777 depctl_data_t doepctl = {.d32 = 0 };
778 depctl_data_t diepctl = {.d32 = 0 };
779 daint_data_t daintmsk = {.d32 = 0 };
780 doepmsk_data_t doepmsk = {.d32 = 0 };
781 diepmsk_data_t diepmsk = {.d32 = 0 };
782 dcfg_data_t dcfg = {.d32 = 0 };
783 grstctl_t resetctl = {.d32 = 0 };
784 dctl_data_t dctl = {.d32 = 0 };
786 gintsts_data_t gintsts;
787 pcgcctl_data_t power = {.d32 = 0 };
/* If the PHY clock was stopped for power saving, restart it before
 * touching the core. */
789 power.d32 = dwc_read_reg32(core_if->pcgcctl);
790 if (power.b.stoppclk) {
792 power.b.stoppclk = 1;
793 dwc_modify_reg32(core_if->pcgcctl, power.d32, 0);
796 dwc_modify_reg32(core_if->pcgcctl, power.d32, 0);
798 power.b.rstpdwnmodule = 1;
799 dwc_modify_reg32(core_if->pcgcctl, power.d32, 0);
802 core_if->lx_state = DWC_OTG_L0;
804 DWC_DEBUGPL(DBG_CIL,"USB RESET\n");
/* Presumably inside #ifdef DWC_EN_ISOC (closed at line 815 below):
 * reset per-EP isochronous frame tracking. */
806 for (i = 1; i < 16; ++i) {
807 dwc_otg_pcd_ep_t *ep;
809 ep = get_in_ep(pcd, i);
811 dwc_ep = &ep->dwc_ep;
812 dwc_ep->next_frame = 0xffffffff;
815 #endif /* DWC_EN_ISOC */
817 /* reset the HNP settings */
818 dwc_otg_pcd_update_otg(pcd, 1);
820 /* Clear the Remote Wakeup Signalling */
821 dctl.b.rmtwkupsig = 1;
822 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);
824 /* Set NAK for all OUT EPs */
826 for (i = 0; i <= dev_if->num_out_eps; i++) {
827 dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, doepctl.d32);
830 /* Set data pid0 for all eps except for ep0 */
831 doepctl.b.setd0pid = 1;
832 for (i = 1; i <= dev_if->num_out_eps; i++) {
833 dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, doepctl.d32);
836 diepctl.b.setd0pid = 1;
837 for (i = 1; i <= dev_if->num_in_eps; i++) {
838 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, diepctl.d32);
840 /* Flush the NP Tx FIFO */
841 dwc_otg_flush_tx_fifo(core_if, 0x10);
842 /* Flush the NP Rx FIFO */
843 dwc_otg_flush_rx_fifo(core_if);
844 /* Flush the Learning Queue */
845 resetctl.b.intknqflsh = 1;
846 dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32);
/* Multiprocessor interrupt mode: enable EP0 via the per-EP "each"
 * interrupt mask registers. */
848 if (core_if->multiproc_int_enable) {
849 daintmsk.b.inep0 = 1;
850 daintmsk.b.outep0 = 1;
851 dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk,
855 doepmsk.b.xfercompl = 1;
856 doepmsk.b.ahberr = 1;
857 doepmsk.b.epdisabled = 1;
859 if (core_if->dma_desc_enable) {
860 doepmsk.b.stsphsercvd = 1;
864 doepmsk.b.babble = 1;
867 if(core_if->dma_enable) {
871 dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[0],
874 diepmsk.b.xfercompl = 1;
875 diepmsk.b.timeout = 1;
876 diepmsk.b.epdisabled = 1;
877 diepmsk.b.ahberr = 1;
878 diepmsk.b.intknepmis = 1;
880 if (core_if->dma_desc_enable) {
884 if(core_if->dma_enable) {
888 dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[0],
/* Legacy mode: shared DAINTMSK / DOEPMSK / DIEPMSK registers. */
891 daintmsk.b.inep0 = 1;
892 daintmsk.b.outep0 = 1;
893 dwc_write_reg32(&dev_if->dev_global_regs->daintmsk,
897 doepmsk.b.xfercompl = 1;
898 doepmsk.b.ahberr = 1;
899 doepmsk.b.epdisabled = 1;
901 if (core_if->dma_desc_enable) {
902 doepmsk.b.stsphsercvd = 1;
905 dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32);
907 diepmsk.b.xfercompl = 1;
908 diepmsk.b.timeout = 1;
909 diepmsk.b.epdisabled = 1;
910 diepmsk.b.ahberr = 1;
911 diepmsk.b.intknepmis = 1;
913 if (core_if->dma_desc_enable) {
917 dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32);
920 /* Reset Device Address */
921 dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg);
923 dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
925 /* setup EP0 to receive SETUP packets */
926 ep0_out_start(core_if, pcd);
928 /* Clear interrupt */
930 gintsts.b.usbreset = 1;
931 dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
937 * Get the device speed from the device status register and convert it
938 * to USB speed constant.
940 * @param core_if Programming view of DWC_otg controller.
/* Read DSTS.EnumSpd and map the hardware enumeration-speed encoding to a
 * USB_SPEED_* constant (HS / FS / LS). */
942 static int get_device_speed(dwc_otg_core_if_t * core_if)
946 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
948 switch (dsts.b.enumspd) {
949 case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
950 speed = USB_SPEED_HIGH;
/* Both FS PHY clockings map to full speed. */
952 case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
953 case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
954 speed = USB_SPEED_FULL;
957 case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
958 speed = USB_SPEED_LOW;
966 * Read the device status register and set the device speed in the
968 * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate.
/* Enumeration-done interrupt: activate EP0 at the enumerated speed, report
 * the speed to the gadget driver via fops->connect, and program
 * GUSBCFG.USBTrdTim according to speed and PHY interface type (ULPI fixed
 * at 9; UTMI+ chosen from the utmi8b/utmi16b values set earlier based on
 * the PHY data width). NOTE(review): sampled view — the snpsid branch body
 * and the utmi8b/utmi16b assignments are not visible here. */
970 int32_t dwc_otg_pcd_handle_enum_done_intr(dwc_otg_pcd_t * pcd)
972 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
973 gintsts_data_t gintsts;
974 gusbcfg_data_t gusbcfg;
975 dwc_otg_core_global_regs_t *global_regs =
976 GET_CORE_IF(pcd)->core_global_regs;
977 uint8_t utmi16b, utmi8b;
979 DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n");
/* Core revision gate: newer cores use different turnaround defaults.
 * The assigned values are on sampled-out lines — confirm in full source. */
981 if (GET_CORE_IF(pcd)->snpsid >= 0x4f54260a) {
988 dwc_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->dwc_ep);
991 print_ep0_state(pcd);
995 pcd->ep0state = EP0_IDLE;
999 speed = get_device_speed(GET_CORE_IF(pcd));
1000 pcd->fops->connect(pcd, speed);
1002 /* Set USB turnaround time based on device speed and PHY interface. */
1003 gusbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
1004 if (speed == USB_SPEED_HIGH) {
1005 if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
1006 DWC_HWCFG2_HS_PHY_TYPE_ULPI) {
1007 /* ULPI interface */
1008 gusbcfg.b.usbtrdtim = 9;
1010 if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
1011 DWC_HWCFG2_HS_PHY_TYPE_UTMI) {
1012 /* UTMI+ interface */
/* Prefer the synthesized PHY data width from HWCFG4; fall back to
 * the configured phy_utmi_width parameter. */
1013 if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 0) {
1014 gusbcfg.b.usbtrdtim = utmi8b;
1015 } else if (GET_CORE_IF(pcd)->hwcfg4.b.
1016 utmi_phy_data_width == 1) {
1017 gusbcfg.b.usbtrdtim = utmi16b;
1018 } else if (GET_CORE_IF(pcd)->core_params->
1019 phy_utmi_width == 8) {
1020 gusbcfg.b.usbtrdtim = utmi8b;
1022 gusbcfg.b.usbtrdtim = utmi16b;
1025 if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
1026 DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) {
1027 /* UTMI+ OR ULPI interface */
1028 if (gusbcfg.b.ulpi_utmi_sel == 1) {
1029 /* ULPI interface */
1030 gusbcfg.b.usbtrdtim = 9;
1032 /* UTMI+ interface */
1033 if (GET_CORE_IF(pcd)->core_params->
1034 phy_utmi_width == 16) {
1035 gusbcfg.b.usbtrdtim = utmi16b;
1037 gusbcfg.b.usbtrdtim = utmi8b;
1042 /* Full or low speed */
1043 gusbcfg.b.usbtrdtim = 9;
1045 dwc_write_reg32(&global_regs->gusbcfg, gusbcfg.d32);
1047 /* Clear interrupt */
1049 gintsts.b.enumdone = 1;
1050 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1056 * This interrupt indicates that the ISO OUT Packet was dropped due to
1057 * Rx FIFO full or Rx Status Queue Full. If this interrupt occurs
1058 * read all the data from the Rx FIFO.
/* ISO OUT packet dropped interrupt: not implemented — mask it off so it
 * cannot refire, then acknowledge the pending bit. */
1060 int32_t dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(dwc_otg_pcd_t * pcd)
1062 gintmsk_data_t intr_mask = {.d32 = 0 };
1063 gintsts_data_t gintsts;
1065 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n",
1066 "ISOC Out Dropped");
1068 intr_mask.b.isooutdrop = 1;
1069 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
1072 /* Clear interrupt */
1074 gintsts.b.isooutdrop = 1;
1075 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1082 * This interrupt indicates the end of the portion of the micro-frame
1083 * for periodic transactions. If there is a periodic transaction for
1084 * the next frame, load the packets into the EP periodic Tx FIFO.
/* End-of-periodic-frame interrupt: not implemented — mask it off, then
 * acknowledge the pending bit. */
1086 int32_t dwc_otg_pcd_handle_end_periodic_frame_intr(dwc_otg_pcd_t * pcd)
1088 gintmsk_data_t intr_mask = {.d32 = 0 };
1089 gintsts_data_t gintsts;
1090 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n", "EOP");
1092 intr_mask.b.eopframe = 1;
1093 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
1096 /* Clear interrupt */
1098 gintsts.b.eopframe = 1;
1099 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1106 * This interrupt indicates that EP of the packet on the top of the
1107 * non-periodic Tx FIFO does not match EP of the IN Token received.
1109 * The "Device IN Token Queue" Registers are read to determine the
1110 * order the IN Tokens have been received. The non-periodic Tx FIFO
1111 * is flushed, so it can be reloaded in the order seen in the IN Token
/* EP mismatch interrupt: only acknowledges the pending bit here; the
 * FIFO-reload recovery described in the comment above is not done in this
 * handler. */
1114 int32_t dwc_otg_pcd_handle_ep_mismatch_intr(dwc_otg_core_if_t * core_if)
1116 gintsts_data_t gintsts;
1117 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if);
1119 /* Clear interrupt */
1121 gintsts.b.epmismatch = 1;
1122 dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
1128 * This funcion stalls EP0.
/* Protocol-STALL EP0 in response to an unsupported/failed control request
 * (err_val is logged for diagnosis), then return EP0 to IDLE and re-arm it
 * for the next SETUP packet. */
1130 static inline void ep0_do_stall(dwc_otg_pcd_t * pcd, const int err_val)
1132 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1133 usb_device_request_t *ctrl = &pcd->setup_pkt->req;
1134 DWC_WARN("req %02x.%02x protocol STALL; err %d\n",
1135 ctrl->bmRequestType, ctrl->bRequest, err_val);
/* Stall the IN direction of EP0. */
1137 ep0->dwc_ep.is_in = 1;
1138 dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep0->dwc_ep);
1139 pcd->ep0.stopped = 1;
1140 pcd->ep0state = EP0_IDLE;
1141 ep0_out_start(GET_CORE_IF(pcd), pcd);
1145 * This functions delegates the setup command to the gadget driver.
/* Hand a SETUP packet to the gadget driver's setup() callback. The PCD lock
 * is dropped for the callback (it may sleep / re-enter the PCD). A negative
 * return presumably stalls EP0 via ep0_do_stall — the guarding condition is
 * on a sampled-out line; confirm in full source. */
1147 static inline void do_gadget_setup(dwc_otg_pcd_t * pcd,
1148 usb_device_request_t * ctrl)
1151 DWC_SPINUNLOCK(pcd->lock);
1152 ret = pcd->fops->setup(pcd, (uint8_t *) ctrl);
1153 DWC_SPINLOCK(pcd->lock);
1155 ep0_do_stall(pcd, ret);
1158 /** @todo This is a g_file_storage gadget driver specific
1159 * workaround: a DELAYED_STATUS result from the fsg_setup
1160 * routine will result in the gadget queueing a EP0 IN status
1161 * phase for a two-stage control transfer. Exactly the same as
1162 * a SET_CONFIGURATION/SET_INTERFACE except that this is a class
1163 * specific request. Need a generic way to know when the gadget
1164 * driver will queue the status phase. Can we assume when we
1165 * call the gadget driver setup() function that it will always
1166 * queue and require the following flag? Need to look into
/* 256 + 999 is the gadget DELAYED_STATUS magic value (see todo above). */
1170 if (ret == 256 + 999) {
1171 pcd->request_config = 1;
1177 * This functions delegates the CFI setup commands to the gadget driver.
1178 * This function will return a negative value to indicate a failure.
/* Delegate a CFI (Core Feature Interface) control request to the gadget
 * driver's cfi_setup() callback, dropping the PCD lock around the call.
 * A failure presumably stalls EP0 — the guarding condition before
 * ep0_do_stall is on a sampled-out line; confirm in full source. */
1180 static inline int cfi_gadget_setup(dwc_otg_pcd_t * pcd,
1181 struct cfi_usb_ctrlrequest *ctrl_req)
1185 if (pcd->fops && pcd->fops->cfi_setup) {
1186 DWC_SPINUNLOCK(pcd->lock);
1187 ret = pcd->fops->cfi_setup(pcd, ctrl_req);
1188 DWC_SPINLOCK(pcd->lock);
1190 ep0_do_stall(pcd, ret);
1200 * This function starts the Zero-Length Packet for the IN status phase
1201 * of a 2 stage control transfer.
/* Start the zero-length IN status phase of a 2-stage control transfer.
 * No-op (early return, sampled out) when EP0 is stalled. */
1203 static inline void do_setup_in_status_phase(dwc_otg_pcd_t * pcd)
1205 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1206 if (pcd->ep0state == EP0_STALL) {
1210 pcd->ep0state = EP0_IN_STATUS_PHASE;
1212 /* Prepare for more SETUP Packets */
1213 DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n");
/* Zero-length packet: len/count 0, IN direction; DMA address points at
 * the setup buffer (no data is actually transferred). */
1214 ep0->dwc_ep.xfer_len = 0;
1215 ep0->dwc_ep.xfer_count = 0;
1216 ep0->dwc_ep.is_in = 1;
1217 ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle;
1218 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1220 /* Prepare for more SETUP Packets */
1221 //ep0_out_start(GET_CORE_IF(pcd), pcd);
1225 * This function starts the Zero-Length Packet for the OUT status phase
1226 * of a 2 stage control transfer.
1228 static inline void do_setup_out_status_phase(dwc_otg_pcd_t * pcd)
1230 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
/* Do not start a status phase while EP0 is stalled. */
1231 if (pcd->ep0state == EP0_STALL) {
1232 DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n");
1235 pcd->ep0state = EP0_OUT_STATUS_PHASE;
1237 DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n");
/* Zero-length OUT transfer for the status stage. */
1238 ep0->dwc_ep.xfer_len = 0;
1239 ep0->dwc_ep.xfer_count = 0;
1240 ep0->dwc_ep.is_in = 0;
1241 ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle;
1242 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1244 /* Prepare for more SETUP Packets */
/* In Slave mode (dma_enable == 0) EP0 OUT must be re-armed explicitly. */
1245 if (GET_CORE_IF(pcd)->dma_enable == 0) {
1246 ep0_out_start(GET_CORE_IF(pcd), pcd);
1251 * Clear the EP halt (STALL) and if pending requests start the
1254 static inline void pcd_clear_halt(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * ep)
/* Only clear the hardware stall when the stall-clear flag is not set. */
1256 if (ep->dwc_ep.stall_clear_flag == 0)
1257 dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
1259 /* Reactivate the EP */
1260 dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
1263 /* If there is a request in the EP queue start it */
1265 /** @todo FIXME: this causes an EP mismatch in DMA mode.
1266 * epmismatch not yet implemented. */
1269 * Above fixme is solved by implementing a tasklet to call the
1270 * start_next_request(), outside of interrupt context at some
1271 * time after the current time, after a clear-halt setup packet.
1272 * Still need to implement ep mismatch in the future if a gadget
1273 * ever uses more than one endpoint at once
/* Defer start_next_request() to tasklet context (see the note above). */
1276 DWC_TASK_SCHEDULE(pcd->start_xfer_tasklet);
1278 /* Start Control Status Phase */
1279 do_setup_in_status_phase(pcd);
1283 * This function is called when the SET_FEATURE TEST_MODE Setup packet
1284 * is sent from the host. The Device Control register is written with
1285 * the Test Mode bits set to the specified Test Mode. This is done as
1286 * a tasklet so that the "Status" phase of the control transfer
1287 * completes before transmitting the TEST packets.
1289 * @todo This has not been tested since the tasklet struct was put
1290 * into the PCD struct!
1293 void do_test_mode(void *data)
1296 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1297 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1298 int test_mode = pcd->test_mode;
1300 // DWC_WARN("%s() has not been tested since being rewritten!\n", __func__);
/* Read-modify-write DCTL with the selected USB test mode.
 * NOTE(review): cases 1 (TEST_J) and 2 (TEST_K) are outside this view. */
1302 dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl);
1303 switch (test_mode) {
1312 case 3: // TEST_SE0_NAK
1316 case 4: // TEST_PACKET
1320 case 5: // TEST_FORCE_ENABLE
1324 dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
1328 * This function processes the GET_STATUS Setup Commands.
1330 static inline void do_get_status(dwc_otg_pcd_t * pcd)
1332 usb_device_request_t ctrl = pcd->setup_pkt->req;
1333 dwc_otg_pcd_ep_t *ep;
1334 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1335 uint16_t *status = pcd->status_buf;
1338 DWC_DEBUGPL(DBG_PCD,
1339 "GET_STATUS %02x.%02x v%04x i%04x l%04x\n",
1340 ctrl.bmRequestType, ctrl.bRequest,
1341 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1342 UGETW(ctrl.wLength));
/* Build the 2-byte status response per recipient (USB 2.0 ch. 9.4.5). */
1345 switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
/* Device recipient: bit 0 = self-powered, bit 1 = remote-wakeup enabled. */
1347 *status = 0x1; /* Self powered */
1348 *status |= pcd->remote_wakeup_enable << 1;
/* Endpoint recipient: look up the EP addressed by wIndex. */
1356 ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
1357 if (ep == 0 || UGETW(ctrl.wLength) > 2) {
1358 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1361 /** @todo check for EP stall */
1362 *status = ep->stopped;
/* Send the 2-byte status value back to the host on EP0 IN. */
1365 pcd->ep0_pending = 1;
1366 ep0->dwc_ep.start_xfer_buff = (uint8_t *) status;
1367 ep0->dwc_ep.xfer_buff = (uint8_t *) status;
1368 ep0->dwc_ep.dma_addr = pcd->status_buf_dma_handle;
1369 ep0->dwc_ep.xfer_len = 2;
1370 ep0->dwc_ep.xfer_count = 0;
1371 ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len;
1372 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1376 * This function processes the SET_FEATURE Setup Commands.
1378 static inline void do_set_feature(dwc_otg_pcd_t * pcd)
1380 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1381 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
1382 usb_device_request_t ctrl = pcd->setup_pkt->req;
1383 dwc_otg_pcd_ep_t *ep = 0;
1384 int32_t otg_cap_param = core_if->core_params->otg_cap;
1385 gotgctl_data_t gotgctl = {.d32 = 0 };
1387 DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
1388 ctrl.bmRequestType, ctrl.bRequest,
1389 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1390 UGETW(ctrl.wLength));
1391 DWC_DEBUGPL(DBG_PCD, "otg_cap=%d\n", otg_cap_param);
1393 switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
/* --- Device-recipient features --- */
1395 switch (UGETW(ctrl.wValue)) {
1396 case UF_DEVICE_REMOTE_WAKEUP:
1397 pcd->remote_wakeup_enable = 1;
1401 /* Setup the Test Mode tasklet to do the Test
1402 * Packet generation after the SETUP Status
1403 * phase has completed. */
1405 /** @todo This has not been tested since the
1406 * tasklet struct was put into the PCD
/* Test selector is the high byte of wIndex (USB 2.0 ch. 9.4.9). */
1408 pcd->test_mode = UGETW(ctrl.wIndex) >> 8;
1409 DWC_TASK_SCHEDULE(pcd->test_mode_tasklet);
1412 case UF_DEVICE_B_HNP_ENABLE:
1413 DWC_DEBUGPL(DBG_PCDV,
1414 "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
1416 /* dev may initiate HNP */
1417 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1418 pcd->b_hnp_enable = 1;
1419 dwc_otg_pcd_update_otg(pcd, 0);
1420 DWC_DEBUGPL(DBG_PCD, "Request B HNP\n");
1421 /**@todo Is the gotgctl.devhnpen cleared
1422 * by a USB Reset? */
1423 gotgctl.b.devhnpen = 1;
1424 gotgctl.b.hnpreq = 1;
1425 dwc_write_reg32(&global_regs->gotgctl,
/* HNP features are only accepted when the core is HNP/SRP capable;
 * otherwise the request is stalled. */
1428 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1432 case UF_DEVICE_A_HNP_SUPPORT:
1433 /* RH port supports HNP */
1434 DWC_DEBUGPL(DBG_PCDV,
1435 "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n");
1436 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1437 pcd->a_hnp_support = 1;
1438 dwc_otg_pcd_update_otg(pcd, 0);
1440 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1444 case UF_DEVICE_A_ALT_HNP_SUPPORT:
1445 /* other RH port does */
1447 "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
1448 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1449 pcd->a_alt_hnp_support = 1;
1450 dwc_otg_pcd_update_otg(pcd, 0);
1452 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
/* Device-recipient SET_FEATURE has no data stage: ack with IN status. */
1456 do_setup_in_status_phase(pcd);
/* Interface recipient: delegated to the gadget driver. */
1460 do_gadget_setup(pcd, &ctrl);
/* Endpoint recipient: only ENDPOINT_HALT is handled by the PCD. */
1464 if (UGETW(ctrl.wValue) == UF_ENDPOINT_HALT) {
1465 ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
1467 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1471 dwc_otg_ep_set_stall(core_if, &ep->dwc_ep);
1473 do_setup_in_status_phase(pcd);
1479 * This function processes the CLEAR_FEATURE Setup Commands.
1481 static inline void do_clear_feature(dwc_otg_pcd_t * pcd)
1483 usb_device_request_t ctrl = pcd->setup_pkt->req;
1484 dwc_otg_pcd_ep_t *ep = 0;
1486 DWC_DEBUGPL(DBG_PCD,
1487 "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
1488 ctrl.bmRequestType, ctrl.bRequest,
1489 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1490 UGETW(ctrl.wLength));
1492 switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
/* Device recipient: only REMOTE_WAKEUP is handled here. */
1494 switch (UGETW(ctrl.wValue)) {
1495 case UF_DEVICE_REMOTE_WAKEUP:
1496 pcd->remote_wakeup_enable = 0;
1500 /** @todo Add CLEAR_FEATURE for TEST modes. */
/* No data stage: complete the transfer with an IN status phase. */
1503 do_setup_in_status_phase(pcd);
/* Endpoint recipient: clear the halt on the EP addressed by wIndex. */
1507 ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
1509 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1513 pcd_clear_halt(pcd, ep);
1520 * This function processes the SET_ADDRESS Setup Commands.
1522 static inline void do_set_address(dwc_otg_pcd_t * pcd)
1524 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
1525 usb_device_request_t ctrl = pcd->setup_pkt->req;
/* SET_ADDRESS is only valid with a device recipient. */
1527 if (ctrl.bmRequestType == UT_DEVICE) {
1528 dcfg_data_t dcfg = {.d32 = 0 };
1531 // DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue);
/* Program the new device address into DCFG, then ack with an IN status. */
1533 dcfg.b.devaddr = UGETW(ctrl.wValue);
1534 dwc_modify_reg32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32);
1535 do_setup_in_status_phase(pcd);
1540 * This function processes SETUP commands. In Linux, the USB Command
1541 * processing is done in two places - the first being the PCD and the
1542 * second in the Gadget Driver (for example, the File-Backed Storage
1546 * <tr><td>Command </td><td>Driver </td><td>Description</td></tr>
1548 * <tr><td>GET_STATUS </td><td>PCD </td><td>Command is processed as
1549 * defined in chapter 9 of the USB 2.0 Specification chapter 9
1552 * <tr><td>CLEAR_FEATURE </td><td>PCD </td><td>The Device and Endpoint
1553 * requests are the ENDPOINT_HALT feature is processed, all others the
1554 * interface requests are ignored.</td></tr>
1556 * <tr><td>SET_FEATURE </td><td>PCD </td><td>The Device and Endpoint
1557 * requests are processed by the PCD. Interface requests are passed
1558 * to the Gadget Driver.</td></tr>
1560 * <tr><td>SET_ADDRESS </td><td>PCD </td><td>Program the DCFG reg,
1561 * with device address received </td></tr>
1563 * <tr><td>GET_DESCRIPTOR </td><td>Gadget Driver </td><td>Return the
1564 * requested descriptor</td></tr>
1566 * <tr><td>SET_DESCRIPTOR </td><td>Gadget Driver </td><td>Optional -
1567 * not implemented by any of the existing Gadget Drivers.</td></tr>
1569 * <tr><td>SET_CONFIGURATION </td><td>Gadget Driver </td><td>Disable
1570 * all EPs and enable EPs for new configuration.</td></tr>
1572 * <tr><td>GET_CONFIGURATION </td><td>Gadget Driver </td><td>Return
1573 * the current configuration</td></tr>
1575 * <tr><td>SET_INTERFACE </td><td>Gadget Driver </td><td>Disable all
1576 * EPs and enable EPs for new configuration.</td></tr>
1578 * <tr><td>GET_INTERFACE </td><td>Gadget Driver </td><td>Return the
1579 * current interface.</td></tr>
1581 * <tr><td>SYNC_FRAME </td><td>PCD </td><td>Display debug
1582 * message.</td></tr>
1585 * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are
1586 * processed by pcd_setup. Calling the Function Driver's setup function from
1587 * pcd_setup processes the gadget SETUP commands.
1589 static inline void pcd_setup(dwc_otg_pcd_t * pcd)
1591 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1592 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1593 usb_device_request_t ctrl = pcd->setup_pkt->req;
1594 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1596 deptsiz0_data_t doeptsize0 = {.d32 = 0 };
1600 struct cfi_usb_ctrlrequest cfi_req;
1604 DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1605 ctrl.bmRequestType, ctrl.bRequest,
1606 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1607 UGETW(ctrl.wLength));
1610 doeptsize0.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doeptsiz)
1612 /** @todo handle > 1 setup packet , assert error for now */
/* Buffer-DMA mode can only hold one SETUP packet (supcnt check below). */
1614 if (core_if->dma_enable && core_if->dma_desc_enable == 0
1615 && (doeptsize0.b.supcnt < 2)) {
1617 ("\n\n----------- CANNOT handle > 1 setup packet in DMA mode\n\n");
1620 /* Clean up the request queue */
1621 dwc_otg_request_nuke(ep0);
/* Set the transfer direction / EP0 state from bmRequestType. */
1624 if (ctrl.bmRequestType & UE_DIR_IN) {
1625 ep0->dwc_ep.is_in = 1;
1626 pcd->ep0state = EP0_IN_DATA_PHASE;
1628 ep0->dwc_ep.is_in = 0;
1629 pcd->ep0state = EP0_OUT_DATA_PHASE;
/* wLength == 0: no data stage; go straight to the IN status phase. */
1632 if (UGETW(ctrl.wLength) == 0) {
1633 ep0->dwc_ep.is_in = 1;
1634 pcd->ep0state = EP0_IN_STATUS_PHASE;
/* Non-standard (class/vendor) requests: try CFI first, else the gadget. */
1637 if (UT_GET_TYPE(ctrl.bmRequestType) != UT_STANDARD) {
1640 DWC_MEMCPY(&cfi_req, &ctrl, sizeof(usb_device_request_t));
1642 //printk(KERN_ALERT "CFI: req_type=0x%02x; req=0x%02x\n", ctrl.bRequestType, ctrl.bRequest);
/* CFI claims vendor requests with bRequest in (0xB0, 0xBF). */
1643 if (UT_GET_TYPE(cfi_req.bRequestType) == UT_VENDOR) {
1644 if (cfi_req.bRequest > 0xB0 && cfi_req.bRequest < 0xBF) {
1645 retval = cfi_setup(pcd, &cfi_req);
1647 ep0_do_stall(pcd, retval);
1648 pcd->ep0_pending = 0;
1652 /* if need gadget setup then call it and check the retval */
1653 if (pcd->cfi->need_gadget_att) {
1655 cfi_gadget_setup(pcd,
1659 pcd->ep0_pending = 0;
1664 if (pcd->cfi->need_status_in_complete) {
1665 do_setup_in_status_phase(pcd);
1672 /* handle non-standard (class/vendor) requests in the gadget driver */
1673 do_gadget_setup(pcd, &ctrl);
1677 /** @todo NGS: Handle bad setup packet? */
1679 ///////////////////////////////////////////
1680 //// --- Standard Request handling --- ////
1682 switch (ctrl.bRequest) {
1687 case UR_CLEAR_FEATURE:
1688 do_clear_feature(pcd);
1691 case UR_SET_FEATURE:
1692 do_set_feature(pcd);
1695 case UR_SET_ADDRESS:
1696 do_set_address(pcd);
1699 case UR_SET_INTERFACE:
1701 // _pcd->request_config = 1; /* Configuration changed */
1702 do_gadget_setup(pcd, &ctrl);
1705 case UR_SYNCH_FRAME:
1706 do_gadget_setup(pcd, &ctrl);
1710 /* Call the Gadget Driver's setup functions */
1711 do_gadget_setup(pcd, &ctrl);
1717 * This function completes the ep0 control transfer.
1719 static int32_t ep0_complete_request(dwc_otg_pcd_ep_t * ep)
1721 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
1722 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1723 dwc_otg_dev_in_ep_regs_t *in_ep_regs =
1724 dev_if->in_ep_regs[ep->dwc_ep.num];
1726 dwc_otg_dev_out_ep_regs_t *out_ep_regs =
1727 dev_if->out_ep_regs[ep->dwc_ep.num];
1729 deptsiz0_data_t deptsiz;
1730 desc_sts_data_t desc_sts;
1731 dwc_otg_pcd_request_t *req;
1733 dwc_otg_pcd_t *pcd = ep->pcd;
1736 struct cfi_usb_ctrlrequest *ctrlreq;
1737 int retval = -DWC_E_NOT_SUPPORTED;
/* Data stage done with no queued request: move to the opposite-direction
 * status phase. */
1740 if (pcd->ep0_pending && DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1741 if (ep->dwc_ep.is_in) {
1743 DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n");
1745 do_setup_out_status_phase(pcd);
1748 DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n");
/* CFI path: complete a vendor control-write (bRequest in (0xB0, 0xBF)). */
1752 ctrlreq = &pcd->cfi->ctrl_req;
1754 if (UT_GET_TYPE(ctrlreq->bRequestType) == UT_VENDOR) {
1755 if (ctrlreq->bRequest > 0xB0
1756 && ctrlreq->bRequest < 0xBF) {
1758 /* Return if the PCD failed to handle the request */
1761 ctrl_write_complete(pcd->cfi,
1764 ("ERROR setting a new value in the PCD(%d)\n",
1766 ep0_do_stall(pcd, retval);
1767 pcd->ep0_pending = 0;
1771 /* If the gadget needs to be notified on the request */
1772 if (pcd->cfi->need_gadget_att == 1) {
1773 //retval = do_gadget_setup(pcd, &pcd->cfi->ctrl_req);
1775 cfi_gadget_setup(pcd,
1779 /* Return from the function if the gadget failed to process
1780 * the request properly - this should never happen !!!
1784 ("ERROR setting a new value in the gadget(%d)\n",
1786 pcd->ep0_pending = 0;
1791 CFI_INFO("%s: RETVAL=%d\n", __func__,
1793 /* If we hit here then the PCD and the gadget have properly
1794 * handled the request - so send the ZLP IN to the host.
1796 /* @todo: MAS - decide whether we need to start the setup
1797 * stage based on the need_setup value of the cfi object
1799 do_setup_in_status_phase(pcd);
1800 pcd->ep0_pending = 0;
1806 do_setup_in_status_phase(pcd);
1808 pcd->ep0_pending = 0;
/* Nothing to complete if the queue is empty at this point. */
1812 if (DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1815 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1817 if (pcd->ep0state == EP0_OUT_STATUS_PHASE
1818 || pcd->ep0state == EP0_IN_STATUS_PHASE) {
/* IN data stage: completion is judged from DIEPTSIZ (Buffer DMA) or the
 * descriptor status (Descriptor DMA). */
1820 } else if (ep->dwc_ep.is_in) {
1821 deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz);
1822 if (core_if->dma_desc_enable != 0)
1823 desc_sts = dev_if->in_desc_addr->status;
1825 DWC_DEBUGPL(DBG_PCDV, "%d len=%d xfersize=%d pktcnt=%d\n",
1826 ep->dwc_ep.num, ep->dwc_ep.xfer_len,
1827 deptsiz.b.xfersize, deptsiz.b.pktcnt);
1830 if (((core_if->dma_desc_enable == 0)
1831 && (deptsiz.b.xfersize == 0))
1832 || ((core_if->dma_desc_enable != 0)
1833 && (desc_sts.b.bytes == 0))) {
1834 req->actual = ep->dwc_ep.xfer_count;
1835 /* Is a Zero Len Packet needed? */
1836 if (req->sent_zlp) {
1838 DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n");
1842 do_setup_out_status_phase(pcd);
/* OUT data stage: read DOEPTSIZ for the transfer status. */
1847 deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz);
1848 DWC_DEBUGPL(DBG_PCDV, "%d len=%d xsize=%d pktcnt=%d\n",
1849 ep->dwc_ep.num, ep->dwc_ep.xfer_len,
1850 deptsiz.b.xfersize, deptsiz.b.pktcnt);
1852 req->actual = ep->dwc_ep.xfer_count;
1854 /* Is a Zero Len Packet needed? */
1855 if (req->sent_zlp) {
1857 DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n");
1861 if (core_if->dma_desc_enable == 0)
1862 do_setup_in_status_phase(pcd);
1865 /* Complete the request */
1867 dwc_otg_request_done(ep, req, 0);
1868 ep->dwc_ep.start_xfer_buff = 0;
1869 ep->dwc_ep.xfer_buff = 0;
1870 ep->dwc_ep.xfer_len = 0;
1878 * This function traverses all the CFI DMA descriptors and
1879 * accumulates the bytes that are left to be transferred.
1881 * @return The total bytes left to be transferred, or a negative value on failure
1883 static inline int cfi_calc_desc_residue(dwc_otg_pcd_ep_t * ep)
1887 struct dwc_otg_dma_desc *ddesc = NULL;
1888 struct cfi_ep *cfiep;
1890 /* See if the pcd_ep has its respective cfi_ep mapped */
1891 cfiep = get_cfi_ep_by_pcd_ep(ep->pcd->cfi, ep);
1893 CFI_INFO("%s: Failed to find ep\n", __func__);
1897 ddesc = ep->dwc_ep.descs;
/* Sum the residual byte counts over this EP's descriptor chain, bounded
 * by MAX_DMA_DESCS_PER_EP. */
1899 for (i = 0; (i < cfiep->desc_count) && (i < MAX_DMA_DESCS_PER_EP); i++) {
1901 #if defined(PRINT_CFI_DMA_DESCS)
1902 print_desc(ddesc, ep->ep.name, i);
1904 ret += ddesc->status.b.bytes;
1909 CFI_INFO("!!!!!!!!!! WARNING (%s) - residue=%d\n", __func__,
1917 * This function completes the request for the EP. If there are
1918 * additional requests for the EP in the queue they will be started.
1920 static void complete_ep(dwc_otg_pcd_ep_t * ep)
1922 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
1923 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1924 dwc_otg_dev_in_ep_regs_t *in_ep_regs =
1925 dev_if->in_ep_regs[ep->dwc_ep.num];
1926 deptsiz_data_t deptsiz;
1927 desc_sts_data_t desc_sts;
1928 dwc_otg_pcd_request_t *req = 0;
1929 dwc_otg_dma_desc_t *dma_desc;
1930 uint32_t byte_count = 0;
1934 DWC_DEBUGPL(DBG_PCDV, "%s() %d-%s\n", __func__, ep->dwc_ep.num,
1935 (ep->dwc_ep.is_in ? "IN" : "OUT"));
1937 /* Get any pending requests */
1938 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1939 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1941 DWC_DEBUGPL(DBG_CIL,"complete_ep 0x%p, req = NULL!\n", ep);
1945 DWC_DEBUGPL(DBG_CIL,"complete_ep 0x%p, ep->queue empty!\n", ep);
1949 DWC_DEBUGPL(DBG_PCD, "Requests %d\n", ep->pcd->request_pending);
/* ---------------- IN endpoint completion path ---------------- */
1951 if (ep->dwc_ep.is_in) {
1952 deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz);
1954 if (core_if->dma_enable) {
/* Buffer DMA: transfer is complete when both xfersize and pktcnt
 * in DIEPTSIZ have drained to zero. */
1955 if (core_if->dma_desc_enable == 0) {
1956 if (deptsiz.b.xfersize == 0
1957 && deptsiz.b.pktcnt == 0) {
1959 ep->dwc_ep.xfer_len -
1960 ep->dwc_ep.xfer_count;
1962 ep->dwc_ep.xfer_buff += byte_count;
1963 ep->dwc_ep.dma_addr += byte_count;
1964 ep->dwc_ep.xfer_count += byte_count;
1966 DWC_DEBUGPL(DBG_PCDV,
1967 "%d-%s len=%d xfersize=%d pktcnt=%d\n",
1970 is_in ? "IN" : "OUT"),
1971 ep->dwc_ep.xfer_len,
1975 if (ep->dwc_ep.xfer_len <
1976 ep->dwc_ep.total_len) {
1977 dwc_otg_ep_start_transfer
1978 (core_if, &ep->dwc_ep);
1979 } else if (ep->dwc_ep.sent_zlp) {
1981 * This fragment of code should initiate 0
1982 * length transfer in case if it is queued
1983 * a transfer with size divisible to EPs max
1984 * packet size and with usb_request zero field
1985 * is set, which means that after data is transferred,
1986 * a 0 length packet should also be transferred
1987 * at the end. For Slave and
1988 * Buffer DMA modes in this case SW has
1989 * to initiate 2 transfers one with transfer size,
1990 * and the second with 0 size. For Descriptor
1991 * DMA mode SW is able to initiate a transfer,
1992 * which will handle all the packets including
1995 ep->dwc_ep.sent_zlp = 0;
1996 dwc_otg_ep_start_zl_transfer
1997 (core_if, &ep->dwc_ep);
2003 ("Incomplete transfer (%d - %s [siz=%d pkt=%d])\n",
2005 (ep->dwc_ep.is_in ? "IN" : "OUT"),
/* Descriptor DMA: walk the descriptor chain (or ask CFI for the
 * residue in non-standard buffer modes). */
2010 dma_desc = ep->dwc_ep.desc_addr;
2012 ep->dwc_ep.sent_zlp = 0;
2015 CFI_INFO("%s: BUFFER_MODE=%d\n", __func__,
2016 ep->dwc_ep.buff_mode);
2017 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2020 residue = cfi_calc_desc_residue(ep);
2024 byte_count = residue;
2027 for (i = 0; i < ep->dwc_ep.desc_cnt;
2029 desc_sts = dma_desc->status;
2030 byte_count += desc_sts.b.bytes;
2036 if (byte_count == 0) {
2037 ep->dwc_ep.xfer_count =
2038 ep->dwc_ep.total_len;
2041 DWC_WARN("Incomplete transfer\n");
/* Slave mode IN completion. */
2045 if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) {
2046 DWC_DEBUGPL(DBG_PCDV,
2047 "%d-%s len=%d xfersize=%d pktcnt=%d\n",
2049 ep->dwc_ep.is_in ? "IN" : "OUT",
2050 ep->dwc_ep.xfer_len,
2054 /* Check if the whole transfer was completed,
2055 * if no, setup transfer for next portion of data
2057 if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
2058 dwc_otg_ep_start_transfer(core_if,
2060 } else if (ep->dwc_ep.sent_zlp) {
2062 * This fragment of code should initiate 0
2063 * length transfer in case if it is queued
2064 * a transfer with size divisible to EPs max
2065 * packet size and with usb_request zero field
2066 * is set, which means that after data is transferred,
2067 * a 0 length packet should also be transferred
2068 * at the end. For Slave and
2069 * Buffer DMA modes in this case SW has
2070 * to initiate 2 transfers one with transfer size,
2071 * and the second with 0 size. For Descriptor
2072 * DMA mode SW is able to initiate a transfer,
2073 * which will handle all the packets including
2076 ep->dwc_ep.sent_zlp = 0;
2077 dwc_otg_ep_start_zl_transfer(core_if,
2085 ("Incomplete transfer (%d-%s [siz=%d pkt=%d])\n",
2087 (ep->dwc_ep.is_in ? "IN" : "OUT"),
2088 deptsiz.b.xfersize, deptsiz.b.pktcnt);
/* ---------------- OUT endpoint completion path ---------------- */
2092 dwc_otg_dev_out_ep_regs_t *out_ep_regs =
2093 dev_if->out_ep_regs[ep->dwc_ep.num];
2095 if (core_if->dma_enable) {
2096 if (core_if->dma_desc_enable) {
2097 dma_desc = ep->dwc_ep.desc_addr;
2099 ep->dwc_ep.sent_zlp = 0;
2102 CFI_INFO("%s: BUFFER_MODE=%d\n", __func__,
2103 ep->dwc_ep.buff_mode);
2104 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2106 residue = cfi_calc_desc_residue(ep);
2109 byte_count = residue;
2113 for (i = 0; i < ep->dwc_ep.desc_cnt;
2115 desc_sts = dma_desc->status;
2116 byte_count += desc_sts.b.bytes;
/* OUT lengths are rounded up to a 4-byte (DWORD) boundary. */
2123 ep->dwc_ep.xfer_count = ep->dwc_ep.total_len
2125 ((4 - (ep->dwc_ep.total_len & 0x3)) & 0x3);
2130 dwc_read_reg32(&out_ep_regs->doeptsiz);
2132 byte_count = (ep->dwc_ep.xfer_len -
2133 ep->dwc_ep.xfer_count -
2134 deptsiz.b.xfersize);
2135 ep->dwc_ep.xfer_buff += byte_count;
2136 ep->dwc_ep.dma_addr += byte_count;
2137 ep->dwc_ep.xfer_count += byte_count;
2139 /* Check if the whole transfer was completed,
2140 * if no, setup transfer for next portion of data
2142 if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
2143 dwc_otg_ep_start_transfer(core_if,
2145 } else if (ep->dwc_ep.sent_zlp) {
2147 * This fragment of code should initiate 0
2148 * length transfer in case if it is queued
2149 * a transfer with size divisible to EPs max
2150 * packet size and with usb_request zero field
2151 * is set, which means that after data is transferred,
2152 * a 0 length packet should also be transferred
2153 * at the end. For Slave and
2154 * Buffer DMA modes in this case SW has
2155 * to initiate 2 transfers one with transfer size,
2156 * and the second with 0 size. For Descriptor
2157 * DMA mode SW is able to initiate a transfer,
2158 * which will handle all the packets including
2161 ep->dwc_ep.sent_zlp = 0;
2162 dwc_otg_ep_start_zl_transfer(core_if,
2170 /* Check if the whole transfer was completed,
2171 * if no, setup transfer for next portion of data
2173 if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
2174 dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
2175 } else if (ep->dwc_ep.sent_zlp) {
2177 * This fragment of code should initiate 0
2178 * length transfer in case if it is queued
2179 * a transfer with size divisible to EPs max
2180 * packet size and with usb_request zero field
2181 * is set, which means that after data is transferred,
2182 * a 0 length packet should also be transferred
2183 * at the end. For Slave and
2184 * Buffer DMA modes in this case SW has
2185 * to initiate 2 transfers one with transfer size,
2186 * and the second with 0 size. For Descriptor
2187 * DMA mode SW is able to initiate a transfer,
2188 * which will handle all the packets including
2191 ep->dwc_ep.sent_zlp = 0;
2192 dwc_otg_ep_start_zl_transfer(core_if,
2199 DWC_DEBUGPL(DBG_PCDV,
2200 "addr %p, %d-%s len=%d cnt=%d xsize=%d pktcnt=%d\n",
2201 &out_ep_regs->doeptsiz, ep->dwc_ep.num,
2202 ep->dwc_ep.is_in ? "IN" : "OUT",
2203 ep->dwc_ep.xfer_len, ep->dwc_ep.xfer_count,
2204 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2207 /* Complete the request */
/* CFI buffer modes report actual length as request length minus residue. */
2210 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2211 req->actual = ep->dwc_ep.cfi_req_len - byte_count;
2214 req->actual = ep->dwc_ep.xfer_count;
2219 dwc_otg_request_done(ep, req, 0);
2221 ep->dwc_ep.start_xfer_buff = 0;
2222 ep->dwc_ep.xfer_buff = 0;
2223 ep->dwc_ep.xfer_len = 0;
2225 /* If there is a request in the queue start it. */
2226 start_next_request(ep);
2233 * This function handles the BNA interrupt for Isochronous EPs
2236 static void dwc_otg_pcd_handle_iso_bna(dwc_otg_pcd_ep_t * ep)
2238 dwc_ep_t *dwc_ep = &ep->dwc_ep;
2239 volatile uint32_t *addr;
2240 depctl_data_t depctl = {.d32 = 0 };
2241 dwc_otg_pcd_t *pcd = ep->pcd;
2242 dwc_otg_dma_desc_t *dma_desc;
/* Point at the descriptor chain of the currently processed buffer. */
2246 dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * (dwc_ep->proc_buf_num);
/* Re-mark every descriptor of the chain as HOST_READY so the core can
 * retry the transfer after the Buffer-Not-Available condition. */
2248 if (dwc_ep->is_in) {
2249 desc_sts_data_t sts = {.d32 = 0 };
2250 for (i = 0; i < dwc_ep->desc_cnt; ++i, ++dma_desc) {
2251 sts.d32 = dma_desc->status.d32;
2252 sts.b_iso_in.bs = BS_HOST_READY;
2253 dma_desc->status.d32 = sts.d32;
2256 desc_sts_data_t sts = {.d32 = 0 };
2257 for (i = 0; i < dwc_ep->desc_cnt; ++i, ++dma_desc) {
2258 sts.d32 = dma_desc->status.d32;
2259 sts.b_iso_out.bs = BS_HOST_READY;
2260 dma_desc->status.d32 = sts.d32;
/* Select DOEPCTL or DIEPCTL for the affected endpoint and update it. */
2264 if (dwc_ep->is_in == 0) {
2266 &GET_CORE_IF(pcd)->dev_if->out_ep_regs[dwc_ep->num]->
2270 &GET_CORE_IF(pcd)->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
2273 dwc_modify_reg32(addr, depctl.d32, depctl.d32);
2277 * This function sets latest iso packet information(non-PTI mode)
2279 * @param core_if Programming view of DWC_otg controller.
2280 * @param ep The EP to start the transfer on.
2283 void set_current_pkt_info(dwc_otg_core_if_t * core_if, dwc_ep_t * ep)
2285 deptsiz_data_t deptsiz = {.d32 = 0 };
2286 dma_addr_t dma_addr;
/* Pick the base DMA address of the buffer currently being processed. */
2289 if (ep->proc_buf_num)
2290 dma_addr = ep->dma_addr1;
2292 dma_addr = ep->dma_addr0;
/* IN: read DIEPTSIZ; packet stride is exactly data_per_frame. */
2296 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->
2298 offset = ep->data_per_frame;
/* OUT: read DOEPTSIZ; stride is data_per_frame rounded up to 4 bytes. */
2301 dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->
2304 ep->data_per_frame +
2305 (0x4 & (0x4 - (ep->data_per_frame & 0x3)));
/* xfersize == 0 means the packet completed; otherwise flag NO_DATA. */
2308 if (!deptsiz.b.xfersize) {
2309 ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
2310 ep->pkt_info[ep->cur_pkt].offset =
2311 ep->cur_pkt_dma_addr - dma_addr;
2312 ep->pkt_info[ep->cur_pkt].status = 0;
2314 ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
2315 ep->pkt_info[ep->cur_pkt].offset =
2316 ep->cur_pkt_dma_addr - dma_addr;
2317 ep->pkt_info[ep->cur_pkt].status = -DWC_E_NO_DATA;
/* Advance to the next packet slot in the buffer. */
2319 ep->cur_pkt_addr += offset;
2320 ep->cur_pkt_dma_addr += offset;
2325 * This function sets latest iso packet information(DDMA mode)
2327 * @param core_if Programming view of DWC_otg controller.
2328 * @param dwc_ep The EP to start the transfer on.
2331 static void set_ddma_iso_pkts_info(dwc_otg_core_if_t * core_if,
2334 dwc_otg_dma_desc_t *dma_desc;
2335 desc_sts_data_t sts = {.d32 = 0 };
2336 iso_pkt_info_t *iso_packet;
2337 uint32_t data_per_desc;
2341 iso_packet = dwc_ep->pkt_info;
2343 /** Reinit closed DMA Descriptors*/
/* ---- OUT EP: harvest status/length from each completed descriptor ---- */
2345 if (dwc_ep->is_in == 0) {
2347 dwc_ep->iso_desc_addr +
2348 dwc_ep->desc_cnt * dwc_ep->proc_buf_num;
/* All full frames except the last one. */
2351 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
2352 i += dwc_ep->pkt_per_frm) {
2353 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
2355 ((j + 1) * dwc_ep->maxpacket >
2356 dwc_ep->data_per_frame) ? dwc_ep->
2358 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2360 (data_per_desc % 4) ? (4 -
2364 sts.d32 = dma_desc->status.d32;
2366 /* Write status in iso_packet_descriptor */
2367 iso_packet->status =
2368 sts.b_iso_out.rxsts +
2369 (sts.b_iso_out.bs ^ BS_DMA_DONE);
2370 if (iso_packet->status) {
2371 iso_packet->status = -DWC_E_NO_DATA;
2374 /* Received data length */
2375 if (!sts.b_iso_out.rxbytes) {
2376 iso_packet->length =
2378 sts.b_iso_out.rxbytes;
2380 iso_packet->length =
2382 sts.b_iso_out.rxbytes + (4 -
2388 iso_packet->offset = offset;
2390 offset += data_per_desc;
/* Last frame: all of its packets except the final descriptor. */
2396 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
2398 ((j + 1) * dwc_ep->maxpacket >
2399 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
2400 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2402 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
2404 sts.d32 = dma_desc->status.d32;
2406 /* Write status in iso_packet_descriptor */
2407 iso_packet->status =
2408 sts.b_iso_out.rxsts +
2409 (sts.b_iso_out.bs ^ BS_DMA_DONE);
2410 if (iso_packet->status) {
2411 iso_packet->status = -DWC_E_NO_DATA;
2414 /* Received data length */
2415 iso_packet->length =
2416 dwc_ep->data_per_frame - sts.b_iso_out.rxbytes;
2418 iso_packet->offset = offset;
2420 offset += data_per_desc;
/* Final descriptor of the chain. */
2425 sts.d32 = dma_desc->status.d32;
2427 /* Write status in iso_packet_descriptor */
2428 iso_packet->status =
2429 sts.b_iso_out.rxsts + (sts.b_iso_out.bs ^ BS_DMA_DONE);
2430 if (iso_packet->status) {
2431 iso_packet->status = -DWC_E_NO_DATA;
2433 /* Received data length */
2434 if (!sts.b_iso_out.rxbytes) {
2435 iso_packet->length =
2436 dwc_ep->data_per_frame - sts.b_iso_out.rxbytes;
2438 iso_packet->length =
2439 dwc_ep->data_per_frame - sts.b_iso_out.rxbytes +
2440 (4 - dwc_ep->data_per_frame % 4);
2443 iso_packet->offset = offset;
/* ---- IN EP: harvest txsts/txbytes from each descriptor ---- */
2448 dwc_ep->iso_desc_addr +
2449 dwc_ep->desc_cnt * dwc_ep->proc_buf_num;
2451 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
2452 sts.d32 = dma_desc->status.d32;
2454 /* Write status in iso packet descriptor */
2455 iso_packet->status =
2456 sts.b_iso_in.txsts +
2457 (sts.b_iso_in.bs ^ BS_DMA_DONE);
2458 if (iso_packet->status != 0) {
2459 iso_packet->status = -DWC_E_NO_DATA;
2462 /* Bytes that have been transferred */
2463 iso_packet->length =
2464 dwc_ep->data_per_frame - sts.b_iso_in.txbytes;
/* Last descriptor: spin until the DMA releases it (busy-wait). */
2470 sts.d32 = dma_desc->status.d32;
2471 while (sts.b_iso_in.bs == BS_DMA_BUSY) {
2472 sts.d32 = dma_desc->status.d32;
2475 /* Write status in iso packet descriptor ??? to be done with ERROR codes */
2476 iso_packet->status =
2477 sts.b_iso_in.txsts + (sts.b_iso_in.bs ^ BS_DMA_DONE);
2478 if (iso_packet->status != 0) {
2479 iso_packet->status = -DWC_E_NO_DATA;
2482 /* Bytes that have been transferred */
2483 iso_packet->length =
2484 dwc_ep->data_per_frame - sts.b_iso_in.txbytes;
2489 * This function reinitializes DMA Descriptors for Isochronous transfer
2491 * @param core_if Programming view of DWC_otg controller.
2492 * @param dwc_ep The EP to start the transfer on.
2495 static void reinit_ddma_iso_xfer(dwc_otg_core_if_t * core_if, dwc_ep_t * dwc_ep)
2498 dwc_otg_dma_desc_t *dma_desc;
2500 volatile uint32_t *addr;
2501 desc_sts_data_t sts = {.d32 = 0 };
2502 uint32_t data_per_desc;
/* Select the EP control register for the transfer direction. */
2504 if (dwc_ep->is_in == 0) {
2505 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
2507 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
/* Double-buffering: pick the DMA base of the buffer to rebuild. */
2510 if (dwc_ep->proc_buf_num == 0) {
2511 /** Buffer 0 descriptors setup */
2512 dma_ad = dwc_ep->dma_addr0;
2514 /** Buffer 1 descriptors setup */
2515 dma_ad = dwc_ep->dma_addr1;
2518 /** Reinit closed DMA Descriptors*/
/* ---- OUT EP: rebuild the descriptor chain as HOST_READY ---- */
2520 if (dwc_ep->is_in == 0) {
2522 dwc_ep->iso_desc_addr +
2523 dwc_ep->desc_cnt * dwc_ep->proc_buf_num;
2525 sts.b_iso_out.bs = BS_HOST_READY;
2526 sts.b_iso_out.rxsts = 0;
2527 sts.b_iso_out.l = 0;
2528 sts.b_iso_out.sp = 0;
2529 sts.b_iso_out.ioc = 0;
2530 sts.b_iso_out.pid = 0;
2531 sts.b_iso_out.framenum = 0;
/* All full frames except the last one. */
2533 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
2534 i += dwc_ep->pkt_per_frm) {
2535 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
2537 ((j + 1) * dwc_ep->maxpacket >
2538 dwc_ep->data_per_frame) ? dwc_ep->
2540 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2542 (data_per_desc % 4) ? (4 -
2545 sts.b_iso_out.rxbytes = data_per_desc;
2546 dma_desc->buf = dma_ad;
2547 dma_desc->status.d32 = sts.d32;
2549 dma_ad += data_per_desc;
/* Last frame: all packets except the final descriptor. */
2554 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
2557 ((j + 1) * dwc_ep->maxpacket >
2558 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
2559 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2561 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
2562 sts.b_iso_out.rxbytes = data_per_desc;
2564 dma_desc->buf = dma_ad;
2565 dma_desc->status.d32 = sts.d32;
2568 dma_ad += data_per_desc;
/* Final descriptor: set IOC and mark it last for this buffer. */
2571 sts.b_iso_out.ioc = 1;
2572 sts.b_iso_out.l = dwc_ep->proc_buf_num;
2575 ((j + 1) * dwc_ep->maxpacket >
2576 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
2577 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2579 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
2580 sts.b_iso_out.rxbytes = data_per_desc;
2582 dma_desc->buf = dma_ad;
2583 dma_desc->status.d32 = sts.d32;
/* ---- IN EP: rebuild the descriptor chain as HOST_READY ---- */
2588 dwc_ep->iso_desc_addr +
2589 dwc_ep->desc_cnt * dwc_ep->proc_buf_num;
2591 sts.b_iso_in.bs = BS_HOST_READY;
2592 sts.b_iso_in.txsts = 0;
2593 sts.b_iso_in.sp = 0;
2594 sts.b_iso_in.ioc = 0;
2595 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
2596 sts.b_iso_in.framenum = dwc_ep->next_frame;
2597 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
/* One descriptor per frame, stepping the frame number by bInterval. */
2600 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
2601 dma_desc->buf = dma_ad;
2602 dma_desc->status.d32 = sts.d32;
2604 sts.b_iso_in.framenum += dwc_ep->bInterval;
2605 dma_ad += dwc_ep->data_per_frame;
/* Final descriptor: IOC set and marked last for this buffer. */
2609 sts.b_iso_in.ioc = 1;
2610 sts.b_iso_in.l = dwc_ep->proc_buf_num;
2612 dma_desc->buf = dma_ad;
2613 dma_desc->status.d32 = sts.d32;
2615 dwc_ep->next_frame =
2616 sts.b_iso_in.framenum + dwc_ep->bInterval * 1;
/* Toggle the double-buffer index for the next pass. */
2618 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
2622 * This function is to handle Iso EP transfer complete interrupt
2623 * in case Iso out packet was dropped
2625 * @param core_if Programming view of DWC_otg controller.
2626 * @param dwc_ep The EP for which transfer complete was asserted
2629 static uint32_t handle_iso_out_pkt_dropped(dwc_otg_core_if_t * core_if,
/* Recovers an isochronous OUT endpoint after the core dropped packet(s):
 * marks the dropped packets' status, rewrites DOEPTSIZ with the remaining
 * transfer size, rewinds DOEPDMA, and re-enables the endpoint. */
2634 uint32_t drp_pkt_cnt;
2635 deptsiz_data_t deptsiz = {.d32 = 0 };
2636 depctl_data_t depctl = {.d32 = 0 };
2640 dwc_read_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->
/* Index of the first dropped packet = packets already consumed. */
2643 drp_pkt = dwc_ep->pkt_cnt - deptsiz.b.pktcnt;
/* Number of packets dropped: the remainder of the current frame. */
2644 drp_pkt_cnt = dwc_ep->pkt_per_frm - (drp_pkt % dwc_ep->pkt_per_frm);
2646 /* Setting dropped packets status */
2647 for (i = 0; i < drp_pkt_cnt; ++i) {
2648 dwc_ep->pkt_info[drp_pkt].status = -DWC_E_NO_DATA;
/* Recompute the remaining transfer size from the untouched packet count. */
2653 if (deptsiz.b.pktcnt > 0) {
2654 deptsiz.b.xfersize =
2655 dwc_ep->xfer_len - (dwc_ep->pkt_cnt -
2656 deptsiz.b.pktcnt) * dwc_ep->maxpacket;
2658 deptsiz.b.xfersize = 0;
2659 deptsiz.b.pktcnt = 0;
2662 dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz,
/* If anything remains, point DOEPDMA at the resume position within the
 * buffer currently being processed (double-buffered: addr0/addr1). */
2665 if (deptsiz.b.pktcnt > 0) {
2666 if (dwc_ep->proc_buf_num) {
2668 dwc_ep->dma_addr1 + dwc_ep->xfer_len -
2672 dwc_ep->dma_addr0 + dwc_ep->xfer_len -
2673 deptsiz.b.xfersize;;
2676 dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->
2679 /** Re-enable endpoint, clear nak */
2684 dwc_modify_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->
2685 doepctl, depctl.d32, depctl.d32);
2693 * This function sets iso packets information(PTI mode)
2695 * @param core_if Programming view of DWC_otg controller.
2696 * @param ep The EP to start the transfer on.
2699 static uint32_t set_iso_pkts_info(dwc_otg_core_if_t * core_if, dwc_ep_t * ep)
/* Fills in the per-packet length/offset bookkeeping for a completed
 * isochronous buffer in PTI mode.  Returns non-zero (via the dropped-packet
 * path at the bottom) when the workaround for merged Transfer Complete +
 * PktDrpSts interrupts must be applied. */
2703 iso_pkt_info_t *packet_info = ep->pkt_info;
2705 uint32_t frame_data;
2706 deptsiz_data_t deptsiz;
/* Select which of the two double buffers just completed. */
2708 if (ep->proc_buf_num == 0) {
2709 /** Buffer 0 descriptors setup */
2710 dma_ad = ep->dma_addr0;
2712 /** Buffer 1 descriptors setup */
2713 dma_ad = ep->dma_addr1;
/* Read the residual transfer size for this EP (IN or OUT register bank). */
2718 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->
2722 dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->
/* xfersize == 0 means the whole buffer completed normally. */
2726 if (!deptsiz.b.xfersize) {
2728 for (i = 0; i < ep->pkt_cnt; i += ep->pkt_per_frm) {
2729 frame_data = ep->data_per_frame;
2730 for (j = 0; j < ep->pkt_per_frm; ++j) {
2732 /* Packet status - is not set as initially
2733 * it is set to 0 and if packet was sent
2734 successfully, status field will remain 0*/
2736 /* Bytes that have been transferred */
2737 packet_info->length =
2739 frame_data) ? ep->maxpacket : frame_data;
2741 /* Received packet offset */
2742 packet_info->offset = offset;
2743 offset += packet_info->length;
2744 frame_data -= packet_info->length;
2751 /* This is a workaround for in case of Transfer Complete with
2752 * PktDrpSts interrupts merging - in this case Transfer complete
2753 * interrupt for Isoc Out Endpoint is asserted without PktDrpSts
2754 * set and with DOEPTSIZ register non zero. Investigations showed,
2755 * that this happens when Out packet is dropped, but because of
2756 * interrupts merging during first interrupt handling PktDrpSts
2757 * bit is cleared and for next merged interrupts it is not reset.
2758 * In this case SW handles the interrupt as if PktDrpSts bit is set.
2763 return handle_iso_out_pkt_dropped(core_if, ep);
2769 * This function is to handle Iso EP transfer complete interrupt
2771 * @param pcd The PCD
2772 * @param ep The EP for which transfer complete was asserted
2775 static void complete_iso_ep(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * ep)
/* Isochronous transfer-complete handler: records packet info for the
 * finished buffer, flips the double buffer, and restarts the transfer.
 * The path taken depends on the core mode: descriptor DMA, buffer DMA
 * with PTI enhancement, plain buffer DMA, or slave mode. */
2777 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
2778 dwc_ep_t *dwc_ep = &ep->dwc_ep;
2779 uint8_t is_last = 0;
2781 if (core_if->dma_enable) {
2782 if (core_if->dma_desc_enable) {
/* Descriptor DMA: harvest packet info, then re-arm the chain. */
2783 set_ddma_iso_pkts_info(core_if, dwc_ep);
2784 reinit_ddma_iso_xfer(core_if, dwc_ep);
2787 if (core_if->pti_enh_enable) {
/* Non-zero return means a dropped packet was recovered. */
2788 if (set_iso_pkts_info(core_if, dwc_ep)) {
2789 dwc_ep->proc_buf_num =
2790 (dwc_ep->proc_buf_num ^ 1) & 0x1;
2791 dwc_otg_iso_ep_start_buf_transfer
/* Plain buffer DMA: frame-at-a-time restart from the other buffer. */
2796 set_current_pkt_info(core_if, dwc_ep);
2797 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
2799 dwc_ep->cur_pkt = 0;
2800 dwc_ep->proc_buf_num =
2801 (dwc_ep->proc_buf_num ^ 1) & 0x1;
2802 if (dwc_ep->proc_buf_num) {
2803 dwc_ep->cur_pkt_addr =
2805 dwc_ep->cur_pkt_dma_addr =
2808 dwc_ep->cur_pkt_addr =
2810 dwc_ep->cur_pkt_dma_addr =
2815 dwc_otg_iso_ep_start_frm_transfer(core_if,
/* Slave mode: same buffer-flip logic without DMA programming. */
2820 set_current_pkt_info(core_if, dwc_ep);
2821 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
2823 dwc_ep->cur_pkt = 0;
2824 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
2825 if (dwc_ep->proc_buf_num) {
2826 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1;
2827 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1;
2829 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0;
2830 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0;
2834 dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep);
/* Notify the gadget layer that one iso buffer is done. */
2837 dwc_otg_iso_buffer_done(pcd, ep, ep->iso_req_handle);
2839 #endif /* DWC_EN_ISOC */
2842 * This function handles EP0 Control transfers.
2844 * The state of the control transfers is tracked in
2845 * <code>ep0state</code>.
2847 static void handle_ep0(dwc_otg_pcd_t * pcd)
/* EP0 transfer-complete dispatcher.  Advances the control-transfer state
 * machine held in pcd->ep0state: data phases accumulate the bytes moved
 * by the last programming step, status phases finish the request and
 * return to EP0_IDLE ready for the next SETUP. */
2849 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2850 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
2851 desc_sts_data_t desc_sts;
2852 deptsiz0_data_t deptsiz;
2853 uint32_t byte_count;
2856 DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
2857 print_ep0_state(pcd);
2860 // DWC_DEBUGPL(DBG_CIL,"HANDLE EP0\n");
2862 switch (pcd->ep0state) {
2863 case EP0_DISCONNECT:
2867 pcd->request_config = 0;
2872 case EP0_IN_DATA_PHASE:
2874 DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n",
2875 ep0->dwc_ep.num, (ep0->dwc_ep.is_in ? "IN" : "OUT"),
2876 ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
2879 if (core_if->dma_enable != 0) {
2881 * For EP0 we can only program 1 packet at a time so we
2882 * need to do the make calculations after each complete.
2883 * Call write_packet to make the calculations, as in
2884 * slave mode, and use those values to determine if we
/* Bytes moved this step: residual from DIEPTSIZ (buffer DMA)
 * or from the IN descriptor status (descriptor DMA). */
2887 if (core_if->dma_desc_enable == 0) {
2889 dwc_read_reg32(&core_if->dev_if->
2890 in_ep_regs[0]->dieptsiz);
2892 ep0->dwc_ep.xfer_len - deptsiz.b.xfersize;
2895 core_if->dev_if->in_desc_addr->status;
2897 ep0->dwc_ep.xfer_len - desc_sts.b.bytes;
2899 ep0->dwc_ep.xfer_count += byte_count;
2900 ep0->dwc_ep.xfer_buff += byte_count;
2901 ep0->dwc_ep.dma_addr += byte_count;
/* More data, a pending ZLP, or done: continue or complete. */
2903 if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
2904 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
2906 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
2907 } else if (ep0->dwc_ep.sent_zlp) {
2908 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
2910 ep0->dwc_ep.sent_zlp = 0;
2911 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
2913 ep0_complete_request(ep0);
2914 DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
2917 case EP0_OUT_DATA_PHASE:
2919 DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n",
2920 ep0->dwc_ep.num, (ep0->dwc_ep.is_in ? "IN" : "OUT"),
2921 ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
2923 if (core_if->dma_enable != 0) {
2924 if (core_if->dma_desc_enable == 0) {
2926 dwc_read_reg32(&core_if->dev_if->
2927 out_ep_regs[0]->doeptsiz);
/* OUT EP0 is programmed one maxpacket at a time, so the
 * step size is maxpacket minus the residual. */
2929 ep0->dwc_ep.maxpacket - deptsiz.b.xfersize;
2932 core_if->dev_if->out_desc_addr->status;
2934 ep0->dwc_ep.maxpacket - desc_sts.b.bytes;
2936 ep0->dwc_ep.xfer_count += byte_count;
2937 ep0->dwc_ep.xfer_buff += byte_count;
2938 ep0->dwc_ep.dma_addr += byte_count;
2940 if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
2941 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
2943 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
2944 } else if (ep0->dwc_ep.sent_zlp) {
2945 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
2947 ep0->dwc_ep.sent_zlp = 0;
2948 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
2950 ep0_complete_request(ep0);
2951 DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
2955 case EP0_IN_STATUS_PHASE:
2956 case EP0_OUT_STATUS_PHASE:
2957 DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n");
2958 ep0_complete_request(ep0);
/* Status phase done: fall back to idle and await the next SETUP. */
2959 pcd->ep0state = EP0_IDLE;
2961 ep0->dwc_ep.is_in = 0; /* OUT for next SETUP */
2963 /* Prepare for more SETUP Packets */
2964 if (core_if->dma_enable) {
2965 ep0_out_start(core_if, pcd);
2970 DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n");
2974 print_ep0_state(pcd);
2981 static void restart_transfer(dwc_otg_pcd_t * pcd, const uint32_t epnum)
/* Restarts a stalled IN transfer on the given endpoint.  If the core has
 * consumed the data (xfersize == 0) but still expects packets
 * (pktcnt != 0), the last packet's worth of state is rewound so it can be
 * re-sent.  ISOC endpoints are excluded (handled elsewhere). */
2983 dwc_otg_core_if_t *core_if;
2984 dwc_otg_dev_if_t *dev_if;
2985 deptsiz_data_t dieptsiz = {.d32 = 0 };
2986 dwc_otg_pcd_ep_t *ep;
2988 ep = get_in_ep(pcd, epnum);
2991 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
2994 #endif /* DWC_EN_ISOC */
2996 core_if = GET_CORE_IF(pcd);
2997 dev_if = core_if->dev_if;
2999 dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz);
3001 DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x xfer_len=%0x"
3002 " stopped=%d\n", ep->dwc_ep.xfer_buff,
3003 ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len, ep->stopped);
3005 * If xfersize is 0 and pktcnt in not 0, resend the last packet.
3007 if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0 &&
3008 ep->dwc_ep.start_xfer_buff != 0) {
/* Short transfer: simply restart from the original buffer. */
3009 if (ep->dwc_ep.total_len <= ep->dwc_ep.maxpacket) {
3010 ep->dwc_ep.xfer_count = 0;
3011 ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff;
3012 ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count;
/* Otherwise back up by one maxpacket-sized packet. */
3014 ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket;
3015 /* convert packet size to dwords. */
3016 ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket;
3017 ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count;
3020 DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x "
3021 "xfer_len=%0x stopped=%d\n",
3022 ep->dwc_ep.xfer_buff,
3023 ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len,
/* EP0 uses the dedicated control-transfer start routine. */
3026 dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep);
3028 dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
3034 * handle the IN EP disable interrupt.
3036 static inline void handle_in_ep_disable_intr(dwc_otg_pcd_t * pcd,
3037 const uint32_t epnum)
/* Completes the "Endpoint Disabled" procedure for an IN EP: flush the
 * EP's Tx FIFO, clear the Global Non-Periodic IN NAK if the EP is
 * stopped, and restart any partially-programmed transfer. */
3039 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3040 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3041 deptsiz_data_t dieptsiz = {.d32 = 0 };
3042 dctl_data_t dctl = {.d32 = 0 };
3043 dwc_otg_pcd_ep_t *ep;
3046 ep = get_in_ep(pcd, epnum);
3047 dwc_ep = &ep->dwc_ep;
/* ISOC IN: just flush the FIFO; the iso machinery re-arms the EP. */
3049 if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3050 dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num);
3054 DWC_DEBUGPL(DBG_PCD, "diepctl%d=%0x\n", epnum,
3055 dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl));
3056 dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz);
3058 DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
3059 dieptsiz.b.pktcnt, dieptsiz.b.xfersize);
3062 /* Flush the Tx FIFO */
3063 dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num);
3064 /* Clear the Global IN NP NAK */
3066 dctl.b.cgnpinnak = 1;
3067 dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, 0);
3068 /* Restart the transaction */
3069 if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
3070 restart_transfer(pcd, epnum);
3073 /* Restart the transaction */
3074 if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
3075 restart_transfer(pcd, epnum);
3077 DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n");
3082 * Handler for the IN EP timeout handshake interrupt.
3084 static inline void handle_in_ep_timeout_intr(dwc_otg_pcd_t * pcd,
3085 const uint32_t epnum)
/* Handles the IN EP timeout-handshake interrupt: masks the non-periodic
 * TxFIFO-empty interrupt (slave mode), then raises the Global
 * Non-Periodic IN NAK and waits for GINNakEff to disable the EP. */
3087 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3088 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3091 deptsiz_data_t dieptsiz = {.d32 = 0 };
3094 dctl_data_t dctl = {.d32 = 0 };
3095 dwc_otg_pcd_ep_t *ep;
3097 gintmsk_data_t intr_mask = {.d32 = 0 };
3099 ep = get_in_ep(pcd, epnum);
3101 /* Disable the NP Tx Fifo Empty Interrupt */
3102 if (!core_if->dma_enable) {
3103 intr_mask.b.nptxfempty = 1;
3104 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
3107 /** @todo NGS Check EP type.
3108 * Implement for Periodic EPs */
3112 /* Enable the Global IN NAK Effective Interrupt */
3113 intr_mask.b.ginnakeff = 1;
3114 dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, intr_mask.d32);
3116 /* Set Global IN NAK */
3117 dctl.b.sgnpinnak = 1;
3118 dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
3123 dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[num]->dieptsiz);
3124 DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
3125 dieptsiz.b.pktcnt, dieptsiz.b.xfersize);
3128 #ifdef DISABLE_PERIODIC_EP
3130 * Set the NAK bit for this EP to
3131 * start the disable process.
3135 dwc_modify_reg32(&dev_if->in_ep_regs[num]->diepctl, diepctl.d32,
3143 * Handler for the IN EP NAK interrupt.
3145 static inline int32_t handle_in_ep_nak_intr(dwc_otg_pcd_t * pcd,
3146 const uint32_t epnum)
/* Stub handler for the IN EP NAK interrupt: no real processing is
 * implemented yet, so the NAK interrupt is simply masked for this EP
 * (per-EP mask in multiprocessor-interrupt mode, shared DIEPMSK
 * otherwise) to keep it from refiring. */
3148 /** @todo implement ISR */
3149 dwc_otg_core_if_t *core_if;
3150 diepmsk_data_t intr_mask = {.d32 = 0 };
3152 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n", "IN EP NAK");
3153 core_if = GET_CORE_IF(pcd);
3154 intr_mask.b.nak = 1;
3156 if (core_if->multiproc_int_enable) {
3157 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
3158 diepeachintmsk[epnum], intr_mask.d32, 0);
3160 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepmsk,
3168 * Handler for the OUT EP Babble interrupt.
3170 static inline int32_t handle_out_ep_babble_intr(dwc_otg_pcd_t * pcd,
3171 const uint32_t epnum)
/* Stub handler for the OUT EP Babble interrupt: masks the babble
 * interrupt for this EP (per-EP mask in multiprocessor-interrupt mode,
 * shared DOEPMSK otherwise) until a real handler is implemented. */
3173 /** @todo implement ISR */
3174 dwc_otg_core_if_t *core_if;
3175 doepmsk_data_t intr_mask = {.d32 = 0 };
3177 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n",
3179 core_if = GET_CORE_IF(pcd);
3180 intr_mask.b.babble = 1;
3182 if (core_if->multiproc_int_enable) {
3183 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
3184 doepeachintmsk[epnum], intr_mask.d32, 0);
3186 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk,
3194 * Handler for the OUT EP NAK interrupt.
3196 static inline int32_t handle_out_ep_nak_intr(dwc_otg_pcd_t * pcd,
3197 const uint32_t epnum)
/* Stub handler for the OUT EP NAK interrupt: masks the NAK interrupt
 * for this EP (per-EP mask in multiprocessor-interrupt mode, shared
 * DOEPMSK otherwise) until a real handler is implemented. */
3199 /** @todo implement ISR */
3200 dwc_otg_core_if_t *core_if;
3201 doepmsk_data_t intr_mask = {.d32 = 0 };
3203 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n", "OUT EP NAK");
3204 core_if = GET_CORE_IF(pcd);
3205 intr_mask.b.nak = 1;
3207 if (core_if->multiproc_int_enable) {
3208 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
3209 doepeachintmsk[epnum], intr_mask.d32, 0);
3211 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk,
3219 * Handler for the OUT EP NYET interrupt.
3221 static inline int32_t handle_out_ep_nyet_intr(dwc_otg_pcd_t * pcd,
3222 const uint32_t epnum)
/* Stub handler for the OUT EP NYET interrupt: masks the NYET interrupt
 * for this EP (per-EP mask in multiprocessor-interrupt mode, shared
 * DOEPMSK otherwise) until a real handler is implemented. */
3224 /** @todo implement ISR */
3225 dwc_otg_core_if_t *core_if;
3226 doepmsk_data_t intr_mask = {.d32 = 0 };
3228 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n", "OUT EP NYET");
3229 core_if = GET_CORE_IF(pcd);
3230 intr_mask.b.nyet = 1;
3232 if (core_if->multiproc_int_enable) {
3233 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
3234 doepeachintmsk[epnum], intr_mask.d32, 0);
3236 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk,
3244 * This interrupt indicates that an IN EP has a pending Interrupt.
3245 * The sequence for handling the IN EP interrupt is shown below:
3246 * -# Read the Device All Endpoint Interrupt register
3247 * -# Repeat the following for each IN EP interrupt bit set (from
3249 * -# Read the Device Endpoint Interrupt (DIEPINTn) register
3250 * -# If "Transfer Complete" call the request complete function
3251 * -# If "Endpoint Disabled" complete the EP disable procedure.
3252 * -# If "AHB Error Interrupt" log error
3253 * -# If "Time-out Handshake" log error
3254 * -# If "IN Token Received when TxFIFO Empty" write packet to Tx
3256 * -# If "IN Token EP Mismatch" (disable, this is handled by EP
3257 * Mismatch Interrupt)
3259 static int32_t dwc_otg_pcd_handle_in_ep_intr(dwc_otg_pcd_t * pcd)
/* Top-level IN endpoint interrupt dispatcher.  Reads DAINT for the IN
 * EPs, then for each pending EP reads DIEPINTn and services each source
 * bit in turn, acknowledging it via CLEAR_IN_EP_INTR. */
3261 #define CLEAR_IN_EP_INTR(__core_if,__epnum,__intr) \
3263 diepint_data_t diepint = {.d32=0}; \
3264 diepint.b.__intr = 1; \
3265 dwc_write_reg32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \
3269 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3270 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3271 diepint_data_t diepint = {.d32 = 0 };
3272 dctl_data_t dctl = {.d32 = 0 };
3273 depctl_data_t depctl = {.d32 = 0 };
3276 dwc_otg_pcd_ep_t *ep;
3278 gintmsk_data_t intr_mask = {.d32 = 0 };
3280 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd);
3282 /* Read in the device interrupt bits */
3283 ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if);
3285 /* Service the Device IN interrupts for each endpoint */
3287 if (ep_intr & 0x1) {
3288 __maybe_unused uint32_t empty_msk;
3289 /* Get EP pointer */
3290 ep = get_in_ep(pcd, epnum);
3291 dwc_ep = &ep->dwc_ep;
3294 dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl);
3296 dwc_read_reg32(&dev_if->dev_global_regs->
3297 dtknqr4_fifoemptymsk);
3299 DWC_DEBUGPL(DBG_PCDV,
3300 "IN EP INTERRUPT - %d\nepmty_msk - %8x diepctl - %8x\n",
3301 epnum, empty_msk, depctl.d32);
3303 DWC_DEBUGPL(DBG_PCD,
3304 "EP%d-%s: type=%d, mps=%d\n",
3305 dwc_ep->num, (dwc_ep->is_in ? "IN" : "OUT"),
3306 dwc_ep->type, dwc_ep->maxpacket);
3309 dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep);
3311 DWC_DEBUGPL(DBG_PCDV,
3312 "EP %d Interrupt Register - 0x%x\n", epnum,
3314 /* Transfer complete */
3315 if (diepint.b.xfercompl) {
3316 /* Disable the NP Tx FIFO Empty
3318 if (core_if->en_multiple_tx_fifo == 0) {
3319 intr_mask.b.nptxfempty = 1;
3320 dwc_modify_reg32(&core_if->
3322 gintmsk, intr_mask.d32,
3325 /* Disable the Tx FIFO Empty Interrupt for this EP */
3326 uint32_t fifoemptymsk =
3328 dwc_modify_reg32(&core_if->dev_if->
3330 dtknqr4_fifoemptymsk,
3333 /* Clear the bit in DIEPINTn for this interrupt */
3334 CLEAR_IN_EP_INTR(core_if, epnum, xfercompl);
3336 /* Complete the transfer */
/* ISOC IN EPs use the dedicated iso completion path. */
3341 else if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3343 complete_iso_ep(pcd, ep);
3345 #endif /* DWC_EN_ISOC */
3351 /* Endpoint disable */
3352 if (diepint.b.epdisabled) {
3353 DWC_DEBUGPL(DBG_ANY, "EP%d IN disabled\n",
3355 handle_in_ep_disable_intr(pcd, epnum);
3357 /* Clear the bit in DIEPINTn for this interrupt */
3358 CLEAR_IN_EP_INTR(core_if, epnum, epdisabled);
3361 if (diepint.b.ahberr) {
3362 DWC_DEBUGPL(DBG_ANY, "EP%d IN AHB Error\n",
3364 /* Clear the bit in DIEPINTn for this interrupt */
3365 CLEAR_IN_EP_INTR(core_if, epnum, ahberr);
3367 /* TimeOUT Handshake (non-ISOC IN EPs) */
3368 if (diepint.b.timeout) {
3369 DWC_DEBUGPL(DBG_ANY, "EP%d IN Time-out\n",
3371 handle_in_ep_timeout_intr(pcd, epnum);
3373 CLEAR_IN_EP_INTR(core_if, epnum, timeout);
3375 /** IN Token received with TxF Empty */
3376 if (diepint.b.intktxfemp) {
3377 DWC_DEBUGPL(DBG_ANY,
3378 "EP%d IN TKN TxFifo Empty\n",
/* Active non-EP0 endpoint: mask this source per-EP. */
3380 if (!ep->stopped && epnum != 0) {
3382 diepmsk_data_t diepmsk = {.d32 = 0 };
3383 diepmsk.b.intktxfemp = 1;
3385 if (core_if->multiproc_int_enable) {
3386 dwc_modify_reg32(&dev_if->
3393 dwc_modify_reg32(&dev_if->
/* Descriptor-DMA EP0 in OUT status phase: stall/disable EP0 IN. */
3399 } else if (core_if->dma_desc_enable
3402 EP0_OUT_STATUS_PHASE) {
3405 dwc_read_reg32(&dev_if->
3409 /* set the disable and stall bits */
3410 if (depctl.b.epena) {
3414 dwc_write_reg32(&dev_if->
3416 diepctl, depctl.d32);
3418 CLEAR_IN_EP_INTR(core_if, epnum, intktxfemp);
3420 /** IN Token Received with EP mismatch */
3421 if (diepint.b.intknepmis) {
3422 DWC_DEBUGPL(DBG_ANY,
3423 "EP%d IN TKN EP Mismatch\n", epnum);
3424 CLEAR_IN_EP_INTR(core_if, epnum, intknepmis);
3426 /** IN Endpoint NAK Effective */
3427 if (diepint.b.inepnakeff) {
3428 DWC_DEBUGPL(DBG_ANY,
3429 "EP%d IN EP NAK Effective\n",
/* NAK now effective: safe to finish disabling the EP. */
3432 if (ep->disabling) {
3436 dwc_modify_reg32(&dev_if->
3438 diepctl, depctl.d32,
3441 CLEAR_IN_EP_INTR(core_if, epnum, inepnakeff);
3445 /** IN EP Tx FIFO Empty Intr */
3446 if (diepint.b.emptyintr) {
3447 DWC_DEBUGPL(DBG_ANY,
3448 "EP%d Tx FIFO Empty Intr \n",
3450 write_empty_tx_fifo(pcd, epnum);
3452 CLEAR_IN_EP_INTR(core_if, epnum, emptyintr);
3456 /** IN EP BNA Intr */
3457 if (diepint.b.bna) {
3458 CLEAR_IN_EP_INTR(core_if, epnum, bna);
3459 if (core_if->dma_desc_enable) {
3462 DWC_OTG_EP_TYPE_ISOC) {
3464 * This checking is performed to prevent first "false" BNA
3465 * handling occurring right after reconnect
3467 if (dwc_ep->next_frame !=
3469 dwc_otg_pcd_handle_iso_bna
3472 #endif /* DWC_EN_ISOC */
3475 dwc_read_reg32(&dev_if->
3479 /* If Global Continue on BNA is disabled - disable EP */
3480 if (!dctl.b.gcontbna) {
3491 start_next_request(ep);
3497 if (diepint.b.nak) {
3498 DWC_DEBUGPL(DBG_ANY, "EP%d IN NAK Interrupt\n",
3500 handle_in_ep_nak_intr(pcd, epnum);
3502 CLEAR_IN_EP_INTR(core_if, epnum, nak);
3510 #undef CLEAR_IN_EP_INTR
3514 * This interrupt indicates that an OUT EP has a pending Interrupt.
3515 * The sequence for handling the OUT EP interrupt is shown below:
3516 * -# Read the Device All Endpoint Interrupt register
3517 * -# Repeat the following for each OUT EP interrupt bit set (from
3519 * -# Read the Device Endpoint Interrupt (DOEPINTn) register
3520 * -# If "Transfer Complete" call the request complete function
3521 * -# If "Endpoint Disabled" complete the EP disable procedure.
3522 * -# If "AHB Error Interrupt" log error
3523 * -# If "Setup Phase Done" process Setup Packet (See Standard USB
3524 * Command Processing)
3526 static int32_t dwc_otg_pcd_handle_out_ep_intr(dwc_otg_pcd_t * pcd)
/* Top-level OUT endpoint interrupt dispatcher.  Reads DAINT for the OUT
 * EPs, then for each pending EP reads DOEPINTn and services each source
 * bit in turn, acknowledging it via CLEAR_OUT_EP_INTR. */
3528 #define CLEAR_OUT_EP_INTR(__core_if,__epnum,__intr) \
3530 doepint_data_t doepint = {.d32=0}; \
3531 doepint.b.__intr = 1; \
3532 dwc_write_reg32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \
3536 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3537 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3539 doepint_data_t doepint = {.d32 = 0 };
3540 dctl_data_t dctl = {.d32 = 0 };
3541 depctl_data_t doepctl = {.d32 = 0 };
3543 dwc_otg_pcd_ep_t *ep;
3546 DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
3548 /* Read in the device interrupt bits */
3549 ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if);
3552 if (ep_intr & 0x1) {
3553 /* Get EP pointer */
3554 ep = get_out_ep(pcd, epnum);
3555 dwc_ep = &ep->dwc_ep;
3558 DWC_DEBUGPL(DBG_PCDV,
3559 "EP%d-%s: type=%d, mps=%d\n",
3560 dwc_ep->num, (dwc_ep->is_in ? "IN" : "OUT"),
3561 dwc_ep->type, dwc_ep->maxpacket);
3564 dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep);
3566 /* Transfer complete */
3567 if (doepint.b.xfercompl) {
3570 /* Clear the bit in DOEPINTn for this interrupt */
3571 CLEAR_OUT_EP_INTR(core_if, epnum,
3573 if (pcd->ep0state != EP0_IDLE)
/* ISOC OUT: normal completion unless PktDrpSts indicates the
 * core dropped a packet — then recover before completing. */
3576 } else if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3577 if (doepint.b.pktdrpsts == 0) {
3578 /* Clear the bit in DOEPINTn for this interrupt */
3579 CLEAR_OUT_EP_INTR(core_if,
3582 complete_iso_ep(pcd, ep);
/* Dropped packet: ack both xfercompl and pktdrpsts together. */
3585 doepint_data_t doepint = {.d32 =
3587 doepint.b.xfercompl = 1;
3588 doepint.b.pktdrpsts = 1;
3589 dwc_write_reg32(&core_if->
3595 if (handle_iso_out_pkt_dropped
3596 (core_if, dwc_ep)) {
3597 complete_iso_ep(pcd,
3601 #endif /* DWC_EN_ISOC */
3603 /* Clear the bit in DOEPINTn for this interrupt */
3604 CLEAR_OUT_EP_INTR(core_if, epnum,
3611 /* Endpoint disable */
3612 if (doepint.b.epdisabled) {
3614 /* Clear the bit in DOEPINTn for this interrupt */
3615 CLEAR_OUT_EP_INTR(core_if, epnum, epdisabled);
3618 if (doepint.b.ahberr) {
3619 DWC_DEBUGPL(DBG_PCD, "EP%d OUT AHB Error\n",
3621 DWC_DEBUGPL(DBG_PCD, "EP DMA REG %d \n",
3623 out_ep_regs[epnum]->doepdma);
3624 CLEAR_OUT_EP_INTR(core_if, epnum, ahberr);
3626 /* Setup Phase Done (control EPs) */
3627 if (doepint.b.setup) {
3629 DWC_DEBUGPL(DBG_PCD, "EP%d SETUP Done\n",
3632 CLEAR_OUT_EP_INTR(core_if, epnum, setup);
3637 /** OUT EP BNA Intr */
3638 if (doepint.b.bna) {
3639 CLEAR_OUT_EP_INTR(core_if, epnum, bna);
3640 if (core_if->dma_desc_enable) {
3643 DWC_OTG_EP_TYPE_ISOC) {
3645 * This checking is performed to prevent first "false" BNA
3646 * handling occurring right after reconnect
3648 if (dwc_ep->next_frame !=
3650 dwc_otg_pcd_handle_iso_bna
3653 #endif /* DWC_EN_ISOC */
3656 dwc_read_reg32(&dev_if->
3660 /* If Global Continue on BNA is disabled - disable EP */
3661 if (!dctl.b.gcontbna) {
3664 doepctl.b.epdis = 1;
3672 start_next_request(ep);
/* Status phase received (descriptor DMA only): start the
 * corresponding IN status phase on EP0. */
3677 if (doepint.b.stsphsercvd) {
3678 CLEAR_OUT_EP_INTR(core_if, epnum, stsphsercvd);
3679 if (core_if->dma_desc_enable) {
3680 do_setup_in_status_phase(pcd);
3683 /* Babble Interrupt */
3684 if (doepint.b.babble) {
3685 DWC_DEBUGPL(DBG_ANY, "EP%d OUT Babble\n",
3687 handle_out_ep_babble_intr(pcd, epnum);
3689 CLEAR_OUT_EP_INTR(core_if, epnum, babble);
3692 if (doepint.b.nak) {
3693 DWC_DEBUGPL(DBG_ANY, "EP%d OUT NAK\n", epnum);
3694 handle_out_ep_nak_intr(pcd, epnum);
3696 CLEAR_OUT_EP_INTR(core_if, epnum, nak);
3698 /* NYET Interrupt */
3699 if (doepint.b.nyet) {
3700 DWC_DEBUGPL(DBG_ANY, "EP%d OUT NYET\n", epnum);
3701 handle_out_ep_nyet_intr(pcd, epnum);
3703 CLEAR_OUT_EP_INTR(core_if, epnum, nyet);
3713 #undef CLEAR_OUT_EP_INTR
3717 * Incomplete ISO IN Transfer Interrupt.
3718 * This interrupt indicates one of the following conditions occurred
3719 * while transmitting an ISOC transaction.
3720 * - Corrupted IN Token for ISOC EP.
3721 * - Packet not complete in FIFO.
3722 * The follow actions will be taken:
3723 * -# Determine the EP
3724 * -# Set incomplete flag in dwc_ep structure
3725 * -# Disable EP; when "Endpoint Disabled" interrupt is received
3728 int32_t dwc_otg_pcd_handle_incomplete_isoc_in_intr(dwc_otg_pcd_t * pcd)
/* Incomplete ISO IN handler.  With DWC_EN_ISOC: scans every active ISOC
 * IN endpoint whose transfer was cut short (EPDis set with a non-zero
 * DIEPTSIZ), flips its double buffer if the buffer finished, and
 * restarts the transfer on the current (micro)frame.  Without
 * DWC_EN_ISOC: simply masks the interrupt.  Always acknowledges
 * incomplisoin in GINTSTS. */
3730 gintsts_data_t gintsts;
3733 dwc_otg_dev_if_t *dev_if;
3734 deptsiz_data_t deptsiz = {.d32 = 0 };
3735 depctl_data_t depctl = {.d32 = 0 };
3736 dsts_data_t dsts = {.d32 = 0 };
3740 dev_if = GET_CORE_IF(pcd)->dev_if;
3742 for (i = 1; i <= dev_if->num_in_eps; ++i) {
3743 dwc_ep = &pcd->in_ep[i].dwc_ep;
3744 if (dwc_ep->active && dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3746 dwc_read_reg32(&dev_if->in_ep_regs[i]->dieptsiz);
3748 dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
/* EP disabled with residue: the transfer was incomplete. */
3750 if (depctl.b.epdis && deptsiz.d32) {
3751 set_current_pkt_info(GET_CORE_IF(pcd), dwc_ep);
3752 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
3753 dwc_ep->cur_pkt = 0;
/* Buffer exhausted: switch to the other double buffer. */
3754 dwc_ep->proc_buf_num =
3755 (dwc_ep->proc_buf_num ^ 1) & 0x1;
3757 if (dwc_ep->proc_buf_num) {
3758 dwc_ep->cur_pkt_addr =
3760 dwc_ep->cur_pkt_dma_addr =
3763 dwc_ep->cur_pkt_addr =
3765 dwc_ep->cur_pkt_dma_addr =
/* Resynchronize to the current SOF frame number and restart. */
3772 dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->
3773 dev_global_regs->dsts);
3774 dwc_ep->next_frame = dsts.b.soffn;
3776 dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF
3784 gintmsk_data_t intr_mask = {.d32 = 0 };
3785 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n",
3786 "IN ISOC Incomplete");
3788 intr_mask.b.incomplisoin = 1;
3789 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
3791 #endif //DWC_EN_ISOC
3793 /* Clear interrupt */
3795 gintsts.b.incomplisoin = 1;
3796 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
3803 * Incomplete ISO OUT Transfer Interrupt.
3805 * This interrupt indicates that the core has dropped an ISO OUT
3806 * packet. The following conditions can be the cause:
3807 * - FIFO Full, the entire packet would not fit in the FIFO.
3810 * The follow actions will be taken:
3811 * -# Determine the EP
3812 * -# Set incomplete flag in dwc_ep structure
3813 * -# Read any data from the FIFO
3814 * -# Disable EP. when "Endpoint Disabled" interrupt is received
3817 int32_t dwc_otg_pcd_handle_incomplete_isoc_out_intr(dwc_otg_pcd_t * pcd)
3820 gintsts_data_t gintsts;
3823 dwc_otg_dev_if_t *dev_if;
3824 deptsiz_data_t deptsiz = {.d32 = 0 };
3825 depctl_data_t depctl = {.d32 = 0 };
3826 dsts_data_t dsts = {.d32 = 0 };
3830 dev_if = GET_CORE_IF(pcd)->dev_if;
3832 for (i = 1; i <= dev_if->num_out_eps; ++i) {
3833 dwc_ep = &pcd->in_ep[i].dwc_ep;
3834 if (pcd->out_ep[i].dwc_ep.active &&
3835 pcd->out_ep[i].dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
3837 dwc_read_reg32(&dev_if->out_ep_regs[i]->doeptsiz);
3839 dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl);
3841 if (depctl.b.epdis && deptsiz.d32) {
3842 set_current_pkt_info(GET_CORE_IF(pcd),
3843 &pcd->out_ep[i].dwc_ep);
3844 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
3845 dwc_ep->cur_pkt = 0;
3846 dwc_ep->proc_buf_num =
3847 (dwc_ep->proc_buf_num ^ 1) & 0x1;
3849 if (dwc_ep->proc_buf_num) {
3850 dwc_ep->cur_pkt_addr =
3852 dwc_ep->cur_pkt_dma_addr =
3855 dwc_ep->cur_pkt_addr =
3857 dwc_ep->cur_pkt_dma_addr =
3864 dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->
3865 dev_global_regs->dsts);
3866 dwc_ep->next_frame = dsts.b.soffn;
3868 dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF
3875 /** @todo implement ISR */
3876 gintmsk_data_t intr_mask = {.d32 = 0 };
3878 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n",
3879 "OUT ISOC Incomplete");
3881 intr_mask.b.incomplisoout = 1;
3882 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
3885 #endif /* DWC_EN_ISOC */
3887 /* Clear interrupt */
3889 gintsts.b.incomplisoout = 1;
3890 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
3897 * This function handles the Global IN NAK Effective interrupt.
3900 int32_t dwc_otg_pcd_handle_in_nak_effective(dwc_otg_pcd_t * pcd)
/* Global IN NAK Effective handler: with all IN traffic NAKed it is now
 * safe to disable every enabled IN endpoint; afterwards the ginnakeff
 * interrupt is masked and acknowledged. */
3902 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
3903 depctl_data_t diepctl = {.d32 = 0 };
3904 depctl_data_t diepctl_rd = {.d32 = 0 };
3905 gintmsk_data_t intr_mask = {.d32 = 0 };
3906 gintsts_data_t gintsts;
3909 DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n");
3911 /* Disable all active IN EPs */
3912 diepctl.b.epdis = 1;
3915 for (i = 0; i <= dev_if->num_in_eps; i++) {
3917 dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
/* Only write the disable to endpoints that are currently enabled. */
3918 if (diepctl_rd.b.epena) {
3919 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl,
3923 /* Disable the Global IN NAK Effective Interrupt */
3924 intr_mask.b.ginnakeff = 1;
3925 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
3928 /* Clear interrupt */
3930 gintsts.b.ginnakeff = 1;
3931 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
3938 * OUT NAK Effective.
3941 int32_t dwc_otg_pcd_handle_out_nak_effective(dwc_otg_pcd_t * pcd)
3943 gintmsk_data_t intr_mask = {.d32 = 0 };
3944 gintsts_data_t gintsts;
3946 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n",
3947 "Global IN NAK Effective\n");
3948 /* Disable the Global IN NAK Effective Interrupt */
3949 intr_mask.b.goutnakeff = 1;
3950 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
3953 /* Clear interrupt */
3955 gintsts.b.goutnakeff = 1;
3956 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
3963 * PCD interrupt handler.
3965 * The PCD handles the device interrupts. Many conditions can cause a
3966 * device interrupt. When an interrupt occurs, the device interrupt
3967 * service routine determines the cause of the interrupt and
3968 * dispatches handling to the appropriate function. These interrupt
3969 * handling functions are described below.
3971 * All interrupt registers are processed from LSB to MSB.
3974 extern int32_t dwc_otg_handle_usb_suspend_intr(dwc_otg_core_if_t * core_if);
3975 extern int32_t dwc_otg_handle_wakeup_detected_intr(dwc_otg_core_if_t * core_if);
3977 int32_t dwc_otg_pcd_handle_intr(dwc_otg_pcd_t * pcd)
3979 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3981 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
3983 gintsts_data_t gintr_status;
3987 DWC_DEBUGPL(DBG_ANY, "%s() gintsts=%08x gintmsk=%08x\n",
3989 dwc_read_reg32(&global_regs->gintsts),
3990 dwc_read_reg32(&global_regs->gintmsk));
3993 if (dwc_otg_is_device_mode(core_if)) {
3994 DWC_SPINLOCK(pcd->lock);
3996 gintr_status.d32 = dwc_otg_read_core_intr(core_if);
3998 DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n",
3999 __func__, gintr_status.d32);
4001 if (gintr_status.b.sofintr) {
4002 retval |= dwc_otg_pcd_handle_sof_intr(pcd);
4004 if (gintr_status.b.rxstsqlvl) {
4006 dwc_otg_pcd_handle_rx_status_q_level_intr(pcd);
4008 if (gintr_status.b.nptxfempty) {
4009 retval |= dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd);
4011 if (gintr_status.b.ginnakeff) {
4012 retval |= dwc_otg_pcd_handle_in_nak_effective(pcd);
4014 if (gintr_status.b.goutnakeff) {
4015 retval |= dwc_otg_pcd_handle_out_nak_effective(pcd);
4017 if (gintr_status.b.i2cintr) {
4018 retval |= dwc_otg_pcd_handle_i2c_intr(pcd);
4020 if (gintr_status.b.erlysuspend) {
4021 retval |= dwc_otg_pcd_handle_early_suspend_intr(pcd);
4023 if (gintr_status.b.usbreset) {
4024 retval |= dwc_otg_pcd_handle_usb_reset_intr(pcd);
4026 if (gintr_status.b.enumdone) {
4027 retval |= dwc_otg_pcd_handle_enum_done_intr(pcd);
4029 if (gintr_status.b.isooutdrop) {
4031 dwc_otg_pcd_handle_isoc_out_packet_dropped_intr
4034 if (gintr_status.b.eopframe) {
4036 dwc_otg_pcd_handle_end_periodic_frame_intr(pcd);
4038 if (gintr_status.b.epmismatch) {
4039 retval |= dwc_otg_pcd_handle_ep_mismatch_intr(core_if);
4041 if (gintr_status.b.inepint) {
4042 if (!core_if->multiproc_int_enable) {
4043 retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
4046 if (gintr_status.b.outepintr) {
4047 if (!core_if->multiproc_int_enable) {
4048 retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
4051 if (gintr_status.b.incomplisoin) {
4053 dwc_otg_pcd_handle_incomplete_isoc_in_intr(pcd);
4055 if (gintr_status.b.incomplisoout) {
4057 dwc_otg_pcd_handle_incomplete_isoc_out_intr(pcd);
4060 if (gintr_status.b.usbsuspend) {
4061 retval |= dwc_otg_handle_usb_suspend_intr(core_if);
4063 if (gintr_status.b.wkupintr) {
4064 retval |= dwc_otg_handle_wakeup_detected_intr(core_if);
4066 /* In MPI mode De vice Endpoints intterrupts are asserted
4067 * without setting outepintr and inepint bits set, so these
4068 * Interrupt handlers are called without checking these bit-fields
4070 if (core_if->multiproc_int_enable) {
4071 retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
4072 retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
4075 // dwc_debug( "() gintsts=%0x\n",
4076 // dwc_read_reg32(&global_regs->gintsts));
4078 DWC_SPINUNLOCK(pcd->lock);
4083 #endif /* DWC_HOST_ONLY */