1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_intr.c $
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 * ========================================================================== */
34 #include "dwc_otg_pcd.h"
37 #include "dwc_otg_cfi.h"
40 //#define PRINT_CFI_DMA_DESCS
47 * This function updates OTG.
49 static void dwc_otg_pcd_update_otg(dwc_otg_pcd_t * pcd, const unsigned reset)
53 pcd->b_hnp_enable = 0;
54 pcd->a_hnp_support = 0;
55 pcd->a_alt_hnp_support = 0;
58 if (pcd->fops->hnp_changed) {
59 pcd->fops->hnp_changed(pcd);
64 * This file contains the implementation of the PCD Interrupt handlers.
66 * The PCD handles the device interrupts. Many conditions can cause a
67 * device interrupt. When an interrupt occurs, the device interrupt
68 * service routine determines the cause of the interrupt and
69 * dispatches handling to the appropriate function. These interrupt
70 * handling functions are described below.
71 * All interrupt registers are processed from LSB to MSB.
75 * This function prints the ep0 state for debug purposes.
77 static inline void print_ep0_state(dwc_otg_pcd_t * pcd)
82 switch (pcd->ep0state) {
84 dwc_strcpy(str, "EP0_DISCONNECT");
87 dwc_strcpy(str, "EP0_IDLE");
89 case EP0_IN_DATA_PHASE:
90 dwc_strcpy(str, "EP0_IN_DATA_PHASE");
92 case EP0_OUT_DATA_PHASE:
93 dwc_strcpy(str, "EP0_OUT_DATA_PHASE");
95 case EP0_IN_STATUS_PHASE:
96 dwc_strcpy(str, "EP0_IN_STATUS_PHASE");
98 case EP0_OUT_STATUS_PHASE:
99 dwc_strcpy(str, "EP0_OUT_STATUS_PHASE");
102 dwc_strcpy(str, "EP0_STALL");
105 dwc_strcpy(str, "EP0_INVALID");
108 DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state);
/**
 * Dump one DMA descriptor's fields (buffer address, byte count,
 * short-packet / last / status / buffer-state bits) for debugging.
 *
 * NOTE(review): this helper's body appears truncated in this view --
 * the format-string call below is missing the name of the print macro
 * it belongs to (and the closing argument/brace). Confirm against the
 * full source before relying on it.
 */
static inline void print_desc(struct dwc_otg_dma_desc *ddesc,
			      const uint8_t * epname, int descnum)
	    ("%s DMA_DESC(%d) buf=0x%08x bytes=0x%04x; sp=0x%x; l=0x%x; sts=0x%02x; bs=0x%02x\n",
	     epname, descnum, ddesc->buf, ddesc->status.b.bytes,
	     ddesc->status.b.sp, ddesc->status.b.l, ddesc->status.b.sts,
125 * This function returns pointer to in ep struct with number ep_num
127 static inline dwc_otg_pcd_ep_t *get_in_ep(dwc_otg_pcd_t * pcd, uint32_t ep_num)
130 int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
134 for (i = 0; i < num_in_eps; ++i) {
135 if (pcd->in_ep[i].dwc_ep.num == ep_num)
136 return &pcd->in_ep[i];
143 * This function returns pointer to out ep struct with number ep_num
145 static inline dwc_otg_pcd_ep_t *get_out_ep(dwc_otg_pcd_t * pcd, uint32_t ep_num)
148 int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
152 for (i = 0; i < num_out_eps; ++i) {
153 if (pcd->out_ep[i].dwc_ep.num == ep_num)
154 return &pcd->out_ep[i];
161 * This functions gets a pointer to an EP from the wIndex address
162 * value of the control request.
164 dwc_otg_pcd_ep_t *get_ep_by_addr(dwc_otg_pcd_t * pcd, u16 wIndex)
166 dwc_otg_pcd_ep_t *ep;
167 uint32_t ep_num = UE_GET_ADDR(wIndex);
171 } else if (UE_GET_DIR(wIndex) == UE_DIR_IN) { /* in ep */
172 ep = &pcd->in_ep[ep_num - 1];
174 ep = &pcd->out_ep[ep_num - 1];
181 * This function checks the EP request queue, if the queue is not
182 * empty the next request is started.
/**
 * This function checks the EP request queue; if the queue is not
 * empty the next queued request is programmed into the endpoint and
 * the transfer is started.
 *
 * NOTE(review): several interior lines of this function appear
 * truncated in this view (missing braces, assignments and the
 * initialization of 'pcd'); code tokens are preserved as-is.
 */
void start_next_request(dwc_otg_pcd_ep_t * ep)
	dwc_otg_pcd_request_t *req = 0;
	/* Per-transfer hardware limit for this core configuration. */
	uint32_t max_transfer =
	    GET_CORE_IF(ep->pcd)->core_params->max_transfer_size;
	/* NOTE(review): 'pcd' is used below but its assignment is not
	 * visible here -- presumably pcd = ep->pcd; confirm. */
	struct dwc_otg_pcd *pcd;
	if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
		req = DWC_CIRCLEQ_FIRST(&ep->queue);
		/* CFI (non-standard buffer mode): the CFI layer builds
		 * the DMA descriptors for this request. */
		if (ep->dwc_ep.buff_mode != BM_STANDARD) {
			ep->dwc_ep.cfi_req_len = req->length;
			pcd->cfi->ops.build_descriptors(pcd->cfi, pcd, ep, req);
		/* Setup and start the Transfer */
		ep->dwc_ep.dma_addr = req->dma;
		ep->dwc_ep.start_xfer_buff = req->buf;
		ep->dwc_ep.xfer_buff = req->buf;
		ep->dwc_ep.sent_zlp = 0;
		ep->dwc_ep.total_len = req->length;
		ep->dwc_ep.xfer_len = 0;
		ep->dwc_ep.xfer_count = 0;
		ep->dwc_ep.maxxfer = max_transfer;
		if (GET_CORE_IF(ep->pcd)->dma_desc_enable) {
			/* Descriptor DMA: OUT transfers must also be a
			 * multiple of 4 bytes per descriptor. */
			uint32_t out_max_xfer = DDMA_MAX_TRANSFER_SIZE
			    - (DDMA_MAX_TRANSFER_SIZE % 4);
			if (ep->dwc_ep.is_in) {
				if (ep->dwc_ep.maxxfer >
				    DDMA_MAX_TRANSFER_SIZE) {
					    DDMA_MAX_TRANSFER_SIZE;
				if (ep->dwc_ep.maxxfer > out_max_xfer) {
		/* When more than one transfer is needed, trim maxxfer to a
		 * whole number of max-packet-size packets. */
		if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
			ep->dwc_ep.maxxfer -=
			    (ep->dwc_ep.maxxfer % ep->dwc_ep.maxpacket);
			/* An exact multiple of maxpacket needs a trailing
			 * zero-length packet to terminate the transfer. */
			if ((ep->dwc_ep.total_len %
			     ep->dwc_ep.maxpacket == 0)
			    && (ep->dwc_ep.total_len != 0)) {
				ep->dwc_ep.sent_zlp = 1;
	dwc_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->dwc_ep);
250 * This function handles the SOF Interrupts. At this time the SOF
251 * Interrupt is disabled.
253 int32_t dwc_otg_pcd_handle_sof_intr(dwc_otg_pcd_t * pcd)
255 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
257 gintsts_data_t gintsts;
259 DWC_DEBUGPL(DBG_PCD, "SOF\n");
261 /* Clear interrupt */
263 gintsts.b.sofintr = 1;
264 dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
270 * This function handles the Rx Status Queue Level Interrupt, which
271 * indicates that there is a least one packet in the Rx FIFO. The
272 * packets are moved from the FIFO to memory, where they will be
273 * processed when the Endpoint Interrupt Register indicates Transfer
274 * Complete or SETUP Phase Done.
276 * Repeat the following until the Rx Status Queue is empty:
277 * -# Read the Receive Status Pop Register (GRXSTSP) to get Packet
279 * -# If Receive FIFO is empty then skip to step Clear the interrupt
281 * -# If SETUP Packet call dwc_otg_read_setup_packet to copy the
282 * SETUP data to the buffer
283 * -# If OUT Data Packet call dwc_otg_read_packet to copy the data
284 * to the destination buffer
/**
 * Rx Status Queue Level interrupt handler: pops one status entry from
 * GRXSTSP and, depending on the packet status, copies OUT data or a
 * SETUP packet out of the Rx FIFO (slave mode).
 *
 * NOTE(review): interior lines (break statements, the gintsts
 * zero-init, some closing braces) appear truncated in this view;
 * code tokens are preserved as-is.
 */
int32_t dwc_otg_pcd_handle_rx_status_q_level_intr(dwc_otg_pcd_t * pcd)
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
	gintmsk_data_t gintmask = {.d32 = 0 };
	device_grxsts_data_t status;
	dwc_otg_pcd_ep_t *ep;
	gintsts_data_t gintsts;
	/* Data PID names indexed by the 2-bit dpid field, for logging. */
	static char *dpid_str[] = { "D0", "D2", "D1", "MDATA" };
	//DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd);
	/* Disable the Rx Status Queue Level interrupt */
	gintmask.b.rxstsqlvl = 1;
	dwc_modify_reg32(&global_regs->gintmsk, gintmask.d32, 0);
	/* Get the Status from the top of the FIFO */
	status.d32 = dwc_read_reg32(&global_regs->grxstsp);
	DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s "
		    "pktsts:%x Frame:%d(0x%0x)\n",
		    status.b.epnum, status.b.bcnt,
		    dpid_str[status.b.dpid],
		    status.b.pktsts, status.b.fn, status.b.fn);
	/* Get pointer to EP structure */
	ep = get_out_ep(pcd, status.b.epnum);
	switch (status.b.pktsts) {
	case DWC_DSTS_GOUT_NAK:
		DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n");
	case DWC_STS_DATA_UPDT:
		DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n");
		if (status.b.bcnt && ep->dwc_ep.xfer_buff) {
			/** @todo NGS Check for buffer overflow? */
			dwc_otg_read_packet(core_if,
					    ep->dwc_ep.xfer_buff,
			ep->dwc_ep.xfer_count += status.b.bcnt;
			ep->dwc_ep.xfer_buff += status.b.bcnt;
	case DWC_STS_XFER_COMP:
		DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n");
	case DWC_DSTS_SETUP_COMP:
		DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n");
	case DWC_DSTS_SETUP_UPDT:
		/* Copy the 8-byte SETUP packet out of the FIFO. */
		dwc_otg_read_setup_packet(core_if, pcd->setup_pkt->d32);
			    "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n",
			    pcd->setup_pkt->req.bmRequestType,
			    pcd->setup_pkt->req.bRequest,
			    UGETW(pcd->setup_pkt->req.wValue),
			    UGETW(pcd->setup_pkt->req.wIndex),
			    UGETW(pcd->setup_pkt->req.wLength));
		ep->dwc_ep.xfer_count += status.b.bcnt;
		DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n",
	/* Enable the Rx Status Queue Level interrupt */
	dwc_modify_reg32(&global_regs->gintmsk, 0, gintmask.d32);
	/* Clear interrupt */
	gintsts.b.rxstsqlvl = 1;
	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
	//DWC_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__);
368 * This function examines the Device IN Token Learning Queue to
369 * determine the EP number of the last IN token received. This
370 * implementation is for the Mass Storage device where there are only
371 * 2 IN EPs (Control-IN and BULK-IN).
373 * The EP numbers for the first six IN Tokens are in DTKNQR1 and there
374 * are 8 EP Numbers in each of the other possible DTKNQ Registers.
376 * @param core_if Programming view of DWC_otg controller.
/**
 * This function examines the Device IN Token Learning Queue to
 * determine the EP number of the last IN token received.
 *
 * The first six EP numbers live in DTKNQR1; each further DTKNQ
 * register holds 8 more 4-bit EP numbers. intknwptr points one past
 * the most recently written entry.
 *
 * NOTE(review): interior lines (loop variable declarations, some
 * else/if lines and closing braces) appear truncated in this view;
 * code tokens are preserved as-is.
 */
static inline int get_ep_of_last_in_token(dwc_otg_core_if_t * core_if)
	dwc_otg_device_global_regs_t *dev_global_regs =
	    core_if->dev_if->dev_global_regs;
	const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth;
	/* Number of Token Queue Registers */
	const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
	dtknq1_data_t dtknqr1;
	uint32_t in_tkn_epnums[4];
	volatile uint32_t *addr = &dev_global_regs->dtknqr1;
	//DWC_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH);
	/* Read the DTKNQ Registers */
	for (i = 0; i < DTKNQ_REG_CNT; i++) {
		in_tkn_epnums[i] = dwc_read_reg32(addr);
		DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1,
		/* DTKNQR2 and DTKNQR3 are not adjacent in the register
		 * map; skip over dvbusdis to reach dtknqr3. */
		if (addr == &dev_global_regs->dvbusdis) {
			addr = &dev_global_regs->dtknqr3_dthrctl;
	/* Copy the DTKNQR1 data to the bit field. */
	dtknqr1.d32 = in_tkn_epnums[0];
	/* Get the EP numbers */
	in_tkn_epnums[0] = dtknqr1.b.epnums0_5;
	/* Index of the most recently written token entry. */
	ndx = dtknqr1.b.intknwptr - 1;
	//DWC_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx);
	/** @todo Find a simpler way to calculate the max
	int cnt = TOKEN_Q_DEPTH;
	if (TOKEN_Q_DEPTH <= 6) {
		cnt = TOKEN_Q_DEPTH - 1;
	} else if (TOKEN_Q_DEPTH <= 14) {
		cnt = TOKEN_Q_DEPTH - 7;
	} else if (TOKEN_Q_DEPTH <= 22) {
		cnt = TOKEN_Q_DEPTH - 15;
		cnt = TOKEN_Q_DEPTH - 23;
	epnum = (in_tkn_epnums[DTKNQ_REG_CNT - 1] >> (cnt * 4)) & 0xF;
	/* Otherwise pick the 4-bit EP number out of the register that
	 * holds queue slot 'ndx'. */
	epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF;
	} else if (ndx <= 13) {
		epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF;
	} else if (ndx <= 21) {
		epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF;
	} else if (ndx <= 29) {
		epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF;
	//DWC_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum);
448 * This interrupt occurs when the non-periodic Tx FIFO is half-empty.
449 * The active request is checked for the next packet to be loaded into
450 * the non-periodic Tx FIFO.
/**
 * Non-periodic Tx FIFO half-empty handler (slave mode): finds the EP
 * of the last IN token from the learning queue and writes packets of
 * its active request into the FIFO while queue/FIFO space remains.
 *
 * NOTE(review): interior lines (declarations of epnum/len/dwords,
 * the gintsts zero-init and some closing braces) appear truncated in
 * this view; code tokens are preserved as-is.
 */
int32_t dwc_otg_pcd_handle_np_tx_fifo_empty_intr(dwc_otg_pcd_t * pcd)
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
	dwc_otg_dev_in_ep_regs_t *ep_regs;
	gnptxsts_data_t txstatus = {.d32 = 0 };
	gintsts_data_t gintsts;
	dwc_otg_pcd_ep_t *ep = 0;
	/* Get the epnum from the IN Token Learning Queue. */
	epnum = get_ep_of_last_in_token(core_if);
	ep = get_in_ep(pcd, epnum);
	DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %d \n", epnum);
	ep_regs = core_if->dev_if->in_ep_regs[epnum];
	/* Size of the next packet, capped at maxpacket. */
	len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
	if (len > ep->dwc_ep.maxpacket) {
		len = ep->dwc_ep.maxpacket;
	/* FIFO space is accounted in 32-bit words, rounded up. */
	dwords = (len + 3) / 4;
	/* While there is space in the queue and space in the FIFO and
	 * More data to transfer, Write packets to the Tx FIFO */
	txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts);
	DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n", txstatus.d32);
	while (txstatus.b.nptxqspcavail > 0 &&
	       txstatus.b.nptxfspcavail > dwords &&
	       ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) {
		dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
		len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
		if (len > ep->dwc_ep.maxpacket) {
			len = ep->dwc_ep.maxpacket;
		dwords = (len + 3) / 4;
		txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts);
		DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", txstatus.d32);
	DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n",
		    dwc_read_reg32(&global_regs->gnptxsts));
	/* Clear interrupt */
	gintsts.b.nptxfempty = 1;
	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
512 * This function is called when dedicated Tx FIFO Empty interrupt occurs.
513 * The active request is checked for the next packet to be loaded into
514 * apropriate Tx FIFO.
/**
 * Dedicated Tx FIFO Empty handler for one IN endpoint (slave mode):
 * writes packets of the endpoint's active request into its dedicated
 * FIFO while space remains and the transfer is incomplete.
 *
 * NOTE(review): interior lines (declarations of len/dwords, some
 * closing braces and a txstatus assignment) appear truncated in this
 * view; code tokens are preserved as-is.
 */
static int32_t write_empty_tx_fifo(dwc_otg_pcd_t * pcd, uint32_t epnum)
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dwc_otg_dev_if_t *dev_if = core_if->dev_if;
	dwc_otg_dev_in_ep_regs_t *ep_regs;
	dtxfsts_data_t txstatus = {.d32 = 0 };
	dwc_otg_pcd_ep_t *ep = 0;
	ep = get_in_ep(pcd, epnum);
	DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %d \n", epnum);
	ep_regs = core_if->dev_if->in_ep_regs[epnum];
	/* Size of the next packet, capped at maxpacket. */
	len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
	if (len > ep->dwc_ep.maxpacket) {
		len = ep->dwc_ep.maxpacket;
	/* FIFO space is accounted in 32-bit words, rounded up. */
	dwords = (len + 3) / 4;
	/* While there is space in the queue and space in the FIFO and
	 * More data to transfer, Write packets to the Tx FIFO */
	txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts);
	DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32);
	while (txstatus.b.txfspcavail > dwords &&
	       ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len &&
	       ep->dwc_ep.xfer_len != 0) {
		dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
		len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
		if (len > ep->dwc_ep.maxpacket) {
			len = ep->dwc_ep.maxpacket;
		dwords = (len + 3) / 4;
		dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts);
		DWC_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", epnum,
	DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum,
		    dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts));
570 * This function is called when the Device is disconnected. It stops
571 * any active requests and informs the Gadget driver of the
/**
 * This function is called when the Device is disconnected. It stops
 * any active requests and informs the Gadget driver of the
 * disconnect event.
 *
 * NOTE(review): interior lines (early return on already-disconnected,
 * some closing braces, the ep0 nuke target) appear truncated in this
 * view; code tokens are preserved as-is.
 */
void dwc_otg_pcd_stop(dwc_otg_pcd_t * pcd)
	int i, num_in_eps, num_out_eps;
	dwc_otg_pcd_ep_t *ep;
	gintmsk_data_t intr_mask = {.d32 = 0 };
	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
	num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps;
	num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps;
	DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__);
	/* don't disconnect drivers more than once */
	if (pcd->ep0state == EP0_DISCONNECT) {
		DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__);
	pcd->ep0state = EP0_DISCONNECT;
	/* Reset the OTG state. */
	dwc_otg_pcd_update_otg(pcd, 1);
	/* Disable the NP Tx Fifo Empty Interrupt. */
	intr_mask.b.nptxfempty = 1;
	dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
	/* Flush the FIFOs */
	/**@todo NGS Flush Periodic FIFOs */
	dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0x10);
	dwc_otg_flush_rx_fifo(GET_CORE_IF(pcd));
	/* prevent new request submissions, kill any outstanding requests */
	dwc_otg_request_nuke(ep);
	/* prevent new request submissions, kill any outstanding requests */
	for (i = 0; i < num_in_eps; i++) {
		dwc_otg_pcd_ep_t *ep = &pcd->in_ep[i];
		dwc_otg_request_nuke(ep);
	/* prevent new request submissions, kill any outstanding requests */
	for (i = 0; i < num_out_eps; i++) {
		dwc_otg_pcd_ep_t *ep = &pcd->out_ep[i];
		dwc_otg_request_nuke(ep);
	/* report disconnect; the driver is already quiesced */
	if (pcd->fops->disconnect) {
		/* Drop the lock around the gadget callback so it may
		 * re-enter the PCD (e.g. to dequeue requests). */
		DWC_SPINUNLOCK(pcd->lock);
		pcd->fops->disconnect(pcd);
		DWC_SPINLOCK(pcd->lock);
	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
632 * This interrupt indicates that ...
634 int32_t dwc_otg_pcd_handle_i2c_intr(dwc_otg_pcd_t * pcd)
636 gintmsk_data_t intr_mask = {.d32 = 0 };
637 gintsts_data_t gintsts;
639 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n", "i2cintr");
640 intr_mask.b.i2cintr = 1;
641 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
644 /* Clear interrupt */
646 gintsts.b.i2cintr = 1;
647 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
653 * This interrupt indicates that ...
655 int32_t dwc_otg_pcd_handle_early_suspend_intr(dwc_otg_pcd_t * pcd)
657 gintsts_data_t gintsts;
659 DWC_DEBUGPL(DBG_CIL,"Early Suspend Detected\n");
661 /* Clear interrupt */
663 gintsts.b.erlysuspend = 1;
664 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
670 * This function configures EPO to receive SETUP packets.
672 * @todo NGS: Update the comments from the HW FS.
674 * -# Program the following fields in the endpoint specific registers
675 * for Control OUT EP 0, in order to receive a setup packet
676 * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
678 * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
679 * to back setup packets)
680 * - In DMA mode, DOEPDMA0 Register with a memory address to
681 * store any setup packets received
683 * @param core_if Programming view of DWC_otg controller.
684 * @param pcd Programming view of the PCD.
/**
 * This function configures EP0 OUT to receive SETUP packets:
 * programs DOEPTSIZ0 for up to 3 back-to-back 8-byte SETUP packets
 * and, in DMA / descriptor-DMA mode, points DOEPDMA0 at the setup
 * buffer or a freshly prepared setup descriptor.
 *
 * NOTE(review): interior lines (the non-DMA else-branch, the doepctl
 * epena/field writes, some closing braces) appear truncated in this
 * view; code tokens are preserved as-is.
 *
 * @param core_if Programming view of DWC_otg controller.
 * @param pcd Programming view of the PCD.
 */
static inline void ep0_out_start(dwc_otg_core_if_t * core_if,
	dwc_otg_dev_if_t *dev_if = core_if->dev_if;
	deptsiz0_data_t doeptsize0 = {.d32 = 0 };
	dwc_otg_dma_desc_t *dma_desc;
	depctl_data_t doepctl = {.d32 = 0 };
	DWC_DEBUGPL(DBG_PCDV, "%s() doepctl0=%0x\n", __func__,
		    dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
	/* 3 SETUP packets x 8 bytes each. */
	doeptsize0.b.supcnt = 3;
	doeptsize0.b.pktcnt = 1;
	doeptsize0.b.xfersize = 8 * 3;
	if (core_if->dma_enable) {
		if (!core_if->dma_desc_enable) {
			/** put here as for Hermes mode deptisz register should not be written */
			dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz,
			/** @todo dma needs to handle multiple setup packets (up to 3) */
			dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma,
					pcd->setup_pkt_dma_handle);
			/* Descriptor DMA: alternate between the two
			 * pre-allocated setup descriptors. */
			dev_if->setup_desc_index =
			    (dev_if->setup_desc_index + 1) & 1;
			    dev_if->setup_desc_addr[dev_if->setup_desc_index];
			/** DMA Descriptor Setup */
			/* Mark busy while filling, then hand to the host. */
			dma_desc->status.b.bs = BS_HOST_BUSY;
			dma_desc->status.b.l = 1;
			dma_desc->status.b.ioc = 1;
			dma_desc->status.b.bytes = pcd->ep0.dwc_ep.maxpacket;
			dma_desc->buf = pcd->setup_pkt_dma_handle;
			dma_desc->status.b.bs = BS_HOST_READY;
			/** DOEPDMA0 Register write */
			dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma,
					dev_if->dma_setup_desc_addr[dev_if->
		/** put here as for Hermes mode deptisz register should not be written */
		dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz,
	/** DOEPCTL0 Register write */
	//doepctl.b.snak = 1;
	dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
	DWC_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
		    dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
	DWC_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
		    dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
753 * This interrupt occurs when a USB Reset is detected. When the USB
754 * Reset Interrupt occurs the device state is set to DEFAULT and the
755 * EP0 state is set to IDLE.
756 * -# Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1)
757 * -# Unmask the following interrupt bits
758 * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint)
759 * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint)
760 * - DOEPMSK.SETUP = 1
761 * - DOEPMSK.XferCompl = 1
762 * - DIEPMSK.XferCompl = 1
763 * - DIEPMSK.TimeOut = 1
764 * -# Program the following fields in the endpoint specific registers
765 * for Control OUT EP 0, in order to receive a setup packet
766 * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
768 * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
769 * to back setup packets)
770 * - In DMA mode, DOEPDMA0 Register with a memory address to
771 * store any setup packets received
772 * At this point, all the required initialization, except for enabling
773 * the control 0 OUT endpoint is done, for receiving SETUP packets.
/**
 * USB Reset interrupt handler: restarts gated PHY clocks, resets HNP
 * state, clears remote wakeup, NAKs all OUT EPs, resets data PIDs,
 * flushes FIFOs and the IN-token learning queue, programs the device
 * interrupt masks (per-EP masks when multiprocessor interrupts are
 * enabled, shared DOEPMSK/DIEPMSK otherwise), resets the device
 * address and re-arms EP0 for SETUP packets.
 *
 * NOTE(review): interior lines (declarations of i/dwc_ep, #ifdef
 * DWC_EN_ISOC guard open, doepctl.b.snak, the devaddr clear, some
 * mask bits and closing braces) appear truncated in this view; code
 * tokens are preserved as-is.
 */
int32_t dwc_otg_pcd_handle_usb_reset_intr(dwc_otg_pcd_t * pcd)
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dwc_otg_dev_if_t *dev_if = core_if->dev_if;
	depctl_data_t doepctl = {.d32 = 0 };
	depctl_data_t diepctl = {.d32 = 0 };
	daint_data_t daintmsk = {.d32 = 0 };
	doepmsk_data_t doepmsk = {.d32 = 0 };
	diepmsk_data_t diepmsk = {.d32 = 0 };
	dcfg_data_t dcfg = {.d32 = 0 };
	grstctl_t resetctl = {.d32 = 0 };
	dctl_data_t dctl = {.d32 = 0 };
	gintsts_data_t gintsts;
	pcgcctl_data_t power = {.d32 = 0 };
	/* If the PHY clock is gated, ungate it before touching the core. */
	power.d32 = dwc_read_reg32(core_if->pcgcctl);
	if (power.b.stoppclk) {
		power.b.stoppclk = 1;
		dwc_modify_reg32(core_if->pcgcctl, power.d32, 0);
		dwc_modify_reg32(core_if->pcgcctl, power.d32, 0);
		power.b.rstpdwnmodule = 1;
		dwc_modify_reg32(core_if->pcgcctl, power.d32, 0);
	core_if->lx_state = DWC_OTG_L0;
	DWC_DEBUGPL(DBG_CIL,"USB RESET\n");
	/* Reset ISOC frame tracking on all IN endpoints. */
	for (i = 1; i < 16; ++i) {
		dwc_otg_pcd_ep_t *ep;
		ep = get_in_ep(pcd, i);
		dwc_ep = &ep->dwc_ep;
		dwc_ep->next_frame = 0xffffffff;
#endif /* DWC_EN_ISOC */
	/* reset the HNP settings */
	dwc_otg_pcd_update_otg(pcd, 1);
	/* Clear the Remote Wakeup Signalling */
	dctl.b.rmtwkupsig = 1;
	dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);
	/* Set NAK for all OUT EPs */
	for (i = 0; i <= dev_if->num_out_eps; i++) {
		dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, doepctl.d32);
	/* Set data pid0 for all eps except for ep0 */
	doepctl.b.setd0pid = 1;
	for (i = 1; i <= dev_if->num_out_eps; i++) {
		dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, doepctl.d32);
	diepctl.b.setd0pid = 1;
	for (i = 1; i <= dev_if->num_in_eps; i++) {
		dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, diepctl.d32);
	/* Flush the NP Tx FIFO */
	dwc_otg_flush_tx_fifo(core_if, 0x10);
	/* Flush the NP Rx FIFO */
	dwc_otg_flush_rx_fifo(core_if);
	/* Flush the Learning Queue */
	resetctl.b.intknqflsh = 1;
	dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32);
	if (core_if->multiproc_int_enable) {
		/* Per-endpoint interrupt masks (multiprocessor mode). */
		daintmsk.b.inep0 = 1;
		daintmsk.b.outep0 = 1;
		dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk,
		doepmsk.b.xfercompl = 1;
		doepmsk.b.ahberr = 1;
		doepmsk.b.epdisabled = 1;
		if (core_if->dma_desc_enable) {
			doepmsk.b.stsphsercvd = 1;
		doepmsk.b.babble = 1;
		if(core_if->dma_enable) {
		dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[0],
		diepmsk.b.xfercompl = 1;
		diepmsk.b.timeout = 1;
		diepmsk.b.epdisabled = 1;
		diepmsk.b.ahberr = 1;
		diepmsk.b.intknepmis = 1;
		if (core_if->dma_desc_enable) {
		if(core_if->dma_enable) {
		dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[0],
		/* Shared device interrupt masks (legacy mode). */
		daintmsk.b.inep0 = 1;
		daintmsk.b.outep0 = 1;
		dwc_write_reg32(&dev_if->dev_global_regs->daintmsk,
		doepmsk.b.xfercompl = 1;
		doepmsk.b.ahberr = 1;
		doepmsk.b.epdisabled = 1;
		if (core_if->dma_desc_enable) {
			doepmsk.b.stsphsercvd = 1;
		dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32);
		diepmsk.b.xfercompl = 1;
		diepmsk.b.timeout = 1;
		diepmsk.b.epdisabled = 1;
		diepmsk.b.ahberr = 1;
		diepmsk.b.intknepmis = 1;
		if (core_if->dma_desc_enable) {
		dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32);
	/* Reset Device Address */
	dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg);
	dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
	/* setup EP0 to receive SETUP packets */
	ep0_out_start(core_if, pcd);
	/* Clear interrupt */
	gintsts.b.usbreset = 1;
	dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
939 * Get the device speed from the device status register and convert it
940 * to USB speed constant.
942 * @param core_if Programming view of DWC_otg controller.
944 static int get_device_speed(dwc_otg_core_if_t * core_if)
948 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
950 switch (dsts.b.enumspd) {
951 case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
952 speed = USB_SPEED_HIGH;
954 case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
955 case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
956 speed = USB_SPEED_FULL;
959 case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
960 speed = USB_SPEED_LOW;
968 * Read the device status register and set the device speed in the
970 * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate.
/**
 * Enumeration Done interrupt handler: activates EP0 at the
 * enumerated speed, reports the connection speed to the gadget
 * driver, and programs GUSBCFG.usbtrdtim (USB turnaround time) from
 * the speed and PHY interface type, then acknowledges the interrupt.
 *
 * NOTE(review): interior lines (the snpsid-dependent utmi8b/utmi16b
 * value assignments, the 'speed' declaration, some else branches and
 * closing braces, and the gintsts zero-init) appear truncated in
 * this view; code tokens are preserved as-is.
 */
int32_t dwc_otg_pcd_handle_enum_done_intr(dwc_otg_pcd_t * pcd)
	dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
	gintsts_data_t gintsts;
	gusbcfg_data_t gusbcfg;
	dwc_otg_core_global_regs_t *global_regs =
	    GET_CORE_IF(pcd)->core_global_regs;
	uint8_t utmi16b, utmi8b;
	DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n");
	/* Turnaround values differ by core release (snpsid). */
	if (GET_CORE_IF(pcd)->snpsid >= 0x4f54260a) {
	dwc_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->dwc_ep);
	print_ep0_state(pcd);
	pcd->ep0state = EP0_IDLE;
	speed = get_device_speed(GET_CORE_IF(pcd));
	/* Report the enumerated speed to the gadget driver. */
	pcd->fops->connect(pcd, speed);
	/* Set USB turnaround time based on device speed and PHY interface. */
	gusbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
	if (speed == USB_SPEED_HIGH) {
		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
		    DWC_HWCFG2_HS_PHY_TYPE_ULPI) {
			/* ULPI interface */
			gusbcfg.b.usbtrdtim = 9;
		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
		    DWC_HWCFG2_HS_PHY_TYPE_UTMI) {
			/* UTMI+ interface */
			if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 0) {
				gusbcfg.b.usbtrdtim = utmi8b;
			} else if (GET_CORE_IF(pcd)->hwcfg4.b.
				   utmi_phy_data_width == 1) {
				gusbcfg.b.usbtrdtim = utmi16b;
			} else if (GET_CORE_IF(pcd)->core_params->
				   phy_utmi_width == 8) {
				gusbcfg.b.usbtrdtim = utmi8b;
				gusbcfg.b.usbtrdtim = utmi16b;
		if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type ==
		    DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) {
			/* UTMI+ OR ULPI interface */
			if (gusbcfg.b.ulpi_utmi_sel == 1) {
				/* ULPI interface */
				gusbcfg.b.usbtrdtim = 9;
				/* UTMI+ interface */
				if (GET_CORE_IF(pcd)->core_params->
				    phy_utmi_width == 16) {
					gusbcfg.b.usbtrdtim = utmi16b;
					gusbcfg.b.usbtrdtim = utmi8b;
		/* Full or low speed */
		gusbcfg.b.usbtrdtim = 9;
	dwc_write_reg32(&global_regs->gusbcfg, gusbcfg.d32);
	/* Clear interrupt */
	gintsts.b.enumdone = 1;
	dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1058 * This interrupt indicates that the ISO OUT Packet was dropped due to
1059 * Rx FIFO full or Rx Status Queue Full. If this interrupt occurs
1060 * read all the data from the Rx FIFO.
1062 int32_t dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(dwc_otg_pcd_t * pcd)
1064 gintmsk_data_t intr_mask = {.d32 = 0 };
1065 gintsts_data_t gintsts;
1067 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n",
1068 "ISOC Out Dropped");
1070 intr_mask.b.isooutdrop = 1;
1071 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
1074 /* Clear interrupt */
1076 gintsts.b.isooutdrop = 1;
1077 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1084 * This interrupt indicates the end of the portion of the micro-frame
1085 * for periodic transactions. If there is a periodic transaction for
1086 * the next frame, load the packets into the EP periodic Tx FIFO.
1088 int32_t dwc_otg_pcd_handle_end_periodic_frame_intr(dwc_otg_pcd_t * pcd)
1090 gintmsk_data_t intr_mask = {.d32 = 0 };
1091 gintsts_data_t gintsts;
1092 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n", "EOP");
1094 intr_mask.b.eopframe = 1;
1095 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
1098 /* Clear interrupt */
1100 gintsts.b.eopframe = 1;
1101 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
1108 * This interrupt indicates that EP of the packet on the top of the
1109 * non-periodic Tx FIFO does not match EP of the IN Token received.
1111 * The "Device IN Token Queue" Registers are read to determine the
1112 * order the IN Tokens have been received. The non-periodic Tx FIFO
1113 * is flushed, so it can be reloaded in the order seen in the IN Token
1116 int32_t dwc_otg_pcd_handle_ep_mismatch_intr(dwc_otg_core_if_t * core_if)
1118 gintsts_data_t gintsts;
1119 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if);
1121 /* Clear interrupt */
1123 gintsts.b.epmismatch = 1;
1124 dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
1130 * This funcion stalls EP0.
1132 static inline void ep0_do_stall(dwc_otg_pcd_t * pcd, const int err_val)
1134 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1135 usb_device_request_t *ctrl = &pcd->setup_pkt->req;
1136 DWC_WARN("req %02x.%02x protocol STALL; err %d\n",
1137 ctrl->bmRequestType, ctrl->bRequest, err_val);
1139 ep0->dwc_ep.is_in = 1;
1140 dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep0->dwc_ep);
1141 pcd->ep0.stopped = 1;
1142 pcd->ep0state = EP0_IDLE;
1143 ep0_out_start(GET_CORE_IF(pcd), pcd);
1147 * This functions delegates the setup command to the gadget driver.
1149 static inline void do_gadget_setup(dwc_otg_pcd_t * pcd,
1150 usb_device_request_t * ctrl)
1153 DWC_SPINUNLOCK(pcd->lock);
1154 ret = pcd->fops->setup(pcd, (uint8_t *) ctrl);
1155 DWC_SPINLOCK(pcd->lock);
1157 ep0_do_stall(pcd, ret);
1160 /** @todo This is a g_file_storage gadget driver specific
1161 * workaround: a DELAYED_STATUS result from the fsg_setup
1162 * routine will result in the gadget queueing a EP0 IN status
1163 * phase for a two-stage control transfer. Exactly the same as
1164 * a SET_CONFIGURATION/SET_INTERFACE except that this is a class
1165 * specific request. Need a generic way to know when the gadget
1166 * driver will queue the status phase. Can we assume when we
1167 * call the gadget driver setup() function that it will always
1168 * queue and require the following flag? Need to look into
1172 if (ret == 256 + 999) {
1173 pcd->request_config = 1;
1179 * This functions delegates the CFI setup commands to the gadget driver.
1180 * This function will return a negative value to indicate a failure.
1182 static inline int cfi_gadget_setup(dwc_otg_pcd_t * pcd,
1183 struct cfi_usb_ctrlrequest *ctrl_req)
1187 if (pcd->fops && pcd->fops->cfi_setup) {
1188 DWC_SPINUNLOCK(pcd->lock);
1189 ret = pcd->fops->cfi_setup(pcd, ctrl_req);
1190 DWC_SPINLOCK(pcd->lock);
1192 ep0_do_stall(pcd, ret);
1202 * This function starts the Zero-Length Packet for the IN status phase
1203 * of a 2 stage control transfer.
1205 static inline void do_setup_in_status_phase(dwc_otg_pcd_t * pcd)
1207 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
/* If EP0 is stalled, the status phase must not be started. */
1208 if (pcd->ep0state == EP0_STALL) {
1212 pcd->ep0state = EP0_IN_STATUS_PHASE;
1214 /* Prepare for more SETUP Packets */
1215 DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n");
/* Program a zero-length IN transfer; the setup-packet DMA buffer is
 * reused as the (unused) data pointer for the ZLP. */
1216 ep0->dwc_ep.xfer_len = 0;
1217 ep0->dwc_ep.xfer_count = 0;
1218 ep0->dwc_ep.is_in = 1;
1219 ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle;
1220 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1222 /* Prepare for more SETUP Packets */
1223 //ep0_out_start(GET_CORE_IF(pcd), pcd);
1227 * This function starts the Zero-Length Packet for the OUT status phase
1228 * of a 2 stage control transfer.
1230 static inline void do_setup_out_status_phase(dwc_otg_pcd_t * pcd)
1232 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
/* A stalled EP0 must not be given a status phase. */
1233 if (pcd->ep0state == EP0_STALL) {
1234 DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n");
1237 pcd->ep0state = EP0_OUT_STATUS_PHASE;
1239 DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n");
/* Zero-length OUT transfer; setup-packet buffer reused as DMA target. */
1240 ep0->dwc_ep.xfer_len = 0;
1241 ep0->dwc_ep.xfer_count = 0;
1242 ep0->dwc_ep.is_in = 0;
1243 ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle;
1244 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1246 /* Prepare for more SETUP Packets */
/* In Slave (non-DMA) mode the OUT endpoint must be re-armed by software
 * to receive the next SETUP packet. */
1247 if (GET_CORE_IF(pcd)->dma_enable == 0) {
1248 ep0_out_start(GET_CORE_IF(pcd), pcd);
1253 * Clear the EP halt (STALL) and if pending requests start the
1256 static inline void pcd_clear_halt(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * ep)
/* Only clear the hardware stall if a deferred-clear is not pending. */
1258 if (ep->dwc_ep.stall_clear_flag == 0)
1259 dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
1261 /* Reactivate the EP */
1262 dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
1265 /* If there is a request in the EP queue start it */
1267 /** @todo FIXME: this causes an EP mismatch in DMA mode.
1268 * epmismatch not yet implemented. */
1271 * Above fixme is solved by implementing a tasklet to call the
1272 * start_next_request(), outside of interrupt context at some
1273 * time after the current time, after a clear-halt setup packet.
1274 * Still need to implement ep mismatch in the future if a gadget
1275 * ever uses more than one endpoint at once
/* Defer (re)starting the next transfer to tasklet context, per the
 * workaround described above. */
1278 DWC_TASK_SCHEDULE(pcd->start_xfer_tasklet);
1280 /* Start Control Status Phase */
1281 do_setup_in_status_phase(pcd);
1285 * This function is called when the SET_FEATURE TEST_MODE Setup packet
1286 * is sent from the host. The Device Control register is written with
1287 * the Test Mode bits set to the specified Test Mode. This is done as
1288 * a tasklet so that the "Status" phase of the control transfer
1289 * completes before transmitting the TEST packets.
1291 * @todo This has not been tested since the tasklet struct was put
1292 * into the PCD struct!
1295 void do_test_mode(void *data)
1298 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1299 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
/* Test mode selector saved by do_set_feature() from wIndex. */
1300 int test_mode = pcd->test_mode;
1302 // DWC_WARN("%s() has not been tested since being rewritten!\n", __func__);
/* Read-modify-write DCTL to set the tstctl field for the chosen mode.
 * NOTE(review): cases 1 (TEST_J) and 2 (TEST_K) are elided in this
 * excerpt — confirm against the full source. */
1304 dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl);
1305 switch (test_mode) {
1314 case 3: // TEST_SE0_NAK
1318 case 4: // TEST_PACKET
1322 case 5: // TEST_FORCE_ENABLE
1326 dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
1330 * This function process the GET_STATUS Setup Commands.
1332 static inline void do_get_status(dwc_otg_pcd_t * pcd)
1334 usb_device_request_t ctrl = pcd->setup_pkt->req;
1335 dwc_otg_pcd_ep_t *ep;
1336 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
/* 2-byte status reply is built in the pre-allocated DMA-able buffer. */
1337 uint16_t *status = pcd->status_buf;
1340 DWC_DEBUGPL(DBG_PCD,
1341 "GET_STATUS %02x.%02x v%04x i%04x l%04x\n",
1342 ctrl.bmRequestType, ctrl.bRequest,
1343 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1344 UGETW(ctrl.wLength));
1347 switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
/* Device recipient: bit 0 = self-powered, bit 1 = remote wakeup
 * enabled (USB 2.0 ch. 9.4.5). */
1349 *status = 0x1; /* Self powered */
1350 *status |= pcd->remote_wakeup_enable << 1;
/* Endpoint recipient: look the EP up by address from wIndex. */
1358 ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
/* Unknown EP or over-long request: stall EP0. */
1359 if (ep == 0 || UGETW(ctrl.wLength) > 2) {
1360 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1363 /** @todo check for EP stall */
1364 *status = ep->stopped;
/* Start the 2-byte IN data phase carrying the status word. */
1367 pcd->ep0_pending = 1;
1368 ep0->dwc_ep.start_xfer_buff = (uint8_t *) status;
1369 ep0->dwc_ep.xfer_buff = (uint8_t *) status;
1370 ep0->dwc_ep.dma_addr = pcd->status_buf_dma_handle;
1371 ep0->dwc_ep.xfer_len = 2;
1372 ep0->dwc_ep.xfer_count = 0;
1373 ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len;
1374 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep);
1378 * This function process the SET_FEATURE Setup Commands.
1380 static inline void do_set_feature(dwc_otg_pcd_t * pcd)
1382 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1383 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
1384 usb_device_request_t ctrl = pcd->setup_pkt->req;
1385 dwc_otg_pcd_ep_t *ep = 0;
/* otg_cap gates the OTG HNP features below. */
1386 int32_t otg_cap_param = core_if->core_params->otg_cap;
1387 gotgctl_data_t gotgctl = {.d32 = 0 };
1389 DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
1390 ctrl.bmRequestType, ctrl.bRequest,
1391 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1392 UGETW(ctrl.wLength));
1393 DWC_DEBUGPL(DBG_PCD, "otg_cap=%d\n", otg_cap_param);
1395 switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
/* --- Device recipient features --- */
1397 switch (UGETW(ctrl.wValue)) {
1398 case UF_DEVICE_REMOTE_WAKEUP:
1399 pcd->remote_wakeup_enable = 1;
1403 /* Setup the Test Mode tasklet to do the Test
1404 * Packet generation after the SETUP Status
1405 * phase has completed. */
1407 /** @todo This has not been tested since the
1408 * tasklet struct was put into the PCD
/* Test selector is in the high byte of wIndex (USB 2.0 ch. 9.4.9). */
1410 pcd->test_mode = UGETW(ctrl.wIndex) >> 8;
1411 DWC_TASK_SCHEDULE(pcd->test_mode_tasklet);
1414 case UF_DEVICE_B_HNP_ENABLE:
1415 DWC_DEBUGPL(DBG_PCDV,
1416 "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
1418 /* dev may initiate HNP */
/* Only honor HNP-related features when the core was configured
 * HNP+SRP capable; otherwise stall. */
1419 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1420 pcd->b_hnp_enable = 1;
1421 dwc_otg_pcd_update_otg(pcd, 0);
1422 DWC_DEBUGPL(DBG_PCD, "Request B HNP\n");
1423 /**@todo Is the gotgctl.devhnpen cleared
1424 * by a USB Reset? */
1425 gotgctl.b.devhnpen = 1;
1426 gotgctl.b.hnpreq = 1;
1427 dwc_write_reg32(&global_regs->gotgctl,
1430 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1434 case UF_DEVICE_A_HNP_SUPPORT:
1435 /* RH port supports HNP */
1436 DWC_DEBUGPL(DBG_PCDV,
1437 "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n");
1438 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1439 pcd->a_hnp_support = 1;
1440 dwc_otg_pcd_update_otg(pcd, 0);
1442 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1446 case UF_DEVICE_A_ALT_HNP_SUPPORT:
1447 /* other RH port does */
1449 "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
1450 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
1451 pcd->a_alt_hnp_support = 1;
1452 dwc_otg_pcd_update_otg(pcd, 0);
1454 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
/* Device-recipient requests are completed here with an IN status ZLP. */
1458 do_setup_in_status_phase(pcd);
/* Interface recipient: pass through to the gadget driver. */
1462 do_gadget_setup(pcd, &ctrl);
/* Endpoint recipient: ENDPOINT_HALT stalls the addressed EP. */
1466 if (UGETW(ctrl.wValue) == UF_ENDPOINT_HALT) {
1467 ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
1469 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1473 dwc_otg_ep_set_stall(core_if, &ep->dwc_ep);
1475 do_setup_in_status_phase(pcd);
1481 * This function process the CLEAR_FEATURE Setup Commands.
1483 static inline void do_clear_feature(dwc_otg_pcd_t * pcd)
1485 usb_device_request_t ctrl = pcd->setup_pkt->req;
1486 dwc_otg_pcd_ep_t *ep = 0;
1488 DWC_DEBUGPL(DBG_PCD,
1489 "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
1490 ctrl.bmRequestType, ctrl.bRequest,
1491 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1492 UGETW(ctrl.wLength));
1494 switch (UT_GET_RECIPIENT(ctrl.bmRequestType)) {
/* Device recipient: only REMOTE_WAKEUP is handled; it is disabled. */
1496 switch (UGETW(ctrl.wValue)) {
1497 case UF_DEVICE_REMOTE_WAKEUP:
1498 pcd->remote_wakeup_enable = 0;
1502 /** @todo Add CLEAR_FEATURE for TEST modes. */
/* Complete with the IN status ZLP. */
1505 do_setup_in_status_phase(pcd);
/* Endpoint recipient: clear the halt on the addressed EP. */
1509 ep = get_ep_by_addr(pcd, UGETW(ctrl.wIndex));
1511 ep0_do_stall(pcd, -DWC_E_NOT_SUPPORTED);
1515 pcd_clear_halt(pcd, ep);
1522 * This function process the SET_ADDRESS Setup Commands.
1524 static inline void do_set_address(dwc_otg_pcd_t * pcd)
1526 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
1527 usb_device_request_t ctrl = pcd->setup_pkt->req;
/* SET_ADDRESS is only valid as a device-recipient request. */
1529 if (ctrl.bmRequestType == UT_DEVICE) {
1530 dcfg_data_t dcfg = {.d32 = 0 };
1533 // DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue);
/* Program the new device address (from wValue) into DCFG, then ack
 * with the IN status phase; the core applies the address after the
 * status stage per USB 2.0 ch. 9.4.6. */
1535 dcfg.b.devaddr = UGETW(ctrl.wValue);
1536 dwc_modify_reg32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32);
1537 do_setup_in_status_phase(pcd);
1542 * This function processes SETUP commands. In Linux, the USB Command
1543 * processing is done in two places - the first being the PCD and the
1544 * second in the Gadget Driver (for example, the File-Backed Storage
1548 * <tr><td>Command </td><td>Driver </td><td>Description</td></tr>
1550 * <tr><td>GET_STATUS </td><td>PCD </td><td>Command is processed as
1551 * defined in chapter 9 of the USB 2.0 Specification chapter 9
1554 * <tr><td>CLEAR_FEATURE </td><td>PCD </td><td>The Device and Endpoint
1555 * requests are the ENDPOINT_HALT feature is processed, all others the
1556 * interface requests are ignored.</td></tr>
1558 * <tr><td>SET_FEATURE </td><td>PCD </td><td>The Device and Endpoint
1559 * requests are processed by the PCD. Interface requests are passed
1560 * to the Gadget Driver.</td></tr>
1562 * <tr><td>SET_ADDRESS </td><td>PCD </td><td>Program the DCFG reg,
1563 * with device address received </td></tr>
1565 * <tr><td>GET_DESCRIPTOR </td><td>Gadget Driver </td><td>Return the
1566 * requested descriptor</td></tr>
1568 * <tr><td>SET_DESCRIPTOR </td><td>Gadget Driver </td><td>Optional -
1569 * not implemented by any of the existing Gadget Drivers.</td></tr>
1571 * <tr><td>SET_CONFIGURATION </td><td>Gadget Driver </td><td>Disable
1572 * all EPs and enable EPs for new configuration.</td></tr>
1574 * <tr><td>GET_CONFIGURATION </td><td>Gadget Driver </td><td>Return
1575 * the current configuration</td></tr>
1577 * <tr><td>SET_INTERFACE </td><td>Gadget Driver </td><td>Disable all
1578 * EPs and enable EPs for new configuration.</td></tr>
1580 * <tr><td>GET_INTERFACE </td><td>Gadget Driver </td><td>Return the
1581 * current interface.</td></tr>
1583 * <tr><td>SYNC_FRAME </td><td>PCD </td><td>Display debug
1584 * message.</td></tr>
1587 * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are
1588 * processed by pcd_setup. Calling the Function Driver's setup function from
1589 * pcd_setup processes the gadget SETUP commands.
1591 static inline void pcd_setup(dwc_otg_pcd_t * pcd)
1593 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1594 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1595 usb_device_request_t ctrl = pcd->setup_pkt->req;
1596 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
1598 deptsiz0_data_t doeptsize0 = {.d32 = 0 };
1602 struct cfi_usb_ctrlrequest cfi_req;
1606 DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1607 ctrl.bmRequestType, ctrl.bRequest,
1608 UGETW(ctrl.wValue), UGETW(ctrl.wIndex),
1609 UGETW(ctrl.wLength));
1612 doeptsize0.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doeptsiz);
1614 /** @todo handle > 1 setup packet , assert error for now */
/* Buffer-DMA mode with supcnt < 2 means more than one back-to-back
 * SETUP arrived, which this driver cannot handle yet. */
1616 if (core_if->dma_enable && core_if->dma_desc_enable == 0
1617 && (doeptsize0.b.supcnt < 2)) {
1619 ("\n\n----------- CANNOT handle > 1 setup packet in DMA mode\n\n");
1622 /* Clean up the request queue */
1623 dwc_otg_request_nuke(ep0);
/* Determine the data-phase direction from bmRequestType. */
1626 if (ctrl.bmRequestType & UE_DIR_IN) {
1627 ep0->dwc_ep.is_in = 1;
1628 pcd->ep0state = EP0_IN_DATA_PHASE;
1630 ep0->dwc_ep.is_in = 0;
1631 pcd->ep0state = EP0_OUT_DATA_PHASE;
/* No data stage: go straight to the IN status phase. */
1634 if (UGETW(ctrl.wLength) == 0) {
1635 ep0->dwc_ep.is_in = 1;
1636 pcd->ep0state = EP0_IN_STATUS_PHASE;
/* Non-standard (class/vendor) requests are dispatched to CFI or to the
 * gadget driver rather than handled here. */
1639 if (UT_GET_TYPE(ctrl.bmRequestType) != UT_STANDARD) {
1642 DWC_MEMCPY(&cfi_req, &ctrl, sizeof(usb_device_request_t));
1644 //printk(KERN_ALERT "CFI: req_type=0x%02x; req=0x%02x\n", ctrl.bRequestType, ctrl.bRequest);
/* Vendor requests in (0xB0, 0xBF) are reserved for CFI. */
1645 if (UT_GET_TYPE(cfi_req.bRequestType) == UT_VENDOR) {
1646 if (cfi_req.bRequest > 0xB0 && cfi_req.bRequest < 0xBF) {
1647 retval = cfi_setup(pcd, &cfi_req);
1649 ep0_do_stall(pcd, retval);
1650 pcd->ep0_pending = 0;
1654 /* if need gadget setup then call it and check the retval */
1655 if (pcd->cfi->need_gadget_att) {
1657 cfi_gadget_setup(pcd,
1661 pcd->ep0_pending = 0;
1666 if (pcd->cfi->need_status_in_complete) {
1667 do_setup_in_status_phase(pcd);
1674 /* handle non-standard (class/vendor) requests in the gadget driver */
1675 do_gadget_setup(pcd, &ctrl);
1679 /** @todo NGS: Handle bad setup packet? */
1681 ///////////////////////////////////////////
1682 //// --- Standard Request handling --- ////
1684 switch (ctrl.bRequest) {
1689 case UR_CLEAR_FEATURE:
1690 do_clear_feature(pcd);
1693 case UR_SET_FEATURE:
1694 do_set_feature(pcd);
1697 case UR_SET_ADDRESS:
1698 do_set_address(pcd);
1701 case UR_SET_INTERFACE:
1703 // _pcd->request_config = 1; /* Configuration changed */
1704 do_gadget_setup(pcd, &ctrl);
1707 case UR_SYNCH_FRAME:
1708 do_gadget_setup(pcd, &ctrl);
/* Everything else (GET_DESCRIPTOR, SET_CONFIGURATION, ...) goes to
 * the gadget driver, per the table above. */
1712 /* Call the Gadget Driver's setup functions */
1713 do_gadget_setup(pcd, &ctrl);
1719 * This function completes the ep0 control transfer.
1721 static int32_t ep0_complete_request(dwc_otg_pcd_ep_t * ep)
1723 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
1724 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1725 dwc_otg_dev_in_ep_regs_t *in_ep_regs =
1726 dev_if->in_ep_regs[ep->dwc_ep.num];
1728 dwc_otg_dev_out_ep_regs_t *out_ep_regs =
1729 dev_if->out_ep_regs[ep->dwc_ep.num];
1731 deptsiz0_data_t deptsiz;
1732 desc_sts_data_t desc_sts;
1733 dwc_otg_pcd_request_t *req;
1735 dwc_otg_pcd_t *pcd = ep->pcd;
1738 struct cfi_usb_ctrlrequest *ctrlreq;
1739 int retval = -DWC_E_NOT_SUPPORTED;
/* PCD-internal request (e.g. GET_STATUS) with no queued gadget request:
 * the data phase just finished, so run the opposite-direction status
 * phase. */
1742 if (pcd->ep0_pending && DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1743 if (ep->dwc_ep.is_in) {
1745 DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n");
1747 do_setup_out_status_phase(pcd);
1750 DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n");
/* CFI path: hand the completed OUT data to the CFI layer and,
 * optionally, to the gadget before starting the status phase. */
1754 ctrlreq = &pcd->cfi->ctrl_req;
1756 if (UT_GET_TYPE(ctrlreq->bRequestType) == UT_VENDOR) {
1757 if (ctrlreq->bRequest > 0xB0
1758 && ctrlreq->bRequest < 0xBF) {
1760 /* Return if the PCD failed to handle the request */
1763 ctrl_write_complete(pcd->cfi,
1766 ("ERROR setting a new value in the PCD(%d)\n",
1768 ep0_do_stall(pcd, retval);
1769 pcd->ep0_pending = 0;
1773 /* If the gadget needs to be notified on the request */
1774 if (pcd->cfi->need_gadget_att == 1) {
1775 //retval = do_gadget_setup(pcd, &pcd->cfi->ctrl_req);
1777 cfi_gadget_setup(pcd,
1781 /* Return from the function if the gadget failed to process
1782 * the request properly - this should never happen !!!
1786 ("ERROR setting a new value in the gadget(%d)\n",
1788 pcd->ep0_pending = 0;
1793 CFI_INFO("%s: RETVAL=%d\n", __func__,
1795 /* If we hit here then the PCD and the gadget has properly
1796 * handled the request - so send the ZLP IN to the host.
1798 /* @todo: MAS - decide whether we need to start the setup
1799 * stage based on the need_setup value of the cfi object
1801 do_setup_in_status_phase(pcd);
1802 pcd->ep0_pending = 0;
1808 do_setup_in_status_phase(pcd);
1810 pcd->ep0_pending = 0;
/* Nothing queued and nothing pending: nothing to complete. */
1814 if (DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1817 req = DWC_CIRCLEQ_FIRST(&ep->queue);
/* Status phase completed: nothing to transfer, just finish up. */
1819 if (pcd->ep0state == EP0_OUT_STATUS_PHASE
1820 || pcd->ep0state == EP0_IN_STATUS_PHASE) {
1822 } else if (ep->dwc_ep.is_in) {
/* IN data phase: check DIEPTSIZ (or the DMA descriptor status in
 * descriptor-DMA mode) to see whether everything was sent. */
1823 deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz);
1824 if (core_if->dma_desc_enable != 0)
1825 desc_sts = dev_if->in_desc_addr->status;
1827 DWC_DEBUGPL(DBG_PCDV, "%d len=%d xfersize=%d pktcnt=%d\n",
1828 ep->dwc_ep.num, ep->dwc_ep.xfer_len,
1829 deptsiz.b.xfersize, deptsiz.b.pktcnt);
1832 if (((core_if->dma_desc_enable == 0)
1833 && (deptsiz.b.xfersize == 0))
1834 || ((core_if->dma_desc_enable != 0)
1835 && (desc_sts.b.bytes == 0))) {
1836 req->actual = ep->dwc_ep.xfer_count;
1837 /* Is a Zero Len Packet needed? */
1838 if (req->sent_zlp) {
1840 DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n");
/* IN data done: move on to the OUT status phase. */
1844 do_setup_out_status_phase(pcd);
/* OUT data phase: read DOEPTSIZ for the received byte count. */
1849 deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz);
1850 DWC_DEBUGPL(DBG_PCDV, "%d len=%d xsize=%d pktcnt=%d\n",
1851 ep->dwc_ep.num, ep->dwc_ep.xfer_len,
1852 deptsiz.b.xfersize, deptsiz.b.pktcnt);
1854 req->actual = ep->dwc_ep.xfer_count;
1856 /* Is a Zero Len Packet needed? */
1857 if (req->sent_zlp) {
1859 DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n");
1863 if (core_if->dma_desc_enable == 0)
1864 do_setup_in_status_phase(pcd);
1867 /* Complete the request */
1869 dwc_otg_request_done(ep, req, 0);
/* Reset the EP transfer bookkeeping for the next control transfer. */
1870 ep->dwc_ep.start_xfer_buff = 0;
1871 ep->dwc_ep.xfer_buff = 0;
1872 ep->dwc_ep.xfer_len = 0;
1880 * This function traverses all the CFI DMA descriptors and
1881 * accumulates the bytes that are left to be transferred.
1883 * @return The total bytes left to be transferred, or a negative value as failure
1885 static inline int cfi_calc_desc_residue(dwc_otg_pcd_ep_t * ep)
1889 struct dwc_otg_dma_desc *ddesc = NULL;
1890 struct cfi_ep *cfiep;
1892 /* See if the pcd_ep has its respective cfi_ep mapped */
1893 cfiep = get_cfi_ep_by_pcd_ep(ep->pcd->cfi, ep);
1895 CFI_INFO("%s: Failed to find ep\n", __func__);
1899 ddesc = ep->dwc_ep.descs;
/* Sum the remaining-bytes field of each descriptor, bounded by both
 * the CFI descriptor count and the per-EP hardware maximum. */
1901 for (i = 0; (i < cfiep->desc_count) && (i < MAX_DMA_DESCS_PER_EP); i++) {
1903 #if defined(PRINT_CFI_DMA_DESCS)
1904 print_desc(ddesc, ep->ep.name, i);
1906 ret += ddesc->status.b.bytes;
1911 CFI_INFO("!!!!!!!!!! WARNING (%s) - residue=%d\n", __func__,
1919 * This function completes the request for the EP. If there are
1920 * additional requests for the EP in the queue they will be started.
1922 static void complete_ep(dwc_otg_pcd_ep_t * ep)
1924 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
1925 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
1926 dwc_otg_dev_in_ep_regs_t *in_ep_regs =
1927 dev_if->in_ep_regs[ep->dwc_ep.num];
1928 deptsiz_data_t deptsiz;
1929 desc_sts_data_t desc_sts;
1930 dwc_otg_pcd_request_t *req = 0;
1931 dwc_otg_dma_desc_t *dma_desc;
/* Bytes moved in this completion; used to update buffers/counters and
 * to compute req->actual. */
1932 uint32_t byte_count = 0;
1936 DWC_DEBUGPL(DBG_PCDV, "%s() %d-%s\n", __func__, ep->dwc_ep.num,
1937 (ep->dwc_ep.is_in ? "IN" : "OUT"));
1939 /* Get any pending requests */
1940 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1941 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1943 DWC_DEBUGPL(DBG_CIL,"complete_ep 0x%p, req = NULL!\n", ep);
1947 DWC_DEBUGPL(DBG_CIL,"complete_ep 0x%p, ep->queue empty!\n", ep);
1951 DWC_DEBUGPL(DBG_PCD, "Requests %d\n", ep->pcd->request_pending);
/* ---------------- IN endpoint completion ---------------- */
1953 if (ep->dwc_ep.is_in) {
1954 deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz);
1956 if (core_if->dma_enable) {
/* Buffer DMA: transfer is complete when both xfersize and
 * pktcnt have drained to zero. */
1957 if (core_if->dma_desc_enable == 0) {
1958 if (deptsiz.b.xfersize == 0
1959 && deptsiz.b.pktcnt == 0) {
1961 ep->dwc_ep.xfer_len -
1962 ep->dwc_ep.xfer_count;
1964 ep->dwc_ep.xfer_buff += byte_count;
1965 ep->dwc_ep.dma_addr += byte_count;
1966 ep->dwc_ep.xfer_count += byte_count;
1968 DWC_DEBUGPL(DBG_PCDV,
1969 "%d-%s len=%d xfersize=%d pktcnt=%d\n",
1972 is_in ? "IN" : "OUT"),
1973 ep->dwc_ep.xfer_len,
/* More data than one programmed transfer could carry:
 * start the next portion. */
1977 if (ep->dwc_ep.xfer_len <
1978 ep->dwc_ep.total_len) {
1979 dwc_otg_ep_start_transfer
1980 (core_if, &ep->dwc_ep);
1981 } else if (ep->dwc_ep.sent_zlp) {
1983 * This fragment of code should initiate 0
1984 * length transfer in case if it is queued
1985 * a transfer with size divisible to EPs max
1986 * packet size and with usb_request zero field
1987 * is set, which means that after data is transferred,
1988 * it is also should be transferred
1989 * a 0 length packet at the end. For Slave and
1990 * Buffer DMA modes in this case SW has
1991 * to initiate 2 transfers one with transfer size,
1992 * and the second with 0 size. For Descriptor
1993 * DMA mode SW is able to initiate a transfer,
1994 * which will handle all the packets including
1997 ep->dwc_ep.sent_zlp = 0;
1998 dwc_otg_ep_start_zl_transfer
1999 (core_if, &ep->dwc_ep);
2005 ("Incomplete transfer (%d - %s [siz=%d pkt=%d])\n",
2007 (ep->dwc_ep.is_in ? "IN" : "OUT"),
/* Descriptor DMA: walk the descriptor chain and sum the
 * leftover byte counts. */
2012 dma_desc = ep->dwc_ep.desc_addr;
2014 ep->dwc_ep.sent_zlp = 0;
2017 CFI_INFO("%s: BUFFER_MODE=%d\n", __func__,
2018 ep->dwc_ep.buff_mode);
/* Non-standard (CFI) buffer modes use the CFI residue
 * calculation instead of the plain descriptor walk. */
2019 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2022 residue = cfi_calc_desc_residue(ep);
2026 byte_count = residue;
2029 for (i = 0; i < ep->dwc_ep.desc_cnt;
2031 desc_sts = dma_desc->status;
2032 byte_count += desc_sts.b.bytes;
/* byte_count == 0 means every descriptor fully drained. */
2038 if (byte_count == 0) {
2039 ep->dwc_ep.xfer_count =
2040 ep->dwc_ep.total_len;
2043 DWC_WARN("Incomplete transfer\n");
/* Slave mode: same drain check directly on DIEPTSIZ. */
2047 if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) {
2048 DWC_DEBUGPL(DBG_PCDV,
2049 "%d-%s len=%d xfersize=%d pktcnt=%d\n",
2051 ep->dwc_ep.is_in ? "IN" : "OUT",
2052 ep->dwc_ep.xfer_len,
2056 /* Check if the whole transfer was completed,
2057 * if no, setup transfer for next portion of data
2059 if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
2060 dwc_otg_ep_start_transfer(core_if,
2062 } else if (ep->dwc_ep.sent_zlp) {
2064 * This fragment of code should initiate 0
2065 * length transfer in case if it is queued
2066 * a transfer with size divisible to EPs max
2067 * packet size and with usb_request zero field
2068 * is set, which means that after data is transferred,
2069 * it is also should be transferred
2070 * a 0 length packet at the end. For Slave and
2071 * Buffer DMA modes in this case SW has
2072 * to initiate 2 transfers one with transfer size,
2073 * and the second with 0 size. For Descriptor
2074 * DMA mode SW is able to initiate a transfer,
2075 * which will handle all the packets including
2078 ep->dwc_ep.sent_zlp = 0;
2079 dwc_otg_ep_start_zl_transfer(core_if,
2087 ("Incomplete transfer (%d-%s [siz=%d pkt=%d])\n",
2089 (ep->dwc_ep.is_in ? "IN" : "OUT"),
2090 deptsiz.b.xfersize, deptsiz.b.pktcnt);
/* ---------------- OUT endpoint completion ---------------- */
2094 dwc_otg_dev_out_ep_regs_t *out_ep_regs =
2095 dev_if->out_ep_regs[ep->dwc_ep.num];
2097 if (core_if->dma_enable) {
2098 if (core_if->dma_desc_enable) {
2099 dma_desc = ep->dwc_ep.desc_addr;
2101 ep->dwc_ep.sent_zlp = 0;
2104 CFI_INFO("%s: BUFFER_MODE=%d\n", __func__,
2105 ep->dwc_ep.buff_mode);
2106 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2108 residue = cfi_calc_desc_residue(ep);
2111 byte_count = residue;
2115 for (i = 0; i < ep->dwc_ep.desc_cnt;
2117 desc_sts = dma_desc->status;
2118 byte_count += desc_sts.b.bytes;
/* Account for the DWORD-alignment padding added when the
 * OUT length was programmed (round up to 4 bytes). */
2125 ep->dwc_ep.xfer_count = ep->dwc_ep.total_len
2127 ((4 - (ep->dwc_ep.total_len & 0x3)) & 0x3);
/* Buffer DMA / Slave: derive received bytes from DOEPTSIZ. */
2132 dwc_read_reg32(&out_ep_regs->doeptsiz);
2134 byte_count = (ep->dwc_ep.xfer_len -
2135 ep->dwc_ep.xfer_count -
2136 deptsiz.b.xfersize);
2137 ep->dwc_ep.xfer_buff += byte_count;
2138 ep->dwc_ep.dma_addr += byte_count;
2139 ep->dwc_ep.xfer_count += byte_count;
2141 /* Check if the whole transfer was completed,
2142 * if no, setup transfer for next portion of data
2144 if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
2145 dwc_otg_ep_start_transfer(core_if,
2147 } else if (ep->dwc_ep.sent_zlp) {
2149 * This fragment of code should initiate 0
2150 * length transfer in case if it is queued
2151 * a transfer with size divisible to EPs max
2152 * packet size and with usb_request zero field
2153 * is set, which means that after data is transferred,
2154 * it is also should be transferred
2155 * a 0 length packet at the end. For Slave and
2156 * Buffer DMA modes in this case SW has
2157 * to initiate 2 transfers one with transfer size,
2158 * and the second with 0 size. For Descriptor
2159 * DMA mode SW is able to initiate a transfer,
2160 * which will handle all the packets including
2163 ep->dwc_ep.sent_zlp = 0;
2164 dwc_otg_ep_start_zl_transfer(core_if,
2172 /* Check if the whole transfer was completed,
2173 * if no, setup transfer for next portion of data
2175 if (ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) {
2176 dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
2177 } else if (ep->dwc_ep.sent_zlp) {
2179 * This fragment of code should initiate 0
2180 * length transfer in case if it is queued
2181 * a transfer with size divisible to EPs max
2182 * packet size and with usb_request zero field
2183 * is set, which means that after data is transferred,
2184 * it is also should be transferred
2185 * a 0 length packet at the end. For Slave and
2186 * Buffer DMA modes in this case SW has
2187 * to initiate 2 transfers one with transfer size,
2188 * and the second with 0 size. For Descriptor
2189 * DMA mode SW is able to initiate a transfer,
2190 * which will handle all the packets including
2193 ep->dwc_ep.sent_zlp = 0;
2194 dwc_otg_ep_start_zl_transfer(core_if,
2201 DWC_DEBUGPL(DBG_PCDV,
2202 "addr %p, %d-%s len=%d cnt=%d xsize=%d pktcnt=%d\n",
2203 &out_ep_regs->doeptsiz, ep->dwc_ep.num,
2204 ep->dwc_ep.is_in ? "IN" : "OUT",
2205 ep->dwc_ep.xfer_len, ep->dwc_ep.xfer_count,
2206 deptsiz.b.xfersize, deptsiz.b.pktcnt);
2209 /* Complete the request */
/* CFI buffer modes report actual length relative to the CFI request
 * length; standard mode uses the accumulated xfer_count. */
2212 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2213 req->actual = ep->dwc_ep.cfi_req_len - byte_count;
2216 req->actual = ep->dwc_ep.xfer_count;
2221 dwc_otg_request_done(ep, req, 0);
/* Reset bookkeeping for the next request. */
2223 ep->dwc_ep.start_xfer_buff = 0;
2224 ep->dwc_ep.xfer_buff = 0;
2225 ep->dwc_ep.xfer_len = 0;
2227 /* If there is a request in the queue start it. */
2228 start_next_request(ep);
2235 * This function handles the BNA (Buffer Not Available) interrupt for
2235 * Isochronous EPs: it re-marks the current descriptor chain as
2235 * HOST_READY and re-enables the endpoint.
2238 static void dwc_otg_pcd_handle_iso_bna(dwc_otg_pcd_ep_t * ep)
2240 dwc_ep_t *dwc_ep = &ep->dwc_ep;
2241 volatile uint32_t *addr;
2242 depctl_data_t depctl = {.d32 = 0 };
2243 dwc_otg_pcd_t *pcd = ep->pcd;
2244 dwc_otg_dma_desc_t *dma_desc;
/* Select the descriptor chain of the buffer currently being processed. */
2248 dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * (dwc_ep->proc_buf_num);
/* Mark every descriptor as HOST_READY so the core can resume. */
2250 if (dwc_ep->is_in) {
2251 desc_sts_data_t sts = {.d32 = 0 };
2252 for (i = 0; i < dwc_ep->desc_cnt; ++i, ++dma_desc) {
2253 sts.d32 = dma_desc->status.d32;
2254 sts.b_iso_in.bs = BS_HOST_READY;
2255 dma_desc->status.d32 = sts.d32;
2258 desc_sts_data_t sts = {.d32 = 0 };
2259 for (i = 0; i < dwc_ep->desc_cnt; ++i, ++dma_desc) {
2260 sts.d32 = dma_desc->status.d32;
2261 sts.b_iso_out.bs = BS_HOST_READY;
2262 dma_desc->status.d32 = sts.d32;
/* Re-enable the endpoint via its DOEPCTL/DIEPCTL register. */
2266 if (dwc_ep->is_in == 0) {
2268 &GET_CORE_IF(pcd)->dev_if->out_ep_regs[dwc_ep->num]->
2272 &GET_CORE_IF(pcd)->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
2275 dwc_modify_reg32(addr, depctl.d32, depctl.d32);
2279 * This function sets latest iso packet information(non-PTI mode)
2281 * @param core_if Programming view of DWC_otg controller.
2282 * @param ep The EP to start the transfer on.
2285 void set_current_pkt_info(dwc_otg_core_if_t * core_if, dwc_ep_t * ep)
2287 deptsiz_data_t deptsiz = {.d32 = 0 };
2288 dma_addr_t dma_addr;
/* Pick the base address of the double buffer currently in use; packet
 * offsets below are relative to it. */
2291 if (ep->proc_buf_num)
2292 dma_addr = ep->dma_addr1;
2294 dma_addr = ep->dma_addr0;
2298 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->
2300 offset = ep->data_per_frame;
2303 dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->
/* NOTE(review): `0x4 & (0x4 - (x & 0x3))` adds 4 only when
 * data_per_frame is already DWORD-aligned and 0 otherwise — the
 * conventional round-up padding is `(4 - (x & 3)) & 3`. Looks like a
 * possible bug; confirm against the full driver source before changing. */
2306 ep->data_per_frame +
2307 (0x4 & (0x4 - (ep->data_per_frame & 0x3)));
/* xfersize fully drained means the packet was transferred OK; otherwise
 * record the same geometry but flag the packet as having no data. */
2310 if (!deptsiz.b.xfersize) {
2311 ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
2312 ep->pkt_info[ep->cur_pkt].offset =
2313 ep->cur_pkt_dma_addr - dma_addr;
2314 ep->pkt_info[ep->cur_pkt].status = 0;
2316 ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame;
2317 ep->pkt_info[ep->cur_pkt].offset =
2318 ep->cur_pkt_dma_addr - dma_addr;
2319 ep->pkt_info[ep->cur_pkt].status = -DWC_E_NO_DATA;
/* Advance the per-packet cursors by the (padded) per-frame size. */
2321 ep->cur_pkt_addr += offset;
2322 ep->cur_pkt_dma_addr += offset;
2327 * This function sets latest iso packet information(DDMA mode)
2329 * @param core_if Programming view of DWC_otg controller.
2330 * @param dwc_ep The EP to start the transfer on.
2333 static void set_ddma_iso_pkts_info(dwc_otg_core_if_t * core_if,
2336 dwc_otg_dma_desc_t *dma_desc;
2337 desc_sts_data_t sts = {.d32 = 0 };
2338 iso_pkt_info_t *iso_packet;
2339 uint32_t data_per_desc;
2343 iso_packet = dwc_ep->pkt_info;
2345 /** Reinit closed DMA Descriptors*/
/* ------- OUT direction: harvest per-descriptor RX status ------- */
2347 if (dwc_ep->is_in == 0) {
/* Point at the descriptor chain of the buffer just completed. */
2349 dwc_ep->iso_desc_addr +
2350 dwc_ep->desc_cnt * dwc_ep->proc_buf_num;
/* Walk whole frames (pkt_per_frm descriptors each), except the
 * final frame which is handled separately below. */
2353 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
2354 i += dwc_ep->pkt_per_frm) {
2355 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
/* Last packet of a frame may be shorter than maxpacket. */
2357 ((j + 1) * dwc_ep->maxpacket >
2358 dwc_ep->data_per_frame) ? dwc_ep->
2360 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2362 (data_per_desc % 4) ? (4 -
2366 sts.d32 = dma_desc->status.d32;
2368 /* Write status in iso_packet_descriptor */
/* Non-zero rxsts or a buffer-state other than DMA_DONE is
 * reported to the gadget as "no data". */
2369 iso_packet->status =
2370 sts.b_iso_out.rxsts +
2371 (sts.b_iso_out.bs ^ BS_DMA_DONE);
2372 if (iso_packet->status) {
2373 iso_packet->status = -DWC_E_NO_DATA;
2376 /* Received data length */
/* NOTE(review): the `!rxbytes` test with both arms deriving
 * length from rxbytes looks inverted/odd — verify against the
 * full source before relying on these lengths. */
2377 if (!sts.b_iso_out.rxbytes) {
2378 iso_packet->length =
2380 sts.b_iso_out.rxbytes;
2382 iso_packet->length =
2384 sts.b_iso_out.rxbytes + (4 -
2390 iso_packet->offset = offset;
2392 offset += data_per_desc;
/* Final frame: all but its last packet. */
2398 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
2400 ((j + 1) * dwc_ep->maxpacket >
2401 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
2402 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2404 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
2406 sts.d32 = dma_desc->status.d32;
2408 /* Write status in iso_packet_descriptor */
2409 iso_packet->status =
2410 sts.b_iso_out.rxsts +
2411 (sts.b_iso_out.bs ^ BS_DMA_DONE);
2412 if (iso_packet->status) {
2413 iso_packet->status = -DWC_E_NO_DATA;
2416 /* Received data length */
2417 iso_packet->length =
2418 dwc_ep->data_per_frame - sts.b_iso_out.rxbytes;
2420 iso_packet->offset = offset;
2422 offset += data_per_desc;
/* Very last descriptor of the chain. */
2427 sts.d32 = dma_desc->status.d32;
2429 /* Write status in iso_packet_descriptor */
2430 iso_packet->status =
2431 sts.b_iso_out.rxsts + (sts.b_iso_out.bs ^ BS_DMA_DONE);
2432 if (iso_packet->status) {
2433 iso_packet->status = -DWC_E_NO_DATA;
2435 /* Received data length */
2436 if (!sts.b_iso_out.rxbytes) {
2437 iso_packet->length =
2438 dwc_ep->data_per_frame - sts.b_iso_out.rxbytes;
2440 iso_packet->length =
2441 dwc_ep->data_per_frame - sts.b_iso_out.rxbytes +
2442 (4 - dwc_ep->data_per_frame % 4);
2445 iso_packet->offset = offset;
/* ------- IN direction: harvest per-descriptor TX status ------- */
2450 dwc_ep->iso_desc_addr +
2451 dwc_ep->desc_cnt * dwc_ep->proc_buf_num;
2453 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
2454 sts.d32 = dma_desc->status.d32;
2456 /* Write status in iso packet descriptor */
2457 iso_packet->status =
2458 sts.b_iso_in.txsts +
2459 (sts.b_iso_in.bs ^ BS_DMA_DONE);
2460 if (iso_packet->status != 0) {
2461 iso_packet->status = -DWC_E_NO_DATA;
2464 /* Bytes have been transferred */
2465 iso_packet->length =
2466 dwc_ep->data_per_frame - sts.b_iso_in.txbytes;
/* Last IN descriptor: busy-wait until the core releases it.
 * NOTE(review): unbounded spin on hardware state — a timeout would
 * be safer; confirm intent against the full driver. */
2472 sts.d32 = dma_desc->status.d32;
2473 while (sts.b_iso_in.bs == BS_DMA_BUSY) {
2474 sts.d32 = dma_desc->status.d32;
2477 /* Write status in iso packet descriptor ??? to be done with ERROR codes */
2478 iso_packet->status =
2479 sts.b_iso_in.txsts + (sts.b_iso_in.bs ^ BS_DMA_DONE);
2480 if (iso_packet->status != 0) {
2481 iso_packet->status = -DWC_E_NO_DATA;
2484 /* Bytes have been transferred */
2485 iso_packet->length =
2486 dwc_ep->data_per_frame - sts.b_iso_in.txbytes;
2491 * This function reinitializes DMA Descriptors for Isochronous transfer
2493 * @param core_if Programming view of DWC_otg controller.
2494 * @param dwc_ep The EP to start the transfer on.
2497 static void reinit_ddma_iso_xfer(dwc_otg_core_if_t * core_if, dwc_ep_t * dwc_ep)
2500 dwc_otg_dma_desc_t *dma_desc;
2502 volatile uint32_t *addr;
2503 desc_sts_data_t sts = {.d32 = 0 };
2504 uint32_t data_per_desc;
/* Select the EP control register for the re-enable at the end. */
2506 if (dwc_ep->is_in == 0) {
2507 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
2509 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
/* Pick the DMA base of whichever half of the double buffer is being
 * re-armed. */
2512 if (dwc_ep->proc_buf_num == 0) {
2513 /** Buffer 0 descriptors setup */
2514 dma_ad = dwc_ep->dma_addr0;
2516 /** Buffer 1 descriptors setup */
2517 dma_ad = dwc_ep->dma_addr1;
2520 /** Reinit closed DMA Descriptors*/
/* ------- OUT direction: rebuild RX descriptors ------- */
2522 if (dwc_ep->is_in == 0) {
2524 dwc_ep->iso_desc_addr +
2525 dwc_ep->desc_cnt * dwc_ep->proc_buf_num;
/* Template status word: HOST_READY, everything else cleared. */
2527 sts.b_iso_out.bs = BS_HOST_READY;
2528 sts.b_iso_out.rxsts = 0;
2529 sts.b_iso_out.l = 0;
2530 sts.b_iso_out.sp = 0;
2531 sts.b_iso_out.ioc = 0;
2532 sts.b_iso_out.pid = 0;
2533 sts.b_iso_out.framenum = 0;
/* All whole frames except the final one. */
2535 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
2536 i += dwc_ep->pkt_per_frm) {
2537 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
/* Last packet of a frame may be a short packet; sizes are
 * rounded up to a DWORD boundary for DMA. */
2539 ((j + 1) * dwc_ep->maxpacket >
2540 dwc_ep->data_per_frame) ? dwc_ep->
2542 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2544 (data_per_desc % 4) ? (4 -
2547 sts.b_iso_out.rxbytes = data_per_desc;
2548 dma_desc->buf = dma_ad;
2549 dma_desc->status.d32 = sts.d32;
2551 dma_ad += data_per_desc;
/* Final frame: all but its last descriptor. */
2556 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
2559 ((j + 1) * dwc_ep->maxpacket >
2560 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
2561 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2563 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
2564 sts.b_iso_out.rxbytes = data_per_desc;
2566 dma_desc->buf = dma_ad;
2567 dma_desc->status.d32 = sts.d32;
2570 dma_ad += data_per_desc;
/* Last descriptor: interrupt-on-completion, and the L (last) bit
 * carries the buffer number so completion identifies the half. */
2573 sts.b_iso_out.ioc = 1;
2574 sts.b_iso_out.l = dwc_ep->proc_buf_num;
2577 ((j + 1) * dwc_ep->maxpacket >
2578 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
2579 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
2581 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
2582 sts.b_iso_out.rxbytes = data_per_desc;
2584 dma_desc->buf = dma_ad;
2585 dma_desc->status.d32 = sts.d32;
/* ------- IN direction: rebuild TX descriptors ------- */
2590 dwc_ep->iso_desc_addr +
2591 dwc_ep->desc_cnt * dwc_ep->proc_buf_num;
2593 sts.b_iso_in.bs = BS_HOST_READY;
2594 sts.b_iso_in.txsts = 0;
2595 sts.b_iso_in.sp = 0;
2596 sts.b_iso_in.ioc = 0;
2597 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
2598 sts.b_iso_in.framenum = dwc_ep->next_frame;
2599 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
/* One descriptor per frame, stepping the frame number by bInterval. */
2602 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
2603 dma_desc->buf = dma_ad;
2604 dma_desc->status.d32 = sts.d32;
2606 sts.b_iso_in.framenum += dwc_ep->bInterval;
2607 dma_ad += dwc_ep->data_per_frame;
/* Last IN descriptor: IOC set, L bit carries the buffer number. */
2611 sts.b_iso_in.ioc = 1;
2612 sts.b_iso_in.l = dwc_ep->proc_buf_num;
2614 dma_desc->buf = dma_ad;
2615 dma_desc->status.d32 = sts.d32;
/* Remember where the next buffer's frame numbering starts. */
2617 dwc_ep->next_frame =
2618 sts.b_iso_in.framenum + dwc_ep->bInterval * 1;
/* Flip to the other half of the double buffer. */
2620 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
2624 * This function is to handle Iso EP transfer complete interrupt
2625 * in case Iso out packet was dropped
2627 * @param core_if Programming view of DWC_otg controller.
2628 * @param dwc_ep The EP for which transfer complete was asserted
2631 static uint32_t handle_iso_out_pkt_dropped(dwc_otg_core_if_t * core_if,
2636 uint32_t drp_pkt_cnt;
2637 deptsiz_data_t deptsiz = {.d32 = 0 };
2638 depctl_data_t depctl = {.d32 = 0 };
2642 dwc_read_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->
2645 drp_pkt = dwc_ep->pkt_cnt - deptsiz.b.pktcnt;
2646 drp_pkt_cnt = dwc_ep->pkt_per_frm - (drp_pkt % dwc_ep->pkt_per_frm);
2648 /* Setting dropped packets status */
2649 for (i = 0; i < drp_pkt_cnt; ++i) {
2650 dwc_ep->pkt_info[drp_pkt].status = -DWC_E_NO_DATA;
2655 if (deptsiz.b.pktcnt > 0) {
2656 deptsiz.b.xfersize =
2657 dwc_ep->xfer_len - (dwc_ep->pkt_cnt -
2658 deptsiz.b.pktcnt) * dwc_ep->maxpacket;
2660 deptsiz.b.xfersize = 0;
2661 deptsiz.b.pktcnt = 0;
2664 dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz,
2667 if (deptsiz.b.pktcnt > 0) {
2668 if (dwc_ep->proc_buf_num) {
2670 dwc_ep->dma_addr1 + dwc_ep->xfer_len -
2674 dwc_ep->dma_addr0 + dwc_ep->xfer_len -
2675 deptsiz.b.xfersize;;
2678 dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->
2681 /** Re-enable endpoint, clear nak */
2686 dwc_modify_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->
2687 doepctl, depctl.d32, depctl.d32);
2695 * This function sets iso packets information (PTI mode)
2697 * @param core_if Programming view of DWC_otg controller.
2698 * @param ep The EP to start the transfer on.
2701 static uint32_t set_iso_pkts_info(dwc_otg_core_if_t * core_if, dwc_ep_t * ep)
2705 iso_pkt_info_t *packet_info = ep->pkt_info;
2707 uint32_t frame_data;
2708 deptsiz_data_t deptsiz;
2710 if (ep->proc_buf_num == 0) {
2711 /** Buffer 0 descriptors setup */
2712 dma_ad = ep->dma_addr0;
2714 /** Buffer 1 descriptors setup */
2715 dma_ad = ep->dma_addr1;
2720 dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->
2724 dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->
2728 if (!deptsiz.b.xfersize) {
2730 for (i = 0; i < ep->pkt_cnt; i += ep->pkt_per_frm) {
2731 frame_data = ep->data_per_frame;
2732 for (j = 0; j < ep->pkt_per_frm; ++j) {
2734 /* Packet status - is not set as initially
2735 * it is set to 0 and if packet was sent
2736 successfully, status field will remain 0*/
2738 /* Bytes has been transfered */
2739 packet_info->length =
2741 frame_data) ? ep->maxpacket : frame_data;
2743 /* Received packet offset */
2744 packet_info->offset = offset;
2745 offset += packet_info->length;
2746 frame_data -= packet_info->length;
2753 /* This is a workaround for in case of Transfer Complete with
2754 * PktDrpSts interrupts merging - in this case Transfer complete
2755 * interrupt for Isoc Out Endpoint is asserted without PktDrpSts
2756 * set and with DOEPTSIZ register non zero. Investigations showed,
2757 * that this happens when Out packet is dropped, but because of
2758 * interrupts merging during first interrupt handling PktDrpSts
2759 * bit is cleared and for next merged interrupts it is not reset.
2760 * In this case SW hadles the interrupt as if PktDrpSts bit is set.
2765 return handle_iso_out_pkt_dropped(core_if, ep);
2771 * This function is to handle Iso EP transfer complete interrupt
2773 * @param pcd The PCD
2774 * @param ep The EP for which transfer complete was asserted
2777 static void complete_iso_ep(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * ep)
2779 dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd);
2780 dwc_ep_t *dwc_ep = &ep->dwc_ep;
2781 uint8_t is_last = 0;
2783 if (core_if->dma_enable) {
2784 if (core_if->dma_desc_enable) {
2785 set_ddma_iso_pkts_info(core_if, dwc_ep);
2786 reinit_ddma_iso_xfer(core_if, dwc_ep);
2789 if (core_if->pti_enh_enable) {
2790 if (set_iso_pkts_info(core_if, dwc_ep)) {
2791 dwc_ep->proc_buf_num =
2792 (dwc_ep->proc_buf_num ^ 1) & 0x1;
2793 dwc_otg_iso_ep_start_buf_transfer
2798 set_current_pkt_info(core_if, dwc_ep);
2799 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
2801 dwc_ep->cur_pkt = 0;
2802 dwc_ep->proc_buf_num =
2803 (dwc_ep->proc_buf_num ^ 1) & 0x1;
2804 if (dwc_ep->proc_buf_num) {
2805 dwc_ep->cur_pkt_addr =
2807 dwc_ep->cur_pkt_dma_addr =
2810 dwc_ep->cur_pkt_addr =
2812 dwc_ep->cur_pkt_dma_addr =
2817 dwc_otg_iso_ep_start_frm_transfer(core_if,
2822 set_current_pkt_info(core_if, dwc_ep);
2823 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
2825 dwc_ep->cur_pkt = 0;
2826 dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1;
2827 if (dwc_ep->proc_buf_num) {
2828 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1;
2829 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1;
2831 dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0;
2832 dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0;
2836 dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep);
2839 dwc_otg_iso_buffer_done(pcd, ep, ep->iso_req_handle);
2841 #endif /* DWC_EN_ISOC */
2844 * This function handles EP0 Control transfers.
2846 * The state of the control transfers are tracked in
2847 * <code>ep0state</code>.
2849 static void handle_ep0(dwc_otg_pcd_t * pcd)
2851 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2852 dwc_otg_pcd_ep_t *ep0 = &pcd->ep0;
2853 desc_sts_data_t desc_sts;
2854 deptsiz0_data_t deptsiz;
2855 uint32_t byte_count;
2858 DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
2859 print_ep0_state(pcd);
2862 // DWC_DEBUGPL(DBG_CIL,"HANDLE EP0\n");
2864 switch (pcd->ep0state) {
2865 case EP0_DISCONNECT:
2869 pcd->request_config = 0;
2874 case EP0_IN_DATA_PHASE:
2876 DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n",
2877 ep0->dwc_ep.num, (ep0->dwc_ep.is_in ? "IN" : "OUT"),
2878 ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
2881 if (core_if->dma_enable != 0) {
2883 * For EP0 we can only program 1 packet at a time so we
2884 * need to do the make calculations after each complete.
2885 * Call write_packet to make the calculations, as in
2886 * slave mode, and use those values to determine if we
2889 if (core_if->dma_desc_enable == 0) {
2891 dwc_read_reg32(&core_if->dev_if->
2892 in_ep_regs[0]->dieptsiz);
2894 ep0->dwc_ep.xfer_len - deptsiz.b.xfersize;
2897 core_if->dev_if->in_desc_addr->status;
2899 ep0->dwc_ep.xfer_len - desc_sts.b.bytes;
2901 ep0->dwc_ep.xfer_count += byte_count;
2902 ep0->dwc_ep.xfer_buff += byte_count;
2903 ep0->dwc_ep.dma_addr += byte_count;
2905 if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
2906 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
2908 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
2909 } else if (ep0->dwc_ep.sent_zlp) {
2910 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
2912 ep0->dwc_ep.sent_zlp = 0;
2913 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
2915 ep0_complete_request(ep0);
2916 DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
2919 case EP0_OUT_DATA_PHASE:
2921 DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n",
2922 ep0->dwc_ep.num, (ep0->dwc_ep.is_in ? "IN" : "OUT"),
2923 ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
2925 if (core_if->dma_enable != 0) {
2926 if (core_if->dma_desc_enable == 0) {
2928 dwc_read_reg32(&core_if->dev_if->
2929 out_ep_regs[0]->doeptsiz);
2931 ep0->dwc_ep.maxpacket - deptsiz.b.xfersize;
2934 core_if->dev_if->out_desc_addr->status;
2936 ep0->dwc_ep.maxpacket - desc_sts.b.bytes;
2938 ep0->dwc_ep.xfer_count += byte_count;
2939 ep0->dwc_ep.xfer_buff += byte_count;
2940 ep0->dwc_ep.dma_addr += byte_count;
2942 if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
2943 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
2945 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
2946 } else if (ep0->dwc_ep.sent_zlp) {
2947 dwc_otg_ep0_continue_transfer(GET_CORE_IF(pcd),
2949 ep0->dwc_ep.sent_zlp = 0;
2950 DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
2952 ep0_complete_request(ep0);
2953 DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
2957 case EP0_IN_STATUS_PHASE:
2958 case EP0_OUT_STATUS_PHASE:
2959 DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n");
2960 ep0_complete_request(ep0);
2961 pcd->ep0state = EP0_IDLE;
2963 ep0->dwc_ep.is_in = 0; /* OUT for next SETUP */
2965 /* Prepare for more SETUP Packets */
2966 if (core_if->dma_enable) {
2967 ep0_out_start(core_if, pcd);
2972 DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n");
2976 print_ep0_state(pcd);
2983 static void restart_transfer(dwc_otg_pcd_t * pcd, const uint32_t epnum)
2985 dwc_otg_core_if_t *core_if;
2986 dwc_otg_dev_if_t *dev_if;
2987 deptsiz_data_t dieptsiz = {.d32 = 0 };
2988 dwc_otg_pcd_ep_t *ep;
2990 ep = get_in_ep(pcd, epnum);
2993 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
2996 #endif /* DWC_EN_ISOC */
2998 core_if = GET_CORE_IF(pcd);
2999 dev_if = core_if->dev_if;
3001 dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz);
3003 DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x xfer_len=%0x"
3004 " stopped=%d\n", ep->dwc_ep.xfer_buff,
3005 ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len, ep->stopped);
3007 * If xfersize is 0 and pktcnt in not 0, resend the last packet.
3009 if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0 &&
3010 ep->dwc_ep.start_xfer_buff != 0) {
3011 if (ep->dwc_ep.total_len <= ep->dwc_ep.maxpacket) {
3012 ep->dwc_ep.xfer_count = 0;
3013 ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff;
3014 ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count;
3016 ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket;
3017 /* convert packet size to dwords. */
3018 ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket;
3019 ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count;
3022 DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x "
3023 "xfer_len=%0x stopped=%d\n",
3024 ep->dwc_ep.xfer_buff,
3025 ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len,
3028 dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep);
3030 dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
3036 * handle the IN EP disable interrupt.
3038 static inline void handle_in_ep_disable_intr(dwc_otg_pcd_t * pcd,
3039 const uint32_t epnum)
3041 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3042 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3043 deptsiz_data_t dieptsiz = {.d32 = 0 };
3044 dctl_data_t dctl = {.d32 = 0 };
3045 dwc_otg_pcd_ep_t *ep;
3048 ep = get_in_ep(pcd, epnum);
3049 dwc_ep = &ep->dwc_ep;
3051 if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3052 dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num);
3056 DWC_DEBUGPL(DBG_PCD, "diepctl%d=%0x\n", epnum,
3057 dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl));
3058 dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz);
3060 DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
3061 dieptsiz.b.pktcnt, dieptsiz.b.xfersize);
3064 /* Flush the Tx FIFO */
3065 dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num);
3066 /* Clear the Global IN NP NAK */
3068 dctl.b.cgnpinnak = 1;
3069 dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, 0);
3070 /* Restart the transaction */
3071 if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
3072 restart_transfer(pcd, epnum);
3075 /* Restart the transaction */
3076 if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
3077 restart_transfer(pcd, epnum);
3079 DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n");
3084 * Handler for the IN EP timeout handshake interrupt.
3086 static inline void handle_in_ep_timeout_intr(dwc_otg_pcd_t * pcd,
3087 const uint32_t epnum)
3089 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3090 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3093 deptsiz_data_t dieptsiz = {.d32 = 0 };
3096 dctl_data_t dctl = {.d32 = 0 };
3097 dwc_otg_pcd_ep_t *ep;
3099 gintmsk_data_t intr_mask = {.d32 = 0 };
3101 ep = get_in_ep(pcd, epnum);
3103 /* Disable the NP Tx Fifo Empty Interrrupt */
3104 if (!core_if->dma_enable) {
3105 intr_mask.b.nptxfempty = 1;
3106 dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
3109 /** @todo NGS Check EP type.
3110 * Implement for Periodic EPs */
3114 /* Enable the Global IN NAK Effective Interrupt */
3115 intr_mask.b.ginnakeff = 1;
3116 dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, intr_mask.d32);
3118 /* Set Global IN NAK */
3119 dctl.b.sgnpinnak = 1;
3120 dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
3125 dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[num]->dieptsiz);
3126 DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n",
3127 dieptsiz.b.pktcnt, dieptsiz.b.xfersize);
3130 #ifdef DISABLE_PERIODIC_EP
3132 * Set the NAK bit for this EP to
3133 * start the disable process.
3137 dwc_modify_reg32(&dev_if->in_ep_regs[num]->diepctl, diepctl.d32,
3145 * Handler for the IN EP NAK interrupt.
3147 static inline int32_t handle_in_ep_nak_intr(dwc_otg_pcd_t * pcd,
3148 const uint32_t epnum)
3150 /** @todo implement ISR */
3151 dwc_otg_core_if_t *core_if;
3152 diepmsk_data_t intr_mask = {.d32 = 0 };
3154 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n", "IN EP NAK");
3155 core_if = GET_CORE_IF(pcd);
3156 intr_mask.b.nak = 1;
3158 if (core_if->multiproc_int_enable) {
3159 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
3160 diepeachintmsk[epnum], intr_mask.d32, 0);
3162 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepmsk,
3170 * Handler for the OUT EP Babble interrupt.
3172 static inline int32_t handle_out_ep_babble_intr(dwc_otg_pcd_t * pcd,
3173 const uint32_t epnum)
3175 /** @todo implement ISR */
3176 dwc_otg_core_if_t *core_if;
3177 doepmsk_data_t intr_mask = {.d32 = 0 };
3179 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n",
3181 core_if = GET_CORE_IF(pcd);
3182 intr_mask.b.babble = 1;
3184 if (core_if->multiproc_int_enable) {
3185 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
3186 doepeachintmsk[epnum], intr_mask.d32, 0);
3188 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk,
3196 * Handler for the OUT EP NAK interrupt.
3198 static inline int32_t handle_out_ep_nak_intr(dwc_otg_pcd_t * pcd,
3199 const uint32_t epnum)
3201 /** @todo implement ISR */
3202 dwc_otg_core_if_t *core_if;
3203 doepmsk_data_t intr_mask = {.d32 = 0 };
3205 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n", "OUT EP NAK");
3206 core_if = GET_CORE_IF(pcd);
3207 intr_mask.b.nak = 1;
3209 if (core_if->multiproc_int_enable) {
3210 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
3211 doepeachintmsk[epnum], intr_mask.d32, 0);
3213 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk,
3221 * Handler for the OUT EP NYET interrupt.
3223 static inline int32_t handle_out_ep_nyet_intr(dwc_otg_pcd_t * pcd,
3224 const uint32_t epnum)
3226 /** @todo implement ISR */
3227 dwc_otg_core_if_t *core_if;
3228 doepmsk_data_t intr_mask = {.d32 = 0 };
3230 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n", "OUT EP NYET");
3231 core_if = GET_CORE_IF(pcd);
3232 intr_mask.b.nyet = 1;
3234 if (core_if->multiproc_int_enable) {
3235 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
3236 doepeachintmsk[epnum], intr_mask.d32, 0);
3238 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk,
3246 * This interrupt indicates that an IN EP has a pending Interrupt.
3247 * The sequence for handling the IN EP interrupt is shown below:
3248 * -# Read the Device All Endpoint Interrupt register
3249 * -# Repeat the following for each IN EP interrupt bit set (from
3251 * -# Read the Device Endpoint Interrupt (DIEPINTn) register
3252 * -# If "Transfer Complete" call the request complete function
3253 * -# If "Endpoint Disabled" complete the EP disable procedure.
3254 * -# If "AHB Error Interrupt" log error
3255 * -# If "Time-out Handshake" log error
3256 * -# If "IN Token Received when TxFIFO Empty" write packet to Tx
3258 * -# If "IN Token EP Mismatch" (disable, this is handled by EP
3259 * Mismatch Interrupt)
3261 static int32_t dwc_otg_pcd_handle_in_ep_intr(dwc_otg_pcd_t * pcd)
3263 #define CLEAR_IN_EP_INTR(__core_if,__epnum,__intr) \
3265 diepint_data_t diepint = {.d32=0}; \
3266 diepint.b.__intr = 1; \
3267 dwc_write_reg32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \
3271 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3272 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3273 diepint_data_t diepint = {.d32 = 0 };
3274 dctl_data_t dctl = {.d32 = 0 };
3275 depctl_data_t depctl = {.d32 = 0 };
3278 dwc_otg_pcd_ep_t *ep;
3280 gintmsk_data_t intr_mask = {.d32 = 0 };
3282 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd);
3284 /* Read in the device interrupt bits */
3285 ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if);
3287 /* Service the Device IN interrupts for each endpoint */
3289 if (ep_intr & 0x1) {
3291 /* Get EP pointer */
3292 ep = get_in_ep(pcd, epnum);
3293 dwc_ep = &ep->dwc_ep;
3296 dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl);
3298 dwc_read_reg32(&dev_if->dev_global_regs->
3299 dtknqr4_fifoemptymsk);
3301 DWC_DEBUGPL(DBG_PCDV,
3302 "IN EP INTERRUPT - %d\nepmty_msk - %8x diepctl - %8x\n",
3303 epnum, empty_msk, depctl.d32);
3305 DWC_DEBUGPL(DBG_PCD,
3306 "EP%d-%s: type=%d, mps=%d\n",
3307 dwc_ep->num, (dwc_ep->is_in ? "IN" : "OUT"),
3308 dwc_ep->type, dwc_ep->maxpacket);
3311 dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep);
3313 DWC_DEBUGPL(DBG_PCDV,
3314 "EP %d Interrupt Register - 0x%x\n", epnum,
3316 /* Transfer complete */
3317 if (diepint.b.xfercompl) {
3318 /* Disable the NP Tx FIFO Empty
3320 if (core_if->en_multiple_tx_fifo == 0) {
3321 intr_mask.b.nptxfempty = 1;
3322 dwc_modify_reg32(&core_if->
3324 gintmsk, intr_mask.d32,
3327 /* Disable the Tx FIFO Empty Interrupt for this EP */
3328 uint32_t fifoemptymsk =
3330 dwc_modify_reg32(&core_if->dev_if->
3332 dtknqr4_fifoemptymsk,
3335 /* Clear the bit in DIEPINTn for this interrupt */
3336 CLEAR_IN_EP_INTR(core_if, epnum, xfercompl);
3338 /* Complete the transfer */
3343 else if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3345 complete_iso_ep(pcd, ep);
3347 #endif /* DWC_EN_ISOC */
3353 /* Endpoint disable */
3354 if (diepint.b.epdisabled) {
3355 DWC_DEBUGPL(DBG_ANY, "EP%d IN disabled\n",
3357 handle_in_ep_disable_intr(pcd, epnum);
3359 /* Clear the bit in DIEPINTn for this interrupt */
3360 CLEAR_IN_EP_INTR(core_if, epnum, epdisabled);
3363 if (diepint.b.ahberr) {
3364 DWC_DEBUGPL(DBG_ANY, "EP%d IN AHB Error\n",
3366 /* Clear the bit in DIEPINTn for this interrupt */
3367 CLEAR_IN_EP_INTR(core_if, epnum, ahberr);
3369 /* TimeOUT Handshake (non-ISOC IN EPs) */
3370 if (diepint.b.timeout) {
3371 DWC_DEBUGPL(DBG_ANY, "EP%d IN Time-out\n",
3373 handle_in_ep_timeout_intr(pcd, epnum);
3375 CLEAR_IN_EP_INTR(core_if, epnum, timeout);
3377 /** IN Token received with TxF Empty */
3378 if (diepint.b.intktxfemp) {
3379 DWC_DEBUGPL(DBG_ANY,
3380 "EP%d IN TKN TxFifo Empty\n",
3382 if (!ep->stopped && epnum != 0) {
3384 diepmsk_data_t diepmsk = {.d32 = 0 };
3385 diepmsk.b.intktxfemp = 1;
3387 if (core_if->multiproc_int_enable) {
3388 dwc_modify_reg32(&dev_if->
3395 dwc_modify_reg32(&dev_if->
3401 } else if (core_if->dma_desc_enable
3404 EP0_OUT_STATUS_PHASE) {
3407 dwc_read_reg32(&dev_if->
3411 /* set the disable and stall bits */
3412 if (depctl.b.epena) {
3416 dwc_write_reg32(&dev_if->
3418 diepctl, depctl.d32);
3420 CLEAR_IN_EP_INTR(core_if, epnum, intktxfemp);
3422 /** IN Token Received with EP mismatch */
3423 if (diepint.b.intknepmis) {
3424 DWC_DEBUGPL(DBG_ANY,
3425 "EP%d IN TKN EP Mismatch\n", epnum);
3426 CLEAR_IN_EP_INTR(core_if, epnum, intknepmis);
3428 /** IN Endpoint NAK Effective */
3429 if (diepint.b.inepnakeff) {
3430 DWC_DEBUGPL(DBG_ANY,
3431 "EP%d IN EP NAK Effective\n",
3434 if (ep->disabling) {
3438 dwc_modify_reg32(&dev_if->
3440 diepctl, depctl.d32,
3443 CLEAR_IN_EP_INTR(core_if, epnum, inepnakeff);
3447 /** IN EP Tx FIFO Empty Intr */
3448 if (diepint.b.emptyintr) {
3449 DWC_DEBUGPL(DBG_ANY,
3450 "EP%d Tx FIFO Empty Intr \n",
3452 write_empty_tx_fifo(pcd, epnum);
3454 CLEAR_IN_EP_INTR(core_if, epnum, emptyintr);
3458 /** IN EP BNA Intr */
3459 if (diepint.b.bna) {
3460 CLEAR_IN_EP_INTR(core_if, epnum, bna);
3461 if (core_if->dma_desc_enable) {
3464 DWC_OTG_EP_TYPE_ISOC) {
3466 * This checking is performed to prevent first "false" BNA
3467 * handling occuring right after reconnect
3469 if (dwc_ep->next_frame !=
3471 dwc_otg_pcd_handle_iso_bna
3474 #endif /* DWC_EN_ISOC */
3477 dwc_read_reg32(&dev_if->
3481 /* If Global Continue on BNA is disabled - disable EP */
3482 if (!dctl.b.gcontbna) {
3493 start_next_request(ep);
3499 if (diepint.b.nak) {
3500 DWC_DEBUGPL(DBG_ANY, "EP%d IN NAK Interrupt\n",
3502 handle_in_ep_nak_intr(pcd, epnum);
3504 CLEAR_IN_EP_INTR(core_if, epnum, nak);
3512 #undef CLEAR_IN_EP_INTR
3516 * This interrupt indicates that an OUT EP has a pending Interrupt.
3517 * The sequence for handling the OUT EP interrupt is shown below:
3518 * -# Read the Device All Endpoint Interrupt register
3519 * -# Repeat the following for each OUT EP interrupt bit set (from
3521 * -# Read the Device Endpoint Interrupt (DOEPINTn) register
3522 * -# If "Transfer Complete" call the request complete function
3523 * -# If "Endpoint Disabled" complete the EP disable procedure.
3524 * -# If "AHB Error Interrupt" log error
3525 * -# If "Setup Phase Done" process Setup Packet (See Standard USB
3526 * Command Processing)
3528 static int32_t dwc_otg_pcd_handle_out_ep_intr(dwc_otg_pcd_t * pcd)
3530 #define CLEAR_OUT_EP_INTR(__core_if,__epnum,__intr) \
3532 doepint_data_t doepint = {.d32=0}; \
3533 doepint.b.__intr = 1; \
3534 dwc_write_reg32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \
3538 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3539 dwc_otg_dev_if_t *dev_if = core_if->dev_if;
3541 doepint_data_t doepint = {.d32 = 0 };
3542 dctl_data_t dctl = {.d32 = 0 };
3543 depctl_data_t doepctl = {.d32 = 0 };
3545 dwc_otg_pcd_ep_t *ep;
3548 DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
3550 /* Read in the device interrupt bits */
3551 ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if);
3554 if (ep_intr & 0x1) {
3555 /* Get EP pointer */
3556 ep = get_out_ep(pcd, epnum);
3557 dwc_ep = &ep->dwc_ep;
3560 DWC_DEBUGPL(DBG_PCDV,
3561 "EP%d-%s: type=%d, mps=%d\n",
3562 dwc_ep->num, (dwc_ep->is_in ? "IN" : "OUT"),
3563 dwc_ep->type, dwc_ep->maxpacket);
3566 dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep);
3568 /* Transfer complete */
3569 if (doepint.b.xfercompl) {
3572 /* Clear the bit in DOEPINTn for this interrupt */
3573 CLEAR_OUT_EP_INTR(core_if, epnum,
3575 if (pcd->ep0state != EP0_IDLE)
3578 } else if (dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3579 if (doepint.b.pktdrpsts == 0) {
3580 /* Clear the bit in DOEPINTn for this interrupt */
3581 CLEAR_OUT_EP_INTR(core_if,
3584 complete_iso_ep(pcd, ep);
3587 doepint_data_t doepint = {.d32 =
3589 doepint.b.xfercompl = 1;
3590 doepint.b.pktdrpsts = 1;
3591 dwc_write_reg32(&core_if->
3597 if (handle_iso_out_pkt_dropped
3598 (core_if, dwc_ep)) {
3599 complete_iso_ep(pcd,
3603 #endif /* DWC_EN_ISOC */
3605 /* Clear the bit in DOEPINTn for this interrupt */
3606 CLEAR_OUT_EP_INTR(core_if, epnum,
3613 /* Endpoint disable */
3614 if (doepint.b.epdisabled) {
3616 /* Clear the bit in DOEPINTn for this interrupt */
3617 CLEAR_OUT_EP_INTR(core_if, epnum, epdisabled);
3620 if (doepint.b.ahberr) {
3621 DWC_DEBUGPL(DBG_PCD, "EP%d OUT AHB Error\n",
3623 DWC_DEBUGPL(DBG_PCD, "EP DMA REG %d \n",
3625 out_ep_regs[epnum]->doepdma);
3626 CLEAR_OUT_EP_INTR(core_if, epnum, ahberr);
3628 /* Setup Phase Done (contorl EPs) */
3629 if (doepint.b.setup) {
3631 DWC_DEBUGPL(DBG_PCD, "EP%d SETUP Done\n",
3634 CLEAR_OUT_EP_INTR(core_if, epnum, setup);
3639 /** OUT EP BNA Intr */
3640 if (doepint.b.bna) {
3641 CLEAR_OUT_EP_INTR(core_if, epnum, bna);
3642 if (core_if->dma_desc_enable) {
3645 DWC_OTG_EP_TYPE_ISOC) {
3647 * This checking is performed to prevent first "false" BNA
3648 * handling occuring right after reconnect
3650 if (dwc_ep->next_frame !=
3652 dwc_otg_pcd_handle_iso_bna
3655 #endif /* DWC_EN_ISOC */
3658 dwc_read_reg32(&dev_if->
3662 /* If Global Continue on BNA is disabled - disable EP */
3663 if (!dctl.b.gcontbna) {
3666 doepctl.b.epdis = 1;
3674 start_next_request(ep);
3679 if (doepint.b.stsphsercvd) {
3680 CLEAR_OUT_EP_INTR(core_if, epnum, stsphsercvd);
3681 if (core_if->dma_desc_enable) {
3682 do_setup_in_status_phase(pcd);
3685 /* Babble Interrutp */
3686 if (doepint.b.babble) {
3687 DWC_DEBUGPL(DBG_ANY, "EP%d OUT Babble\n",
3689 handle_out_ep_babble_intr(pcd, epnum);
3691 CLEAR_OUT_EP_INTR(core_if, epnum, babble);
3694 if (doepint.b.nak) {
3695 DWC_DEBUGPL(DBG_ANY, "EP%d OUT NAK\n", epnum);
3696 handle_out_ep_nak_intr(pcd, epnum);
3698 CLEAR_OUT_EP_INTR(core_if, epnum, nak);
3700 /* NYET Interrutp */
3701 if (doepint.b.nyet) {
3702 DWC_DEBUGPL(DBG_ANY, "EP%d OUT NYET\n", epnum);
3703 handle_out_ep_nyet_intr(pcd, epnum);
3705 CLEAR_OUT_EP_INTR(core_if, epnum, nyet);
3715 #undef CLEAR_OUT_EP_INTR
3719 * Incomplete ISO IN Transfer Interrupt.
3720 * This interrupt indicates one of the following conditions occurred
3721 * while transmitting an ISOC transaction.
3722 * - Corrupted IN Token for ISOC EP.
3723 * - Packet not complete in FIFO.
3724 * The follow actions will be taken:
3725 * -# Determine the EP
3726 * -# Set incomplete flag in dwc_ep structure
3727 * -# Disable EP; when "Endpoint Disabled" interrupt is received
3730 int32_t dwc_otg_pcd_handle_incomplete_isoc_in_intr(dwc_otg_pcd_t * pcd)
3732 gintsts_data_t gintsts;
3735 dwc_otg_dev_if_t *dev_if;
3736 deptsiz_data_t deptsiz = {.d32 = 0 };
3737 depctl_data_t depctl = {.d32 = 0 };
3738 dsts_data_t dsts = {.d32 = 0 };
3742 dev_if = GET_CORE_IF(pcd)->dev_if;
3744 for (i = 1; i <= dev_if->num_in_eps; ++i) {
3745 dwc_ep = &pcd->in_ep[i].dwc_ep;
3746 if (dwc_ep->active && dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) {
3748 dwc_read_reg32(&dev_if->in_ep_regs[i]->dieptsiz);
3750 dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
3752 if (depctl.b.epdis && deptsiz.d32) {
3753 set_current_pkt_info(GET_CORE_IF(pcd), dwc_ep);
3754 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
3755 dwc_ep->cur_pkt = 0;
3756 dwc_ep->proc_buf_num =
3757 (dwc_ep->proc_buf_num ^ 1) & 0x1;
3759 if (dwc_ep->proc_buf_num) {
3760 dwc_ep->cur_pkt_addr =
3762 dwc_ep->cur_pkt_dma_addr =
3765 dwc_ep->cur_pkt_addr =
3767 dwc_ep->cur_pkt_dma_addr =
3774 dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->
3775 dev_global_regs->dsts);
3776 dwc_ep->next_frame = dsts.b.soffn;
3778 dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF
3786 gintmsk_data_t intr_mask = {.d32 = 0 };
3787 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n",
3788 "IN ISOC Incomplete");
3790 intr_mask.b.incomplisoin = 1;
3791 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
3793 #endif //DWC_EN_ISOC
3795 /* Clear interrupt */
3797 gintsts.b.incomplisoin = 1;
3798 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
3805 * Incomplete ISO OUT Transfer Interrupt.
3807 * This interrupt indicates that the core has dropped an ISO OUT
3808 * packet. The following conditions can be the cause:
3809 * - FIFO Full, the entire packet would not fit in the FIFO.
3812 * The follow actions will be taken:
3813 * -# Determine the EP
3814 * -# Set incomplete flag in dwc_ep structure
3815 * -# Read any data from the FIFO
3816 * -# Disable EP. when "Endpoint Disabled" interrupt is received
3819 int32_t dwc_otg_pcd_handle_incomplete_isoc_out_intr(dwc_otg_pcd_t * pcd)
3822 gintsts_data_t gintsts;
3825 dwc_otg_dev_if_t *dev_if;
3826 deptsiz_data_t deptsiz = {.d32 = 0 };
3827 depctl_data_t depctl = {.d32 = 0 };
3828 dsts_data_t dsts = {.d32 = 0 };
3832 dev_if = GET_CORE_IF(pcd)->dev_if;
3834 for (i = 1; i <= dev_if->num_out_eps; ++i) {
3835 dwc_ep = &pcd->in_ep[i].dwc_ep;
3836 if (pcd->out_ep[i].dwc_ep.active &&
3837 pcd->out_ep[i].dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
3839 dwc_read_reg32(&dev_if->out_ep_regs[i]->doeptsiz);
3841 dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl);
3843 if (depctl.b.epdis && deptsiz.d32) {
3844 set_current_pkt_info(GET_CORE_IF(pcd),
3845 &pcd->out_ep[i].dwc_ep);
3846 if (dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) {
3847 dwc_ep->cur_pkt = 0;
3848 dwc_ep->proc_buf_num =
3849 (dwc_ep->proc_buf_num ^ 1) & 0x1;
3851 if (dwc_ep->proc_buf_num) {
3852 dwc_ep->cur_pkt_addr =
3854 dwc_ep->cur_pkt_dma_addr =
3857 dwc_ep->cur_pkt_addr =
3859 dwc_ep->cur_pkt_dma_addr =
3866 dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->
3867 dev_global_regs->dsts);
3868 dwc_ep->next_frame = dsts.b.soffn;
3870 dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF
3877 /** @todo implement ISR */
3878 gintmsk_data_t intr_mask = {.d32 = 0 };
3880 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n",
3881 "OUT ISOC Incomplete");
3883 intr_mask.b.incomplisoout = 1;
3884 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
3887 #endif /* DWC_EN_ISOC */
3889 /* Clear interrupt */
3891 gintsts.b.incomplisoout = 1;
3892 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
3899 * This function handles the Global IN NAK Effective interrupt.
3902 int32_t dwc_otg_pcd_handle_in_nak_effective(dwc_otg_pcd_t * pcd)
3904 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
3905 depctl_data_t diepctl = {.d32 = 0 };
3906 depctl_data_t diepctl_rd = {.d32 = 0 };
3907 gintmsk_data_t intr_mask = {.d32 = 0 };
3908 gintsts_data_t gintsts;
3911 DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n");
3913 /* Disable all active IN EPs */
3914 diepctl.b.epdis = 1;
3917 for (i = 0; i <= dev_if->num_in_eps; i++) {
3919 dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
3920 if (diepctl_rd.b.epena) {
3921 dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl,
3925 /* Disable the Global IN NAK Effective Interrupt */
3926 intr_mask.b.ginnakeff = 1;
3927 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
3930 /* Clear interrupt */
3932 gintsts.b.ginnakeff = 1;
3933 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
3940 * OUT NAK Effective.
3943 int32_t dwc_otg_pcd_handle_out_nak_effective(dwc_otg_pcd_t * pcd)
3945 gintmsk_data_t intr_mask = {.d32 = 0 };
3946 gintsts_data_t gintsts;
3948 DWC_DEBUGPL(DBG_CIL,"INTERRUPT Handler not implemented for %s\n",
3949 "Global IN NAK Effective\n");
3950 /* Disable the Global IN NAK Effective Interrupt */
3951 intr_mask.b.goutnakeff = 1;
3952 dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
3955 /* Clear interrupt */
3957 gintsts.b.goutnakeff = 1;
3958 dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
3965 * PCD interrupt handler.
3967 * The PCD handles the device interrupts. Many conditions can cause a
3968 * device interrupt. When an interrupt occurs, the device interrupt
3969 * service routine determines the cause of the interrupt and
3970 * dispatches handling to the appropriate function. These interrupt
3971 * handling functions are described below.
3973 * All interrupt registers are processed from LSB to MSB.
3976 extern int32_t dwc_otg_handle_usb_suspend_intr(dwc_otg_core_if_t * core_if);
3977 extern int32_t dwc_otg_handle_wakeup_detected_intr(dwc_otg_core_if_t * core_if);
3979 int32_t dwc_otg_pcd_handle_intr(dwc_otg_pcd_t * pcd)
3981 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
3983 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
3985 gintsts_data_t gintr_status;
3989 DWC_DEBUGPL(DBG_ANY, "%s() gintsts=%08x gintmsk=%08x\n",
3991 dwc_read_reg32(&global_regs->gintsts),
3992 dwc_read_reg32(&global_regs->gintmsk));
3995 if (dwc_otg_is_device_mode(core_if)) {
3996 DWC_SPINLOCK(pcd->lock);
3998 gintr_status.d32 = dwc_otg_read_core_intr(core_if);
4000 DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n",
4001 __func__, gintr_status.d32);
4003 if (gintr_status.b.sofintr) {
4004 retval |= dwc_otg_pcd_handle_sof_intr(pcd);
4006 if (gintr_status.b.rxstsqlvl) {
4008 dwc_otg_pcd_handle_rx_status_q_level_intr(pcd);
4010 if (gintr_status.b.nptxfempty) {
4011 retval |= dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd);
4013 if (gintr_status.b.ginnakeff) {
4014 retval |= dwc_otg_pcd_handle_in_nak_effective(pcd);
4016 if (gintr_status.b.goutnakeff) {
4017 retval |= dwc_otg_pcd_handle_out_nak_effective(pcd);
4019 if (gintr_status.b.i2cintr) {
4020 retval |= dwc_otg_pcd_handle_i2c_intr(pcd);
4022 if (gintr_status.b.erlysuspend) {
4023 retval |= dwc_otg_pcd_handle_early_suspend_intr(pcd);
4025 if (gintr_status.b.usbreset) {
4026 retval |= dwc_otg_pcd_handle_usb_reset_intr(pcd);
4028 if (gintr_status.b.enumdone) {
4029 retval |= dwc_otg_pcd_handle_enum_done_intr(pcd);
4031 if (gintr_status.b.isooutdrop) {
4033 dwc_otg_pcd_handle_isoc_out_packet_dropped_intr
4036 if (gintr_status.b.eopframe) {
4038 dwc_otg_pcd_handle_end_periodic_frame_intr(pcd);
4040 if (gintr_status.b.epmismatch) {
4041 retval |= dwc_otg_pcd_handle_ep_mismatch_intr(core_if);
4043 if (gintr_status.b.inepint) {
4044 if (!core_if->multiproc_int_enable) {
4045 retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
4048 if (gintr_status.b.outepintr) {
4049 if (!core_if->multiproc_int_enable) {
4050 retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
4053 if (gintr_status.b.incomplisoin) {
4055 dwc_otg_pcd_handle_incomplete_isoc_in_intr(pcd);
4057 if (gintr_status.b.incomplisoout) {
4059 dwc_otg_pcd_handle_incomplete_isoc_out_intr(pcd);
4062 if (gintr_status.b.usbsuspend) {
4063 retval |= dwc_otg_handle_usb_suspend_intr(core_if);
4065 if (gintr_status.b.wkupintr) {
4066 retval |= dwc_otg_handle_wakeup_detected_intr(core_if);
4068 /* In MPI mode De vice Endpoints intterrupts are asserted
4069 * without setting outepintr and inepint bits set, so these
4070 * Interrupt handlers are called without checking these bit-fields
4072 if (core_if->multiproc_int_enable) {
4073 retval |= dwc_otg_pcd_handle_in_ep_intr(pcd);
4074 retval |= dwc_otg_pcd_handle_out_ep_intr(pcd);
4077 // dwc_debug( "() gintsts=%0x\n",
4078 // dwc_read_reg32(&global_regs->gintsts));
4080 DWC_SPINUNLOCK(pcd->lock);
4085 #endif /* DWC_HOST_ONLY */