1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 * ========================================================================== */
36 * This file implements the PCD Core. All code in this file is portable and
37 * doesn't use any OS-specific functions.
38 * PCD Core provides Interface, defined in <code><dwc_otg_pcd_if.h></code>
39 * header file, which can be used to implement OS specific PCD interface.
41 * An important function of the PCD is managing interrupts generated
42 * by the DWC_otg controller. The implementation of the DWC_otg device
43 * mode interrupt service routines is in dwc_otg_pcd_intr.c.
45 * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
46 * @todo Does it work when the request size is greater than DEPTSIZ
51 #include "dwc_otg_pcd.h"
54 #include "dwc_otg_cfi.h"
56 extern int init_cfi(cfiobject_t * cfiobj);
59 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
/*
 * Maps the gadget-layer endpoint handle (stored in each EP's 'priv' field)
 * back to the PCD's dwc_otg_pcd_ep_t.  Checks EP0 first, then scans the
 * IN and OUT endpoint arrays.  Callers must be prepared for a NULL result
 * when the handle matches no endpoint.
 */
61 static dwc_otg_pcd_ep_t *get_ep_from_handle(dwc_otg_pcd_t * pcd, void *handle)
64 if (pcd->ep0.priv == handle) {
67 for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
68 if (pcd->in_ep[i].priv == handle)
69 return &pcd->in_ep[i];
70 if (pcd->out_ep[i].priv == handle)
71 return &pcd->out_ep[i];
/*
 * Debug helper: hex-dumps 'len' bytes of 'buf', four bytes per trace line.
 * 'direction' selects the "in"/"out" tag in the log header.
 * NOTE(review): the loop reads buf[i+1..i+3] even when len is not a
 * multiple of 4 -- presumably callers pass padded buffers; verify.
 */
77 static void dump_log(unsigned char * buf, int len, int direction)
82 dwc_debug("**log_buf :%s len:%d\n", (direction ? "in" : "out"), len);
85 for (i = 0; i < len; i += 4) {
86 trace_printk("%02x %02x %02x %02x\n",
87 buf[i],buf[i+1],buf[i+2],buf[i+3]
93 * This function completes a request. It calls the request callback.
95 void dwc_otg_request_done(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req,
/* Remember the stopped state; the completion callback path may change it. */
98 unsigned stopped = ep->stopped;
100 DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, ep);
/* Unlink the request from the EP queue before invoking the callback. */
101 DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
103 /* don't modify queue heads during completion callback */
/* Drop the PCD lock across the callback so the gadget driver may requeue. */
105 DWC_SPINUNLOCK(ep->pcd->lock);
109 dma_unmap_single(NULL, req->dma, req->length,
110 (ep->dwc_ep.is_in) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
111 req->dma = DMA_ADDR_INVALID;
115 ep->pcd->fops->complete(ep->pcd, ep->priv, req->priv, status,
117 DWC_SPINLOCK(ep->pcd->lock);
119 if (ep->pcd->request_pending > 0) {
120 --ep->pcd->request_pending;
/* Restore the stopped flag saved on entry. */
123 ep->stopped = stopped;
128 * This function terminates all the requests in the EP request queue.
130 void dwc_otg_request_nuke(dwc_otg_pcd_ep_t * ep)
132 dwc_otg_pcd_request_t *req;
136 /* called with irqs blocked?? */
/* Complete every queued request with -DWC_E_SHUTDOWN; dwc_otg_request_done()
 * unlinks each one, so the loop terminates when the queue drains. */
137 while (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
138 req = DWC_CIRCLEQ_FIRST(&ep->queue);
139 dwc_otg_request_done(ep, req, -DWC_E_SHUTDOWN);
/* Binds the OS-specific function-ops table to the PCD. */
143 void dwc_otg_pcd_start(dwc_otg_pcd_t * pcd,
144 const struct dwc_otg_pcd_function_ops *fops)
150 * PCD Callback function for initializing the PCD when switching to
153 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
155 static int32_t dwc_otg_pcd_start_cb(void *p)
157 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
160 * Initialize the core for Device mode.
/* Only (re)initialize the core when it is actually in device mode. */
162 if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) {
163 dwc_otg_core_dev_init(GET_CORE_IF(pcd));
168 /** CFI-specific buffer allocation function for EP */
/* Resolves the EP handle and delegates to the CFI object's allocator.
 * NOTE(review): the result of get_ep_from_handle() is not NULL-checked
 * before being handed to ep_alloc_buf -- verify callers guarantee a
 * valid handle. */
170 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
171 size_t buflen, int flags)
173 dwc_otg_pcd_ep_t *ep;
174 ep = get_ep_from_handle(pcd, pep);
175 return pcd->cfi->ops.ep_alloc_buf(pcd->cfi, pcd, ep, addr, buflen,
/* Prototype only -- presumably the alternate preprocessor branch when CFI
 * is configured differently; confirm against the full file. */
179 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
180 size_t buflen, int flags);
184 * PCD Callback function for notifying the PCD when resuming from
187 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
189 static int32_t dwc_otg_pcd_resume_cb(void *p)
191 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
/* Forward the resume event to the gadget layer if it registered a hook. */
193 if (pcd->fops->resume) {
194 pcd->fops->resume(pcd);
197 /* Stop the SRP timeout timer. */
/* The SRP timer is only armed for non-FS PHYs or when I2C is disabled;
 * cancel it on resume so srp_timeout() does not fire spuriously. */
198 if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS)
199 || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
200 if (GET_CORE_IF(pcd)->srp_timer_started) {
201 GET_CORE_IF(pcd)->srp_timer_started = 0;
202 DWC_TIMER_CANCEL(pcd->srp_timer);
209 * PCD Callback function for notifying the PCD device is suspended.
211 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
213 static int32_t dwc_otg_pcd_suspend_cb(void *p)
215 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
/* Forward the suspend event to the gadget layer if it registered a hook. */
217 if (pcd->fops->suspend) {
218 pcd->fops->suspend(pcd);
225 * PCD Callback function for stopping the PCD when switching to Host
228 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
230 static int32_t dwc_otg_pcd_stop_cb(void *p)
232 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
/* Local extern: dwc_otg_pcd_stop() is defined in another translation unit. */
233 extern void dwc_otg_pcd_stop(dwc_otg_pcd_t * _pcd);
235 dwc_otg_pcd_stop(pcd);
240 * PCD Callback structure for handling mode switching.
/* Registered with the CIL via dwc_otg_cil_register_pcd_callbacks(); the
 * 'p' context pointer is filled in at registration time. */
242 static dwc_otg_cil_callbacks_t pcd_callbacks = {
243 .start = dwc_otg_pcd_start_cb,
244 .stop = dwc_otg_pcd_stop_cb,
245 .suspend = dwc_otg_pcd_suspend_cb,
246 .resume_wakeup = dwc_otg_pcd_resume_cb,
247 .p = 0, /* Set at registration */
251 * This function allocates a DMA Descriptor chain for the Endpoint
252 * buffer to be used for a transfer to/from the specified endpoint.
/* Returns the CPU address of 'count' contiguous descriptors; the bus
 * address is stored through 'dma_desc_addr'.  Returns NULL (0) on
 * allocation failure -- callers check for that. */
254 dwc_otg_dma_desc_t *dwc_otg_ep_alloc_desc_chain(uint32_t * dma_desc_addr,
258 return dwc_dma_alloc(count * sizeof(dwc_otg_dma_desc_t), dma_desc_addr);
262 * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc.
/* Size passed to dwc_dma_free() must match the original allocation
 * (count descriptors). */
264 void dwc_otg_ep_free_desc_chain(dwc_otg_dma_desc_t * desc_addr,
265 uint32_t dma_desc_addr, uint32_t count)
267 dwc_dma_free(count * sizeof(dwc_otg_dma_desc_t), desc_addr,
274 * This function initializes a descriptor chain for Isochronous transfer
276 * @param core_if Programming view of DWC_otg controller.
277 * @param dwc_ep The EP to start the transfer on.
280 void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t * core_if,
284 dsts_data_t dsts = {.d32 = 0 };
285 depctl_data_t depctl = {.d32 = 0 };
286 volatile uint32_t *addr;
/* Descriptor count covers one buffer-processing interval. */
290 dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval;
293 dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
296 /** Allocate descriptors for double buffering */
297 dwc_ep->iso_desc_addr =
298 dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,
299 dwc_ep->desc_cnt * 2);
/* BUGFIX: warn and bail out only when the chain could NOT be allocated.
 * The original tested the wrong field (desc_addr) with inverted polarity,
 * so the error path ran on every successful allocation and the failure
 * case went undetected. */
300 if (!dwc_ep->iso_desc_addr) {
301 DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
305 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
/* OUT (host-to-device) endpoint: build RX descriptor lists. */
308 if (dwc_ep->is_in == 0) {
309 desc_sts_data_t sts = {.d32 = 0 };
310 dwc_otg_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
312 uint32_t data_per_desc;
313 dwc_otg_dev_out_ep_regs_t *out_regs =
314 core_if->dev_if->out_ep_regs[dwc_ep->num];
317 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
318 dma_ad = (dma_addr_t) dwc_read_reg32(&(out_regs->doepdma));
320 /** Buffer 0 descriptors setup */
321 dma_ad = dwc_ep->dma_addr0;
323 sts.b_iso_out.bs = BS_HOST_READY;
324 sts.b_iso_out.rxsts = 0;
326 sts.b_iso_out.sp = 0;
327 sts.b_iso_out.ioc = 0;
328 sts.b_iso_out.pid = 0;
329 sts.b_iso_out.framenum = 0;
/* Per-frame descriptors: the last packet of a frame may be short. */
332 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
333 i += dwc_ep->pkt_per_frm) {
335 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
337 ((j + 1) * dwc_ep->maxpacket >
338 dwc_ep->data_per_frame) ? dwc_ep->
340 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
343 (data_per_desc % 4) ? (4 -
346 sts.b_iso_out.rxbytes = data_per_desc;
347 dma_desc->buf = dma_ad;
348 dma_desc->status.d32 = sts.d32;
350 offset += data_per_desc;
352 dma_ad += data_per_desc;
/* Last frame of buffer 0: final descriptor raises IOC. */
356 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
358 ((j + 1) * dwc_ep->maxpacket >
359 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
360 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
362 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
363 sts.b_iso_out.rxbytes = data_per_desc;
364 dma_desc->buf = dma_ad;
365 dma_desc->status.d32 = sts.d32;
367 offset += data_per_desc;
369 dma_ad += data_per_desc;
372 sts.b_iso_out.ioc = 1;
374 ((j + 1) * dwc_ep->maxpacket >
375 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
376 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
378 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
379 sts.b_iso_out.rxbytes = data_per_desc;
381 dma_desc->buf = dma_ad;
382 dma_desc->status.d32 = sts.d32;
385 /** Buffer 1 descriptors setup */
386 sts.b_iso_out.ioc = 0;
387 dma_ad = dwc_ep->dma_addr1;
390 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
391 i += dwc_ep->pkt_per_frm) {
392 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
394 ((j + 1) * dwc_ep->maxpacket >
395 dwc_ep->data_per_frame) ? dwc_ep->
397 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
399 (data_per_desc % 4) ? (4 -
402 sts.b_iso_out.rxbytes = data_per_desc;
403 dma_desc->buf = dma_ad;
404 dma_desc->status.d32 = sts.d32;
406 offset += data_per_desc;
408 dma_ad += data_per_desc;
411 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
413 ((j + 1) * dwc_ep->maxpacket >
414 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
415 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
417 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
418 sts.b_iso_out.rxbytes = data_per_desc;
419 dma_desc->buf = dma_ad;
420 dma_desc->status.d32 = sts.d32;
422 offset += data_per_desc;
424 dma_ad += data_per_desc;
427 sts.b_iso_out.ioc = 1;
430 ((j + 1) * dwc_ep->maxpacket >
431 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
432 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
434 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
435 sts.b_iso_out.rxbytes = data_per_desc;
437 dma_desc->buf = dma_ad;
438 dma_desc->status.d32 = sts.d32;
440 dwc_ep->next_frame = 0;
442 /** Write dma_ad into DOEPDMA register */
443 dwc_write_reg32(&(out_regs->doepdma),
444 (uint32_t) dwc_ep->iso_dma_desc_addr);
/* IN (device-to-host) endpoint: one descriptor per frame. */
449 desc_sts_data_t sts = {.d32 = 0 };
450 dwc_otg_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
452 dwc_otg_dev_in_ep_regs_t *in_regs =
453 core_if->dev_if->in_ep_regs[dwc_ep->num];
454 unsigned int frmnumber;
455 fifosize_data_t txfifosize, rxfifosize;
458 dwc_read_reg32(&core_if->dev_if->in_ep_regs[dwc_ep->num]->
461 dwc_read_reg32(&core_if->core_global_regs->grxfsiz);
463 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
465 dma_ad = dwc_ep->dma_addr0;
468 dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
470 sts.b_iso_in.bs = BS_HOST_READY;
471 sts.b_iso_in.txsts = 0;
473 (dwc_ep->data_per_frame % dwc_ep->maxpacket) ? 1 : 0;
474 sts.b_iso_in.ioc = 0;
475 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
477 frmnumber = dwc_ep->next_frame;
479 sts.b_iso_in.framenum = frmnumber;
480 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
483 /** Buffer 0 descriptors setup */
484 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
485 dma_desc->buf = dma_ad;
486 dma_desc->status.d32 = sts.d32;
489 dma_ad += dwc_ep->data_per_frame;
490 sts.b_iso_in.framenum += dwc_ep->bInterval;
493 sts.b_iso_in.ioc = 1;
494 dma_desc->buf = dma_ad;
495 dma_desc->status.d32 = sts.d32;
498 /** Buffer 1 descriptors setup */
499 sts.b_iso_in.ioc = 0;
500 dma_ad = dwc_ep->dma_addr1;
502 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
503 i += dwc_ep->pkt_per_frm) {
504 dma_desc->buf = dma_ad;
505 dma_desc->status.d32 = sts.d32;
508 dma_ad += dwc_ep->data_per_frame;
509 sts.b_iso_in.framenum += dwc_ep->bInterval;
511 sts.b_iso_in.ioc = 0;
513 sts.b_iso_in.ioc = 1;
516 dma_desc->buf = dma_ad;
517 dma_desc->status.d32 = sts.d32;
519 dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval;
521 /** Write dma_ad into diepdma register */
522 dwc_write_reg32(&(in_regs->diepdma),
523 (uint32_t) dwc_ep->iso_dma_desc_addr);
525 /** Enable endpoint, clear nak */
528 depctl.b.usbactep = 1;
531 dwc_modify_reg32(addr, depctl.d32, depctl.d32);
532 depctl.d32 = dwc_read_reg32(addr);
536 * This function starts a buffer-mode (non-descriptor) Isochronous transfer,
538 * @param core_if Programming view of DWC_otg controller.
539 * @param ep The EP to start the transfer on.
543 void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t * core_if,
546 depctl_data_t depctl = {.d32 = 0 };
547 volatile uint32_t *addr;
/* Select the IN or OUT endpoint control register. */
550 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
552 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
/* Guard: this path requires buffer DMA (dma_enable, no descriptor DMA). */
555 if (core_if->dma_enable == 0 || core_if->dma_desc_enable != 0) {
558 deptsiz_data_t deptsiz = {.d32 = 0 };
/* Transfer length covers one whole buffer-processing interval. */
561 ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval;
563 (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
/* Double buffering: proc_buf_num selects buffer 1 or buffer 0. */
566 (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
568 (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
571 /* Program the transfer size and packet count
572 * as follows: xfersize = N * maxpacket +
573 * short_packet pktcnt = N + (short_packet
576 deptsiz.b.mc = ep->pkt_per_frm;
577 deptsiz.b.xfersize = ep->xfer_len;
579 (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
580 dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->
581 dieptsiz, deptsiz.d32);
583 /* Write the DMA register */
585 (core_if->dev_if->in_ep_regs[ep->num]->
586 diepdma), (uint32_t) ep->dma_addr);
/* OUT direction: round xfersize up to a whole number of packets. */
590 (ep->xfer_len + (ep->maxpacket - 1)) /
592 deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
594 dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->
595 doeptsiz, deptsiz.d32);
597 /* Write the DMA register */
599 (core_if->dev_if->out_ep_regs[ep->num]->
600 doepdma), (uint32_t) ep->dma_addr);
603 /** Enable endpoint, clear nak */
605 dwc_modify_reg32(addr, depctl.d32, depctl.d32);
610 dwc_modify_reg32(addr, depctl.d32, depctl.d32);
615 * This function does the setup for a data transfer for an EP and
616 * starts the transfer. For an IN transfer, the packets will be
617 * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
618 * the packets are unloaded from the Rx FIFO in the ISR.
620 * @param core_if Programming view of DWC_otg controller.
621 * @param ep The EP to start the transfer on.
624 static void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t * core_if,
/* Dispatch on DMA capability: descriptor DMA, buffer DMA (PTI enhanced),
 * or frame-by-frame transfers otherwise. */
627 if (core_if->dma_enable) {
628 if (core_if->dma_desc_enable) {
630 ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
632 ep->desc_cnt = ep->pkt_cnt;
634 dwc_otg_iso_ep_start_ddma_transfer(core_if, ep);
636 if (core_if->pti_enh_enable) {
637 dwc_otg_iso_ep_start_buf_transfer(core_if, ep);
640 (ep->proc_buf_num) ? ep->xfer_buff1 : ep->
642 ep->cur_pkt_dma_addr =
643 (ep->proc_buf_num) ? ep->dma_addr1 : ep->
645 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
/* Non-DMA path: frame transfers from the currently active buffer. */
650 (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
651 ep->cur_pkt_dma_addr =
652 (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
653 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
658 * This function stops an Isochronous transfer on an EP: it updates the
659 * endpoint control register, frees any descriptor-DMA chain that was
660 * allocated for the transfer, and resets the EP's ISO-related state.
663 * @param core_if Programming view of DWC_otg controller.
664 * @param ep The EP to stop the transfer on.
667 void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t * core_if, dwc_ep_t * ep)
669 depctl_data_t depctl = {.d32 = 0 };
670 volatile uint32_t *addr;
/* Select the IN or OUT endpoint control register. */
672 if (ep->is_in == 1) {
673 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
675 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
679 depctl.d32 = dwc_read_reg32(addr);
684 dwc_write_reg32(addr, depctl.d32);
/* Free the descriptor chain only if one was actually allocated. */
686 if (core_if->dma_desc_enable &&
687 ep->iso_desc_addr && ep->iso_dma_desc_addr) {
688 dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,
689 ep->iso_dma_desc_addr,
693 /* reset variables */
698 ep->data_per_frame = 0;
699 ep->data_pattern_frame = 0;
701 ep->buf_proc_intrvl = 0;
703 ep->proc_buf_num = 0;
707 ep->iso_desc_addr = 0;
708 ep->iso_dma_desc_addr = 0;
/* Starts an Isochronous transfer on 'ep_handle' using the two supplied
 * double-buffers (buf0/dma0 and buf1/dma1).  Validates the EP, records
 * the transfer parameters under the PCD lock, allocates the per-packet
 * bookkeeping array and programs the hardware via
 * dwc_otg_iso_ep_start_transfer().
 * Returns 0 on success, -DWC_E_INVALID for a bad EP or busy request,
 * -DWC_E_NO_MEMORY when the packet-info array cannot be allocated. */
711 int dwc_otg_pcd_iso_ep_start(dwc_otg_pcd_t * pcd, void *ep_handle,
712 uint8_t * buf0, uint8_t * buf1, dwc_dma_t dma0,
713 dwc_dma_t dma1, int sync_frame, int dp_frame,
714 int data_per_frame, int start_frame,
715 int buf_proc_intrvl, void *req_handle,
718 dwc_otg_pcd_ep_t *ep;
719 unsigned long flags = 0;
723 dwc_otg_core_if_t *core_if;
725 ep = get_ep_from_handle(pcd, ep_handle);
/* BUGFIX: get_ep_from_handle() can return NULL for an unknown handle;
 * check it before dereferencing (matches dwc_otg_pcd_iso_ep_stop). */
727 if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
728 DWC_WARN("bad ep\n");
729 return -DWC_E_INVALID;
732 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
733 core_if = GET_CORE_IF(pcd);
734 dwc_ep = &ep->dwc_ep;
/* Reject overlapping ISO requests on the same EP. */
736 if (ep->iso_req_handle) {
737 DWC_WARN("ISO request in progress\n");
740 dwc_ep->dma_addr0 = dma0;
741 dwc_ep->dma_addr1 = dma1;
743 dwc_ep->xfer_buff0 = buf0;
744 dwc_ep->xfer_buff1 = buf1;
746 dwc_ep->data_per_frame = data_per_frame;
748 /** @todo - pattern data support is to be implemented in the future */
749 dwc_ep->data_pattern_frame = dp_frame;
750 dwc_ep->sync_frame = sync_frame;
752 dwc_ep->buf_proc_intrvl = buf_proc_intrvl;
754 dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1);
756 dwc_ep->proc_buf_num = 0;
/* pkt_per_frm = ceil(data_per_frame / maxpacket). */
758 dwc_ep->pkt_per_frm = 0;
759 frm_data = ep->dwc_ep.data_per_frame;
760 while (frm_data > 0) {
761 dwc_ep->pkt_per_frm++;
762 frm_data -= ep->dwc_ep.maxpacket;
765 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
/* start_frame == -1 means "start as soon as possible", aligned to the
 * EP's bInterval; otherwise use the caller-supplied frame number. */
767 if (start_frame == -1) {
768 dwc_ep->next_frame = dsts.b.soffn + 1;
769 if (dwc_ep->bInterval != 1) {
771 dwc_ep->next_frame + (dwc_ep->bInterval - 1 -
776 dwc_ep->next_frame = start_frame;
779 if (!core_if->pti_enh_enable) {
781 dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
785 (dwc_ep->data_per_frame *
786 (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval)
787 - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket;
790 if (core_if->dma_desc_enable) {
792 dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
/* Per-packet status bookkeeping, freed in dwc_otg_pcd_iso_ep_stop(). */
798 dwc_alloc_atomic(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
801 dwc_alloc(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
803 if (!dwc_ep->pkt_info) {
804 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
805 return -DWC_E_NO_MEMORY;
807 if (core_if->pti_enh_enable) {
808 dwc_memset(dwc_ep->pkt_info, 0,
809 sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
813 ep->iso_req_handle = req_handle;
/* Drop the lock before touching the hardware. */
815 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
816 dwc_otg_iso_ep_start_transfer(core_if, dwc_ep);
/* Stops an Isochronous transfer on 'ep_handle'.  Validates the EP, halts
 * the hardware transfer, verifies the caller owns the in-flight request
 * (req_handle) and only then releases the per-packet bookkeeping.
 * Returns 0 on success, -DWC_E_INVALID for a bad EP or handle mismatch. */
820 int dwc_otg_pcd_iso_ep_stop(dwc_otg_pcd_t * pcd, void *ep_handle,
823 unsigned long flags = 0;
824 dwc_otg_pcd_ep_t *ep;
827 ep = get_ep_from_handle(pcd, ep_handle);
828 if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
829 DWC_WARN("bad ep\n");
830 return -DWC_E_INVALID;
832 dwc_ep = &ep->dwc_ep;
834 dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep);
837 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
838 if (ep->iso_req_handle != req_handle) {
839 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
840 return -DWC_E_INVALID;
/* BUGFIX: free the packet-info array only after the request handle has
 * been validated (the original freed it unconditionally, so a call with
 * a wrong handle destroyed the live request's bookkeeping), and clear
 * the pointer to prevent a double free on a repeated stop. */
836 dwc_free(dwc_ep->pkt_info);
dwc_ep->pkt_info = 0;
843 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
845 ep->iso_req_handle = 0;
850 * This function is used for periodic data exchange between PCD and gadget drivers.
851 * for Isochronous EPs
853 * - Every time a sync period completes this function is called to
854 * perform data exchange between PCD and gadget
856 void dwc_otg_iso_buffer_done(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * ep,
862 dwc_ep = &ep->dwc_ep;
/* Drop the lock around the gadget callback; proc_buf_num ^ 1 identifies
 * the buffer that has just completed. */
864 DWC_SPINUNLOCK(ep->pcd->lock);
865 pcd->fops->isoc_complete(pcd, ep->priv, ep->iso_req_handle,
866 dwc_ep->proc_buf_num ^ 0x1);
867 DWC_SPINLOCK(ep->pcd->lock);
/* Clear the per-packet bookkeeping for the next period. */
869 for (i = 0; i < dwc_ep->pkt_cnt; ++i) {
870 dwc_ep->pkt_info[i].status = 0;
871 dwc_ep->pkt_info[i].offset = 0;
872 dwc_ep->pkt_info[i].length = 0;
/* Returns the number of ISO packets per buffer-processing period for the
 * given endpoint.
 * NOTE(review): neither the looked-up ep nor iso_req_handle is validated
 * here -- presumably callers guarantee a valid, active handle; verify. */
876 int dwc_otg_pcd_get_iso_packet_count(dwc_otg_pcd_t * pcd, void *ep_handle,
877 void *iso_req_handle)
879 dwc_otg_pcd_ep_t *ep;
882 ep = get_ep_from_handle(pcd, ep_handle);
883 dwc_ep = &ep->dwc_ep;
885 return dwc_ep->pkt_cnt;
/* Copies status/length/offset of ISO packet number 'packet' into the
 * caller-supplied out-parameters.
 * NOTE(review): neither ep, iso_req_handle nor the 'packet' index is
 * range-checked against pkt_cnt -- verify callers guarantee validity. */
888 void dwc_otg_pcd_get_iso_packet_params(dwc_otg_pcd_t * pcd, void *ep_handle,
889 void *iso_req_handle, int packet,
890 int *status, int *actual, int *offset)
892 dwc_otg_pcd_ep_t *ep;
895 ep = get_ep_from_handle(pcd, ep_handle);
896 dwc_ep = &ep->dwc_ep;
898 *status = dwc_ep->pkt_info[packet].status;
899 *actual = dwc_ep->pkt_info[packet].length;
900 *offset = dwc_ep->pkt_info[packet].offset;
903 #endif /* DWC_EN_ISOC */
/* Initializes one PCD endpoint and its underlying dwc_ep to a known idle
 * state.  Endpoints start out typed as CONTROL until they are activated
 * with a real descriptor. */
905 static void dwc_otg_pcd_init_ep(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * pcd_ep,
906 uint32_t is_in, uint32_t ep_num)
908 /* Init EP structure */
912 pcd_ep->queue_sof = 0;
914 /* Init DWC ep structure */
915 pcd_ep->dwc_ep.is_in = is_in;
916 pcd_ep->dwc_ep.num = ep_num;
917 pcd_ep->dwc_ep.active = 0;
918 pcd_ep->dwc_ep.tx_fifo_num = 0;
919 /* Control until ep is activated */
920 pcd_ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
921 pcd_ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
922 pcd_ep->dwc_ep.dma_addr = 0;
923 pcd_ep->dwc_ep.start_xfer_buff = 0;
924 pcd_ep->dwc_ep.xfer_buff = 0;
925 pcd_ep->dwc_ep.xfer_len = 0;
926 pcd_ep->dwc_ep.xfer_count = 0;
927 pcd_ep->dwc_ep.sent_zlp = 0;
928 pcd_ep->dwc_ep.total_len = 0;
929 pcd_ep->dwc_ep.desc_addr = 0;
930 pcd_ep->dwc_ep.dma_desc_addr = 0;
931 DWC_CIRCLEQ_INIT(&pcd_ep->queue);
/* Re-initializes EP0 and every hardware-present IN/OUT endpoint to the
 * idle state and resets the EP0 control-transfer state machine. */
937 static void dwc_otg_pcd_reinit(dwc_otg_pcd_t * pcd)
941 dwc_otg_pcd_ep_t *ep;
942 int in_ep_cntr, out_ep_cntr;
943 uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
944 uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
947 * Initialize the EP0 structure.
950 dwc_otg_pcd_init_ep(pcd, ep, 0, 0);
/* Walk HWCFG1 one bit at a time; a 0 bit marks the EP as present for this
 * direction.  Presumably the shifts (>> 3 IN, >> 2 OUT) skip EP0's bits
 * -- TODO confirm against the HWCFG1 layout. */
953 hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
954 for (i = 1; in_ep_cntr < num_in_eps; i++) {
955 if ((hwcfg1 & 0x1) == 0) {
956 dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
959 * @todo NGS: Add direction to EP, based on contents
960 * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
963 dwc_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);
965 DWC_CIRCLEQ_INIT(&ep->queue);
971 hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
972 for (i = 1; out_ep_cntr < num_out_eps; i++) {
973 if ((hwcfg1 & 0x1) == 0) {
974 dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
977 * @todo NGS: Add direction to EP, based on contents
978 * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
981 dwc_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
982 DWC_CIRCLEQ_INIT(&ep->queue);
/* EP0 starts disconnected as a control endpoint with max EP0 packet size. */
987 pcd->ep0state = EP0_DISCONNECT;
988 pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
989 pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
993 * This function is called when the SRP timer expires. The SRP should
994 * complete within 6 seconds.
996 static void srp_timeout(void *ptr)
998 gotgctl_data_t gotgctl;
999 dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *) ptr;
1000 volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;
1002 gotgctl.d32 = dwc_read_reg32(addr);
1004 core_if->srp_timer_started = 0;
/* FS PHY with I2C: success is signalled via srp_success plus the
 * B-session-valid bit in GOTGCTL. */
1006 if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
1007 (core_if->core_params->i2c_enable)) {
1008 DWC_DEBUGPL(DBG_CIL,"SRP Timeout\n");
1010 if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
1011 if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
1012 core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->
1016 /* Clear Session Request */
1018 gotgctl.b.sesreq = 1;
1019 dwc_modify_reg32(&core_if->core_global_regs->gotgctl,
1022 core_if->srp_success = 0;
/* SRP failed: the host never responded; clear the session request bit. */
1024 DWC_ERROR("Device not connected/responding\n");
1025 gotgctl.b.sesreq = 0;
1026 dwc_write_reg32(addr, gotgctl.d32);
/* Non-I2C path: a still-set sesreq bit at timeout means no response. */
1028 } else if (gotgctl.b.sesreq) {
1029 DWC_DEBUGPL(DBG_CIL,"SRP Timeout\n");
1031 DWC_ERROR("Device not connected/responding\n");
1032 gotgctl.b.sesreq = 0;
1033 dwc_write_reg32(addr, gotgctl.d32);
1035 DWC_DEBUGPL(DBG_CIL," SRP GOTGCTL=%0x\n", gotgctl.d32);
1043 extern void start_next_request(dwc_otg_pcd_ep_t * ep);
/* Tasklet body: for EP0 and every IN EP whose queue_sof flag is set,
 * clear the flag and start the next queued request. */
1045 static void start_xfer_tasklet_func(void *data)
1047 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1048 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1051 depctl_data_t diepctl;
1053 DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
/* NOTE(review): diepctl is read here but never used, and the loop below
 * re-declares (shadows) it and also discards the read -- looks like
 * vestigial code; confirm before removing. */
1055 diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl);
1057 if (pcd->ep0.queue_sof) {
1058 pcd->ep0.queue_sof = 0;
1059 start_next_request(&pcd->ep0);
1063 for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
1064 depctl_data_t diepctl;
1066 dwc_read_reg32(&core_if->dev_if->in_ep_regs[i]->diepctl);
1068 if (pcd->in_ep[i].queue_sof) {
1069 pcd->in_ep[i].queue_sof = 0;
1070 start_next_request(&pcd->in_ep[i]);
1079 * This function initializes the PCD portion of the driver.
1082 dwc_otg_pcd_t *dwc_otg_pcd_init(dwc_otg_core_if_t * core_if)
1084 dwc_otg_pcd_t *pcd = 0;
1085 dwc_otg_dev_if_t *dev_if;
1088 * Allocate PCD structure
1090 pcd = dwc_alloc(sizeof(dwc_otg_pcd_t));
1096 pcd->lock = DWC_SPINLOCK_ALLOC();
1097 pcd->core_if = core_if;
1099 DWC_ERROR("Could not allocate lock for pcd");
1103 dev_if = core_if->dev_if;
1105 if (core_if->hwcfg4.b.ded_fifo_en) {
1106 DWC_DEBUGPL(DBG_CIL,"Dedicated Tx FIFOs mode\n");
1108 DWC_DEBUGPL(DBG_CIL,"Shared Tx FIFO mode\n");
1112 * Initialize the core for Device mode.
1114 if (dwc_otg_is_device_mode(core_if)) {
1115 dwc_otg_core_dev_init(core_if);
1119 * Register the PCD Callbacks.
1121 dwc_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
1124 * Initialize the DMA buffer for SETUP packets
/* DMA mode: setup_pkt (5 entries) and status_buf come from DMA-coherent
 * memory; on a later failure the earlier allocations are unwound. */
1126 if (GET_CORE_IF(pcd)->dma_enable) {
1128 dwc_dma_alloc(sizeof(*pcd->setup_pkt) * 5,
1129 &pcd->setup_pkt_dma_handle);
1130 if (pcd->setup_pkt == 0) {
1136 dwc_dma_alloc(sizeof(uint16_t),
1137 &pcd->status_buf_dma_handle);
1138 if (pcd->status_buf == 0) {
1139 dwc_dma_free(sizeof(*pcd->setup_pkt) * 5,
1140 pcd->setup_pkt, pcd->setup_pkt_dma_handle);
/* Descriptor DMA additionally needs two SETUP descriptors plus one IN
 * and one OUT descriptor for EP0; partial failures free what succeeded. */
1145 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1146 dev_if->setup_desc_addr[0] =
1147 dwc_otg_ep_alloc_desc_chain(&dev_if->
1148 dma_setup_desc_addr[0],
1150 dev_if->setup_desc_addr[1] =
1151 dwc_otg_ep_alloc_desc_chain(&dev_if->
1152 dma_setup_desc_addr[1],
1154 dev_if->in_desc_addr =
1155 dwc_otg_ep_alloc_desc_chain(&dev_if->
1156 dma_in_desc_addr, 1);
1157 dev_if->out_desc_addr =
1158 dwc_otg_ep_alloc_desc_chain(&dev_if->
1159 dma_out_desc_addr, 1);
1161 if (dev_if->setup_desc_addr[0] == 0
1162 || dev_if->setup_desc_addr[1] == 0
1163 || dev_if->in_desc_addr == 0
1164 || dev_if->out_desc_addr == 0) {
1166 if (dev_if->out_desc_addr)
1167 dwc_otg_ep_free_desc_chain(dev_if->
1172 if (dev_if->in_desc_addr)
1173 dwc_otg_ep_free_desc_chain(dev_if->
1178 if (dev_if->setup_desc_addr[1])
1179 dwc_otg_ep_free_desc_chain(dev_if->
1185 if (dev_if->setup_desc_addr[0])
1186 dwc_otg_ep_free_desc_chain(dev_if->
1193 dwc_dma_free(sizeof(*pcd->setup_pkt) * 5,
1195 pcd->setup_pkt_dma_handle);
1196 dwc_dma_free(sizeof(*pcd->status_buf),
1198 pcd->status_buf_dma_handle);
/* Non-DMA mode: plain allocations suffice. */
1206 pcd->setup_pkt = dwc_alloc(sizeof(*pcd->setup_pkt) * 5);
1207 if (pcd->setup_pkt == 0) {
1212 pcd->status_buf = dwc_alloc(sizeof(uint16_t));
1213 if (pcd->status_buf == 0) {
1214 dwc_free(pcd->setup_pkt);
1220 dwc_otg_pcd_reinit(pcd);
1222 /* Allocate the cfi object for the PCD */
1224 pcd->cfi = dwc_alloc(sizeof(cfiobject_t));
1225 if (NULL == pcd->cfi)
1227 if (init_cfi(pcd->cfi)) {
1228 CFI_INFO("%s: Failed to init the CFI object\n", __func__);
1234 /* Initialize tasklets */
1235 pcd->start_xfer_tasklet = DWC_TASK_ALLOC(start_xfer_tasklet_func, pcd);
1236 pcd->test_mode_tasklet = DWC_TASK_ALLOC(do_test_mode, pcd);
1237 /* Initialize timer */
1238 pcd->srp_timer = DWC_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);
/* Tears down what dwc_otg_pcd_init() created: SETUP/status buffers (DMA
 * or plain, mirroring the allocation mode), EP0 descriptor chains, the
 * spinlock, both tasklets, the SRP timer and the CFI object's memory. */
1243 void dwc_otg_pcd_remove(dwc_otg_pcd_t * pcd)
1245 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
1247 if (GET_CORE_IF(pcd)->dma_enable) {
1248 dwc_dma_free(sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
1249 pcd->setup_pkt_dma_handle);
1250 dwc_dma_free(sizeof(uint16_t), pcd->status_buf,
1251 pcd->status_buf_dma_handle);
1252 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1253 dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0],
1255 dma_setup_desc_addr[0], 1);
1256 dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1],
1258 dma_setup_desc_addr[1], 1);
1259 dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr,
1260 dev_if->dma_in_desc_addr, 1);
1261 dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr,
1262 dev_if->dma_out_desc_addr,
1266 dwc_free(pcd->setup_pkt);
1267 dwc_free(pcd->status_buf);
1269 DWC_SPINLOCK_FREE(pcd->lock);
1270 DWC_TASK_FREE(pcd->start_xfer_tasklet);
1271 DWC_TASK_FREE(pcd->test_mode_tasklet);
1272 DWC_TIMER_FREE(pcd->srp_timer);
1274 /* Release the CFI object's dynamic memory */
/* NOTE(review): pcd->cfi is dereferenced without a NULL check although
 * dwc_otg_pcd_init() can bail out before allocating it -- verify the
 * surrounding (elided) guards. */
1276 if (pcd->cfi->ops.release) {
1277 pcd->cfi->ops.release(pcd->cfi);
/* Decides whether the PCD should report dual-speed operation, based on
 * the configured speed parameter and the PHY types in HWCFG2 combined
 * with the ulpi_fs_ls parameter; the per-branch return values are set
 * outside the visible condition. */
1284 uint32_t dwc_otg_pcd_is_dualspeed(dwc_otg_pcd_t * pcd)
1286 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1288 if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ||
1289 ((core_if->hwcfg2.b.hs_phy_type == 2) &&
1290 (core_if->hwcfg2.b.fs_phy_type == 1) &&
1291 (core_if->core_params->ulpi_fs_ls))) {
/* Reports OTG capability: reads GUSBCFG and requires both the SRP-capable
 * and HNP-capable bits to be set. */
1298 uint32_t dwc_otg_pcd_is_otg(dwc_otg_pcd_t * pcd)
1300 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1301 gusbcfg_data_t usbcfg = {.d32 = 0 };
1303 usbcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->gusbcfg);
1304 if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap) {
1312 * This function assigns a Tx FIFO to an EP in dedicated
1313 * Tx FIFO mode (see the ISOC/dedicated split in dwc_otg_pcd_ep_enable)
1315 static uint32_t assign_tx_fifo(dwc_otg_core_if_t * core_if)
/* Scan tx_msk for the first free FIFO among num_in_eps and claim it. */
1320 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
1321 if ((TxMsk & core_if->tx_msk) == 0) {
1322 core_if->tx_msk |= TxMsk;
1331 * This function assigns a periodic Tx FIFO to a periodic EP
1332 * in shared Tx FIFO mode
1334 static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t * core_if)
1336 uint32_t PerTxMsk = 1;
/* Scan p_tx_msk for the first free periodic FIFO and claim it. */
1338 for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
1339 if ((PerTxMsk & core_if->p_tx_msk) == 0) {
1340 core_if->p_tx_msk |= PerTxMsk;
1349 * This function releases a periodic Tx FIFO
1350 * in shared Tx FIFO mode
1352 static void release_perio_tx_fifo(dwc_otg_core_if_t * core_if,
/* Clear fifo_num's bit in the periodic-FIFO allocation mask. */
1356 (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
1360 * This function releases a Tx FIFO assigned by assign_tx_fifo()
1361 * in dedicated Tx FIFO mode
1363 static void release_tx_fifo(dwc_otg_core_if_t * core_if, uint32_t fifo_num)
/* Clear fifo_num's bit in the Tx-FIFO allocation mask. */
1366 (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
/* Enables (activates) the endpoint described by 'ep_desc' and binds it to
 * the OS handle 'usb_ep'.  EP0 is special-cased; other EPs are looked up
 * by the address/direction encoded in bEndpointAddress. */
1369 int dwc_otg_pcd_ep_enable(dwc_otg_pcd_t * pcd,
1370 const uint8_t * ep_desc, void *usb_ep)
1373 dwc_otg_pcd_ep_t *ep = 0;
1374 const usb_endpoint_descriptor_t *desc;
1375 unsigned long flags;
1378 desc = (const usb_endpoint_descriptor_t *)ep_desc;
1381 pcd->ep0.priv = usb_ep;
1383 retval = -DWC_E_INVALID;
1387 num = UE_GET_ADDR(desc->bEndpointAddress);
1388 dir = UE_GET_DIR(desc->bEndpointAddress);
/* A zero wMaxPacketSize is never valid. */
1390 if (!desc->wMaxPacketSize) {
1391 DWC_WARN("bad maxpacketsize\n");
1392 retval = -DWC_E_INVALID;
1396 if (dir == UE_DIR_IN) {
1397 ep = &pcd->in_ep[num - 1];
1399 ep = &pcd->out_ep[num - 1];
1402 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
1412 ep->dwc_ep.is_in = (dir == UE_DIR_IN);
1413 ep->dwc_ep.maxpacket = UGETW(desc->wMaxPacketSize);
1415 ep->dwc_ep.type = desc->bmAttributes & UE_XFERTYPE;
/* Tx FIFO assignment: shared mode uses FIFO 0, except ISOC EPs which get
 * a periodic FIFO; dedicated-FIFO mode claims a FIFO per EP. */
1417 if (ep->dwc_ep.is_in) {
1418 if (!GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1419 ep->dwc_ep.tx_fifo_num = 0;
1421 if (ep->dwc_ep.type == UE_ISOCHRONOUS) {
1423 * if ISOC EP then assign a Periodic Tx FIFO.
1425 ep->dwc_ep.tx_fifo_num =
1426 assign_perio_tx_fifo(GET_CORE_IF(pcd));
1430 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
1432 ep->dwc_ep.tx_fifo_num =
1433 assign_tx_fifo(GET_CORE_IF(pcd));
1437 /* Set initial data PID. */
1438 if (ep->dwc_ep.type == UE_BULK) {
1439 ep->dwc_ep.data_pid_start = 0;
1442 /* Alloc DMA Descriptors */
1443 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1444 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
1445 ep->dwc_ep.desc_addr =
1446 dwc_otg_ep_alloc_desc_chain(&ep->dwc_ep.
1449 if (!ep->dwc_ep.desc_addr) {
1450 DWC_WARN("%s, can't allocate DMA descriptor\n",
1452 retval = -DWC_E_SHUTDOWN;
1453 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1459 DWC_DEBUGPL(DBG_PCD, "Activate %s: type=%d, mps=%d desc=%p\n",
1460 (ep->dwc_ep.is_in ? "IN" : "OUT"),
1461 ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
1463 dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
/* Give the CFI layer a chance to hook the enable. */
1466 if (pcd->cfi->ops.ep_enable) {
1467 pcd->cfi->ops.ep_enable(pcd->cfi, pcd, ep);
1471 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
/*
 * Disable (deactivate) an endpoint: fail all queued requests,
 * deactivate the EP in hardware, release its Tx FIFO (IN EPs), and free
 * any descriptor-DMA chain.  Returns 0 on success, -DWC_E_INVALID when
 * the EP was never enabled.
 */
1477 int dwc_otg_pcd_ep_disable(dwc_otg_pcd_t * pcd, void *ep_handle)
1479 dwc_otg_pcd_ep_t *ep;
1480 unsigned long flags;
1481 dwc_otg_dma_desc_t *desc_addr;
1482 dwc_dma_t dma_desc_addr;
1484 ep = get_ep_from_handle(pcd, ep_handle);
/* NOTE(review): when ep == NULL this branch is taken, but the debug
 * print below dereferences ep (ep->dwc_ep.num / is_in) -- a NULL
 * pointer dereference.  The message must not touch ep on that path. */
1486 if (!ep || !ep->desc) {
1487 DWC_DEBUGPL(DBG_PCD, "%s, %d %s not enabled\n", __func__,
1488 ep->dwc_ep.num, ep->dwc_ep.is_in ? "IN" : "OUT");
1489 return -DWC_E_INVALID;
1492 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
/* Complete every queued request with an error status. */
1494 dwc_otg_request_nuke(ep);
1496 dwc_otg_ep_deactivate(GET_CORE_IF(pcd), &ep->dwc_ep);
1500 if (ep->dwc_ep.is_in) {
1501 dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
/* Both release helpers only clear their own allocation mask, so calling
 * both unconditionally is harmless regardless of FIFO mode. */
1502 release_perio_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1503 release_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1506 /* Free DMA Descriptors */
1507 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1508 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
/* Snapshot the chain pointers before releasing the lock below. */
1509 desc_addr = ep->dwc_ep.desc_addr;
1510 dma_desc_addr = ep->dwc_ep.dma_desc_addr;
1512 /* Cannot call dma_free_coherent() with IRQs disabled */
1513 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1514 dwc_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
1521 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1524 DWC_DEBUGPL(DBG_PCD, "%d %s disabled\n", ep->dwc_ep.num,
1525 ep->dwc_ep.is_in ? "IN" : "OUT");
/*
 * Queue a transfer request on an endpoint.
 *
 * Allocates a request object (atomically when atomic_alloc is set),
 * records buffer/length/zlp bookkeeping, and -- when the EP queue is
 * empty and the EP is not stopped -- starts the transfer immediately:
 * via the EP0 state machine for endpoint 0, or via
 * dwc_otg_ep_start_transfer() for all other EPs.  Returns 0 on success
 * or a negative DWC error code.
 * NOTE(review): many interior lines (NULL checks, else branches,
 * closing braces) are elided in this excerpt.
 */
1530 int dwc_otg_pcd_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
1531 uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
1532 int zero, void *req_handle, int atomic_alloc)
1535 unsigned long flags;
1536 dwc_otg_pcd_request_t *req;
1537 dwc_otg_pcd_ep_t *ep;
1538 uint32_t max_transfer;
1540 ep = get_ep_from_handle(pcd, ep_handle);
/* NOTE(review): ep is dereferenced here without a NULL check; if
 * get_ep_from_handle() can return NULL (as dwc_otg_pcd_ep_disable's
 * check suggests), this is a NULL pointer dereference. */
1541 if ((!ep->desc && ep->dwc_ep.num != 0)) {
1542 DWC_WARN("bad ep %p num :%d\n", ep->desc, ep->dwc_ep.num);
1543 return -DWC_E_INVALID;
/* GFP_ATOMIC-style allocation when called from non-sleepable context. */
1547 req = dwc_alloc_atomic(sizeof(*req));
1549 req = dwc_alloc(sizeof(*req));
1553 return -DWC_E_NO_MEMORY;
1555 DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
/* Extra tracing when the 'opt' core parameter is off. */
1556 if (!GET_CORE_IF(pcd)->core_params->opt) {
1557 if (ep->dwc_ep.num != 0) {
1558 DWC_ERROR("queue req %p, len %d buf %p\n",
1559 req_handle, buflen, buf);
1564 //dump_log(buf, buflen, ep->dwc_ep.is_in);
/* NOTE(review): dma_map_single() is called with a NULL struct device;
 * this works only on platforms/kernels that tolerate a NULL dev --
 * confirm against the target kernel version. */
1565 req->dma = dma_map_single(NULL,
1568 ep->dwc_ep.is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1574 //req->dma = dma_buf;
1575 req->length = buflen;
1576 req->sent_zlp = zero;
1577 req->priv = req_handle;
1579 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
1582 * For EP0 IN without premature status, zlp is required?
1584 if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
1585 DWC_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->dwc_ep.num);
1589 /* Start the transfer */
/* Only start immediately if nothing is in flight on this EP. */
1590 if (DWC_CIRCLEQ_EMPTY(&ep->queue) && !ep->stopped) {
1592 if (ep->dwc_ep.num == 0) {
/* EP0: behavior depends on the current control-transfer phase. */
1593 switch (pcd->ep0state) {
1594 case EP0_IN_DATA_PHASE:
1595 DWC_DEBUGPL(DBG_PCD,
1596 "%s ep0: EP0_IN_DATA_PHASE\n",
1600 case EP0_OUT_DATA_PHASE:
1601 DWC_DEBUGPL(DBG_PCD,
1602 "%s ep0: EP0_OUT_DATA_PHASE\n",
/* SET_CONFIGURATION in progress: this queue completes the
 * status phase, which for an OUT data phase is IN. */
1604 if (pcd->request_config) {
1605 /* Complete STATUS PHASE */
1606 ep->dwc_ep.is_in = 1;
1607 pcd->ep0state = EP0_IN_STATUS_PHASE;
1611 case EP0_IN_STATUS_PHASE:
1612 DWC_DEBUGPL(DBG_PCD,
1613 "%s ep0: EP0_IN_STATUS_PHASE\n",
/* Unexpected EP0 state: release the lock and fail. */
1618 DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
1620 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1622 return -DWC_E_SHUTDOWN;
/* Program the EP0 transfer bookkeeping from this request. */
1625 ep->dwc_ep.dma_addr = dma_buf;
1626 ep->dwc_ep.start_xfer_buff = buf;
1627 ep->dwc_ep.xfer_buff = buf;
1628 ep->dwc_ep.xfer_len = buflen;
1629 ep->dwc_ep.xfer_count = 0;
1630 ep->dwc_ep.sent_zlp = 0;
1631 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
/* A non-empty transfer that is an exact multiple of maxpacket
 * needs a trailing ZLP to mark its end. */
1634 if ((ep->dwc_ep.xfer_len %
1635 ep->dwc_ep.maxpacket == 0)
1636 && (ep->dwc_ep.xfer_len != 0)) {
1637 ep->dwc_ep.sent_zlp = 1;
1642 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
1644 } // non-ep0 endpoints
/* CFI-specific buffer modes build their own DMA descriptors. */
1647 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
1648 /* store the request length */
1649 ep->dwc_ep.cfi_req_len = buflen;
1650 pcd->cfi->ops.build_descriptors(pcd->cfi, pcd,
1655 GET_CORE_IF(ep->pcd)->core_params->
1658 /* Setup and start the Transfer */
1659 ep->dwc_ep.dma_addr = dma_buf;
1660 ep->dwc_ep.start_xfer_buff = buf;
1661 ep->dwc_ep.xfer_buff = buf;
1662 ep->dwc_ep.xfer_len = 0;
1663 ep->dwc_ep.xfer_count = 0;
1664 ep->dwc_ep.sent_zlp = 0;
1665 ep->dwc_ep.total_len = buflen;
1667 ep->dwc_ep.maxxfer = max_transfer;
/* Descriptor-DMA caps a single transfer; OUT transfers are
 * additionally rounded down to a 4-byte multiple. */
1668 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1669 uint32_t out_max_xfer =
1670 DDMA_MAX_TRANSFER_SIZE -
1671 (DDMA_MAX_TRANSFER_SIZE % 4);
1672 if (ep->dwc_ep.is_in) {
1673 if (ep->dwc_ep.maxxfer >
1674 DDMA_MAX_TRANSFER_SIZE) {
1675 ep->dwc_ep.maxxfer =
1676 DDMA_MAX_TRANSFER_SIZE;
1679 if (ep->dwc_ep.maxxfer >
1681 ep->dwc_ep.maxxfer =
/* When the request spans multiple hardware transfers, keep each
 * chunk a whole number of max-packet-size packets. */
1686 if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
1687 ep->dwc_ep.maxxfer -=
1688 (ep->dwc_ep.maxxfer %
1689 ep->dwc_ep.maxpacket);
/* Same ZLP rule as EP0: exact-multiple, non-zero length. */
1693 if ((ep->dwc_ep.total_len %
1694 ep->dwc_ep.maxpacket == 0)
1695 && (ep->dwc_ep.total_len != 0)) {
1696 ep->dwc_ep.sent_zlp = 1;
1702 dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
/* Track the request whether it was started or merely queued. */
1707 ++pcd->request_pending;
1708 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
/* Slave (non-DMA) mode on a stopped IN EP: unmask the in-token-
 * received-with-TxFIFO-empty interrupt so the transfer can resume. */
1709 if (ep->dwc_ep.is_in && ep->stopped
1710 && !(GET_CORE_IF(pcd)->dma_enable)) {
1711 /** @todo NGS Create a function for this. */
1712 diepmsk_data_t diepmsk = {.d32 = 0 };
1713 diepmsk.b.intktxfemp = 1;
1714 if (GET_CORE_IF(pcd)->multiproc_int_enable) {
1715 dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->
1717 diepeachintmsk[ep->dwc_ep.num],
1720 dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->
1721 dev_global_regs->diepmsk, 0,
1727 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
/*
 * Remove a previously queued request (identified by req_handle) from an
 * endpoint and complete it with -DWC_E_RESTART.  Returns 0 on success,
 * -DWC_E_INVALID when the EP is bad or the request is not queued on it.
 */
1731 int dwc_otg_pcd_ep_dequeue(dwc_otg_pcd_t * pcd, void *ep_handle,
1734 unsigned long flags;
1735 dwc_otg_pcd_request_t *req;
1736 dwc_otg_pcd_ep_t *ep;
1738 ep = get_ep_from_handle(pcd, ep_handle);
/* NOTE(review): no NULL check on ep before dereferencing ep->desc --
 * same hazard as in dwc_otg_pcd_ep_queue. */
1739 if (!ep->desc && ep->dwc_ep.num != 0) {
1740 DWC_WARN("bad argument\n");
1741 return -DWC_E_INVALID;
1744 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
1746 /* make sure it's actually queued on this endpoint */
1747 DWC_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
1748 if (req->priv == (void *)req_handle) {
/* Loop fell through without a match: req is the list sentinel here.
 * NOTE(review): dereferencing req->priv after an exhausted FOREACH
 * relies on DWC_CIRCLEQ's sentinel being safely readable -- verify. */
1753 if (req->priv != (void *)req_handle) {
1754 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1755 return -DWC_E_INVALID;
1758 if (!DWC_CIRCLEQ_EMPTY_ENTRY(req, queue_entry)) {
/* Complete the matched request with "restart" status. */
1759 dwc_otg_request_done(ep, req, -DWC_E_RESTART);
1764 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1766 return req ? 0 : -DWC_E_SHUTDOWN;
/*
 * Set or clear the halt (STALL) condition on an endpoint.
 *
 * value: 0 = clear stall, 1 = set stall, 2 = clear the stall-clear
 * flag, 3 = set the stall-clear flag.  Refuses to stall an EP with a
 * transfer in progress (-DWC_E_AGAIN) and rejects ISOC EPs
 * (-DWC_E_INVALID).
 */
1770 int dwc_otg_pcd_ep_halt(dwc_otg_pcd_t * pcd, void *ep_handle, int value)
1772 dwc_otg_pcd_ep_t *ep;
1773 unsigned long flags;
1776 ep = get_ep_from_handle(pcd, ep_handle);
/* NOTE(review): ep is dereferenced without a NULL check here as well. */
1778 if ((!ep->desc && ep != &pcd->ep0) ||
1779 (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
1780 DWC_WARN("%s, bad ep\n", __func__);
1781 return -DWC_E_INVALID;
1784 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
/* Cannot change the stall state while requests are pending. */
1785 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1786 DWC_WARN("%d %s XFer In process\n", ep->dwc_ep.num,
1787 ep->dwc_ep.is_in ? "IN" : "OUT");
1788 retval = -DWC_E_AGAIN;
1789 } else if (value == 0) {
1790 dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
1791 } else if (value == 1) {
/* Descriptor-DMA IN EP: only stall when its Tx FIFO has fully
 * drained; otherwise ask the caller to retry. */
1792 if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
1793 dtxfsts_data_t txstatus;
1794 fifosize_data_t txfifosize;
1797 dwc_read_reg32(&GET_CORE_IF(pcd)->core_global_regs->
1798 dptxfsiz_dieptxf[ep->dwc_ep.
1801 dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->
1802 in_ep_regs[ep->dwc_ep.num]->dtxfsts);
/* Available space < depth means data is still in the FIFO. */
1804 if (txstatus.b.txfspcavail < txfifosize.b.depth) {
1805 DWC_WARN("%s() Data In Tx Fifo\n", __func__);
1806 retval = -DWC_E_AGAIN;
/* Stalling EP0 drives the control state machine to EP0_STALL. */
1808 if (ep->dwc_ep.num == 0) {
1809 pcd->ep0state = EP0_STALL;
1813 dwc_otg_ep_set_stall(GET_CORE_IF(pcd),
1817 if (ep->dwc_ep.num == 0) {
1818 pcd->ep0state = EP0_STALL;
1822 dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
/* Values 2/3 only toggle the software stall-clear flag. */
1824 } else if (value == 2) {
1825 ep->dwc_ep.stall_clear_flag = 0;
1826 } else if (value == 3) {
1827 ep->dwc_ep.stall_clear_flag = 1;
1830 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1836 * This function initiates remote wakeup of the host from suspend state.
 *
 * set != 0 asserts the RmtWkUpSig bit in DCTL, set == 0 clears it.
 * Only signals when the core reports suspend (DSTS.SuspSts) and the
 * host has enabled the DEVICE_REMOTE_WAKEUP feature.
1838 void dwc_otg_pcd_rem_wkup_from_suspend(dwc_otg_pcd_t * pcd, int set)
1840 dctl_data_t dctl = { 0 };
1841 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
/* (dsts's declaration is elided in this excerpt.) */
1844 dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts);
1845 if (!dsts.b.suspsts) {
/* NOTE(review): runtime string is ungrammatical ("while is not");
 * left unchanged here since it is emitted at runtime. */
1846 DWC_WARN("Remote wakeup while is not in suspend state\n");
1848 /* Check if DEVICE_REMOTE_WAKEUP feature enabled */
1849 if (pcd->remote_wakeup_enable) {
1851 dctl.b.rmtwkupsig = 1;
1852 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
1854 DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
1856 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
1858 DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
1861 DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
#ifdef CONFIG_USB_DWC_OTG_LPM
1867 * This function initiates remote wakeup of the host from L1 sleep state.
 *
 * Checks, in order: device is in L1 (GLPMCFG.prt_sleep_sts), host
 * allows remote wakeup (rem_wkup_en), and resume is permitted
 * (sleep_state_resumeok).  Then disables UTMI sleep, clears bit 4 of
 * hird_thres, and asserts DCTL.RmtWkUpSig.
1869 void dwc_otg_pcd_rem_wkup_from_sleep(dwc_otg_pcd_t * pcd, int set)
1871 glpmcfg_data_t lpmcfg;
1872 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1874 lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
1876 /* Check if we are in L1 state */
1877 if (!lpmcfg.b.prt_sleep_sts) {
1878 DWC_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
1882 /* Check if host allows remote wakeup */
1883 if (!lpmcfg.b.rem_wkup_en) {
1884 DWC_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
1888 /* Check if Resume OK */
1889 if (!lpmcfg.b.sleep_state_resumeok) {
1890 DWC_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
/* Re-read GLPMCFG before modifying it -- it may have changed since the
 * check above. */
1894 lpmcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->glpmcfg);
1895 lpmcfg.b.en_utmi_sleep = 0;
1896 lpmcfg.b.hird_thres &= (~(1 << 4));
1897 dwc_write_reg32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
1900 dctl_data_t dctl = {.d32 = 0 };
1901 dctl.b.rmtwkupsig = 1;
1902 /* Set RmtWkUpSig bit to start remote wakup signaling.
1903 * Hardware will automatically clear this bit.
1905 dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl,
1907 DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
1914 * Performs remote wakeup.
 *
 * Dispatches to the L1-sleep variant when LPM is compiled in and the
 * core is in DWC_OTG_L1, otherwise to the suspend variant.  Only acts
 * in device mode.
1916 void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t * pcd, int set)
1918 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1919 if (dwc_otg_is_device_mode(core_if)) {
1920 #ifdef CONFIG_USB_DWC_OTG_LPM
1921 if (core_if->lx_state == DWC_OTG_L1) {
1922 dwc_otg_pcd_rem_wkup_from_sleep(pcd, set);
/* (The else branch joining the two paths is elided in this excerpt.) */
1925 dwc_otg_pcd_rem_wkup_from_suspend(pcd, set);
1926 #ifdef CONFIG_USB_DWC_OTG_LPM
/*
 * Gadget-facing wakeup entry point.
 */
1933 int dwc_otg_pcd_wakeup(dwc_otg_pcd_t * pcd)
1936 gotgctl_data_t gotgctl;
1937 unsigned long flags;
1939 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
1942 * This function starts the Protocol if no session is in progress. If
1943 * a session is already in progress, but the device is suspended,
1944 * remote wakeup signaling is started.
1947 /* Check if valid session */
/* (The assignment target, gotgctl.d32, is on an elided line.) */
1949 dwc_read_reg32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
1950 if (gotgctl.b.bsesvld) {
1951 /* Check if suspend state */
/* (dsts's declaration and read are partially elided here.) */
1954 (GET_CORE_IF(pcd)->dev_if->dev_global_regs->
1956 if (dsts.b.suspsts) {
1957 dwc_otg_pcd_remote_wakeup(pcd, 1);
/* No valid B-session: start Session Request Protocol instead. */
1960 dwc_otg_pcd_initiate_srp(pcd);
1963 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1969 * Start the SRP timer to detect when the SRP does not complete within
 * the expected window (6 seconds).
1972 * @param pcd the pcd structure.
1974 void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t * pcd)
/* Mark the timer as running so the completion path knows to cancel it. */
1976 GET_CORE_IF(pcd)->srp_timer_started = 1;
1977 DWC_TIMER_SCHEDULE(pcd->srp_timer, 6000 /* 6 secs */ );
/*
 * Initiate the Session Request Protocol: set the session-request bit in
 * GOTGCTL (unless already active) and start the SRP timeout timer.
 */
1980 void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t * pcd)
1983 (uint32_t *) & (GET_CORE_IF(pcd)->core_global_regs->gotgctl);
1987 val.d32 = dwc_read_reg32(addr);
/* (The sesreq check guarding this error is elided.) */
1989 DWC_ERROR("Session Request Already active!\n");
/* NOTE(review): "Initated" is a typo for "Initiated" in the runtime
 * string; left unchanged here since it is emitted at runtime. */
1993 DWC_INFO("Session Request Initated\n"); //NOTICE
1994 mem.d32 = dwc_read_reg32(addr);
1996 dwc_write_reg32(addr, mem.d32);
1998 /* Start the SRP timer */
1999 dwc_otg_pcd_start_srp_timer(pcd);
/* Return the current USB (micro)frame number from the core. */
2003 int dwc_otg_pcd_get_frame_number(dwc_otg_pcd_t * pcd)
2005 return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
/* Return nonzero when LPM support is enabled in the core parameters. */
2008 int dwc_otg_pcd_is_lpm_enabled(dwc_otg_pcd_t * pcd)
2010 return GET_CORE_IF(pcd)->core_params->lpm_enable;
/* Accessor: b_hnp_enable flag (set via SET_FEATURE by the host). */
2013 uint32_t get_b_hnp_enable(dwc_otg_pcd_t * pcd)
2015 return pcd->b_hnp_enable;
/* Accessor: a_hnp_support flag. */
2018 uint32_t get_a_hnp_support(dwc_otg_pcd_t * pcd)
2020 return pcd->a_hnp_support;
/* Accessor: a_alt_hnp_support flag. */
2023 uint32_t get_a_alt_hnp_support(dwc_otg_pcd_t * pcd)
2025 return pcd->a_alt_hnp_support;
/* Accessor: whether the host enabled the DEVICE_REMOTE_WAKEUP feature. */
2028 int dwc_otg_pcd_get_rmwkup_enable(dwc_otg_pcd_t * pcd)
2030 return pcd->remote_wakeup_enable;
2033 #endif /* DWC_HOST_ONLY */