1 /* ==========================================================================
2 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $
7 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9 * otherwise expressly agreed to in writing between Synopsys and you.
11 * The Software IS NOT an item of Licensed Software or Licensed Product under
12 * any End User Software License Agreement or Agreement for Licensed Product
13 * with Synopsys or any supplement thereto. You are permitted to use and
14 * redistribute this Software in source and binary forms, with or without
15 * modification, provided that redistributions of source code must retain this
16 * notice. You may not view, use, disclose, copy or distribute this file or
17 * any information contained herein except pursuant to this license grant from
18 * Synopsys. If you do not agree with this notice, including the disclaimer
19 * below, then you are not authorized to use the Software.
21 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
32 * ========================================================================== */
36 * This file implements the PCD Core. All code in this file is portable and doesn't
37 * use any OS-specific functions.
38 * The PCD Core provides the interface defined in the <code><dwc_otg_pcd_if.h></code>
39 * header file, which can be used to implement an OS-specific PCD interface.
41 * An important function of the PCD is managing interrupts generated
42 * by the DWC_otg controller. The implementation of the DWC_otg device
43 * mode interrupt service routines is in dwc_otg_pcd_intr.c.
45 * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
46 * @todo Does it work when the request size is greater than DEPTSIZ
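 *
 * A minimal usage sketch, assuming the interface declared in
 * <code><dwc_otg_pcd_if.h></code>; "os_fops" is only an illustrative name
 * for the OS layer's dwc_otg_pcd_function_ops table (providing .complete,
 * .suspend, .resume, ...):
 *
 *	dwc_otg_pcd_t *pcd = dwc_otg_pcd_init(otg_dev);
 *	if (!pcd)
 *		return;		// allocation/initialization failed
 *	dwc_otg_pcd_start(pcd, &os_fops);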
51 #include "dwc_otg_pcd.h"
54 #include "dwc_otg_cfi.h"
56 extern int init_cfi(cfiobject_t * cfiobj);
60 * Chooses the endpoint from the EP arrays using the usb_ep handle.
62 static dwc_otg_pcd_ep_t *get_ep_from_handle(dwc_otg_pcd_t * pcd, void *handle)
65 if (pcd->ep0.priv == handle) {
68 for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
69 if (pcd->in_ep[i].priv == handle)
70 return &pcd->in_ep[i];
71 if (pcd->out_ep[i].priv == handle)
72 return &pcd->out_ep[i];
79 * This function completes a request. It calls the request callback.
81 void dwc_otg_request_done(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req,
84 unsigned stopped = ep->stopped;
86 DWC_DEBUGPL(DBG_PCDV, "%s(ep %p req %p)\n", __func__, ep, req);
87 DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
89 /* don't modify queue heads during completion callback */
91 /* spin_unlock/spin_lock now done in fops->complete() */
92 ep->pcd->fops->complete(ep->pcd, ep->priv, req->priv, status,
95 if (ep->pcd->request_pending > 0) {
96 --ep->pcd->request_pending;
99 ep->stopped = stopped;
104 * This function terminates all the requests in the EP request queue.
106 void dwc_otg_request_nuke(dwc_otg_pcd_ep_t * ep)
108 dwc_otg_pcd_request_t *req;
112 /* called with irqs blocked?? */
113 while (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
114 req = DWC_CIRCLEQ_FIRST(&ep->queue);
115 dwc_otg_request_done(ep, req, -DWC_E_SHUTDOWN);
119 void dwc_otg_pcd_start(dwc_otg_pcd_t * pcd,
120 const struct dwc_otg_pcd_function_ops *fops)
126 * PCD Callback function for initializing the PCD when switching to
129 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
131 static int32_t dwc_otg_pcd_start_cb(void *p)
133 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
134 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
137 	 * Initialize the Core for Device mode.
139 if (dwc_otg_is_device_mode(core_if)) {
140 dwc_otg_core_dev_init(core_if);
141 /* Set core_if's lock pointer to the pcd->lock */
142 core_if->lock = pcd->lock;
147 /** CFI-specific buffer allocation function for EP */
149 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
150 size_t buflen, int flags)
152 dwc_otg_pcd_ep_t *ep;
153 ep = get_ep_from_handle(pcd, pep);
155 DWC_WARN("bad ep\n");
156 return -DWC_E_INVALID;
159 return pcd->cfi->ops.ep_alloc_buf(pcd->cfi, pcd, ep, addr, buflen,
163 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
164 size_t buflen, int flags);
168 * PCD Callback function for notifying the PCD when resuming from
171 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
173 static int32_t dwc_otg_pcd_resume_cb(void *p)
175 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
177 if (pcd->fops->resume) {
178 pcd->fops->resume(pcd);
181 /* Stop the SRP timeout timer. */
182 if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS)
183 || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
184 if (GET_CORE_IF(pcd)->srp_timer_started) {
185 GET_CORE_IF(pcd)->srp_timer_started = 0;
186 DWC_TIMER_CANCEL(GET_CORE_IF(pcd)->srp_timer);
193 * PCD Callback function for notifying the PCD that the device is suspended.
195 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
197 static int32_t dwc_otg_pcd_suspend_cb(void *p)
199 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
201 if (pcd->fops->suspend) {
202 DWC_SPINUNLOCK(pcd->lock);
203 pcd->fops->suspend(pcd);
204 DWC_SPINLOCK(pcd->lock);
211 * PCD Callback function for stopping the PCD when switching to Host
214 * @param p void pointer to the <code>dwc_otg_pcd_t</code>
216 static int32_t dwc_otg_pcd_stop_cb(void *p)
218 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
219 extern void dwc_otg_pcd_stop(dwc_otg_pcd_t * _pcd);
221 dwc_otg_pcd_stop(pcd);
226 * PCD Callback structure for handling mode switching.
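 * Note: this table is registered with the CIL from dwc_otg_pcd_init() via
 * dwc_otg_cil_register_pcd_callbacks(), which also fills in the .p context
 * pointer (the pcd) that is passed back to each handler.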
228 static dwc_otg_cil_callbacks_t pcd_callbacks = {
229 .start = dwc_otg_pcd_start_cb,
230 .stop = dwc_otg_pcd_stop_cb,
231 .suspend = dwc_otg_pcd_suspend_cb,
232 .resume_wakeup = dwc_otg_pcd_resume_cb,
233 .p = 0, /* Set at registration */
237 * This function allocates a DMA Descriptor chain for the Endpoint
238 * buffer to be used for a transfer to/from the specified endpoint.
240 dwc_otg_dev_dma_desc_t *dwc_otg_ep_alloc_desc_chain(struct device *dev,
241 dwc_dma_t * dma_desc_addr,
244 return DWC_DMA_ALLOC_ATOMIC(dev, count * sizeof(dwc_otg_dev_dma_desc_t),
249 * This function frees a DMA Descriptor chain that was allocated by dwc_otg_ep_alloc_desc_chain.
251 void dwc_otg_ep_free_desc_chain(struct device *dev,
252 dwc_otg_dev_dma_desc_t * desc_addr,
253 uint32_t dma_desc_addr, uint32_t count)
255 DWC_DMA_FREE(dev, count * sizeof(dwc_otg_dev_dma_desc_t), desc_addr,
262 * This function initializes a descriptor chain for Isochronous transfer
264 * @param core_if Programming view of DWC_otg controller.
265 * @param dwc_ep The EP to start the transfer on.
268 void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t * core_if,
272 dsts_data_t dsts = {.d32 = 0 };
273 depctl_data_t depctl = {.d32 = 0 };
274 volatile uint32_t *addr;
279 dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval;
282 dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
285 /** Allocate descriptors for double buffering */
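	/*
	 * Two identical sets of desc_cnt descriptors are allocated so the
	 * controller can ping-pong between dma_addr0 and dma_addr1; see the
	 * "Buffer 0/1 descriptors setup" blocks below and the proc_buf_num
	 * toggling in dwc_otg_iso_buffer_done().
	 */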
286 dwc_ep->iso_desc_addr =
287 dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,
288 dwc_ep->desc_cnt * 2);
289 	if (!dwc_ep->iso_desc_addr) {
290 DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
294 dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
297 if (dwc_ep->is_in == 0) {
298 dev_dma_desc_sts_t sts = {.d32 = 0 };
299 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
301 uint32_t data_per_desc;
302 dwc_otg_dev_out_ep_regs_t *out_regs =
303 core_if->dev_if->out_ep_regs[dwc_ep->num];
306 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
307 dma_ad = (dma_addr_t) DWC_READ_REG32(&(out_regs->doepdma));
309 /** Buffer 0 descriptors setup */
310 dma_ad = dwc_ep->dma_addr0;
312 sts.b_iso_out.bs = BS_HOST_READY;
313 sts.b_iso_out.rxsts = 0;
315 sts.b_iso_out.sp = 0;
316 sts.b_iso_out.ioc = 0;
317 sts.b_iso_out.pid = 0;
318 sts.b_iso_out.framenum = 0;
321 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
322 i += dwc_ep->pkt_per_frm) {
324 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
325 uint32_t len = (j + 1) * dwc_ep->maxpacket;
326 if (len > dwc_ep->data_per_frame)
328 dwc_ep->data_per_frame -
329 j * dwc_ep->maxpacket;
331 data_per_desc = dwc_ep->maxpacket;
332 len = data_per_desc % 4;
334 data_per_desc += 4 - len;
336 sts.b_iso_out.rxbytes = data_per_desc;
337 dma_desc->buf = dma_ad;
338 dma_desc->status.d32 = sts.d32;
340 offset += data_per_desc;
342 dma_ad += data_per_desc;
346 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
347 uint32_t len = (j + 1) * dwc_ep->maxpacket;
348 if (len > dwc_ep->data_per_frame)
350 dwc_ep->data_per_frame -
351 j * dwc_ep->maxpacket;
353 data_per_desc = dwc_ep->maxpacket;
354 len = data_per_desc % 4;
356 data_per_desc += 4 - len;
357 sts.b_iso_out.rxbytes = data_per_desc;
358 dma_desc->buf = dma_ad;
359 dma_desc->status.d32 = sts.d32;
361 offset += data_per_desc;
363 dma_ad += data_per_desc;
366 sts.b_iso_out.ioc = 1;
367 len = (j + 1) * dwc_ep->maxpacket;
368 if (len > dwc_ep->data_per_frame)
370 dwc_ep->data_per_frame - j * dwc_ep->maxpacket;
372 data_per_desc = dwc_ep->maxpacket;
373 len = data_per_desc % 4;
375 data_per_desc += 4 - len;
376 sts.b_iso_out.rxbytes = data_per_desc;
378 dma_desc->buf = dma_ad;
379 dma_desc->status.d32 = sts.d32;
382 /** Buffer 1 descriptors setup */
383 sts.b_iso_out.ioc = 0;
384 dma_ad = dwc_ep->dma_addr1;
387 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
388 i += dwc_ep->pkt_per_frm) {
389 for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
390 uint32_t len = (j + 1) * dwc_ep->maxpacket;
391 if (len > dwc_ep->data_per_frame)
393 dwc_ep->data_per_frame -
394 j * dwc_ep->maxpacket;
396 data_per_desc = dwc_ep->maxpacket;
397 len = data_per_desc % 4;
399 data_per_desc += 4 - len;
402 sts.b_iso_out.rxbytes = data_per_desc;
403 dma_desc->buf = dma_ad;
404 dma_desc->status.d32 = sts.d32;
406 offset += data_per_desc;
408 dma_ad += data_per_desc;
411 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
413 ((j + 1) * dwc_ep->maxpacket >
414 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
415 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
417 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
418 sts.b_iso_out.rxbytes = data_per_desc;
419 dma_desc->buf = dma_ad;
420 dma_desc->status.d32 = sts.d32;
422 offset += data_per_desc;
424 dma_ad += data_per_desc;
427 sts.b_iso_out.ioc = 1;
430 ((j + 1) * dwc_ep->maxpacket >
431 dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
432 j * dwc_ep->maxpacket : dwc_ep->maxpacket;
434 (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
435 sts.b_iso_out.rxbytes = data_per_desc;
437 dma_desc->buf = dma_ad;
438 dma_desc->status.d32 = sts.d32;
440 dwc_ep->next_frame = 0;
442 /** Write dma_ad into DOEPDMA register */
443 DWC_WRITE_REG32(&(out_regs->doepdma),
444 (uint32_t) dwc_ep->iso_dma_desc_addr);
449 dev_dma_desc_sts_t sts = {.d32 = 0 };
450 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
452 dwc_otg_dev_in_ep_regs_t *in_regs =
453 core_if->dev_if->in_ep_regs[dwc_ep->num];
454 unsigned int frmnumber;
455 fifosize_data_t txfifosize, rxfifosize;
458 DWC_READ_REG32(&core_if->dev_if->in_ep_regs[dwc_ep->num]->
461 DWC_READ_REG32(&core_if->core_global_regs->grxfsiz);
463 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
465 dma_ad = dwc_ep->dma_addr0;
468 DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
470 sts.b_iso_in.bs = BS_HOST_READY;
471 sts.b_iso_in.txsts = 0;
473 (dwc_ep->data_per_frame % dwc_ep->maxpacket) ? 1 : 0;
474 sts.b_iso_in.ioc = 0;
475 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
477 frmnumber = dwc_ep->next_frame;
479 sts.b_iso_in.framenum = frmnumber;
480 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
483 /** Buffer 0 descriptors setup */
484 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
485 dma_desc->buf = dma_ad;
486 dma_desc->status.d32 = sts.d32;
489 dma_ad += dwc_ep->data_per_frame;
490 sts.b_iso_in.framenum += dwc_ep->bInterval;
493 sts.b_iso_in.ioc = 1;
494 dma_desc->buf = dma_ad;
495 dma_desc->status.d32 = sts.d32;
498 /** Buffer 1 descriptors setup */
499 sts.b_iso_in.ioc = 0;
500 dma_ad = dwc_ep->dma_addr1;
502 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
503 i += dwc_ep->pkt_per_frm) {
504 dma_desc->buf = dma_ad;
505 dma_desc->status.d32 = sts.d32;
508 dma_ad += dwc_ep->data_per_frame;
509 sts.b_iso_in.framenum += dwc_ep->bInterval;
511 sts.b_iso_in.ioc = 0;
513 sts.b_iso_in.ioc = 1;
516 dma_desc->buf = dma_ad;
517 dma_desc->status.d32 = sts.d32;
519 dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval;
521 /** Write dma_ad into diepdma register */
522 DWC_WRITE_REG32(&(in_regs->diepdma),
523 (uint32_t) dwc_ep->iso_dma_desc_addr);
525 /** Enable endpoint, clear nak */
528 depctl.b.usbactep = 1;
531 DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
532 depctl.d32 = DWC_READ_REG32(addr);
536 * This function starts an Isochronous transfer in buffer DMA mode (without a descriptor chain)
538 * @param core_if Programming view of DWC_otg controller.
539 * @param ep The EP to start the transfer on.
542 void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t * core_if,
545 depctl_data_t depctl = {.d32 = 0 };
546 volatile uint32_t *addr;
549 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
551 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
554 if (core_if->dma_enable == 0 || core_if->dma_desc_enable != 0) {
557 deptsiz_data_t deptsiz = {.d32 = 0 };
560 ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval;
562 (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
565 (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
567 (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
570 		/* Program the transfer size and packet count as follows:
571 		 * xfersize = N * maxpacket + short_packet,
572 		 * pktcnt = N + (short_packet exists ? 1 : 0) */
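		/* Worked example: with xfer_len = 1000 and maxpacket = 512 the
		 * computation below gives pktcnt = (1000 - 1 + 512) / 512 = 2,
		 * i.e. one full 512-byte packet plus one 488-byte short packet.
		 */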
575 deptsiz.b.mc = ep->pkt_per_frm;
576 deptsiz.b.xfersize = ep->xfer_len;
578 (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
579 DWC_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
580 dieptsiz, deptsiz.d32);
582 /* Write the DMA register */
584 (core_if->dev_if->in_ep_regs[ep->num]->
585 diepdma), (uint32_t) ep->dma_addr);
589 (ep->xfer_len + (ep->maxpacket - 1)) /
591 deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
593 DWC_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
594 doeptsiz, deptsiz.d32);
596 /* Write the DMA register */
598 (core_if->dev_if->out_ep_regs[ep->num]->
599 doepdma), (uint32_t) ep->dma_addr);
602 /** Enable endpoint, clear nak */
607 DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
612 * This function does the setup for a data transfer for an EP and
613 * starts the transfer. For an IN transfer, the packets will be
614 * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
615 * the packets are unloaded from the Rx FIFO in the ISR.
617 * @param core_if Programming view of DWC_otg controller.
618 * @param ep The EP to start the transfer on.
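 *
 * Note: with descriptor DMA enabled this routine dispatches to
 * dwc_otg_iso_ep_start_ddma_transfer(); with buffer DMA plus the PTI
 * enhancement it uses dwc_otg_iso_ep_start_buf_transfer(); otherwise it
 * falls back to frame-by-frame transfers via dwc_otg_iso_ep_start_frm_transfer().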
621 static void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t * core_if,
624 if (core_if->dma_enable) {
625 if (core_if->dma_desc_enable) {
627 ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
629 ep->desc_cnt = ep->pkt_cnt;
631 dwc_otg_iso_ep_start_ddma_transfer(core_if, ep);
633 if (core_if->pti_enh_enable) {
634 dwc_otg_iso_ep_start_buf_transfer(core_if, ep);
637 (ep->proc_buf_num) ? ep->xfer_buff1 : ep->
639 ep->cur_pkt_dma_addr =
640 (ep->proc_buf_num) ? ep->dma_addr1 : ep->
642 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
647 (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
648 ep->cur_pkt_dma_addr =
649 (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
650 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
655 * This function stops the transfer for an EP and
656 * resets the EP's variables.
658 * @param core_if Programming view of DWC_otg controller.
659 * @param ep The EP to stop the transfer on.
662 void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t * core_if, dwc_ep_t * ep)
664 depctl_data_t depctl = {.d32 = 0 };
665 volatile uint32_t *addr;
667 if (ep->is_in == 1) {
668 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
670 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
674 depctl.d32 = DWC_READ_REG32(addr);
679 DWC_WRITE_REG32(addr, depctl.d32);
681 if (core_if->dma_desc_enable &&
682 ep->iso_desc_addr && ep->iso_dma_desc_addr) {
683 dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,
684 ep->iso_dma_desc_addr,
688 	/* reset variables */
693 ep->data_per_frame = 0;
694 ep->data_pattern_frame = 0;
696 ep->buf_proc_intrvl = 0;
698 ep->proc_buf_num = 0;
702 ep->iso_desc_addr = 0;
703 ep->iso_dma_desc_addr = 0;
706 int dwc_otg_pcd_iso_ep_start(dwc_otg_pcd_t * pcd, void *ep_handle,
707 uint8_t * buf0, uint8_t * buf1, dwc_dma_t dma0,
708 dwc_dma_t dma1, int sync_frame, int dp_frame,
709 int data_per_frame, int start_frame,
710 int buf_proc_intrvl, void *req_handle,
713 dwc_otg_pcd_ep_t *ep;
714 dwc_irqflags_t flags = 0;
718 dwc_otg_core_if_t *core_if;
720 ep = get_ep_from_handle(pcd, ep_handle);
722 if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
723 DWC_WARN("bad ep\n");
724 return -DWC_E_INVALID;
727 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
728 core_if = GET_CORE_IF(pcd);
729 dwc_ep = &ep->dwc_ep;
731 if (ep->iso_req_handle) {
732 DWC_WARN("ISO request in progress\n");
735 dwc_ep->dma_addr0 = dma0;
736 dwc_ep->dma_addr1 = dma1;
738 dwc_ep->xfer_buff0 = buf0;
739 dwc_ep->xfer_buff1 = buf1;
741 dwc_ep->data_per_frame = data_per_frame;
743 /** @todo - pattern data support is to be implemented in the future */
744 dwc_ep->data_pattern_frame = dp_frame;
745 dwc_ep->sync_frame = sync_frame;
747 dwc_ep->buf_proc_intrvl = buf_proc_intrvl;
749 dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1);
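	/*
	 * Per the USB 2.0 spec, bInterval for isochronous endpoints is an
	 * exponent: the service interval is 2^(bInterval - 1) (micro)frames,
	 * hence the shift above.
	 */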
751 dwc_ep->proc_buf_num = 0;
753 dwc_ep->pkt_per_frm = 0;
754 frm_data = ep->dwc_ep.data_per_frame;
755 while (frm_data > 0) {
756 dwc_ep->pkt_per_frm++;
757 frm_data -= ep->dwc_ep.maxpacket;
760 dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
762 if (start_frame == -1) {
763 dwc_ep->next_frame = dsts.b.soffn + 1;
764 if (dwc_ep->bInterval != 1) {
766 dwc_ep->next_frame + (dwc_ep->bInterval - 1 -
771 dwc_ep->next_frame = start_frame;
774 if (!core_if->pti_enh_enable) {
776 dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
780 (dwc_ep->data_per_frame *
781 (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval)
782 - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket;
785 if (core_if->dma_desc_enable) {
787 dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
793 DWC_ALLOC_ATOMIC(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
796 DWC_ALLOC(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
798 if (!dwc_ep->pkt_info) {
799 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
800 return -DWC_E_NO_MEMORY;
802 if (core_if->pti_enh_enable) {
803 dwc_memset(dwc_ep->pkt_info, 0,
804 sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
808 ep->iso_req_handle = req_handle;
810 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
811 dwc_otg_iso_ep_start_transfer(core_if, dwc_ep);
815 int dwc_otg_pcd_iso_ep_stop(dwc_otg_pcd_t * pcd, void *ep_handle,
818 dwc_irqflags_t flags = 0;
819 dwc_otg_pcd_ep_t *ep;
822 ep = get_ep_from_handle(pcd, ep_handle);
823 if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
824 DWC_WARN("bad ep\n");
825 return -DWC_E_INVALID;
827 dwc_ep = &ep->dwc_ep;
829 dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep);
831 DWC_FREE(dwc_ep->pkt_info);
832 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
833 if (ep->iso_req_handle != req_handle) {
834 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
835 return -DWC_E_INVALID;
838 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
840 ep->iso_req_handle = 0;
845 * This function is used for periodic data exchange between the PCD and
846 * the gadget driver for Isochronous EPs.
848 * - Every time a sync period completes this function is called to
849 * perform data exchange between PCD and gadget
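 *   The gadget's isoc_complete() callback is invoked with
 *   (proc_buf_num ^ 1), so the two halves of the double buffer are handed
 *   back to the gadget alternately.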
851 void dwc_otg_iso_buffer_done(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * ep,
857 dwc_ep = &ep->dwc_ep;
859 DWC_SPINUNLOCK(ep->pcd->lock);
860 pcd->fops->isoc_complete(pcd, ep->priv, ep->iso_req_handle,
861 dwc_ep->proc_buf_num ^ 0x1);
862 DWC_SPINLOCK(ep->pcd->lock);
864 for (i = 0; i < dwc_ep->pkt_cnt; ++i) {
865 dwc_ep->pkt_info[i].status = 0;
866 dwc_ep->pkt_info[i].offset = 0;
867 dwc_ep->pkt_info[i].length = 0;
871 int dwc_otg_pcd_get_iso_packet_count(dwc_otg_pcd_t * pcd, void *ep_handle,
872 void *iso_req_handle)
874 dwc_otg_pcd_ep_t *ep;
877 ep = get_ep_from_handle(pcd, ep_handle);
878 	if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
879 DWC_WARN("bad ep\n");
880 return -DWC_E_INVALID;
882 dwc_ep = &ep->dwc_ep;
884 return dwc_ep->pkt_cnt;
887 void dwc_otg_pcd_get_iso_packet_params(dwc_otg_pcd_t * pcd, void *ep_handle,
888 void *iso_req_handle, int packet,
889 int *status, int *actual, int *offset)
891 dwc_otg_pcd_ep_t *ep;
894 ep = get_ep_from_handle(pcd, ep_handle);
896 DWC_WARN("bad ep\n");
898 dwc_ep = &ep->dwc_ep;
900 *status = dwc_ep->pkt_info[packet].status;
901 *actual = dwc_ep->pkt_info[packet].length;
902 *offset = dwc_ep->pkt_info[packet].offset;
905 #endif /* DWC_EN_ISOC */
907 static void dwc_otg_pcd_init_ep(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * pcd_ep,
908 uint32_t is_in, uint32_t ep_num)
910 /* Init EP structure */
914 pcd_ep->queue_sof = 0;
916 /* Init DWC ep structure */
917 pcd_ep->dwc_ep.is_in = is_in;
918 pcd_ep->dwc_ep.num = ep_num;
919 pcd_ep->dwc_ep.active = 0;
920 pcd_ep->dwc_ep.tx_fifo_num = 0;
921 	/* Control until ep is activated */
922 pcd_ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
923 pcd_ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
924 pcd_ep->dwc_ep.dma_addr = 0;
925 pcd_ep->dwc_ep.start_xfer_buff = 0;
926 pcd_ep->dwc_ep.xfer_buff = 0;
927 pcd_ep->dwc_ep.xfer_len = 0;
928 pcd_ep->dwc_ep.xfer_count = 0;
929 pcd_ep->dwc_ep.sent_zlp = 0;
930 pcd_ep->dwc_ep.total_len = 0;
931 pcd_ep->dwc_ep.desc_addr = 0;
932 pcd_ep->dwc_ep.dma_desc_addr = 0;
933 DWC_CIRCLEQ_INIT(&pcd_ep->queue);
939 static void dwc_otg_pcd_reinit(dwc_otg_pcd_t * pcd)
943 dwc_otg_pcd_ep_t *ep;
944 int in_ep_cntr, out_ep_cntr;
945 uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
946 uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
949 * Initialize the EP0 structure.
952 dwc_otg_pcd_init_ep(pcd, ep, 0, 0);
955 hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
956 for (i = 1; in_ep_cntr < num_in_eps; i++) {
957 if ((hwcfg1 & 0x1) == 0) {
958 dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
961 * @todo NGS: Add direction to EP, based on contents
962 * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
965 dwc_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);
967 DWC_CIRCLEQ_INIT(&ep->queue);
973 hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
974 for (i = 1; out_ep_cntr < num_out_eps; i++) {
975 if ((hwcfg1 & 0x1) == 0) {
976 dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
979 * @todo NGS: Add direction to EP, based on contents
980 * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
983 dwc_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
984 DWC_CIRCLEQ_INIT(&ep->queue);
989 pcd->ep0state = EP0_DISCONNECT;
990 pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
991 pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
995 * This function is called when the SRP timer expires. The SRP should
996 * complete within 6 seconds.
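 * When ADP is not handling the session: if SRP succeeded (srp_success set
 * and the B-session is valid) the handler kicks the registered
 * resume_wakeup callback; otherwise it clears GOTGCTL.SesReq and reports
 * that the device is not connected/responding.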
998 static void srp_timeout(void *ptr)
1000 gotgctl_data_t gotgctl;
1001 dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *) ptr;
1002 volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;
1004 gotgctl.d32 = DWC_READ_REG32(addr);
1006 core_if->srp_timer_started = 0;
1008 if (core_if->adp_enable) {
1009 if (gotgctl.b.bsesvld == 0) {
1010 gpwrdn_data_t gpwrdn = {.d32 = 0 };
1011 DWC_PRINTF("SRP Timeout BSESSVLD = 0\n");
1012 /* Power off the core */
1013 if (core_if->power_down == 2) {
1014 gpwrdn.b.pwrdnswtch = 1;
1015 DWC_MODIFY_REG32(&core_if->
1016 core_global_regs->gpwrdn,
1021 gpwrdn.b.pmuintsel = 1;
1022 gpwrdn.b.pmuactv = 1;
1023 DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
1025 dwc_otg_adp_probe_start(core_if);
1027 DWC_PRINTF("SRP Timeout BSESSVLD = 1\n");
1028 core_if->op_state = B_PERIPHERAL;
1029 dwc_otg_core_init(core_if);
1030 dwc_otg_enable_global_interrupts(core_if);
1031 cil_pcd_start(core_if);
1035 if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
1036 (core_if->core_params->i2c_enable)) {
1037 DWC_PRINTF("SRP Timeout\n");
1039 if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
1040 if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
1041 core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
1044 /* Clear Session Request */
1046 gotgctl.b.sesreq = 1;
1047 DWC_MODIFY_REG32(&core_if->core_global_regs->gotgctl,
1050 core_if->srp_success = 0;
1052 __DWC_ERROR("Device not connected/responding\n");
1053 gotgctl.b.sesreq = 0;
1054 DWC_WRITE_REG32(addr, gotgctl.d32);
1056 } else if (gotgctl.b.sesreq) {
1057 DWC_PRINTF("SRP Timeout\n");
1059 __DWC_ERROR("Device not connected/responding\n");
1060 gotgctl.b.sesreq = 0;
1061 DWC_WRITE_REG32(addr, gotgctl.d32);
1063 DWC_PRINTF(" SRP GOTGCTL=%0x\n", gotgctl.d32);
1071 extern void start_next_request(dwc_otg_pcd_ep_t * ep);
1073 static void start_xfer_tasklet_func(void *data)
1075 dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1076 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1079 depctl_data_t diepctl;
1081 DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
1083 diepctl.d32 = DWC_READ_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl);
1085 if (pcd->ep0.queue_sof) {
1086 pcd->ep0.queue_sof = 0;
1087 start_next_request(&pcd->ep0);
1091 for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
1092 depctl_data_t diepctl;
1094 DWC_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
1096 if (pcd->in_ep[i].queue_sof) {
1097 pcd->in_ep[i].queue_sof = 0;
1098 start_next_request(&pcd->in_ep[i]);
1107 * This function initializes the PCD portion of the driver.
1110 dwc_otg_pcd_t *dwc_otg_pcd_init(dwc_otg_device_t *otg_dev)
1112 struct device *dev = &otg_dev->os_dep.platformdev->dev;
1113 dwc_otg_core_if_t *core_if = otg_dev->core_if;
1114 dwc_otg_pcd_t *pcd = NULL;
1115 dwc_otg_dev_if_t *dev_if;
1119 * Allocate PCD structure
1121 pcd = DWC_ALLOC(sizeof(dwc_otg_pcd_t));
1127 #if (defined(DWC_LINUX) && defined(CONFIG_DEBUG_SPINLOCK))
1128 DWC_SPINLOCK_ALLOC_LINUX_DEBUG(pcd->lock);
1130 pcd->lock = DWC_SPINLOCK_ALLOC();
1132 DWC_DEBUGPL(DBG_HCDV, "Init of PCD %p given core_if %p\n",
1133 pcd, core_if);//GRAYG
1135 DWC_ERROR("Could not allocate lock for pcd");
1139 	/* Set core_if's lock pointer to pcd->lock */
1140 core_if->lock = pcd->lock;
1141 pcd->core_if = core_if;
1143 dev_if = core_if->dev_if;
1144 dev_if->isoc_ep = NULL;
1146 if (core_if->hwcfg4.b.ded_fifo_en) {
1147 DWC_PRINTF("Dedicated Tx FIFOs mode\n");
1149 DWC_PRINTF("Shared Tx FIFO mode\n");
1153 	 * Initialize the Core for Device mode here if there is no ADP support.
1154 	 * Otherwise it will be done later in the dwc_otg_adp_start routine.
1156 if (dwc_otg_is_device_mode(core_if) /*&& !core_if->adp_enable*/) {
1157 dwc_otg_core_dev_init(core_if);
1161 * Register the PCD Callbacks.
1163 dwc_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
1166 * Initialize the DMA buffer for SETUP packets
1168 if (GET_CORE_IF(pcd)->dma_enable) {
1170 DWC_DMA_ALLOC(dev, sizeof(*pcd->setup_pkt) * 5,
1171 &pcd->setup_pkt_dma_handle);
1172 if (pcd->setup_pkt == NULL) {
1178 DWC_DMA_ALLOC(dev, sizeof(uint16_t),
1179 &pcd->status_buf_dma_handle);
1180 if (pcd->status_buf == NULL) {
1181 DWC_DMA_FREE(dev, sizeof(*pcd->setup_pkt) * 5,
1182 pcd->setup_pkt, pcd->setup_pkt_dma_handle);
1187 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1188 dev_if->setup_desc_addr[0] =
1189 dwc_otg_ep_alloc_desc_chain(dev,
1190 &dev_if->dma_setup_desc_addr[0], 1);
1191 dev_if->setup_desc_addr[1] =
1192 dwc_otg_ep_alloc_desc_chain(dev,
1193 &dev_if->dma_setup_desc_addr[1], 1);
1194 dev_if->in_desc_addr =
1195 dwc_otg_ep_alloc_desc_chain(dev,
1196 &dev_if->dma_in_desc_addr, 1);
1197 dev_if->out_desc_addr =
1198 dwc_otg_ep_alloc_desc_chain(dev,
1199 &dev_if->dma_out_desc_addr, 1);
1200 pcd->data_terminated = 0;
1202 if (dev_if->setup_desc_addr[0] == 0
1203 || dev_if->setup_desc_addr[1] == 0
1204 || dev_if->in_desc_addr == 0
1205 || dev_if->out_desc_addr == 0) {
1207 if (dev_if->out_desc_addr)
1208 dwc_otg_ep_free_desc_chain(dev,
1209 dev_if->out_desc_addr,
1210 dev_if->dma_out_desc_addr, 1);
1211 if (dev_if->in_desc_addr)
1212 dwc_otg_ep_free_desc_chain(dev,
1213 dev_if->in_desc_addr,
1214 dev_if->dma_in_desc_addr, 1);
1215 if (dev_if->setup_desc_addr[1])
1216 dwc_otg_ep_free_desc_chain(dev,
1217 dev_if->setup_desc_addr[1],
1218 dev_if->dma_setup_desc_addr[1], 1);
1219 if (dev_if->setup_desc_addr[0])
1220 dwc_otg_ep_free_desc_chain(dev,
1221 dev_if->setup_desc_addr[0],
1222 dev_if->dma_setup_desc_addr[0], 1);
1224 DWC_DMA_FREE(dev, sizeof(*pcd->setup_pkt) * 5,
1226 pcd->setup_pkt_dma_handle);
1227 DWC_DMA_FREE(dev, sizeof(*pcd->status_buf),
1229 pcd->status_buf_dma_handle);
1237 pcd->setup_pkt = DWC_ALLOC(sizeof(*pcd->setup_pkt) * 5);
1238 if (pcd->setup_pkt == NULL) {
1243 pcd->status_buf = DWC_ALLOC(sizeof(uint16_t));
1244 if (pcd->status_buf == NULL) {
1245 DWC_FREE(pcd->setup_pkt);
1251 dwc_otg_pcd_reinit(pcd);
1253 /* Allocate the cfi object for the PCD */
1255 pcd->cfi = DWC_ALLOC(sizeof(cfiobject_t));
1256 if (NULL == pcd->cfi)
1258 if (init_cfi(pcd->cfi)) {
1259 CFI_INFO("%s: Failed to init the CFI object\n", __func__);
1264 /* Initialize tasklets */
1265 pcd->start_xfer_tasklet = DWC_TASK_ALLOC("xfer_tasklet",
1266 start_xfer_tasklet_func, pcd);
1267 pcd->test_mode_tasklet = DWC_TASK_ALLOC("test_mode_tasklet",
1270 /* Initialize SRP timer */
1271 core_if->srp_timer = DWC_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);
1273 if (core_if->core_params->dev_out_nak) {
1275 * Initialize xfer timeout timer. Implemented for
1276 * 2.93a feature "Device DDMA OUT NAK Enhancement"
1278 for(i = 0; i < MAX_EPS_CHANNELS; i++) {
1279 pcd->core_if->ep_xfer_timer[i] =
1280 DWC_TIMER_ALLOC("ep timer", ep_xfer_timeout,
1281 &pcd->core_if->ep_xfer_info[i]);
1290 DWC_FREE(pcd->setup_pkt);
1291 if (pcd->status_buf)
1292 DWC_FREE(pcd->status_buf);
1304 * Remove PCD specific data
1306 void dwc_otg_pcd_remove(dwc_otg_pcd_t * pcd)
1308 dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
1309 struct device *dev = dwc_otg_pcd_to_dev(pcd);
1312 if (pcd->core_if->core_params->dev_out_nak) {
1313 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1314 DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[i]);
1315 pcd->core_if->ep_xfer_info[i].state = 0;
1319 if (GET_CORE_IF(pcd)->dma_enable) {
1320 DWC_DMA_FREE(dev, sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
1321 pcd->setup_pkt_dma_handle);
1322 DWC_DMA_FREE(dev, sizeof(uint16_t), pcd->status_buf,
1323 pcd->status_buf_dma_handle);
1324 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1325 dwc_otg_ep_free_desc_chain(dev,
1326 dev_if->setup_desc_addr[0],
1327 dev_if->dma_setup_desc_addr
1329 dwc_otg_ep_free_desc_chain(dev,
1330 dev_if->setup_desc_addr[1],
1331 dev_if->dma_setup_desc_addr
1333 dwc_otg_ep_free_desc_chain(dev,
1334 dev_if->in_desc_addr,
1335 dev_if->dma_in_desc_addr, 1);
1336 dwc_otg_ep_free_desc_chain(dev,
1337 dev_if->out_desc_addr,
1338 dev_if->dma_out_desc_addr,
1342 DWC_FREE(pcd->setup_pkt);
1343 DWC_FREE(pcd->status_buf);
1345 DWC_SPINLOCK_FREE(pcd->lock);
1346 /* Set core_if's lock pointer to NULL */
1347 pcd->core_if->lock = NULL;
1349 DWC_TASK_FREE(pcd->start_xfer_tasklet);
1350 DWC_TASK_FREE(pcd->test_mode_tasklet);
1351 if (pcd->core_if->core_params->dev_out_nak) {
1352 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1353 if (pcd->core_if->ep_xfer_timer[i]) {
1354 DWC_TIMER_FREE(pcd->core_if->ep_xfer_timer[i]);
1359 /* Release the CFI object's dynamic memory */
1361 if (pcd->cfi->ops.release) {
1362 pcd->cfi->ops.release(pcd->cfi);
1370 * Returns whether the registered PCD is dual-speed or not
1372 uint32_t dwc_otg_pcd_is_dualspeed(dwc_otg_pcd_t * pcd)
1374 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1376 if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ||
1377 ((core_if->hwcfg2.b.hs_phy_type == 2) &&
1378 (core_if->hwcfg2.b.fs_phy_type == 1) &&
1379 (core_if->core_params->ulpi_fs_ls))) {
1387 * Returns whether the registered PCD is OTG-capable or not
1389 uint32_t dwc_otg_pcd_is_otg(dwc_otg_pcd_t * pcd)
1391 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1392 gusbcfg_data_t usbcfg = {.d32 = 0 };
1394 usbcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
1395 if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap) {
1403 * This function assigns a Tx FIFO to an IN EP
1404 * in dedicated (multiple) Tx FIFO mode
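 *
 * Note: FIFO ownership is tracked in a bitmask (core_if->tx_msk here,
 * p_tx_msk for the periodic variant): bit (n - 1) set means TxFIFO n is in
 * use. The assign_*() helpers claim the first free FIFO and return its
 * number; the release_*() helpers clear the corresponding bit again.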
1406 static uint32_t assign_tx_fifo(dwc_otg_core_if_t * core_if)
1411 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
1412 if ((TxMsk & core_if->tx_msk) == 0) {
1413 core_if->tx_msk |= TxMsk;
1422 * This function assigns a periodic Tx FIFO to a periodic EP
1423 * in shared Tx FIFO mode
1425 static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t * core_if)
1427 uint32_t PerTxMsk = 1;
1429 for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
1430 if ((PerTxMsk & core_if->p_tx_msk) == 0) {
1431 core_if->p_tx_msk |= PerTxMsk;
1440 * This function releases a periodic Tx FIFO
1441 * in shared Tx FIFO mode
1443 static void release_perio_tx_fifo(dwc_otg_core_if_t * core_if,
1447 (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
1451 * This function releases a Tx FIFO assigned by assign_tx_fifo()
1452 * in dedicated (multiple) Tx FIFO mode
1454 static void release_tx_fifo(dwc_otg_core_if_t * core_if, uint32_t fifo_num)
1457 (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
1461 * This function is called from the gadget driver
1462 * to enable a PCD endpoint.
1464 int dwc_otg_pcd_ep_enable(dwc_otg_pcd_t * pcd,
1465 const uint8_t * ep_desc, void *usb_ep)
1468 dwc_otg_pcd_ep_t *ep = NULL;
1469 const usb_endpoint_descriptor_t *desc;
1470 dwc_irqflags_t flags;
1471 fifosize_data_t dptxfsiz = {.d32 = 0 };
1472 gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
1473 gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
1476 struct device *dev = dwc_otg_pcd_to_dev(pcd);
1478 desc = (const usb_endpoint_descriptor_t *)ep_desc;
1481 pcd->ep0.priv = usb_ep;
1483 retval = -DWC_E_INVALID;
1487 num = UE_GET_ADDR(desc->bEndpointAddress);
1488 dir = UE_GET_DIR(desc->bEndpointAddress);
1490 if (!UGETW(desc->wMaxPacketSize)) {
1491 DWC_WARN("bad maxpacketsize\n");
1492 retval = -DWC_E_INVALID;
1496 if (dir == UE_DIR_IN) {
1497 epcount = pcd->core_if->dev_if->num_in_eps;
1498 for (i = 0; i < epcount; i++) {
1499 if (num == pcd->in_ep[i].dwc_ep.num) {
1500 ep = &pcd->in_ep[i];
1505 epcount = pcd->core_if->dev_if->num_out_eps;
1506 for (i = 0; i < epcount; i++) {
1507 if (num == pcd->out_ep[i].dwc_ep.num) {
1508 ep = &pcd->out_ep[i];
1515 DWC_WARN("bad address\n");
1516 retval = -DWC_E_INVALID;
1520 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
1530 ep->dwc_ep.is_in = (dir == UE_DIR_IN);
1531 ep->dwc_ep.maxpacket = UGETW(desc->wMaxPacketSize);
1533 ep->dwc_ep.type = desc->bmAttributes & UE_XFERTYPE;
1535 if (ep->dwc_ep.is_in) {
1536 if (!GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1537 ep->dwc_ep.tx_fifo_num = 0;
1539 if (ep->dwc_ep.type == UE_ISOCHRONOUS) {
1541 * if ISOC EP then assign a Periodic Tx FIFO.
1543 ep->dwc_ep.tx_fifo_num =
1544 assign_perio_tx_fifo(GET_CORE_IF(pcd));
1548 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
1550 ep->dwc_ep.tx_fifo_num =
1551 assign_tx_fifo(GET_CORE_IF(pcd));
1554 /* Calculating EP info controller base address */
1555 if (ep->dwc_ep.tx_fifo_num
1556 && GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1558 DWC_READ_REG32(&GET_CORE_IF(pcd)->
1559 core_global_regs->gdfifocfg);
1560 gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
1563 (&GET_CORE_IF(pcd)->core_global_regs->
1564 dtxfsiz[ep->dwc_ep.tx_fifo_num - 1]) >> 16);
1565 gdfifocfg.b.epinfobase =
1566 gdfifocfgbase.d32 + dptxfsiz.d32;
1567 if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
1568 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
1569 core_global_regs->gdfifocfg,
1574 /* Set initial data PID. */
1575 if (ep->dwc_ep.type == UE_BULK) {
1576 ep->dwc_ep.data_pid_start = 0;
1579 /* Alloc DMA Descriptors */
1580 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1581 #ifndef DWC_UTE_PER_IO
1582 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
1584 ep->dwc_ep.desc_addr =
1585 dwc_otg_ep_alloc_desc_chain(dev,
1586 &ep->dwc_ep.dma_desc_addr,
1588 if (!ep->dwc_ep.desc_addr) {
1589 DWC_WARN("%s, can't allocate DMA descriptor\n",
1591 retval = -DWC_E_SHUTDOWN;
1592 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1595 #ifndef DWC_UTE_PER_IO
1600 DWC_DEBUGPL(DBG_PCD, "Activate %s: type=%d, mps=%d desc=%p\n",
1601 (ep->dwc_ep.is_in ? "IN" : "OUT"),
1602 ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
1603 #ifdef DWC_UTE_PER_IO
1604 ep->dwc_ep.xiso_bInterval = 1 << (ep->desc->bInterval - 1);
1606 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
1607 ep->dwc_ep.bInterval = 1 << (ep->desc->bInterval - 1);
1608 ep->dwc_ep.frame_num = 0xFFFFFFFF;
1611 dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
1614 if (pcd->cfi->ops.ep_enable) {
1615 pcd->cfi->ops.ep_enable(pcd->cfi, pcd, ep);
1619 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1626 * This function is called from the gadget driver
1627 * to disable a PCD endpoint.
1629 int dwc_otg_pcd_ep_disable(dwc_otg_pcd_t * pcd, void *ep_handle)
1631 dwc_otg_pcd_ep_t *ep;
1632 dwc_irqflags_t flags;
1633 dwc_otg_dev_dma_desc_t *desc_addr;
1634 dwc_dma_t dma_desc_addr;
1635 gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
1636 gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
1637 fifosize_data_t dptxfsiz = {.d32 = 0 };
1638 struct device *dev = dwc_otg_pcd_to_dev(pcd);
1640 ep = get_ep_from_handle(pcd, ep_handle);
1642 if (!ep || !ep->desc) {
1643 DWC_DEBUGPL(DBG_PCD, "bad ep address\n");
1644 return -DWC_E_INVALID;
1647 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
1649 dwc_otg_request_nuke(ep);
1651 dwc_otg_ep_deactivate(GET_CORE_IF(pcd), &ep->dwc_ep);
1652 if (pcd->core_if->core_params->dev_out_nak) {
1653 DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[ep->dwc_ep.num]);
1654 pcd->core_if->ep_xfer_info[ep->dwc_ep.num].state = 0;
1660 DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg);
1661 gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
1663 if (ep->dwc_ep.is_in) {
1664 if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1665 /* Flush the Tx FIFO */
1666 dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd),
1667 ep->dwc_ep.tx_fifo_num);
1669 release_perio_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1670 release_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1671 if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1672 /* Decreasing EPinfo Base Addr */
1675 (&GET_CORE_IF(pcd)->
1676 core_global_regs->dtxfsiz[ep->dwc_ep.tx_fifo_num-1]) >> 16);
1677 gdfifocfg.b.epinfobase = gdfifocfgbase.d32 - dptxfsiz.d32;
1678 if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
1679 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg,
1685 /* Free DMA Descriptors */
1686 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1687 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
1688 desc_addr = ep->dwc_ep.desc_addr;
1689 dma_desc_addr = ep->dwc_ep.dma_desc_addr;
1691 /* Cannot call dma_free_coherent() with IRQs disabled */
1692 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1693 dwc_otg_ep_free_desc_chain(dev, desc_addr, dma_desc_addr,
1699 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1702 DWC_DEBUGPL(DBG_PCD, "%d %s disabled\n", ep->dwc_ep.num,
1703 ep->dwc_ep.is_in ? "IN" : "OUT");
1708 /******************************************************************************/
1709 #ifdef DWC_UTE_PER_IO
1712 * Free the request and its extended parts
1715 void dwc_pcd_xiso_ereq_free(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req)
1717 DWC_FREE(req->ext_req.per_io_frame_descs);
1722 * Start the next request in the endpoint's queue.
1725 int dwc_otg_pcd_xiso_start_next_request(dwc_otg_pcd_t * pcd,
1726 dwc_otg_pcd_ep_t * ep)
1729 dwc_otg_pcd_request_t *req = NULL;
1730 dwc_ep_t *dwcep = NULL;
1731 struct dwc_iso_xreq_port *ereq = NULL;
1732 struct dwc_iso_pkt_desc_port *ddesc_iso;
1734 depctl_data_t diepctl;
1736 dwcep = &ep->dwc_ep;
1738 if (dwcep->xiso_active_xfers > 0) {
1739 #if 0 //Disable this to decrease s/w overhead that is crucial for Isoc transfers
1740 DWC_WARN("There are currently active transfers for EP%d \
1741 (active=%d; queued=%d)", dwcep->num, dwcep->xiso_active_xfers,
1742 dwcep->xiso_queued_xfers);
1747 nat = UGETW(ep->desc->wMaxPacketSize);
1748 nat = (nat >> 11) & 0x03;
1750 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1751 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1752 ereq = &req->ext_req;
1755 /* Get the frame number */
1756 dwcep->xiso_frame_num =
1757 dwc_otg_get_frame_number(GET_CORE_IF(pcd));
1758 DWC_DEBUG("FRM_NUM=%d", dwcep->xiso_frame_num);
1760 ddesc_iso = ereq->per_io_frame_descs;
1763 /* Setup DMA Descriptor chain for IN Isoc request */
1764 for (i = 0; i < ereq->pio_pkt_count; i++) {
1765 //if ((i % (nat + 1)) == 0)
1767 dwcep->xiso_frame_num =
1768 (dwcep->xiso_bInterval +
1769 dwcep->xiso_frame_num) & 0x3FFF;
1770 dwcep->desc_addr[i].buf =
1771 req->dma + ddesc_iso[i].offset;
1772 dwcep->desc_addr[i].status.b_iso_in.txbytes =
1773 ddesc_iso[i].length;
1774 dwcep->desc_addr[i].status.b_iso_in.framenum =
1775 dwcep->xiso_frame_num;
1776 dwcep->desc_addr[i].status.b_iso_in.bs =
1778 dwcep->desc_addr[i].status.b_iso_in.txsts = 0;
1779 dwcep->desc_addr[i].status.b_iso_in.sp =
1780 (ddesc_iso[i].length %
1781 dwcep->maxpacket) ? 1 : 0;
1782 dwcep->desc_addr[i].status.b_iso_in.ioc = 0;
1783 dwcep->desc_addr[i].status.b_iso_in.pid = nat + 1;
1784 dwcep->desc_addr[i].status.b_iso_in.l = 0;
1786 /* Process the last descriptor */
1787 if (i == ereq->pio_pkt_count - 1) {
1788 dwcep->desc_addr[i].status.b_iso_in.ioc = 1;
1789 dwcep->desc_addr[i].status.b_iso_in.l = 1;
1793 /* Setup and start the transfer for this endpoint */
1794 dwcep->xiso_active_xfers++;
1795 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
1796 in_ep_regs[dwcep->num]->diepdma,
1797 dwcep->dma_desc_addr);
1799 diepctl.b.epena = 1;
1801 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
1802 in_ep_regs[dwcep->num]->diepctl, 0,
1805 /* Setup DMA Descriptor chain for OUT Isoc request */
1806 for (i = 0; i < ereq->pio_pkt_count; i++) {
1807 //if ((i % (nat + 1)) == 0)
1808 dwcep->xiso_frame_num = (dwcep->xiso_bInterval +
1809 dwcep->xiso_frame_num) & 0x3FFF;
1810 dwcep->desc_addr[i].buf =
1811 req->dma + ddesc_iso[i].offset;
1812 dwcep->desc_addr[i].status.b_iso_out.rxbytes =
1813 ddesc_iso[i].length;
1814 dwcep->desc_addr[i].status.b_iso_out.framenum =
1815 dwcep->xiso_frame_num;
1816 dwcep->desc_addr[i].status.b_iso_out.bs =
1818 dwcep->desc_addr[i].status.b_iso_out.rxsts = 0;
1819 dwcep->desc_addr[i].status.b_iso_out.sp =
1820 (ddesc_iso[i].length %
1821 dwcep->maxpacket) ? 1 : 0;
1822 dwcep->desc_addr[i].status.b_iso_out.ioc = 0;
1823 dwcep->desc_addr[i].status.b_iso_out.pid = nat + 1;
1824 dwcep->desc_addr[i].status.b_iso_out.l = 0;
1826 /* Process the last descriptor */
1827 if (i == ereq->pio_pkt_count - 1) {
1828 dwcep->desc_addr[i].status.b_iso_out.ioc = 1;
1829 dwcep->desc_addr[i].status.b_iso_out.l = 1;
1833 /* Setup and start the transfer for this endpoint */
1834 dwcep->xiso_active_xfers++;
1835 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
1836 dev_if->out_ep_regs[dwcep->num]->
1837 doepdma, dwcep->dma_desc_addr);
1839 diepctl.b.epena = 1;
1841 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
1842 dev_if->out_ep_regs[dwcep->num]->
1843 doepctl, 0, diepctl.d32);
1854 * - Remove the request from the queue
1856 void complete_xiso_ep(dwc_otg_pcd_ep_t * ep)
1858 dwc_otg_pcd_request_t *req = NULL;
1859 struct dwc_iso_xreq_port *ereq = NULL;
1860 struct dwc_iso_pkt_desc_port *ddesc_iso = NULL;
1861 dwc_ep_t *dwcep = NULL;
1865 dwcep = &ep->dwc_ep;
1867 /* Get the first pending request from the queue */
1868 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1869 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1871 DWC_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
1874 dwcep->xiso_active_xfers--;
1875 dwcep->xiso_queued_xfers--;
1876 /* Remove this request from the queue */
1877 DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
1879 DWC_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
1884 ereq = &req->ext_req;
1885 ddesc_iso = ereq->per_io_frame_descs;
1887 if (dwcep->xiso_active_xfers < 0) {
1888 DWC_WARN("EP#%d (xiso_active_xfers=%d)", dwcep->num,
1889 dwcep->xiso_active_xfers);
1892 /* Fill the Isoc descs of portable extended req from dma descriptors */
1893 for (i = 0; i < ereq->pio_pkt_count; i++) {
1894 if (dwcep->is_in) { /* IN endpoints */
1895 ddesc_iso[i].actual_length = ddesc_iso[i].length -
1896 dwcep->desc_addr[i].status.b_iso_in.txbytes;
1897 ddesc_iso[i].status =
1898 dwcep->desc_addr[i].status.b_iso_in.txsts;
1899 } else { /* OUT endpoints */
1900 ddesc_iso[i].actual_length = ddesc_iso[i].length -
1901 dwcep->desc_addr[i].status.b_iso_out.rxbytes;
1902 ddesc_iso[i].status =
1903 dwcep->desc_addr[i].status.b_iso_out.rxsts;
1907 DWC_SPINUNLOCK(ep->pcd->lock);
1909 /* Call the completion function in the non-portable logic */
1910 ep->pcd->fops->xisoc_complete(ep->pcd, ep->priv, req->priv, 0,
1913 DWC_SPINLOCK(ep->pcd->lock);
1915 /* Free the request - specific freeing needed for extended request object */
1916 dwc_pcd_xiso_ereq_free(ep, req);
1918 /* Start the next request */
1919 dwc_otg_pcd_xiso_start_next_request(ep->pcd, ep);
1925 * Create and initialize the Isoc pkt descriptors of the extended request.
1928 static int dwc_otg_pcd_xiso_create_pkt_descs(dwc_otg_pcd_request_t * req,
1932 struct dwc_iso_xreq_port *ereq = NULL;
1933 struct dwc_iso_xreq_port *req_mapped = NULL;
1934 struct dwc_iso_pkt_desc_port *ipds = NULL; /* To be created in this function */
1938 ereq = &req->ext_req;
1939 req_mapped = (struct dwc_iso_xreq_port *)ereq_nonport;
1940 pkt_count = req_mapped->pio_pkt_count;
1942 /* Create the isoc descs */
1944 ipds = DWC_ALLOC_ATOMIC(sizeof(*ipds) * pkt_count);
1946 ipds = DWC_ALLOC(sizeof(*ipds) * pkt_count);
1950 DWC_ERROR("Failed to allocate isoc descriptors");
1951 return -DWC_E_NO_MEMORY;
1954 /* Initialize the extended request fields */
1955 ereq->per_io_frame_descs = ipds;
1956 ereq->error_count = 0;
1957 ereq->pio_alloc_pkt_count = pkt_count;
1958 ereq->pio_pkt_count = pkt_count;
1959 ereq->tr_sub_flags = req_mapped->tr_sub_flags;
1961 /* Init the Isoc descriptors */
1962 for (i = 0; i < pkt_count; i++) {
1963 ipds[i].length = req_mapped->per_io_frame_descs[i].length;
1964 ipds[i].offset = req_mapped->per_io_frame_descs[i].offset;
1965 ipds[i].status = req_mapped->per_io_frame_descs[i].status; /* 0 */
1966 ipds[i].actual_length =
1967 req_mapped->per_io_frame_descs[i].actual_length;
1973 static void prn_ext_request(struct dwc_iso_xreq_port *ereq)
1975 struct dwc_iso_pkt_desc_port *xfd = NULL;
1978 DWC_DEBUG("per_io_frame_descs=%p", ereq->per_io_frame_descs);
1979 DWC_DEBUG("tr_sub_flags=%d", ereq->tr_sub_flags);
1980 DWC_DEBUG("error_count=%d", ereq->error_count);
1981 DWC_DEBUG("pio_alloc_pkt_count=%d", ereq->pio_alloc_pkt_count);
1982 DWC_DEBUG("pio_pkt_count=%d", ereq->pio_pkt_count);
1983 DWC_DEBUG("res=%d", ereq->res);
1985 for (i = 0; i < ereq->pio_pkt_count; i++) {
1986 xfd = &ereq->per_io_frame_descs[0];
1987 DWC_DEBUG("FD #%d", i);
1989 DWC_DEBUG("xfd->actual_length=%d", xfd->actual_length);
1990 DWC_DEBUG("xfd->length=%d", xfd->length);
1991 DWC_DEBUG("xfd->offset=%d", xfd->offset);
1992 DWC_DEBUG("xfd->status=%d", xfd->status);
1999 int dwc_otg_pcd_xiso_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
2000 uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
2001 int zero, void *req_handle, int atomic_alloc,
2004 dwc_otg_pcd_request_t *req = NULL;
2005 dwc_otg_pcd_ep_t *ep;
2006 dwc_irqflags_t flags;
2009 ep = get_ep_from_handle(pcd, ep_handle);
2011 DWC_WARN("bad ep\n");
2012 return -DWC_E_INVALID;
2015 /* We support this extension only for DDMA mode */
2016 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC)
2017 if (!GET_CORE_IF(pcd)->dma_desc_enable)
2018 return -DWC_E_INVALID;
2020 /* Create a dwc_otg_pcd_request_t object */
2022 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2024 req = DWC_ALLOC(sizeof(*req));
2028 return -DWC_E_NO_MEMORY;
2031 	/* Create the Isoc descs for this request; they must exactly match
2032 	 * the structure handed to us by the non-portable logic */
2034 dwc_otg_pcd_xiso_create_pkt_descs(req, ereq_nonport, atomic_alloc);
2036 DWC_WARN("Failed to init the Isoc descriptors");
2041 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2043 DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2046 req->length = buflen;
2047 req->sent_zlp = zero;
2048 req->priv = req_handle;
2050 //DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2051 ep->dwc_ep.dma_addr = dma_buf;
2052 ep->dwc_ep.start_xfer_buff = buf;
2053 ep->dwc_ep.xfer_buff = buf;
2054 ep->dwc_ep.xfer_len = 0;
2055 ep->dwc_ep.xfer_count = 0;
2056 ep->dwc_ep.sent_zlp = 0;
2057 ep->dwc_ep.total_len = buflen;
2059 /* Add this request to the tail */
2060 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2061 ep->dwc_ep.xiso_queued_xfers++;
2063 //DWC_DEBUG("CP_0");
2064 //DWC_DEBUG("req->ext_req.tr_sub_flags=%d", req->ext_req.tr_sub_flags);
2065 //prn_ext_request((struct dwc_iso_xreq_port *) ereq_nonport);
2066 //prn_ext_request(&req->ext_req);
2068 //DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2070 /* If the req->status == ASAP then check if there is any active transfer
2071 * for this endpoint. If no active transfers, then get the first entry
2072 * from the queue and start that transfer
2074 if (req->ext_req.tr_sub_flags == DWC_EREQ_TF_ASAP) {
2075 res = dwc_otg_pcd_xiso_start_next_request(pcd, ep);
2077 DWC_WARN("Failed to start the next Isoc transfer");
2078 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2084 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2089 /* END ifdef DWC_UTE_PER_IO ***************************************************/
2090 int dwc_otg_pcd_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
2091 uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
2092 int zero, void *req_handle, int atomic_alloc)
2094 struct device *dev = dwc_otg_pcd_to_dev(pcd);
2095 dwc_irqflags_t flags;
2096 dwc_otg_pcd_request_t *req;
2097 dwc_otg_pcd_ep_t *ep;
2098 uint32_t max_transfer;
2100 ep = get_ep_from_handle(pcd, ep_handle);
2101 if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2102 DWC_WARN("bad ep\n");
2103 return -DWC_E_INVALID;
2107 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2109 req = DWC_ALLOC(sizeof(*req));
2113 return -DWC_E_NO_MEMORY;
2115 DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2116 if (!GET_CORE_IF(pcd)->core_params->opt) {
2117 if (ep->dwc_ep.num != 0) {
2118 DWC_ERROR("queue req %p, len %d buf %p\n",
2119 req_handle, buflen, buf);
2125 req->length = buflen;
2126 req->sent_zlp = zero;
2127 req->priv = req_handle;
2128 req->dw_align_buf = NULL;
2129 if ((dma_buf & 0x3) && GET_CORE_IF(pcd)->dma_enable
2130 && !GET_CORE_IF(pcd)->dma_desc_enable)
2131 req->dw_align_buf = DWC_DMA_ALLOC(dev, buflen,
2132 &req->dw_align_buf_dma);
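	/*
	 * Buffer DMA (non-descriptor) needs a 32-bit aligned buffer, so a
	 * bounce buffer (dw_align_buf) is allocated above when the caller's
	 * dma_buf is misaligned; IN data is copied into it before the
	 * transfer starts (see below), and OUT data is presumably copied
	 * back to the original buffer on completion.
	 */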
2133 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2136 	 * After adding the request to the queue: for IN ISOC, wait for the
2137 	 * "IN Token Received When TxFIFO Empty" interrupt, and for OUT ISOC,
2138 	 * wait for the "OUT Token Received When EP Disabled" interrupt, to
2139 	 * obtain the starting (odd/even) microframe before starting the transfer
2141 if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
2143 depctl_data_t depctl = {.d32 =
2144 DWC_READ_REG32(&pcd->core_if->dev_if->
2145 in_ep_regs[ep->dwc_ep.num]->
2147 ++pcd->request_pending;
2149 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2150 if (ep->dwc_ep.is_in) {
2152 DWC_WRITE_REG32(&pcd->core_if->dev_if->
2153 in_ep_regs[ep->dwc_ep.num]->
2154 diepctl, depctl.d32);
2157 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2163 * For EP0 IN without premature status, zlp is required?
2165 if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
2166 DWC_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->dwc_ep.num);
2170 /* Start the transfer */
2171 if (DWC_CIRCLEQ_EMPTY(&ep->queue) && !ep->stopped) {
2173 if (ep->dwc_ep.num == 0) {
2174 switch (pcd->ep0state) {
2175 case EP0_IN_DATA_PHASE:
2176 DWC_DEBUGPL(DBG_PCD,
2177 "%s ep0: EP0_IN_DATA_PHASE\n",
2181 case EP0_OUT_DATA_PHASE:
2182 DWC_DEBUGPL(DBG_PCD,
2183 "%s ep0: EP0_OUT_DATA_PHASE\n",
2185 if (pcd->request_config) {
2186 /* Complete STATUS PHASE */
2187 ep->dwc_ep.is_in = 1;
2188 pcd->ep0state = EP0_IN_STATUS_PHASE;
2192 case EP0_IN_STATUS_PHASE:
2193 DWC_DEBUGPL(DBG_PCD,
2194 "%s ep0: EP0_IN_STATUS_PHASE\n",
2199 DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
2201 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2202 return -DWC_E_SHUTDOWN;
2205 ep->dwc_ep.dma_addr = dma_buf;
2206 ep->dwc_ep.start_xfer_buff = buf;
2207 ep->dwc_ep.xfer_buff = buf;
2208 ep->dwc_ep.xfer_len = buflen;
2209 ep->dwc_ep.xfer_count = 0;
2210 ep->dwc_ep.sent_zlp = 0;
2211 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
2214 if ((ep->dwc_ep.xfer_len %
2215 ep->dwc_ep.maxpacket == 0)
2216 && (ep->dwc_ep.xfer_len != 0)) {
2217 ep->dwc_ep.sent_zlp = 1;
2222 dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
2224 } // non-ep0 endpoints
2227 if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2228 /* store the request length */
2229 ep->dwc_ep.cfi_req_len = buflen;
2230 pcd->cfi->ops.build_descriptors(pcd->cfi, pcd,
2235 GET_CORE_IF(ep->pcd)->core_params->
2238 /* Setup and start the Transfer */
2239 if (req->dw_align_buf){
2240 if (ep->dwc_ep.is_in)
2241 dwc_memcpy(req->dw_align_buf,
2243 ep->dwc_ep.dma_addr =
2244 req->dw_align_buf_dma;
2245 ep->dwc_ep.start_xfer_buff =
2247 ep->dwc_ep.xfer_buff =
2250 ep->dwc_ep.dma_addr = dma_buf;
2251 ep->dwc_ep.start_xfer_buff = buf;
2252 ep->dwc_ep.xfer_buff = buf;
2254 ep->dwc_ep.xfer_len = 0;
2255 ep->dwc_ep.xfer_count = 0;
2256 ep->dwc_ep.sent_zlp = 0;
2257 ep->dwc_ep.total_len = buflen;
2259 ep->dwc_ep.maxxfer = max_transfer;
2260 if (GET_CORE_IF(pcd)->dma_desc_enable) {
2261 uint32_t out_max_xfer =
2262 DDMA_MAX_TRANSFER_SIZE -
2263 (DDMA_MAX_TRANSFER_SIZE % 4);
2264 if (ep->dwc_ep.is_in) {
2265 if (ep->dwc_ep.maxxfer >
2266 DDMA_MAX_TRANSFER_SIZE) {
2267 ep->dwc_ep.maxxfer =
2268 DDMA_MAX_TRANSFER_SIZE;
2271 if (ep->dwc_ep.maxxfer >
2273 ep->dwc_ep.maxxfer =
2278 if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
2279 ep->dwc_ep.maxxfer -=
2280 (ep->dwc_ep.maxxfer %
2281 ep->dwc_ep.maxpacket);
2285 if ((ep->dwc_ep.total_len %
2286 ep->dwc_ep.maxpacket == 0)
2287 && (ep->dwc_ep.total_len != 0)) {
2288 ep->dwc_ep.sent_zlp = 1;
2294 dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
2300 ++pcd->request_pending;
2301 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2302 if (ep->dwc_ep.is_in && ep->stopped
2303 && !(GET_CORE_IF(pcd)->dma_enable)) {
2304 /** @todo NGS Create a function for this. */
2305 diepmsk_data_t diepmsk = {.d32 = 0 };
2306 diepmsk.b.intktxfemp = 1;
2307 if (GET_CORE_IF(pcd)->multiproc_int_enable) {
2308 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
2309 dev_if->dev_global_regs->diepeachintmsk
2310 [ep->dwc_ep.num], 0,
2313 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
2314 dev_if->dev_global_regs->
2315 diepmsk, 0, diepmsk.d32);
2320 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2325 int dwc_otg_pcd_ep_dequeue(dwc_otg_pcd_t * pcd, void *ep_handle,
2328 dwc_irqflags_t flags;
2329 dwc_otg_pcd_request_t *req;
2330 dwc_otg_pcd_ep_t *ep;
2332 ep = get_ep_from_handle(pcd, ep_handle);
2333 if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2334 DWC_WARN("bad argument\n");
2335 return -DWC_E_INVALID;
2338 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2340 /* make sure it's actually queued on this endpoint */
2341 DWC_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
2342 if (req->priv == (void *)req_handle) {
2347 if (req->priv != (void *)req_handle) {
2348 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2349 return -DWC_E_INVALID;
2352 if (!DWC_CIRCLEQ_EMPTY_ENTRY(req, queue_entry)) {
2353 dwc_otg_request_done(ep, req, -DWC_E_RESTART);
2358 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2360 return req ? 0 : -DWC_E_SHUTDOWN;
2365 * dwc_otg_pcd_ep_wedge - sets the halt feature and ignores clear requests
2367 * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT)
2368 * requests. If the gadget driver clears the halt status, it will
2369 * automatically unwedge the endpoint.
2371 * Returns zero on success, else negative DWC error code.
2373 int dwc_otg_pcd_ep_wedge(dwc_otg_pcd_t * pcd, void *ep_handle)
2375 dwc_otg_pcd_ep_t *ep;
2376 dwc_irqflags_t flags;
2379 ep = get_ep_from_handle(pcd, ep_handle);
2381 	if (!ep || (!ep->desc && ep != &pcd->ep0) ||
2382 (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
2383 DWC_WARN("%s, bad ep\n", __func__);
2384 return -DWC_E_INVALID;
2387 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2388 if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2389 DWC_WARN("%d %s XFer In process\n", ep->dwc_ep.num,
2390 ep->dwc_ep.is_in ? "IN" : "OUT");
2391 retval = -DWC_E_AGAIN;
2393 /* This code needs to be reviewed */
2394 if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
2395 dtxfsts_data_t txstatus;
2396 fifosize_data_t txfifosize;
2399 DWC_READ_REG32(&GET_CORE_IF(pcd)->
2400 core_global_regs->dtxfsiz[ep->dwc_ep.
2403 DWC_READ_REG32(&GET_CORE_IF(pcd)->
2404 dev_if->in_ep_regs[ep->dwc_ep.num]->
2407 if (txstatus.b.txfspcavail < txfifosize.b.depth) {
2408 DWC_WARN("%s() Data In Tx Fifo\n", __func__);
2409 retval = -DWC_E_AGAIN;
2411 if (ep->dwc_ep.num == 0) {
2412 pcd->ep0state = EP0_STALL;
2416 dwc_otg_ep_set_stall(GET_CORE_IF(pcd),
2420 if (ep->dwc_ep.num == 0) {
2421 pcd->ep0state = EP0_STALL;
2425 dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2429 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
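/*
 * Illustrative sketch (editorial, not part of the driver): wedging an
 * endpoint from an OS-specific gadget wrapper. The handle name is
 * hypothetical; the call fails with -DWC_E_AGAIN while a transfer is still
 * queued or data remains in the Tx FIFO.
 *
 *	retval = dwc_otg_pcd_ep_wedge(pcd, usb_ep_handle);
 *	if (retval == -DWC_E_AGAIN)
 *		DWC_WARN("endpoint busy, wedge not applied\n");
 */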
int dwc_otg_pcd_ep_halt(dwc_otg_pcd_t * pcd, void *ep_handle, int value)
{
	dwc_otg_pcd_ep_t *ep;
	dwc_irqflags_t flags;
	int retval = 0;

	ep = get_ep_from_handle(pcd, ep_handle);

	if (!ep || (!ep->desc && ep != &pcd->ep0) ||
	    (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
		DWC_WARN("%s, bad ep\n", __func__);
		return -DWC_E_INVALID;
	}

	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
	if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
		DWC_WARN("%d %s XFer In process\n", ep->dwc_ep.num,
			 ep->dwc_ep.is_in ? "IN" : "OUT");
		retval = -DWC_E_AGAIN;
	} else if (value == 0) {
		dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
	} else if (value == 1) {
		if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
			dtxfsts_data_t txstatus;
			fifosize_data_t txfifosize;

			txfifosize.d32 =
			    DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
					   dtxfsiz[ep->dwc_ep.tx_fifo_num]);
			txstatus.d32 =
			    DWC_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
					   in_ep_regs[ep->dwc_ep.num]->dtxfsts);

			if (txstatus.b.txfspcavail < txfifosize.b.depth) {
				DWC_WARN("%s() Data In Tx Fifo\n", __func__);
				retval = -DWC_E_AGAIN;
			} else {
				if (ep->dwc_ep.num == 0) {
					pcd->ep0state = EP0_STALL;
				}

				ep->stopped = 1;
				dwc_otg_ep_set_stall(GET_CORE_IF(pcd),
						     &ep->dwc_ep);
			}
		} else {
			if (ep->dwc_ep.num == 0) {
				pcd->ep0state = EP0_STALL;
			}

			ep->stopped = 1;
			dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
		}
	} else if (value == 2) {
		ep->dwc_ep.stall_clear_flag = 0;
	} else if (value == 3) {
		ep->dwc_ep.stall_clear_flag = 1;
	}

	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);

	return retval;
}
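/*
 * Illustrative sketch (editorial, not part of the driver): dwc_otg_pcd_ep_halt()
 * multiplexes four operations on 'value' -- 0 clears the stall, 1 sets it,
 * 2 clears the endpoint's stall_clear_flag, 3 sets it. A hypothetical
 * Set/ClearFeature(ENDPOINT_HALT) handler might call it as:
 *
 *	retval = dwc_otg_pcd_ep_halt(pcd, ep_handle, set_feature ? 1 : 0);
 *	if (retval == -DWC_E_AGAIN)
 *		DWC_WARN("transfer in progress, halt state unchanged\n");
 */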
/**
 * This function initiates remote wakeup of the host from suspend state.
 */
void dwc_otg_pcd_rem_wkup_from_suspend(dwc_otg_pcd_t * pcd, int set)
{
	dctl_data_t dctl = { 0 };
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dsts_data_t dsts;

	dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
	if (!dsts.b.suspsts) {
		DWC_WARN("Remote wakeup while not in suspend state\n");
	}
	/* Check if DEVICE_REMOTE_WAKEUP feature enabled */
	if (pcd->remote_wakeup_enable) {
		if (set) {
			if (core_if->adp_enable) {
				gpwrdn_data_t gpwrdn;

				dwc_otg_adp_probe_stop(core_if);

				/* Mask SRP detected interrupt from Power Down Logic */
				gpwrdn.d32 = 0;
				gpwrdn.b.srp_det_msk = 1;
				DWC_MODIFY_REG32(&core_if->
						 core_global_regs->gpwrdn,
						 gpwrdn.d32, 0);

				/* Disable Power Down Logic */
				gpwrdn.d32 = 0;
				gpwrdn.b.pmuactv = 1;
				DWC_MODIFY_REG32(&core_if->
						 core_global_regs->gpwrdn,
						 gpwrdn.d32, 0);

				/*
				 * Initialize the Core for Device mode.
				 */
				core_if->op_state = B_PERIPHERAL;
				dwc_otg_core_init(core_if);
				dwc_otg_enable_global_interrupts(core_if);
				cil_pcd_start(core_if);

				dwc_otg_initiate_srp(core_if);
			}

			dctl.b.rmtwkupsig = 1;
			DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
					 dctl, 0, dctl.d32);
			DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");

			dwc_mdelay(2);
			DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
					 dctl, dctl.d32, 0);
			DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
		}
	} else {
		DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
	}
}
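/*
 * Note (editorial): the set/clear sequence above pulses DCTL.RmtWkUpSig for
 * roughly two milliseconds, which is consistent with the USB 2.0 requirement
 * that a device drive resume signalling for 1-15 ms before releasing the bus.
 */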
#ifdef CONFIG_USB_DWC_OTG_LPM
/**
 * This function initiates remote wakeup of the host from L1 sleep state.
 */
void dwc_otg_pcd_rem_wkup_from_sleep(dwc_otg_pcd_t * pcd, int set)
{
	glpmcfg_data_t lpmcfg;
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);

	lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);

	/* Check if we are in L1 state */
	if (!lpmcfg.b.prt_sleep_sts) {
		DWC_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
		return;
	}

	/* Check if host allows remote wakeup */
	if (!lpmcfg.b.rem_wkup_en) {
		DWC_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
		return;
	}

	/* Check if Resume OK */
	if (!lpmcfg.b.sleep_state_resumeok) {
		DWC_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
		return;
	}

	lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
	lpmcfg.b.en_utmi_sleep = 0;
	lpmcfg.b.hird_thres &= (~(1 << 4));
	DWC_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);

	if (set) {
		dctl_data_t dctl = {.d32 = 0 };
		dctl.b.rmtwkupsig = 1;
		/* Set RmtWkUpSig bit to start remote wakeup signaling.
		 * Hardware will automatically clear this bit.
		 */
		DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
				 0, dctl.d32);
		DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
	}
}
#endif
/**
 * Performs remote wakeup.
 */
void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t * pcd, int set)
{
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dwc_irqflags_t flags;

	if (dwc_otg_is_device_mode(core_if)) {
		DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
#ifdef CONFIG_USB_DWC_OTG_LPM
		if (core_if->lx_state == DWC_OTG_L1) {
			dwc_otg_pcd_rem_wkup_from_sleep(pcd, set);
		} else {
#endif
			dwc_otg_pcd_rem_wkup_from_suspend(pcd, set);
#ifdef CONFIG_USB_DWC_OTG_LPM
		}
#endif
		DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
	}
	return;
}
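/*
 * Illustrative sketch (editorial, not part of the driver): a wrapper's wakeup
 * entry point only needs the pcd pointer; core_if->lx_state decides whether
 * the L1 (LPM) path or the suspend signalling path above is taken.
 *
 *	dwc_otg_pcd_remote_wakeup(pcd, 1);	// start resume signalling
 */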
void dwc_otg_pcd_disconnect_us(dwc_otg_pcd_t * pcd, int no_of_usecs)
{
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dctl_data_t dctl = { 0 };

	if (dwc_otg_is_device_mode(core_if)) {
		dctl.b.sftdiscon = 1;
		DWC_PRINTF("Soft disconnect for %d useconds\n", no_of_usecs);
		DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0,
				 dctl.d32);
		dwc_udelay(no_of_usecs);
		DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
				 dctl.d32, 0);
	} else {
		DWC_PRINTF("NOT SUPPORTED IN HOST MODE\n");
	}
	return;
}
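/*
 * Illustrative sketch (editorial, not part of the driver): forcing the host
 * to see a disconnect by dropping the soft connection for a short,
 * hypothetical interval. Only meaningful in device mode.
 *
 *	dwc_otg_pcd_disconnect_us(pcd, 20000);	// ~20 ms soft disconnect
 */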
int dwc_otg_pcd_wakeup(dwc_otg_pcd_t * pcd)
{
	dsts_data_t dsts;
	gotgctl_data_t gotgctl;

	/*
	 * This function starts the Protocol if no session is in progress. If
	 * a session is already in progress, but the device is suspended,
	 * remote wakeup signaling is started.
	 */

	/* Check if valid session */
	gotgctl.d32 =
	    DWC_READ_REG32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
	if (gotgctl.b.bsesvld) {
		/* Check if suspend state */
		dsts.d32 =
		    DWC_READ_REG32(&
				   (GET_CORE_IF(pcd)->dev_if->
				    dev_global_regs->dsts));
		if (dsts.b.suspsts) {
			dwc_otg_pcd_remote_wakeup(pcd, 1);
		}
	} else {
		dwc_otg_pcd_initiate_srp(pcd);
	}

	return 0;
}
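/*
 * Illustrative sketch (editorial, not part of the driver): dwc_otg_pcd_wakeup()
 * chooses between remote wakeup and SRP based on the session state, so a
 * wrapper's wakeup hook can simply forward to it:
 *
 *	return dwc_otg_pcd_wakeup(pcd);
 */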
/**
 * Start the SRP timer to detect when the SRP does not complete within
 * 6 seconds.
 *
 * @param pcd the pcd structure.
 */
void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t * pcd)
{
	dwc_irqflags_t flags;
	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
	dwc_otg_initiate_srp(GET_CORE_IF(pcd));
	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
}
int dwc_otg_pcd_get_frame_number(dwc_otg_pcd_t * pcd)
{
	return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
}

int dwc_otg_pcd_is_lpm_enabled(dwc_otg_pcd_t * pcd)
{
	return GET_CORE_IF(pcd)->core_params->lpm_enable;
}

uint32_t get_b_hnp_enable(dwc_otg_pcd_t * pcd)
{
	return pcd->b_hnp_enable;
}

uint32_t get_a_hnp_support(dwc_otg_pcd_t * pcd)
{
	return pcd->a_hnp_support;
}

uint32_t get_a_alt_hnp_support(dwc_otg_pcd_t * pcd)
{
	return pcd->a_alt_hnp_support;
}

int dwc_otg_pcd_get_rmwkup_enable(dwc_otg_pcd_t * pcd)
{
	return pcd->remote_wakeup_enable;
}

#endif /* DWC_HOST_ONLY */