2 /* ==========================================================================
3 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.c $
8 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
9 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
10 * otherwise expressly agreed to in writing between Synopsys and you.
12 * The Software IS NOT an item of Licensed Software or Licensed Product under
13 * any End User Software License Agreement or Agreement for Licensed Product
14 * with Synopsys or any supplement thereto. You are permitted to use and
15 * redistribute this Software in source and binary forms, with or without
16 * modification, provided that redistributions of source code must retain this
17 * notice. You may not view, use, disclose, copy or distribute this file or
18 * any information contained herein except pursuant to this license grant from
19 * Synopsys. If you do not agree with this notice, including the disclaimer
20 * below, then you are not authorized to use the Software.
22 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
26 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
27 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
33 * ========================================================================== */
34 #ifndef DWC_DEVICE_ONLY
37 * This file implements HCD Core. All code in this file is portable and doesn't
38 * use any OS specific functions.
39 * Interface provided by HCD Core is defined in <code><hcd_if.h></code>
43 #include <linux/usb.h>
44 #include <linux/usb/hcd.h>
46 #include "dwc_otg_hcd.h"
47 #include "dwc_otg_regs.h"
48 #include "dwc_otg_fiq_fsm.h"
50 extern bool microframe_schedule;
51 extern uint16_t fiq_fsm_mask, nak_holdoff;
53 //#define DEBUG_HOST_CHANNELS
54 #ifdef DEBUG_HOST_CHANNELS
/* Debug-only bookkeeping. Judging by the names, these record statistics from
 * the most recent transaction-selection pass (scheduled periodic/non-periodic
 * counts and available host channels at start/end) — TODO confirm against the
 * code that updates them, which is outside this chunk. */
55 static int last_sel_trans_num_per_scheduled = 0;
56 static int last_sel_trans_num_nonper_scheduled = 0;
57 static int last_sel_trans_num_avail_hc_at_start = 0;
58 static int last_sel_trans_num_avail_hc_at_end = 0;
59 #endif /* DEBUG_HOST_CHANNELS */
62 dwc_otg_hcd_t *dwc_otg_hcd_alloc_hcd(void)
64 return DWC_ALLOC(sizeof(dwc_otg_hcd_t));
68 * Connection timeout function. An OTG host is required to display a
69 * message if the device does not connect within 10 seconds.
71 void dwc_otg_hcd_connect_timeout(void *ptr)
73 DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, ptr);
74 DWC_PRINTF("Connect Timeout\n");
75 __DWC_ERROR("Device Not Connected/Responding\n");
/* Debug helper: dumps the state of the host channel currently bound to @qh
 * (a snapshot of its hardware registers plus the software channel fields),
 * then lists the non-periodic schedules and the whole channel pointer array.
 * Does nothing when the QH has no channel assigned. */
79 static void dump_channel_info(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
81 if (qh->channel != NULL) {
82 dwc_hc_t *hc = qh->channel;
83 dwc_list_link_t *item;
84 dwc_otg_qh_t *qh_item;
85 int num_channels = hcd->core_if->core_params->host_channels;
88 dwc_otg_hc_regs_t *hc_regs;
/* Read the per-channel register block for the channel in use. */
94 hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num];
95 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
96 hcsplt.d32 = DWC_READ_REG32(&hc_regs->hcsplt);
97 hctsiz.d32 = DWC_READ_REG32(&hc_regs->hctsiz);
98 hcdma = DWC_READ_REG32(&hc_regs->hcdma);
100 DWC_PRINTF("  Assigned to channel %p:\n", hc);
101 DWC_PRINTF("    hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32,
103 DWC_PRINTF("    hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32,
105 DWC_PRINTF("    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
106 hc->dev_addr, hc->ep_num, hc->ep_is_in);
107 DWC_PRINTF("    ep_type: %d\n", hc->ep_type);
108 DWC_PRINTF("    max_packet: %d\n", hc->max_packet);
109 DWC_PRINTF("    data_pid_start: %d\n", hc->data_pid_start);
110 DWC_PRINTF("    xfer_started: %d\n", hc->xfer_started);
111 DWC_PRINTF("    halt_status: %d\n", hc->halt_status);
112 DWC_PRINTF("    xfer_buff: %p\n", hc->xfer_buff);
113 DWC_PRINTF("    xfer_len: %d\n", hc->xfer_len);
114 DWC_PRINTF("    qh: %p\n", hc->qh);
/* Walk both non-periodic schedules so a stuck QH can be spotted. */
115 DWC_PRINTF("  NP inactive sched:\n");
116 DWC_LIST_FOREACH(item, &hcd->non_periodic_sched_inactive) {
118 DWC_LIST_ENTRY(item, dwc_otg_qh_t, qh_list_entry);
119 DWC_PRINTF("    %p\n", qh_item);
121 DWC_PRINTF("  NP active sched:\n");
122 DWC_LIST_FOREACH(item, &hcd->non_periodic_sched_active) {
124 DWC_LIST_ENTRY(item, dwc_otg_qh_t, qh_list_entry);
125 DWC_PRINTF("    %p\n", qh_item);
/* Finally, print the full channel array (inner 'hc' shadows the outer one). */
127 DWC_PRINTF("  Channels: \n");
128 for (i = 0; i < num_channels; i++) {
129 dwc_hc_t *hc = hcd->hc_ptr_array[i];
130 DWC_PRINTF("    %2d: %p\n", i, hc);
135 #define dump_channel_info(hcd, qh)
139 * Work queue function for starting the HCD when A-Cable is connected.
140 * The hcd_start() must be called in a process context.
142 static void hcd_start_func(void *_vp)
144 dwc_otg_hcd_t *hcd = (dwc_otg_hcd_t *) _vp;
146 DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, hcd);
148 hcd->fops->start(hcd);
152 static void del_xfer_timers(dwc_otg_hcd_t * hcd)
156 int num_channels = hcd->core_if->core_params->host_channels;
157 for (i = 0; i < num_channels; i++) {
158 DWC_TIMER_CANCEL(hcd->core_if->hc_xfer_timer[i]);
163 static void del_timers(dwc_otg_hcd_t * hcd)
165 del_xfer_timers(hcd);
166 DWC_TIMER_CANCEL(hcd->conn_timer);
170 * Processes all the URBs in a single list of QHs. Completes them with
171 * -ESHUTDOWN and frees the QTD.
/* Caller context (per the inline comment further down): disconnect callback
 * or HCD teardown — FIQ disabled, interrupts masked, HCD spinlock held. */
173 static void kill_urbs_in_qh_list(dwc_otg_hcd_t * hcd, dwc_list_link_t * qh_list)
175 dwc_list_link_t *qh_item, *qh_tmp;
177 dwc_otg_qtd_t *qtd, *qtd_tmp;
180 DWC_LIST_FOREACH_SAFE(qh_item, qh_tmp, qh_list) {
181 qh = DWC_LIST_ENTRY(qh_item, dwc_otg_qh_t, qh_list_entry);
182 DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp,
183 &qh->qtd_list, qtd_list_entry) {
/* NOTE(review): re-reading the list head inside FOREACH_SAFE overrides the
 * iterator; this matches the upstream source but is worth a second look. */
184 qtd = DWC_CIRCLEQ_FIRST(&qh->qtd_list);
185 if (qtd->urb != NULL) {
/* Complete the URB back to the class driver with -DWC_E_SHUTDOWN,
 * then release its QTD. */
186 hcd->fops->complete(hcd, qtd->urb->priv,
187 qtd->urb, -DWC_E_SHUTDOWN);
188 dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
193 int n = qh->channel->hc_num;
194 /* Using hcchar.chen == 1 is not a reliable test.
195 * It is possible that the channel has already halted
196 * but not yet been through the IRQ handler.
/* FIQ FSM path: flag the channel as halting and kick any high-speed
 * isochronous state machine into its aborted state so the FIQ does not
 * restart the transfer. */
198 if (fiq_fsm_enable && (hcd->fiq_state->channel[qh->channel->hc_num].fsm != FIQ_PASSTHROUGH)) {
199 qh->channel->halt_status = DWC_OTG_HC_XFER_URB_DEQUEUE;
200 qh->channel->halt_pending = 1;
201 if (hcd->fiq_state->channel[n].fsm == FIQ_HS_ISOC_TURBO ||
202 hcd->fiq_state->channel[n].fsm == FIQ_HS_ISOC_SLEEPING)
203 hcd->fiq_state->channel[n].fsm = FIQ_HS_ISOC_ABORTED;
204 /* We're called from disconnect callback or in the middle of freeing the HCD here,
205 * so FIQ is disabled, top-level interrupts masked and we're holding the spinlock.
206 * No further URBs will be submitted, but wait 1 microframe for any previously
207 * submitted periodic DMA to finish.
/* Non-FSM path: halt the channel through the normal CIL API. */
214 dwc_otg_hc_halt(hcd->core_if, qh->channel,
215 DWC_OTG_HC_XFER_URB_DEQUEUE);
/* Finally detach the QH itself from the schedule list. */
219 dwc_otg_hcd_qh_remove(hcd, qh);
224 * Responds with an error status of ESHUTDOWN to all URBs in the non-periodic
225 * and periodic schedules. The QTD associated with each URB is removed from
226 * the schedule and freed. This function may be called when a disconnect is
227 * detected or when the HCD is being stopped.
229 static void kill_all_urbs(dwc_otg_hcd_t * hcd)
231 kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive);
232 kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active);
233 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive);
234 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready);
235 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned);
236 kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued);
240 * Start the connection timer. An OTG host is required to display a
241 * message if the device does not connect within 10 seconds. The
242 * timer is deleted if a port connect interrupt occurs before the
245 static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t * hcd)
247 DWC_TIMER_SCHEDULE(hcd->conn_timer, 10000 /* 10 secs */ );
251 * HCD Callback function for session start of the HCD. (Starts the
251b* connection timeout timer; see dwc_otg_hcd_session_start_cb below.)
253 * @param p void pointer to the <code>struct usb_hcd</code>
255 static int32_t dwc_otg_hcd_session_start_cb(void *p)
257 dwc_otg_hcd_t *dwc_otg_hcd;
258 DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
260 dwc_otg_hcd_start_connect_timer(dwc_otg_hcd);
265 * HCD Callback function for starting the HCD when A-Cable is
268 * @param p void pointer to the <code>struct usb_hcd</code>
270 static int32_t dwc_otg_hcd_start_cb(void *p)
272 dwc_otg_hcd_t *dwc_otg_hcd = p;
273 dwc_otg_core_if_t *core_if;
276 core_if = dwc_otg_hcd->core_if;
/* B_HOST means we got here via HNP; the port must be reset promptly. */
278 if (core_if->op_state == B_HOST) {
280 * Reset the port. During a HNP mode switch the reset
281 * needs to occur within 1ms and have a duration of at
284 hprt0.d32 = dwc_otg_read_hprt0(core_if);
286 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
/* Defer the actual start to process context (hcd_start_func) after 50 ms. */
288 DWC_WORKQ_SCHEDULE_DELAYED(core_if->wq_otg,
289 hcd_start_func, dwc_otg_hcd, 50,
296 * HCD Callback function for disconnect of the HCD.
298 * @param p void pointer to the <code>struct usb_hcd</code>
300 static int32_t dwc_otg_hcd_disconnect_cb(void *p)
303 dwc_otg_hcd_t *dwc_otg_hcd = p;
305 DWC_SPINLOCK(dwc_otg_hcd->lock);
307 * Set status flags for the hub driver.
309 dwc_otg_hcd->flags.b.port_connect_status_change = 1;
310 dwc_otg_hcd->flags.b.port_connect_status = 0;
/* Everything below mutates state the FIQ also touches; take its lock. */
313 fiq_fsm_spin_lock(&dwc_otg_hcd->fiq_state->lock);
316 * Shutdown any transfers in process by clearing the Tx FIFO Empty
317 * interrupt mask and status bits and disabling subsequent host
318 * channel interrupts.
321 intr.b.nptxfempty = 1;
322 intr.b.ptxfempty = 1;
/* Mask the FIFO-empty interrupts and clear any that are pending. */
324 DWC_MODIFY_REG32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk,
326 DWC_MODIFY_REG32(&dwc_otg_hcd->core_if->core_global_regs->gintsts,
329 del_timers(dwc_otg_hcd);
332 * Turn off the vbus power only if the core has transitioned to device
333 * mode. If still in host mode, need to keep power on to detect a
336 if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) {
337 if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) {
338 hprt0_data_t hprt0 = {.d32 = 0 };
339 DWC_PRINTF("Disconnect: PortPower off\n");
341 DWC_WRITE_REG32(dwc_otg_hcd->core_if->host_if->hprt0,
345 dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
348 /* Respond with an error status to all URBs in the schedule. */
349 kill_all_urbs(dwc_otg_hcd);
351 if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) {
352 /* Clean up any host channels that were in use. */
356 dwc_otg_hc_regs_t *hc_regs;
357 hcchar_data_t hcchar;
359 num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
361 if (!dwc_otg_hcd->core_if->dma_enable) {
362 /* Flush out any channel requests in slave mode. */
363 for (i = 0; i < num_channels; i++) {
364 channel = dwc_otg_hcd->hc_ptr_array[i];
365 if (DWC_CIRCLEQ_EMPTY_ENTRY
366 (channel, hc_list_entry)) {
368 dwc_otg_hcd->core_if->
371 DWC_READ_REG32(&hc_regs->hcchar);
/* Forget all hub/port TT mappings — the whole tree is gone. */
385 for(i=0; i < 128; i++) {
386 dwc_otg_hcd->hub_port[i] = 0;
392 fiq_fsm_spin_unlock(&dwc_otg_hcd->fiq_state->lock);
/* Notify the OS-specific layer last, still under the HCD lock. */
396 if (dwc_otg_hcd->fops->disconnect) {
397 dwc_otg_hcd->fops->disconnect(dwc_otg_hcd);
400 DWC_SPINUNLOCK(dwc_otg_hcd->lock);
405 * HCD Callback function for stopping the HCD.
407 * @param p void pointer to the <code>struct usb_hcd</code>
409 static int32_t dwc_otg_hcd_stop_cb(void *p)
411 dwc_otg_hcd_t *dwc_otg_hcd = p;
413 DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p);
414 dwc_otg_hcd_stop(dwc_otg_hcd);
418 #ifdef CONFIG_USB_DWC_OTG_LPM
420 * HCD Callback function for sleep of HCD.
422 * @param p void pointer to the <code>struct usb_hcd</code>
424 static int dwc_otg_hcd_sleep_cb(void *p)
426 dwc_otg_hcd_t *hcd = p;
428 dwc_otg_hcd_free_hc_from_lpm(hcd);
436 * HCD Callback function for Remote Wakeup.
438 * @param p void pointer to the <code>struct usb_hcd</code>
440 static int dwc_otg_hcd_rem_wakeup_cb(void *p)
442 dwc_otg_hcd_t *hcd = p;
/* L2 (suspend) wakeup flags a port suspend change; otherwise, with LPM
 * support compiled in, an L1 wakeup flags a port L1 change. */
444 if (hcd->core_if->lx_state == DWC_OTG_L2) {
445 hcd->flags.b.port_suspend_change = 1;
447 #ifdef CONFIG_USB_DWC_OTG_LPM
449 hcd->flags.b.port_l1_change = 1;
456 * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
459 void dwc_otg_hcd_stop(dwc_otg_hcd_t * hcd)
461 hprt0_data_t hprt0 = {.d32 = 0 };
463 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n");
466 * The root hub should be disconnected before this function is called.
467 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
468 * and the QH lists (via ..._hcd_endpoint_disable).
471 /* Turn off all host-specific interrupts. */
472 dwc_otg_disable_host_interrupts(hcd->core_if);
474 /* Turn off the vbus power */
475 DWC_PRINTF("PortPower off\n");
/* hprt0 was zero-initialized; writing it clears PortPower. */
477 DWC_WRITE_REG32(hcd->core_if->host_if->hprt0, hprt0.d32);
/* Accepts a new URB from the OS layer: wraps it in a QTD, adds it to the
 * endpoint's QH (creating the QH on first use), and optionally kicks the
 * transaction scheduler. Returns 0 or a negative -DWC_E_* error code. */
481 int dwc_otg_hcd_urb_enqueue(dwc_otg_hcd_t * hcd,
482 dwc_otg_hcd_urb_t * dwc_otg_urb, void **ep_handle,
486 uint8_t needs_scheduling = 0;
487 dwc_otg_transaction_type_e tr_type;
489 gintmsk_data_t intr_mask = {.d32 = 0 };
490 hprt0_data_t hprt0 = { .d32 = 0 };
492 #ifdef DEBUG /* integrity checks (Broadcom) */
493 if (NULL == hcd->core_if) {
494 DWC_ERROR("**** DWC OTG HCD URB Enqueue - HCD has NULL core_if\n");
495 /* No longer connected. */
496 return -DWC_E_INVALID;
499 if (!hcd->flags.b.port_connect_status) {
500 /* No longer connected. */
501 DWC_ERROR("Not connected\n");
502 return -DWC_E_NO_DEVICE;
505 /* Some core configurations cannot support LS traffic on a FS root port */
506 if ((hcd->fops->speed(hcd, dwc_otg_urb->priv) == USB_SPEED_LOW) &&
507 (hcd->core_if->hwcfg2.b.fs_phy_type == 1) &&
508 (hcd->core_if->hwcfg2.b.hs_phy_type == 1)) {
509 hprt0.d32 = DWC_READ_REG32(hcd->core_if->host_if->hprt0);
510 if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED) {
511 return -DWC_E_NO_DEVICE;
/* Wrap the URB in a QTD; atomic_alloc selects a GFP_ATOMIC-style pool. */
515 qtd = dwc_otg_hcd_qtd_create(dwc_otg_urb, atomic_alloc);
517 DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n");
518 return -DWC_E_NO_MEMORY;
520 #ifdef DEBUG /* integrity checks (Broadcom) */
/* NOTE(review): these early returns leak the freshly-created QTD — the
 * non-debug path frees it on failure further down; confirm intent. */
521 if (qtd->urb == NULL) {
522 DWC_ERROR("**** DWC OTG HCD URB Enqueue created QTD with no URBs\n");
523 return -DWC_E_NO_MEMORY;
525 if (qtd->urb->priv == NULL) {
526 DWC_ERROR("**** DWC OTG HCD URB Enqueue created QTD URB with no URB handle\n");
527 return -DWC_E_NO_MEMORY;
/* Schedule immediately unless the SOF interrupt will do it for us. */
530 intr_mask.d32 = DWC_READ_REG32(&hcd->core_if->core_global_regs->gintmsk);
531 if(!intr_mask.b.sofintr || fiq_enable) needs_scheduling = 1;
532 if((((dwc_otg_qh_t *)ep_handle)->ep_type == UE_BULK) && !(qtd->urb->flags & URB_GIVEBACK_ASAP))
533 /* Do not schedule SG transactions until qtd has URB_GIVEBACK_ASAP set */
534 needs_scheduling = 0;
536 retval = dwc_otg_hcd_qtd_add(qtd, hcd, (dwc_otg_qh_t **) ep_handle, atomic_alloc);
537 // creates a new queue in ep_handle if it doesn't exist already
539 DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. "
540 "Error status %d\n", retval);
541 dwc_otg_hcd_qtd_free(qtd);
/* Kick the scheduler now rather than waiting for the next SOF. */
545 if(needs_scheduling) {
546 tr_type = dwc_otg_hcd_select_transactions(hcd);
547 if (tr_type != DWC_OTG_TRANSACTION_NONE) {
548 dwc_otg_hcd_queue_transactions(hcd, tr_type);
/* Cancels a previously enqueued URB: halts the host channel if the URB's QTD
 * is in flight (with special care in FIQ FSM mode), then frees the QTD and
 * deactivates or removes its QH. Returns 0 or a negative -DWC_E_* code. */
554 int dwc_otg_hcd_urb_dequeue(dwc_otg_hcd_t * hcd,
555 dwc_otg_hcd_urb_t * dwc_otg_urb)
558 dwc_otg_qtd_t *urb_qtd;
560 BUG_ON(!dwc_otg_urb);
562 #ifdef DEBUG /* integrity checks (Broadcom) */
565 DWC_ERROR("**** DWC OTG HCD URB Dequeue has NULL HCD\n");
566 return -DWC_E_INVALID;
568 if (dwc_otg_urb == NULL) {
569 DWC_ERROR("**** DWC OTG HCD URB Dequeue has NULL URB\n");
570 return -DWC_E_INVALID;
572 if (dwc_otg_urb->qtd == NULL) {
573 DWC_ERROR("**** DWC OTG HCD URB Dequeue with NULL QTD\n");
574 return -DWC_E_INVALID;
576 urb_qtd = dwc_otg_urb->qtd;
578 if (urb_qtd->qh == NULL) {
579 DWC_ERROR("**** DWC OTG HCD URB Dequeue with QTD with NULL Q handler\n");
580 return -DWC_E_INVALID;
/* NOTE(review): redundant re-assignment — urb_qtd already holds this value
 * on the DEBUG path; harmless, and required on the non-DEBUG path. */
583 urb_qtd = dwc_otg_urb->qtd;
588 if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
589 if (urb_qtd->in_process) {
590 dump_channel_info(hcd, qh);
593 #ifdef DEBUG /* integrity checks (Broadcom) */
594 if (hcd->core_if == NULL) {
595 DWC_ERROR("**** DWC OTG HCD URB Dequeue HCD has NULL core_if\n");
596 return -DWC_E_INVALID;
599 if (urb_qtd->in_process && qh->channel) {
600 /* The QTD is in process (it has been assigned to a channel). */
601 if (hcd->flags.b.port_connect_status) {
602 int n = qh->channel->hc_num;
604 * If still connected (i.e. in host mode), halt the
605 * channel so it can be used for other transfers. If
606 * no longer connected, the host registers can't be
607 * written to halt the channel since the core is in
610 /* In FIQ FSM mode, we need to shut down carefully.
611 * The FIQ may attempt to restart a disabled channel */
612 if (fiq_fsm_enable && (hcd->fiq_state->channel[n].fsm != FIQ_PASSTHROUGH)) {
615 enum fiq_fsm_state state;
/* Mark the channel halting under the FIQ lock, and abort any
 * high-speed isochronous state machine in flight. */
618 fiq_fsm_spin_lock(&hcd->fiq_state->lock);
619 qh->channel->halt_status = DWC_OTG_HC_XFER_URB_DEQUEUE;
620 qh->channel->halt_pending = 1;
621 if (hcd->fiq_state->channel[n].fsm == FIQ_HS_ISOC_TURBO ||
622 hcd->fiq_state->channel[n].fsm == FIQ_HS_ISOC_SLEEPING)
623 hcd->fiq_state->channel[n].fsm = FIQ_HS_ISOC_ABORTED;
624 fiq_fsm_spin_unlock(&hcd->fiq_state->lock);
/* For non-periodic split FSMs, poll until the FSM reaches a
 * terminal state before touching the channel. */
627 if (dwc_qh_is_non_per(qh)) {
629 state = READ_ONCE(hcd->fiq_state->channel[n].fsm);
630 running = (state != FIQ_NP_SPLIT_DONE) &&
631 (state != FIQ_NP_SPLIT_LS_ABORTED) &&
632 (state != FIQ_NP_SPLIT_HS_ABORTED);
638 DWC_WARN("Timed out waiting for FSM NP transfer to complete on %d",
639 qh->channel->hc_num);
/* Non-FSM path: halt through the normal CIL API. */
642 dwc_otg_hc_halt(hcd->core_if, qh->channel,
643 DWC_OTG_HC_XFER_URB_DEQUEUE);
649 * Free the QTD and clean up the associated QH. Leave the QH in the
650 * schedule if it has any remaining QTDs.
653 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue - "
654 "delete %sQueue handler\n",
655 hcd->core_if->dma_desc_enable?"DMA ":"");
656 if (!hcd->core_if->dma_desc_enable) {
657 uint8_t b = urb_qtd->in_process;
/* Reset NAK holdoff so a new submission on this split QH is not delayed. */
658 if (nak_holdoff && qh->do_split && dwc_qh_is_non_per(qh))
659 qh->nak_frame = 0xFFFF;
660 dwc_otg_hcd_qtd_remove_and_free(hcd, urb_qtd, qh);
662 dwc_otg_hcd_qh_deactivate(hcd, qh, 0);
664 } else if (DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {
665 dwc_otg_hcd_qh_remove(hcd, qh);
668 dwc_otg_hcd_qtd_remove_and_free(hcd, urb_qtd, qh);
/* Disables an endpoint: waits (bounded by 'retry') for its QH's QTD list to
 * drain, then removes the QH from the schedule and frees it. Returns 0 or
 * -DWC_E_INVALID on bad arguments. */
673 int dwc_otg_hcd_endpoint_disable(dwc_otg_hcd_t * hcd, void *ep_handle,
676 dwc_otg_qh_t *qh = (dwc_otg_qh_t *) ep_handle;
678 dwc_irqflags_t flags;
681 retval = -DWC_E_INVALID;
686 retval = -DWC_E_INVALID;
690 DWC_SPINLOCK_IRQSAVE(hcd->lock, &flags);
/* Drop the lock between polls so in-flight completions can make progress. */
692 while (!DWC_CIRCLEQ_EMPTY(&qh->qtd_list) && retry) {
693 DWC_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
696 DWC_SPINLOCK_IRQSAVE(hcd->lock, &flags);
699 dwc_otg_hcd_qh_remove(hcd, qh);
701 DWC_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
703 * Split dwc_otg_hcd_qh_remove_and_free() into qh_remove
704 * and qh_free to prevent stack dump on DWC_DMA_FREE() with
705 * irq_disabled (spinlock_irqsave) in dwc_otg_hcd_desc_list_free()
706 * and dwc_otg_hcd_frame_list_alloc().
/* qh_free runs outside the lock for the reason above. */
708 dwc_otg_hcd_qh_free(hcd, qh);
714 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
715 int dwc_otg_hcd_endpoint_reset(dwc_otg_hcd_t * hcd, void *ep_handle)
718 dwc_otg_qh_t *qh = (dwc_otg_qh_t *) ep_handle;
720 return -DWC_E_INVALID;
722 qh->data_toggle = DWC_OTG_HC_PID_DATA0;
728 * HCD Callback structure for handling mode switching.
/* Registered with the CIL in dwc_otg_hcd_init(); the CIL invokes these on
 * OTG state transitions, passing back the hcd pointer supplied at
 * registration time. */
730 static dwc_otg_cil_callbacks_t hcd_cil_callbacks = {
731 .start = dwc_otg_hcd_start_cb,
732 .stop = dwc_otg_hcd_stop_cb,
733 .disconnect = dwc_otg_hcd_disconnect_cb,
734 .session_start = dwc_otg_hcd_session_start_cb,
735 .resume_wakeup = dwc_otg_hcd_rem_wakeup_cb,
736 #ifdef CONFIG_USB_DWC_OTG_LPM
737 .sleep = dwc_otg_hcd_sleep_cb,
743 * Reset tasklet function
/* Pulses the port reset bit in HPRT0 from tasklet context, then flags the
 * reset change for the hub driver. The second HPRT0 write (line 759) clears
 * the reset — the intervening delay is elided from this view. */
745 static void reset_tasklet_func(void *data)
747 dwc_otg_hcd_t *dwc_otg_hcd = (dwc_otg_hcd_t *) data;
748 dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
751 DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
753 hprt0.d32 = dwc_otg_read_hprt0(core_if);
755 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
759 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
760 dwc_otg_hcd->flags.b.port_reset_change = 1;
/* Tasklet that drains the completed-URB queue and gives each URB back to
 * the USB core. The HCD lock is dropped around usb_hcd_giveback_urb()
 * because the giveback may re-enter the HCD (e.g. resubmission). */
763 static void completion_tasklet_func(void *ptr)
765 dwc_otg_hcd_t *hcd = (dwc_otg_hcd_t *) ptr;
767 urb_tq_entry_t *item;
768 dwc_irqflags_t flags;
770 /* This could just be spin_lock_irq */
771 DWC_SPINLOCK_IRQSAVE(hcd->lock, &flags);
772 while (!DWC_TAILQ_EMPTY(&hcd->completed_urb_list)) {
773 item = DWC_TAILQ_FIRST(&hcd->completed_urb_list);
775 DWC_TAILQ_REMOVE(&hcd->completed_urb_list, item,
777 DWC_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
780 usb_hcd_giveback_urb(hcd->priv, urb, urb->status);
/* Re-acquire before re-testing the queue head. */
783 DWC_SPINLOCK_IRQSAVE(hcd->lock, &flags);
785 DWC_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
/* Tears down one QH list: kills any remaining URBs under the lock, then
 * removes and frees every QH. A never-initialized list is a no-op. */
789 static void qh_list_free(dwc_otg_hcd_t * hcd, dwc_list_link_t * qh_list)
791 dwc_list_link_t *item;
793 dwc_irqflags_t flags;
795 if (!qh_list->next) {
796 /* The list hasn't been initialized yet. */
800 * Hold the spinlock here. Not needed if the function below
801 * is being called from ISR context.
803 DWC_SPINLOCK_IRQSAVE(hcd->lock, &flags);
804 /* Ensure there are no QTDs or URBs left. */
805 kill_urbs_in_qh_list(hcd, qh_list);
806 DWC_SPINUNLOCK_IRQRESTORE(hcd->lock, flags);
/* NOTE(review): plain FOREACH while freeing entries — the elided lines may
 * use a safe variant; confirm against the full file. */
808 DWC_LIST_FOREACH(item, qh_list) {
809 qh = DWC_LIST_ENTRY(item, dwc_otg_qh_t, qh_list_entry);
810 dwc_otg_hcd_qh_remove_and_free(hcd, qh);
815 * Exit from Hibernation if Host did not detect SRP from connected SRP capable
816 * Device during SRP time by host power up.
818 void dwc_otg_hcd_power_up(void *ptr)
820 gpwrdn_data_t gpwrdn = {.d32 = 0 };
821 dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *) ptr;
823 DWC_PRINTF("%s called\n", __FUNCTION__);
825 if (!core_if->hibernation_suspend) {
826 DWC_PRINTF("Already exited from Hibernation\n");
/* The GPWRDN wake-up sequence below follows the power-down controller
 * protocol: each step toggles a single bit, with the elided delay lines
 * between the writes. */
830 /* Switch on the voltage to the core */
831 gpwrdn.b.pwrdnswtch = 1;
832 DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
837 gpwrdn.b.pwrdnrstn = 1;
838 DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
841 /* Disable power clamps */
843 gpwrdn.b.pwrdnclmp = 1;
844 DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
846 /* Remove reset the core signal */
848 gpwrdn.b.pwrdnrstn = 1;
849 DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
852 /* Disable PMU interrupt */
854 gpwrdn.b.pmuintsel = 1;
855 DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
857 core_if->hibernation_suspend = 0;
861 gpwrdn.b.pmuactv = 1;
862 DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
867 gpwrdn.b.dis_vbus = 1;
868 DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
/* Core is awake again: reinitialize and restart host operation. */
870 core_if->op_state = A_HOST;
871 dwc_otg_core_init(core_if);
872 dwc_otg_enable_global_interrupts(core_if);
873 cil_hcd_start(core_if);
/* Resets one FIQ FSM channel slot to its idle (passthrough) state: clears the
 * cached register copies and split/isoc bookkeeping, then poisons the
 * channel's DMA bounce buffer. Called per-channel at init and after use. */
876 void dwc_otg_cleanup_fiq_channel(dwc_otg_hcd_t *hcd, uint32_t num)
878 struct fiq_channel_state *st = &hcd->fiq_state->channel[num];
879 struct fiq_dma_blob *blob = hcd->fiq_dmab;
882 st->fsm = FIQ_PASSTHROUGH;
883 st->hcchar_copy.d32 = 0;
884 st->hcsplt_copy.d32 = 0;
885 st->hcint_copy.d32 = 0;
886 st->hcintmsk_copy.d32 = 0;
887 st->hctsiz_copy.d32 = 0;
888 st->hcdma_copy.d32 = 0;
892 st->expected_uframe = 0;
894 st->dma_info.index = 0;
/* 255 marks a split-transaction DMA slot as unused. */
895 for (i = 0; i < 6; i++)
896 st->dma_info.slot_len[i] = 255;
897 st->hs_isoc_info.index = 0;
898 st->hs_isoc_info.iso_desc = NULL;
899 st->hs_isoc_info.nrframes = 0;
/* 0x6b poison; 1128 is presumably sizeof(struct fiq_dma_channel) — TODO
 * confirm and replace the magic number with the sizeof expression. */
901 DWC_MEMSET(&blob->channel[num].index[0], 0x6b, 1128);
905 * Frees secondary storage associated with the dwc_otg_hcd structure contained
906 * in the struct usb_hcd field.
/* Tear-down mirror of dwc_otg_hcd_init(): timers, QH lists, channels,
 * status buffer, lock, tasklets, FIQ state, then the hcd itself. Also used
 * as the error-unwind path by dwc_otg_hcd_init(), so every free must
 * tolerate partially-initialized state. */
908 static void dwc_otg_hcd_free(dwc_otg_hcd_t * dwc_otg_hcd)
910 struct device *dev = dwc_otg_hcd_to_dev(dwc_otg_hcd);
913 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n");
915 del_timers(dwc_otg_hcd);
917 /* Free memory for QH/QTD lists */
918 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive);
919 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active);
920 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive);
921 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready);
922 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned);
923 qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued);
925 /* Free memory for the host channels. */
926 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
927 dwc_hc_t *hc = dwc_otg_hcd->hc_ptr_array[i];
930 if (dwc_otg_hcd->core_if->hc_xfer_timer[i]) {
931 DWC_TIMER_FREE(dwc_otg_hcd->core_if->hc_xfer_timer[i]);
935 DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n",
/* Status buffer was DMA-allocated only when DMA is enabled. */
941 if (dwc_otg_hcd->core_if->dma_enable) {
942 if (dwc_otg_hcd->status_buf_dma) {
943 DWC_DMA_FREE(dev, DWC_OTG_HCD_STATUS_BUF_SIZE,
944 dwc_otg_hcd->status_buf,
945 dwc_otg_hcd->status_buf_dma);
947 } else if (dwc_otg_hcd->status_buf != NULL) {
948 DWC_FREE(dwc_otg_hcd->status_buf);
950 DWC_SPINLOCK_FREE(dwc_otg_hcd->lock);
951 /* Set core_if's lock pointer to NULL */
952 dwc_otg_hcd->core_if->lock = NULL;
954 DWC_TIMER_FREE(dwc_otg_hcd->conn_timer);
955 DWC_TASK_FREE(dwc_otg_hcd->reset_tasklet);
956 DWC_TASK_FREE(dwc_otg_hcd->completion_tasklet);
957 DWC_DMA_FREE(dev, 16, dwc_otg_hcd->fiq_state->dummy_send,
958 dwc_otg_hcd->fiq_state->dummy_send_dma);
959 DWC_FREE(dwc_otg_hcd->fiq_state);
961 #ifdef DWC_DEV_SRPCAP
962 if (dwc_otg_hcd->core_if->power_down == 2 &&
963 dwc_otg_hcd->core_if->pwron_timer) {
964 DWC_TIMER_FREE(dwc_otg_hcd->core_if->pwron_timer);
967 DWC_FREE(dwc_otg_hcd);
/* One-time construction of the HCD: allocates the lock, registers CIL
 * callbacks, builds the schedules and host-channel array, sets up FIQ state,
 * stack and DMA bounce buffers, timers and tasklets, and the status buffer.
 * On any allocation failure it unwinds via dwc_otg_hcd_free() and returns a
 * negative -DWC_E_* code; returns 0 on success. */
970 int dwc_otg_hcd_init(dwc_otg_hcd_t * hcd, dwc_otg_core_if_t * core_if)
972 struct device *dev = dwc_otg_hcd_to_dev(hcd);
978 #if (defined(DWC_LINUX) && defined(CONFIG_DEBUG_SPINLOCK))
979 DWC_SPINLOCK_ALLOC_LINUX_DEBUG(hcd->lock);
981 hcd->lock = DWC_SPINLOCK_ALLOC();
983 DWC_DEBUGPL(DBG_HCDV, "init of HCD %p given core_if %p\n",
986 DWC_ERROR("Could not allocate lock for pcd");
988 retval = -DWC_E_NO_MEMORY;
991 hcd->core_if = core_if;
993 /* Register the HCD CIL Callbacks */
994 dwc_otg_cil_register_hcd_callbacks(hcd->core_if,
995 &hcd_cil_callbacks, hcd);
997 /* Initialize the non-periodic schedule. */
998 DWC_LIST_INIT(&hcd->non_periodic_sched_inactive);
999 DWC_LIST_INIT(&hcd->non_periodic_sched_active);
1001 /* Initialize the periodic schedule. */
1002 DWC_LIST_INIT(&hcd->periodic_sched_inactive);
1003 DWC_LIST_INIT(&hcd->periodic_sched_ready);
1004 DWC_LIST_INIT(&hcd->periodic_sched_assigned);
1005 DWC_LIST_INIT(&hcd->periodic_sched_queued);
1006 DWC_TAILQ_INIT(&hcd->completed_urb_list);
1008 * Create a host channel descriptor for each host channel implemented
1009 * in the controller. Initialize the channel descriptor array.
1011 DWC_CIRCLEQ_INIT(&hcd->free_hc_list);
1012 num_channels = hcd->core_if->core_params->host_channels;
1013 DWC_MEMSET(hcd->hc_ptr_array, 0, sizeof(hcd->hc_ptr_array));
1014 for (i = 0; i < num_channels; i++) {
1015 channel = DWC_ALLOC(sizeof(dwc_hc_t));
1016 if (channel == NULL) {
1017 retval = -DWC_E_NO_MEMORY;
1018 DWC_ERROR("%s: host channel allocation failed\n",
1020 dwc_otg_hcd_free(hcd);
1023 channel->hc_num = i;
1024 hcd->hc_ptr_array[i] = channel;
/* Per-channel transfer timeout timer, bound to this channel's info. */
1026 hcd->core_if->hc_xfer_timer[i] =
1027 DWC_TIMER_ALLOC("hc timer", hc_xfer_timeout,
1028 &hcd->core_if->hc_xfer_info[i]);
1030 DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i,
/* FIQ state carries one fiq_channel_state per host channel. */
1035 hcd->fiq_state = DWC_ALLOC(sizeof(struct fiq_state) + (sizeof(struct fiq_channel_state) * num_channels));
1036 if (!hcd->fiq_state) {
1037 retval = -DWC_E_NO_MEMORY;
1038 DWC_ERROR("%s: cannot allocate fiq_state structure\n", __func__);
1039 dwc_otg_hcd_free(hcd);
1042 DWC_MEMSET(hcd->fiq_state, 0, (sizeof(struct fiq_state) + (sizeof(struct fiq_channel_state) * num_channels)));
1045 spin_lock_init(&hcd->fiq_state->lock);
1048 for (i = 0; i < num_channels; i++) {
1049 hcd->fiq_state->channel[i].fsm = FIQ_PASSTHROUGH;
1051 hcd->fiq_state->dummy_send = DWC_DMA_ALLOC_ATOMIC(dev, 16,
1052 &hcd->fiq_state->dummy_send_dma);
1054 hcd->fiq_stack = DWC_ALLOC(sizeof(struct fiq_stack));
1055 if (!hcd->fiq_stack) {
1056 retval = -DWC_E_NO_MEMORY;
1057 DWC_ERROR("%s: cannot allocate fiq_stack structure\n", __func__);
1058 dwc_otg_hcd_free(hcd);
/* Guard values used to detect FIQ stack overflow at runtime. */
1061 hcd->fiq_stack->magic1 = 0xDEADBEEF;
1062 hcd->fiq_stack->magic2 = 0xD00DFEED;
1063 hcd->fiq_state->gintmsk_saved.d32 = ~0;
1064 hcd->fiq_state->haintmsk_saved.b2.chint = ~0;
1066 /* This bit is terrible and uses no API, but necessary. The FIQ has no concept of DMA pools
1067 * (and if it did, would be a lot slower). This allocates a chunk of memory (~9kiB for 8 host channels)
1068 * for use as transaction bounce buffers in a 2-D array. Our access into this chunk is done by some
1069 * moderately readable array casts.
1071 hcd->fiq_dmab = DWC_DMA_ALLOC(dev, (sizeof(struct fiq_dma_channel) * num_channels), &hcd->fiq_state->dma_base);
1072 DWC_WARN("FIQ DMA bounce buffers: virt = %px dma = %pad len=%zu",
1073 hcd->fiq_dmab, &hcd->fiq_state->dma_base,
1074 sizeof(struct fiq_dma_channel) * num_channels);
/* 9024 is presumably num_channels * sizeof(struct fiq_dma_channel) for 8
 * channels — TODO confirm; a sizeof-based expression would be safer. */
1076 DWC_MEMSET(hcd->fiq_dmab, 0x6b, 9024);
1078 /* pointer for debug in fiq_print */
1079 hcd->fiq_state->fiq_dmab = hcd->fiq_dmab;
1080 if (fiq_fsm_enable) {
1082 for (i=0; i < hcd->core_if->core_params->host_channels; i++) {
1083 dwc_otg_cleanup_fiq_channel(hcd, i);
1085 DWC_PRINTF("FIQ FSM acceleration enabled for :\n%s%s%s%s",
1086 (fiq_fsm_mask & 0x1) ? "Non-periodic Split Transactions\n" : "",
1087 (fiq_fsm_mask & 0x2) ? "Periodic Split Transactions\n" : "",
1088 (fiq_fsm_mask & 0x4) ? "High-Speed Isochronous Endpoints\n" : "",
1089 (fiq_fsm_mask & 0x8) ? "Interrupt/Control Split Transaction hack enabled\n" : "");
1093 /* Initialize the Connection timeout timer. */
1094 hcd->conn_timer = DWC_TIMER_ALLOC("Connection timer",
1095 dwc_otg_hcd_connect_timeout, 0);
1097 printk(KERN_DEBUG "dwc_otg: Microframe scheduler %s\n", microframe_schedule ? "enabled":"disabled");
1098 if (microframe_schedule)
1099 init_hcd_usecs(hcd);
1101 /* Initialize reset tasklet. */
1102 hcd->reset_tasklet = DWC_TASK_ALLOC("reset_tasklet", reset_tasklet_func, hcd);
1104 hcd->completion_tasklet = DWC_TASK_ALLOC("completion_tasklet",
1105 completion_tasklet_func, hcd);
1106 #ifdef DWC_DEV_SRPCAP
1107 if (hcd->core_if->power_down == 2) {
1108 /* Initialize Power on timer for Host power up in case hibernation */
1109 hcd->core_if->pwron_timer = DWC_TIMER_ALLOC("PWRON TIMER",
1110 dwc_otg_hcd_power_up, core_if);
1115 * Allocate space for storing data on status transactions. Normally no
1116 * data is sent, but this space acts as a bit bucket. This must be
1117 * done after usb_add_hcd since that function allocates the DMA buffer
1120 if (hcd->core_if->dma_enable) {
1122 DWC_DMA_ALLOC(dev, DWC_OTG_HCD_STATUS_BUF_SIZE,
1123 &hcd->status_buf_dma);
1125 hcd->status_buf = DWC_ALLOC(DWC_OTG_HCD_STATUS_BUF_SIZE);
1127 if (!hcd->status_buf) {
1128 retval = -DWC_E_NO_MEMORY;
1129 DWC_ERROR("%s: status_buf allocation failed\n", __func__);
1130 dwc_otg_hcd_free(hcd);
/* Descriptor-DMA frame list is allocated lazily; start empty. */
1135 hcd->frame_list = NULL;
1136 hcd->frame_list_dma = 0;
1137 hcd->periodic_qh_count = 0;
1139 DWC_MEMSET(hcd->hub_port, 0, sizeof(hcd->hub_port));
1141 DWC_MEMSET(hcd->hub_port_alloc, -1, sizeof(hcd->hub_port_alloc));
1148 void dwc_otg_hcd_remove(dwc_otg_hcd_t * hcd)
1150 /* Turn off all host-specific interrupts. */
1151 dwc_otg_disable_host_interrupts(hcd->core_if);
1153 dwc_otg_hcd_free(hcd);
1157 * Initializes dynamic portions of the DWC_otg HCD state.
/* Called on (re)start of host mode: resets the scheduler cursors and channel
 * accounting, rebuilds the free-channel list, and reinitializes the core for
 * host operation. */
1159 static void dwc_otg_hcd_reinit(dwc_otg_hcd_t * hcd)
1164 dwc_hc_t *channel_tmp;
1168 hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active;
1169 if (!microframe_schedule) {
1170 hcd->non_periodic_channels = 0;
1171 hcd->periodic_channels = 0;
1173 hcd->available_host_channels = hcd->core_if->core_params->host_channels;
1176 * Put all channels in the free channel list and clean up channel
/* Drain whatever is left on the free list first... */
1179 DWC_CIRCLEQ_FOREACH_SAFE(channel, channel_tmp,
1180 &hcd->free_hc_list, hc_list_entry) {
1181 DWC_CIRCLEQ_REMOVE(&hcd->free_hc_list, channel, hc_list_entry);
/* ...then repopulate it with every channel, cleaned to idle state. */
1184 num_channels = hcd->core_if->core_params->host_channels;
1185 for (i = 0; i < num_channels; i++) {
1186 channel = hcd->hc_ptr_array[i];
1187 DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, channel,
1189 dwc_otg_hc_cleanup(hcd->core_if, channel);
1192 /* Initialize the DWC core for host mode operation. */
1193 dwc_otg_core_host_init(hcd->core_if);
1195 /* Set core_if's lock pointer to the hcd->lock */
1196 hcd->core_if->lock = hcd->lock;
1200 * Assigns transactions from a QTD to a free host channel and initializes the
1201 * host channel to perform the transactions. The host channel is removed from
1204 * @param hcd The HCD state structure.
1205 * @param qh Transactions from the first QTD for this QH are selected and
1206 * assigned to a free host channel.
1208 static void assign_and_init_hc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
1212 dwc_otg_hcd_urb_t *urb;
1215 uint32_t intr_enable;
1216 unsigned long flags;
1217 gintmsk_data_t gintmsk = { .d32 = 0, };
1218 struct device *dev = dwc_otg_hcd_to_dev(hcd);
1220 qtd = DWC_CIRCLEQ_FIRST(&qh->qtd_list);
1224 DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p) - urb %x, actual_length %d\n", __func__, hcd, qh, (unsigned int)urb, urb->actual_length);
/*
 * Clamp a bogus actual_length for OUT transfers so the xfer_len
 * computation below (urb->length - urb->actual_length) cannot underflow.
 */
1226 if (((urb->actual_length < 0) || (urb->actual_length > urb->length)) && !dwc_otg_hcd_is_pipe_in(&urb->pipe_info))
1227 urb->actual_length = urb->length;
1230 hc = DWC_CIRCLEQ_FIRST(&hcd->free_hc_list);
1232 /* Remove the host channel from the free list. */
1233 DWC_CIRCLEQ_REMOVE_INIT(&hcd->free_hc_list, hc, hc_list_entry);
1237 qtd->in_process = 1;
1240 * Use usb_pipedevice to determine device address. This address is
1241 * 0 before the SET_ADDRESS command and the correct address afterward.
1243 hc->dev_addr = dwc_otg_hcd_get_dev_addr(&urb->pipe_info);
1244 hc->ep_num = dwc_otg_hcd_get_ep_num(&urb->pipe_info);
1245 hc->speed = qh->dev_speed;
1246 hc->max_packet = dwc_max_packet(qh->maxp);
/* Fresh channel: no transfer in flight, no halt requested yet. */
1248 hc->xfer_started = 0;
1249 hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS;
1250 hc->error_state = (qtd->error_count > 0);
1251 hc->halt_on_queue = 0;
1252 hc->halt_pending = 0;
1256 * The following values may be modified in the transfer type section
1257 * below. The xfer_len value may be reduced when the transfer is
1258 * started to accommodate the max widths of the XferSize and PktCnt
1259 * fields in the HCTSIZn register.
1262 hc->ep_is_in = (dwc_otg_hcd_is_pipe_in(&urb->pipe_info) != 0);
1266 hc->do_ping = qh->ping_state;
1269 hc->data_pid_start = qh->data_toggle;
1270 hc->multi_count = 1;
/*
 * Pick the transfer buffer: DMA address when the core does DMA,
 * virtual address in slave mode.  Both are offset by the number of
 * bytes already transferred.
 */
1272 if (hcd->core_if->dma_enable) {
1273 hc->xfer_buff = (uint8_t *) urb->dma + urb->actual_length;
1275 /* For non-dword aligned case */
1276 if (((unsigned long)hc->xfer_buff & 0x3)
1277 && !hcd->core_if->dma_desc_enable) {
1278 ptr = (uint8_t *) urb->buf + urb->actual_length;
1281 hc->xfer_buff = (uint8_t *) urb->buf + urb->actual_length;
1283 hc->xfer_len = urb->length - urb->actual_length;
1287 * Set the split attributes
1291 uint32_t hub_addr, port_addr;
1293 hc->start_pkt_count = 1;
1294 hc->xact_pos = qtd->isoc_split_pos;
1295 /* We don't need to do complete splits anymore */
1296 // if(fiq_fsm_enable)
1298 hc->complete_split = qtd->complete_split = 0;
1300 hc->complete_split = qtd->complete_split;
1302 hcd->fops->hub_info(hcd, urb->priv, &hub_addr, &port_addr);
1303 hc->hub_addr = (uint8_t) hub_addr;
1304 hc->port_addr = (uint8_t) port_addr;
1307 switch (dwc_otg_hcd_get_pipe_type(&urb->pipe_info)) {
1309 hc->ep_type = DWC_OTG_EP_TYPE_CONTROL;
1310 switch (qtd->control_phase) {
1311 case DWC_OTG_CONTROL_SETUP:
1312 DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n");
1315 hc->data_pid_start = DWC_OTG_HC_PID_SETUP;
1316 if (hcd->core_if->dma_enable) {
1317 hc->xfer_buff = (uint8_t *) urb->setup_dma;
1319 hc->xfer_buff = (uint8_t *) urb->setup_packet;
1324 case DWC_OTG_CONTROL_DATA:
1325 DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n");
1327 * Hardware bug: small IN packets with length < 4
1328 * cause a 4-byte write to memory. We can only catch
1329 * the case where we know a short packet is going to be
1330 * returned in a control transfer, as the length is
1331 * specified in the setup packet. This is only an issue
1332 * for drivers that insist on packing a device's various
1333 * properties into a struct and querying them one at a
1335 * Force the use of align_buf so that the subsequent
1336 * memcpy puts the right number of bytes in the URB's
/*
 * wLength is bytes 6-7 of the 8-byte SETUP packet, read here as the
 * 4th 16-bit word.  NOTE(review): assumes a little-endian host --
 * confirm, since wLength is little-endian on the wire.
 */
1339 wLength = ((uint16_t *)urb->setup_packet)[3];
1340 if (hc->ep_is_in && wLength < 4)
1341 ptr = hc->xfer_buff;
1343 hc->data_pid_start = qtd->data_toggle;
1345 case DWC_OTG_CONTROL_STATUS:
1347 * Direction is opposite of data direction or IN if no
1350 DWC_DEBUGPL(DBG_HCDV, " Control status transaction\n");
1351 if (urb->length == 0) {
1355 dwc_otg_hcd_is_pipe_out(&urb->pipe_info);
/* Status stage always uses DATA1. */
1361 hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
/* The status stage carries no payload; point at the shared bit bucket. */
1364 if (hcd->core_if->dma_enable) {
1365 hc->xfer_buff = (uint8_t *) hcd->status_buf_dma;
1367 hc->xfer_buff = (uint8_t *) hcd->status_buf;
1374 hc->ep_type = DWC_OTG_EP_TYPE_BULK;
1377 hc->ep_type = DWC_OTG_EP_TYPE_INTR;
1379 case UE_ISOCHRONOUS:
1381 struct dwc_otg_hcd_iso_packet_desc *frame_desc;
1383 hc->ep_type = DWC_OTG_EP_TYPE_ISOC;
1385 if (hcd->core_if->dma_desc_enable)
1388 frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
1390 frame_desc->status = 0;
1392 if (hcd->core_if->dma_enable) {
1393 hc->xfer_buff = (uint8_t *) urb->dma;
1395 hc->xfer_buff = (uint8_t *) urb->buf;
1398 frame_desc->offset + qtd->isoc_split_offset;
1400 frame_desc->length - qtd->isoc_split_offset;
1402 /* For non-dword aligned buffers */
1403 if (((unsigned long)hc->xfer_buff & 0x3)
1404 && hcd->core_if->dma_enable) {
1406 (uint8_t *) urb->buf + frame_desc->offset +
1407 qtd->isoc_split_offset;
/*
 * Up to 188 bytes fit in a single periodic split transaction;
 * longer payloads must start with XACTPOS_BEGIN and be continued.
 */
1411 if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) {
1412 if (hc->xfer_len <= 188) {
1413 hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL;
1416 DWC_HCSPLIT_XACTPOS_BEGIN;
1422 /* non DWORD-aligned buffer case */
/*
 * Lazily allocate a per-QH DWORD-aligned bounce buffer.  Non-isoc
 * transfers size it for the largest possible transfer.
 */
1425 if (hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
1426 buf_size = hcd->core_if->core_params->max_transfer_size;
1430 if (!qh->dw_align_buf) {
1431 qh->dw_align_buf = DWC_DMA_ALLOC_ATOMIC(dev, buf_size,
1432 &qh->dw_align_buf_dma);
1433 if (!qh->dw_align_buf) {
1435 ("%s: Failed to allocate memory to handle "
1436 "non-dword aligned buffer case\n",
/* OUT data must be staged into the bounce buffer before starting. */
1441 if (!hc->ep_is_in) {
1442 dwc_memcpy(qh->dw_align_buf, ptr, hc->xfer_len);
1444 hc->align_buff = qh->dw_align_buf_dma;
1449 if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
1450 hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
1452 * This value may be modified when the transfer is started to
1453 * reflect the actual transfer length.
/* High-bandwidth multiplier from the endpoint's wMaxPacketSize field. */
1455 hc->multi_count = dwc_hb_mult(qh->maxp);
1458 if (hcd->core_if->dma_desc_enable)
1459 hc->desc_list_addr = qh->desc_list_dma;
1461 dwc_otg_hc_init(hcd->core_if, hc);
/*
 * The interrupt-mask updates below race with the FIQ handler, so they
 * are done with IRQs disabled and (when FIQ mode is active -- the
 * conditional lines are elided in this view) under the FIQ spinlock.
 */
1463 local_irq_save(flags);
1466 local_fiq_disable();
1467 fiq_fsm_spin_lock(&hcd->fiq_state->lock);
1470 /* Enable the top level host channel interrupt. */
1471 intr_enable = (1 << hc->hc_num);
1472 DWC_MODIFY_REG32(&hcd->core_if->host_if->host_global_regs->haintmsk, 0, intr_enable);
1474 /* Make sure host channel interrupts are enabled. */
1475 gintmsk.b.hcintr = 1;
1476 DWC_MODIFY_REG32(&hcd->core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
1479 fiq_fsm_spin_unlock(&hcd->fiq_state->lock);
1483 local_irq_restore(flags);
1489 * fiq_fsm_transaction_suitable() - Test a QH for compatibility with the FIQ
1490 * @hcd: Pointer to the dwc_otg_hcd struct
1491 * @qh: pointer to the endpoint's queue head
1493 * Transaction start/end control flow is grafted onto the existing dwc_otg
1494 * mechanisms, to avoid spaghettifying the functions more than they already are.
1495 * This function's eligibility check is altered by debug parameter.
1497 * Returns: 0 for unsuitable, 1 implies the FIQ can be enabled for this transaction.
1500 int fiq_fsm_transaction_suitable(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
/*
 * NOTE(review): individual bits of the fiq_fsm_mask module parameter gate
 * eligibility per transfer class (bits 0 and 1 are tested per endpoint
 * type below, bit 2 gates high-speed isochronous) -- confirm the exact
 * bit meanings against the module parameter documentation.
 */
1503 switch (qh->ep_type) {
1506 if (fiq_fsm_mask & (1 << 0))
1510 case UE_ISOCHRONOUS:
1511 if (fiq_fsm_mask & (1 << 1))
1517 } else if (qh->ep_type == UE_ISOCHRONOUS) {
1518 if (fiq_fsm_mask & (1 << 2)) {
1519 /* ISOCH support. We test for compatibility:
1520 * - DWORD aligned buffers
1521 * - Must be at least 2 transfers (otherwise pointless to use the FIQ)
1522 * If yes, then the fsm enqueue function will handle the state machine setup.
1524 dwc_otg_qtd_t *qtd = DWC_CIRCLEQ_FIRST(&qh->qtd_list);
1525 dwc_otg_hcd_urb_t *urb = qtd->urb;
/* Single-packet URBs gain nothing from FIQ handling. */
1529 if (urb->packet_count < 2)
/* Check every packet's DMA address for DWORD alignment. */
1531 for (i = 0; i < urb->packet_count; i++) {
1532 ptr = urb->dma + urb->iso_descs[i].offset;
1543 * fiq_fsm_setup_periodic_dma() - Set up DMA bounce buffers
1544 * @hcd: Pointer to the dwc_otg_hcd struct
1545 * @qh: Pointer to the endpoint's queue head
1547 * Periodic split transactions are transmitted modulo 188 bytes.
1548 * This necessitates slicing data up into buckets for isochronous out
1549 * and fixing up the DMA address for all IN transfers.
1551 * Returns 1 if the DMA bounce buffers have been used, 0 if the default
1552 * HC buffer has been used.
1554 int fiq_fsm_setup_periodic_dma(dwc_otg_hcd_t *hcd, struct fiq_channel_state *st, dwc_otg_qh_t *qh)
1556 int frame_length, i = 0;
1557 uint8_t *ptr = NULL;
1558 dwc_hc_t *hc = qh->channel;
1559 struct fiq_dma_blob *blob;
1560 struct dwc_otg_hcd_iso_packet_desc *frame_desc;
/*
 * Reset all six per-channel slot lengths to 255, which here acts as the
 * "slot unused" sentinel (valid slot payloads are at most 188 bytes).
 */
1562 for (i = 0; i < 6; i++) {
1563 st->dma_info.slot_len[i] = 255;
1565 st->dma_info.index = 0;
1569 * Set dma_regs to bounce buffer. FIQ will update the
1570 * state depending on transaction progress.
1571 * Pointer arithmetic on hcd->fiq_state->dma_base (a dma_addr_t)
1572 * to point it to the correct offset in the allocated buffers.
1574 blob = (struct fiq_dma_blob *) hcd->fiq_state->dma_base;
1575 st->hcdma_copy.d32 = (dma_addr_t) blob->channel[hc->hc_num].index[0].buf;
1577 /* Calculate the max number of CSPLITS such that the FIQ can time out
1578 * a transaction if it fails.
/* Count how many 188-byte chunks the endpoint's max packet size spans. */
1580 frame_length = st->hcchar_copy.b.mps;
1583 frame_length -= 188;
1584 } while (frame_length >= 0);
1588 if (qh->ep_type == UE_ISOCHRONOUS) {
1590 dwc_otg_qtd_t *qtd = DWC_CIRCLEQ_FIRST(&qh->qtd_list);
1592 frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
1593 frame_length = frame_desc->length;
1595 /* Virtual address for bounce buffers */
1596 blob = hcd->fiq_dmab;
1598 ptr = qtd->urb->buf + frame_desc->offset;
1599 if (frame_length == 0) {
1601 * for isochronous transactions, we must still transmit a packet
1602 * even if the length is zero.
1604 st->dma_info.slot_len[0] = 0;
/*
 * Slice the OUT frame into 188-byte slots, copying each slice into
 * the channel's bounce buffers; the final (short) slice takes the
 * remainder.
 */
1608 if (frame_length <= 188) {
1609 dwc_memcpy(&blob->channel[hc->hc_num].index[i].buf[0], ptr, frame_length);
1610 st->dma_info.slot_len[i] = frame_length;
1611 ptr += frame_length;
1613 dwc_memcpy(&blob->channel[hc->hc_num].index[i].buf[0], ptr, 188);
1614 st->dma_info.slot_len[i] = 188;
1618 frame_length -= 188;
1619 } while (frame_length > 0);
1622 ptr = qtd->urb->buf + frame_desc->offset;
1624 * Point the HC at the DMA address of the bounce buffers
1626 * Pointer arithmetic on hcd->fiq_state->dma_base (a
1627 * dma_addr_t) to point it to the correct offset in the
1628 * allocated buffers.
1630 blob = (struct fiq_dma_blob *) hcd->fiq_state->dma_base;
1631 st->hcdma_copy.d32 = (dma_addr_t) blob->channel[hc->hc_num].index[0].buf;
1633 /* fixup xfersize to the actual packet size */
1634 st->hctsiz_copy.b.pid = 0;
1635 st->hctsiz_copy.b.xfersize = st->dma_info.slot_len[0];
1638 /* For interrupt, single OUT packet required, goes in the SSPLIT from hc_buff. */
1645 * fiq_fsm_np_tt_contended() - Avoid performing contended non-periodic transfers
1646 * @hcd: Pointer to the dwc_otg_hcd struct
1647 * @qh: Pointer to the endpoint's queue head
1649 * Certain hub chips don't differentiate between IN and OUT non-periodic pipes
1650 * with the same endpoint number. If transfers get completed out of order
1651 * (disregarding the direction token) then the hub can lock up
1652 * or return erroneous responses.
1654 * Returns 1 if initiating the transfer would cause contention, 0 otherwise.
1656 int fiq_fsm_np_tt_contended(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
1659 struct fiq_channel_state *st;
1660 int dev_addr = qh->channel->dev_addr;
1661 int ep_num = qh->channel->ep_num;
/*
 * Scan every other host channel: if one is mid-way through a
 * non-periodic split to the same device address and endpoint number
 * (regardless of direction), report contention.
 */
1662 for (i = 0; i < hcd->core_if->core_params->host_channels; i++) {
1663 if (i == qh->channel->hc_num)
1665 st = &hcd->fiq_state->channel[i];
1667 case FIQ_NP_SSPLIT_STARTED:
1668 case FIQ_NP_SSPLIT_RETRY:
1669 case FIQ_NP_SSPLIT_PENDING:
1670 case FIQ_NP_OUT_CSPLIT_RETRY:
1671 case FIQ_NP_IN_CSPLIT_RETRY:
1672 if (st->hcchar_copy.b.devaddr == dev_addr &&
1673 st->hcchar_copy.b.epnum == ep_num)
1684 * Pushing a periodic request into the queue near the EOF1 point
1685 * in a microframe causes an erroneous frame-overrun (frmovrun) interrupt.
1686 * Usually, the request still goes out on the bus and causes a transfer,
1687 * but the core does not transfer the data to memory.
1688 * A guard interval (expressed in 60MHz clock cycles) is therefore
1689 * required; it must also cover the CPU latency between reading the value and enabling
1692 #define PERIODIC_FRREM_BACKOFF 1000
/*
 * fiq_fsm_queue_isoc_transaction() - Program a host channel and the FIQ
 * state for a high-speed isochronous transfer handled by the FIQ FSM.
 *
 * Builds shadow copies of the HCCHAR/HCTSIZ/HCSPLT/HCDMA/HCINTMSK
 * registers in the per-channel fiq_channel_state, writes them to the
 * hardware under the FIQ lock, and either starts the channel immediately
 * or leaves it sleeping for the SOF handler when too close to EOF1.
 */
1694 int fiq_fsm_queue_isoc_transaction(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
1696 dwc_hc_t *hc = qh->channel;
1697 dwc_otg_hc_regs_t *hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num];
1698 dwc_otg_qtd_t *qtd = DWC_CIRCLEQ_FIRST(&qh->qtd_list);
1700 struct fiq_channel_state *st = &hcd->fiq_state->channel[hc->hc_num];
1701 int xfer_len, nrpackets;
/* The channel must be idle (not claimed by the FIQ) before queueing. */
1705 if (st->fsm != FIQ_PASSTHROUGH)
1710 st->hcchar_copy.d32 = 0;
1711 st->hcchar_copy.b.mps = hc->max_packet;
1712 st->hcchar_copy.b.epdir = hc->ep_is_in;
1713 st->hcchar_copy.b.devaddr = hc->dev_addr;
1714 st->hcchar_copy.b.epnum = hc->ep_num;
1715 st->hcchar_copy.b.eptype = hc->ep_type;
/* The FIQ only wants the channel-halted interrupt. */
1717 st->hcintmsk_copy.b.chhltd = 1;
1719 frame = dwc_otg_hcd_get_frame_number(hcd);
/* Target the *next* (micro)frame: oddfrm is the opposite of the current parity. */
1720 st->hcchar_copy.b.oddfrm = (frame & 0x1) ? 0 : 1;
1722 st->hcchar_copy.b.lspddev = 0;
1723 /* Enable the channel later as a final register write. */
/* High-speed transfer: no split fields needed. */
1725 st->hcsplt_copy.d32 = 0;
1727 st->hs_isoc_info.iso_desc = (struct dwc_otg_hcd_iso_packet_desc *) &qtd->urb->iso_descs;
1728 st->hs_isoc_info.nrframes = qtd->urb->packet_count;
1729 /* grab the next DMA address offset from the array */
1730 st->hcdma_copy.d32 = qtd->urb->dma;
1731 hcdma.d32 = st->hcdma_copy.d32 + st->hs_isoc_info.iso_desc[0].offset;
1733 /* We need to set multi_count. This is a bit tricky - has to be set per-transaction as
1734 * the core needs to be told to send the correct number. Caution: for IN transfers,
1735 * this is always set to the maximum size of the endpoint. */
1736 xfer_len = st->hs_isoc_info.iso_desc[0].length;
/* Round the descriptor length up to whole max-packet-size packets. */
1737 nrpackets = (xfer_len + st->hcchar_copy.b.mps - 1) / st->hcchar_copy.b.mps;
1740 st->hcchar_copy.b.multicnt = nrpackets;
1741 st->hctsiz_copy.b.pktcnt = nrpackets;
1743 /* Initial PID also needs to be set */
/*
 * OUT: single packet uses DATA0, multiple packets start with MDATA.
 * IN: xfersize must cover the endpoint maximum; PID depends on the
 * number of packets expected per microframe (DATA0/DATA1/DATA2).
 */
1744 if (st->hcchar_copy.b.epdir == 0) {
1745 st->hctsiz_copy.b.xfersize = xfer_len;
1746 switch (st->hcchar_copy.b.multicnt) {
1748 st->hctsiz_copy.b.pid = DWC_PID_DATA0;
1752 st->hctsiz_copy.b.pid = DWC_PID_MDATA;
1757 st->hctsiz_copy.b.xfersize = nrpackets * st->hcchar_copy.b.mps;
1758 switch (st->hcchar_copy.b.multicnt) {
1760 st->hctsiz_copy.b.pid = DWC_PID_DATA0;
1763 st->hctsiz_copy.b.pid = DWC_PID_DATA1;
1766 st->hctsiz_copy.b.pid = DWC_PID_DATA2;
1771 st->hs_isoc_info.stride = qh->interval;
1772 st->uframe_sleeps = 0;
1774 fiq_print(FIQDBG_INT, hcd->fiq_state, "FSMQ %01d ", hc->hc_num);
1775 fiq_print(FIQDBG_INT, hcd->fiq_state, "%08x", st->hcchar_copy.d32);
1776 fiq_print(FIQDBG_INT, hcd->fiq_state, "%08x", st->hctsiz_copy.d32);
1777 fiq_print(FIQDBG_INT, hcd->fiq_state, "%08x", st->hcdma_copy.d32);
1778 hfnum.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hfnum);
/* Commit the shadow registers to hardware under the FIQ lock. */
1779 local_fiq_disable();
1780 fiq_fsm_spin_lock(&hcd->fiq_state->lock);
1781 DWC_WRITE_REG32(&hc_regs->hctsiz, st->hctsiz_copy.d32);
1782 DWC_WRITE_REG32(&hc_regs->hcsplt, st->hcsplt_copy.d32);
1783 DWC_WRITE_REG32(&hc_regs->hcdma, st->hcdma_copy.d32);
1784 DWC_WRITE_REG32(&hc_regs->hcchar, st->hcchar_copy.d32);
1785 DWC_WRITE_REG32(&hc_regs->hcintmsk, st->hcintmsk_copy.d32);
1786 if (hfnum.b.frrem < PERIODIC_FRREM_BACKOFF) {
1787 /* Prevent queueing near EOF1. Bad things happen if a periodic
1788 * split transaction is queued very close to EOF. SOF interrupt handler
1789 * will wake this channel at the next interrupt.
1791 st->fsm = FIQ_HS_ISOC_SLEEPING;
1792 st->uframe_sleeps = 1;
1794 st->fsm = FIQ_HS_ISOC_TURBO;
1795 st->hcchar_copy.b.chen = 1;
1796 DWC_WRITE_REG32(&hc_regs->hcchar, st->hcchar_copy.d32);
1799 st->hcchar_copy.b.chen = 0;
1800 fiq_fsm_spin_unlock(&hcd->fiq_state->lock);
1807 * fiq_fsm_queue_split_transaction() - Set up a host channel and FIQ state
1808 * @hcd: Pointer to the dwc_otg_hcd struct
1809 * @qh: Pointer to the endpoint's queue head
1811 * This overrides the dwc_otg driver's normal method of queueing a transaction.
1812 * Called from dwc_otg_hcd_queue_transactions(), this performs specific setup
1813 * for the nominated host channel.
1815 * For periodic transfers, it also peeks at the FIQ state to see if an immediate
1816 * start is possible. If not, then the FIQ is left to start the transfer.
1818 int fiq_fsm_queue_split_transaction(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh)
1820 int start_immediate = 1, i;
1822 dwc_hc_t *hc = qh->channel;
1823 dwc_otg_hc_regs_t *hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num];
1824 /* Program HC registers, setup FIQ_state, examine FIQ if periodic, start transfer (not if uframe 5) */
1825 int hub_addr, port_addr, frame, uframe;
1826 struct fiq_channel_state *st = &hcd->fiq_state->channel[hc->hc_num];
1829 * Non-periodic channel assignments stay in the non_periodic_active queue.
1830 * Therefore we get repeatedly called until the FIQ's done processing this channel.
1832 if (qh->channel->xfer_started == 1)
1835 if (st->fsm != FIQ_PASSTHROUGH) {
1836 pr_warn_ratelimited("%s:%d: Queue called for an active channel\n", __func__, __LINE__);
1840 qh->channel->xfer_started = 1;
/* Build the HCCHAR shadow. mps is capped at the remaining transfer length. */
1844 st->hcchar_copy.d32 = 0;
1845 st->hcchar_copy.b.mps = min_t(uint32_t, hc->xfer_len, hc->max_packet);
1846 st->hcchar_copy.b.epdir = hc->ep_is_in;
1847 st->hcchar_copy.b.devaddr = hc->dev_addr;
1848 st->hcchar_copy.b.epnum = hc->ep_num;
1849 st->hcchar_copy.b.eptype = hc->ep_type;
/*
 * NOTE(review): odd ep_type values select the periodic transfer types
 * in the DWC_OTG_EP_TYPE encoding (isoc/interrupt) -- confirm against
 * the eptype field definition.
 */
1850 if (hc->ep_type & 0x1) {
1852 st->hcchar_copy.b.multicnt = 3;
1854 /* Docs say set this to 1, but driver sets to 0! */
1855 st->hcchar_copy.b.multicnt = 0;
1857 st->hcchar_copy.b.multicnt = 1;
1858 st->hcchar_copy.b.oddfrm = 0;
1860 st->hcchar_copy.b.lspddev = (hc->speed == DWC_OTG_EP_SPEED_LOW) ? 1 : 0;
1861 /* Enable the channel later as a final register write. */
1863 st->hcsplt_copy.d32 = 0;
1865 hcd->fops->hub_info(hcd, DWC_CIRCLEQ_FIRST(&qh->qtd_list)->urb->priv, &hub_addr, &port_addr);
1866 st->hcsplt_copy.b.compsplt = 0;
1867 st->hcsplt_copy.b.spltena = 1;
1868 // XACTPOS is for isoc-out only but needs initialising anyway.
1869 st->hcsplt_copy.b.xactpos = ISOC_XACTPOS_ALL;
1870 if((qh->ep_type == DWC_OTG_EP_TYPE_ISOC) && (!qh->ep_is_in)) {
1871 /* For packetsize 0 < L < 188, ISOC_XACTPOS_ALL.
1872 * for longer than this, ISOC_XACTPOS_BEGIN and the FIQ
1873 * will update as necessary.
1875 if (hc->xfer_len > 188) {
1876 st->hcsplt_copy.b.xactpos = ISOC_XACTPOS_BEGIN;
1879 st->hcsplt_copy.b.hubaddr = (uint8_t) hub_addr;
1880 st->hcsplt_copy.b.prtaddr = (uint8_t) port_addr;
1881 st->hub_addr = hub_addr;
1882 st->port_addr = port_addr;
1885 st->hctsiz_copy.d32 = 0;
1886 st->hctsiz_copy.b.dopng = 0;
1887 st->hctsiz_copy.b.pid = hc->data_pid_start;
/* A split transaction carries at most one max-packet of data. */
1889 if (hc->ep_is_in || (hc->xfer_len > hc->max_packet)) {
1890 hc->xfer_len = min_t(uint32_t, hc->xfer_len, hc->max_packet);
1891 } else if (!hc->ep_is_in && (hc->xfer_len > 188)) {
1894 st->hctsiz_copy.b.xfersize = hc->xfer_len;
1896 st->hctsiz_copy.b.pktcnt = 1;
1898 if (hc->ep_type & 0x1) {
1900 * For potentially multi-packet transfers, must use the DMA bounce buffers. For IN transfers,
1901 * the DMA address is the address of the first 188byte slot buffer in the bounce buffer array.
1902 * For multi-packet OUT transfers, we need to copy the data into the bounce buffer array so the FIQ can punt
1903 * the right address out as necessary. hc->xfer_buff and hc->xfer_len have already been set
1904 * in assign_and_init_hc(), but this is for the eventual transaction completion only. The FIQ
1905 * must not touch internal driver state.
1907 if(!fiq_fsm_setup_periodic_dma(hcd, st, qh)) {
1908 if (hc->align_buff) {
1909 st->hcdma_copy.d32 = hc->align_buff;
1911 st->hcdma_copy.d32 = ((unsigned long) hc->xfer_buff & 0xFFFFFFFF);
/* Non-periodic: use the (possibly bounce-buffered) DMA address directly. */
1915 if (hc->align_buff) {
1916 st->hcdma_copy.d32 = hc->align_buff;
1918 st->hcdma_copy.d32 = ((unsigned long) hc->xfer_buff & 0xFFFFFFFF);
1921 /* The FIQ depends upon no other interrupts being enabled except channel halt.
1922 * Fixup channel interrupt mask. */
1923 st->hcintmsk_copy.d32 = 0;
1924 st->hcintmsk_copy.b.chhltd = 1;
1925 st->hcintmsk_copy.b.ahberr = 1;
1927 /* Hack courtesy of FreeBSD: apparently forcing Interrupt Split transactions
1928 * as Control puts the transfer into the non-periodic request queue and the
1929 * non-periodic handler in the hub. Makes things lots easier.
1931 if ((fiq_fsm_mask & 0x8) && hc->ep_type == UE_INTERRUPT) {
1932 st->hcchar_copy.b.multicnt = 0;
1933 st->hcchar_copy.b.oddfrm = 0;
1934 st->hcchar_copy.b.eptype = UE_CONTROL;
1935 if (hc->align_buff) {
1936 st->hcdma_copy.d32 = hc->align_buff;
1938 st->hcdma_copy.d32 = ((unsigned long) hc->xfer_buff & 0xFFFFFFFF);
/* Commit the shadow registers to the hardware channel. */
1941 DWC_WRITE_REG32(&hc_regs->hcdma, st->hcdma_copy.d32);
1942 DWC_WRITE_REG32(&hc_regs->hctsiz, st->hctsiz_copy.d32);
1943 DWC_WRITE_REG32(&hc_regs->hcsplt, st->hcsplt_copy.d32);
1944 DWC_WRITE_REG32(&hc_regs->hcchar, st->hcchar_copy.d32);
1945 DWC_WRITE_REG32(&hc_regs->hcintmsk, st->hcintmsk_copy.d32);
1947 local_fiq_disable();
1948 fiq_fsm_spin_lock(&hcd->fiq_state->lock);
/*
 * Periodic transfers: decide whether we may start now or must defer to
 * the FIQ.  Deferral happens near EOF1, in microframe 5, for isoc OUT,
 * for late IN splits, or when another channel already has a periodic
 * split in flight through the same hub/port (shared TT).
 */
1950 if (hc->ep_type & 0x1) {
1951 hfnum.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hfnum);
1952 frame = (hfnum.b.frnum & ~0x7) >> 3;
1953 uframe = hfnum.b.frnum & 0x7;
1954 if (hfnum.b.frrem < PERIODIC_FRREM_BACKOFF) {
1955 /* Prevent queueing near EOF1. Bad things happen if a periodic
1956 * split transaction is queued very close to EOF.
1958 start_immediate = 0;
1959 } else if (uframe == 5) {
1960 start_immediate = 0;
1961 } else if (hc->ep_type == UE_ISOCHRONOUS && !hc->ep_is_in) {
1962 start_immediate = 0;
1963 } else if (hc->ep_is_in && fiq_fsm_too_late(hcd->fiq_state, hc->hc_num)) {
1964 start_immediate = 0;
1966 /* Search through all host channels to determine if a transaction
1967 * is currently in progress */
1968 for (i = 0; i < hcd->core_if->core_params->host_channels; i++) {
1969 if (i == hc->hc_num || hcd->fiq_state->channel[i].fsm == FIQ_PASSTHROUGH)
1971 switch (hcd->fiq_state->channel[i].fsm) {
1972 /* TT is reserved for channels that are in the middle of a periodic
1973 * split transaction.
1975 case FIQ_PER_SSPLIT_STARTED:
1976 case FIQ_PER_CSPLIT_WAIT:
1977 case FIQ_PER_CSPLIT_NYET1:
1978 case FIQ_PER_CSPLIT_POLL:
1979 case FIQ_PER_ISO_OUT_ACTIVE:
1980 case FIQ_PER_ISO_OUT_LAST:
1981 if (hcd->fiq_state->channel[i].hub_addr == hub_addr &&
1982 hcd->fiq_state->channel[i].port_addr == port_addr) {
1983 start_immediate = 0;
1989 if (!start_immediate)
/* Interrupt-as-control transfers go through the NP queue, so always start. */
1994 if ((fiq_fsm_mask & 0x8) && hc->ep_type == UE_INTERRUPT)
1995 start_immediate = 1;
1997 fiq_print(FIQDBG_INT, hcd->fiq_state, "FSMQ %01d %01d", hc->hc_num, start_immediate);
1998 fiq_print(FIQDBG_INT, hcd->fiq_state, "%08d", hfnum.b.frrem);
1999 //fiq_print(FIQDBG_INT, hcd->fiq_state, "H:%02dP:%02d", hub_addr, port_addr);
2000 //fiq_print(FIQDBG_INT, hcd->fiq_state, "%08x", st->hctsiz_copy.d32);
2001 //fiq_print(FIQDBG_INT, hcd->fiq_state, "%08x", st->hcdma_copy.d32);
/* Choose the initial FSM state for the channel by transfer type. */
2002 switch (hc->ep_type) {
2005 if (fiq_fsm_np_tt_contended(hcd, qh)) {
2006 st->fsm = FIQ_NP_SSPLIT_PENDING;
2007 start_immediate = 0;
2009 st->fsm = FIQ_NP_SSPLIT_STARTED;
2012 case UE_ISOCHRONOUS:
2014 if (start_immediate) {
2015 st->fsm = FIQ_PER_SSPLIT_STARTED;
2017 st->fsm = FIQ_PER_SSPLIT_QUEUED;
2020 if (start_immediate) {
2021 /* Single-isoc OUT packets don't require FIQ involvement */
2022 if (st->nrpackets == 1) {
2023 st->fsm = FIQ_PER_ISO_OUT_LAST;
2025 st->fsm = FIQ_PER_ISO_OUT_ACTIVE;
2028 st->fsm = FIQ_PER_ISO_OUT_PENDING;
2033 if (fiq_fsm_mask & 0x8) {
2034 if (fiq_fsm_np_tt_contended(hcd, qh)) {
2035 st->fsm = FIQ_NP_SSPLIT_PENDING;
2036 start_immediate = 0;
2038 st->fsm = FIQ_NP_SSPLIT_STARTED;
2040 } else if (start_immediate) {
2041 st->fsm = FIQ_PER_SSPLIT_STARTED;
2043 st->fsm = FIQ_PER_SSPLIT_QUEUED;
2048 if (start_immediate) {
2049 /* Set the oddfrm bit as close as possible to actual queueing */
2050 frame = dwc_otg_hcd_get_frame_number(hcd);
2051 st->expected_uframe = (frame + 1) & 0x3FFF;
2052 st->hcchar_copy.b.oddfrm = (frame & 0x1) ? 0 : 1;
2053 st->hcchar_copy.b.chen = 1;
2054 DWC_WRITE_REG32(&hc_regs->hcchar, st->hcchar_copy.d32);
2057 fiq_fsm_spin_unlock(&hcd->fiq_state->lock);
2064 * This function selects transactions from the HCD transfer schedule and
2065 * assigns them to available host channels. It is called from HCD interrupt
2066 * handler functions.
2068 * @param hcd The HCD state structure.
2070 * @return The types of new transactions that were assigned to host channels.
2072 dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t * hcd)
2074 dwc_list_link_t *qh_ptr;
2077 dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE;
2079 #ifdef DEBUG_HOST_CHANNELS
2080 last_sel_trans_num_per_scheduled = 0;
2081 last_sel_trans_num_nonper_scheduled = 0;
2082 last_sel_trans_num_avail_hc_at_start = hcd->available_host_channels;
2083 #endif /* DEBUG_HOST_CHANNELS */
2085 /* Process entries in the periodic ready list. */
2086 qh_ptr = DWC_LIST_FIRST(&hcd->periodic_sched_ready);
2088 while (qh_ptr != &hcd->periodic_sched_ready &&
2089 !DWC_CIRCLEQ_EMPTY(&hcd->free_hc_list)) {
2091 qh = DWC_LIST_ENTRY(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2093 if (microframe_schedule) {
2094 // Make sure we leave one channel for non periodic transactions.
2095 if (hcd->available_host_channels <= 1) {
2098 hcd->available_host_channels--;
2099 #ifdef DEBUG_HOST_CHANNELS
2100 last_sel_trans_num_per_scheduled++;
2101 #endif /* DEBUG_HOST_CHANNELS */
2103 qh = DWC_LIST_ENTRY(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2104 assign_and_init_hc(hcd, qh);
2107 * Move the QH from the periodic ready schedule to the
2108 * periodic assigned schedule.
/* Advance before the move: DWC_LIST_MOVE_HEAD unlinks qh from this list. */
2110 qh_ptr = DWC_LIST_NEXT(qh_ptr);
2111 DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
2112 &qh->qh_list_entry);
2116 * Process entries in the inactive portion of the non-periodic
2117 * schedule. Some free host channels may not be used if they are
2118 * reserved for periodic transfers.
2120 qh_ptr = hcd->non_periodic_sched_inactive.next;
2121 num_channels = hcd->core_if->core_params->host_channels;
2122 while (qh_ptr != &hcd->non_periodic_sched_inactive &&
2123 (microframe_schedule || hcd->non_periodic_channels <
2124 num_channels - hcd->periodic_channels) &&
2125 !DWC_CIRCLEQ_EMPTY(&hcd->free_hc_list)) {
2127 qh = DWC_LIST_ENTRY(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2129 * Check to see if this is a NAK'd retransmit, in which case ignore for retransmission
2130 * we hold off on bulk retransmissions to reduce NAK interrupt overhead for full-speed
2131 * cheeky devices that just hold off using NAKs
2133 if (fiq_enable && nak_holdoff && qh->do_split) {
2134 if (qh->nak_frame != 0xffff) {
/* Bulk endpoints back off by the nak_holdoff parameter, others by 8 frames. */
2135 uint16_t next_frame = dwc_frame_num_inc(qh->nak_frame, (qh->ep_type == UE_BULK) ? nak_holdoff : 8);
2136 uint16_t frame = dwc_otg_hcd_get_frame_number(hcd);
2137 if (dwc_frame_num_le(frame, next_frame)) {
/* Make sure the FIQ wakes us no later than the retransmit slot. */
2138 if(dwc_frame_num_le(next_frame, hcd->fiq_state->next_sched_frame)) {
2139 hcd->fiq_state->next_sched_frame = next_frame;
2141 qh_ptr = DWC_LIST_NEXT(qh_ptr);
/* Holdoff expired: clear the sentinel and schedule normally. */
2144 qh->nak_frame = 0xFFFF;
2149 if (microframe_schedule) {
2150 if (hcd->available_host_channels < 1) {
2153 hcd->available_host_channels--;
2154 #ifdef DEBUG_HOST_CHANNELS
2155 last_sel_trans_num_nonper_scheduled++;
2156 #endif /* DEBUG_HOST_CHANNELS */
2159 assign_and_init_hc(hcd, qh);
2162 * Move the QH from the non-periodic inactive schedule to the
2163 * non-periodic active schedule.
2165 qh_ptr = DWC_LIST_NEXT(qh_ptr);
2166 DWC_LIST_MOVE_HEAD(&hcd->non_periodic_sched_active,
2167 &qh->qh_list_entry);
2169 if (!microframe_schedule)
2170 hcd->non_periodic_channels++;
2172 /* we moved a non-periodic QH to the active schedule. If the inactive queue is empty,
2173 * stop the FIQ from kicking us. We could potentially still have elements here if we
2174 * ran out of host channels.
2177 if (DWC_LIST_EMPTY(&hcd->non_periodic_sched_inactive)) {
2178 hcd->fiq_state->kick_np_queues = 0;
2180 /* For each entry remaining in the NP inactive queue,
2181 * if this a NAK'd retransmit then don't set the kick flag.
2184 DWC_LIST_FOREACH(qh_ptr, &hcd->non_periodic_sched_inactive) {
2185 qh = DWC_LIST_ENTRY(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2186 if (qh->nak_frame == 0xFFFF) {
2187 hcd->fiq_state->kick_np_queues = 1;
/* Report which schedules now have work for the caller to queue. */
2193 if(!DWC_LIST_EMPTY(&hcd->periodic_sched_assigned))
2194 ret_val |= DWC_OTG_TRANSACTION_PERIODIC;
2196 if(!DWC_LIST_EMPTY(&hcd->non_periodic_sched_active))
2197 ret_val |= DWC_OTG_TRANSACTION_NON_PERIODIC;
2200 #ifdef DEBUG_HOST_CHANNELS
2201 last_sel_trans_num_avail_hc_at_end = hcd->available_host_channels;
2202 #endif /* DEBUG_HOST_CHANNELS */
2207 * Attempts to queue a single transaction request for a host channel
2208 * associated with either a periodic or non-periodic transfer. This function
2209 * assumes that there is space available in the appropriate request queue. For
2210 * an OUT transfer or SETUP transaction in Slave mode, it checks whether space
2211 * is available in the appropriate Tx FIFO.
2213 * @param hcd The HCD state structure.
2214 * @param hc Host channel descriptor associated with either a periodic or
2215 * non-periodic transfer.
2216 * @param fifo_dwords_avail Number of DWORDs available in the periodic Tx
2217 * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic
2220 * @return 1 if a request is queued and more requests may be needed to
2221 * complete the transfer, 0 if no more requests are required for this
2222 * transfer, -1 if there is insufficient space in the Tx FIFO.
2224 static int queue_transaction(dwc_otg_hcd_t * hcd,
2225 dwc_hc_t * hc, uint16_t fifo_dwords_avail)
2229 if (hcd->core_if->dma_enable) {
2230 if (hcd->core_if->dma_desc_enable) {
/* Descriptor DMA: (re)start the transfer; isoc channels restart every call. */
2231 if (!hc->xfer_started
2232 || (hc->ep_type == DWC_OTG_EP_TYPE_ISOC)) {
2233 dwc_otg_hcd_start_xfer_ddma(hcd, hc->qh);
2234 hc->qh->ping_state = 0;
2236 } else if (!hc->xfer_started) {
/*
 * Buffer DMA with FIQ: seed the FIQ's per-channel error state from
 * the QTD so the FIQ passes errored transfers back to the IRQ handler.
 */
2237 if (fiq_fsm_enable && hc->error_state) {
2238 hcd->fiq_state->channel[hc->hc_num].nr_errors =
2239 DWC_CIRCLEQ_FIRST(&hc->qh->qtd_list)->error_count;
2240 hcd->fiq_state->channel[hc->hc_num].fsm =
2241 FIQ_PASSTHROUGH_ERRORSTATE;
2243 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2244 hc->qh->ping_state = 0;
2247 } else if (hc->halt_pending) {
2248 /* Don't queue a request if the channel has been halted. */
2250 } else if (hc->halt_on_queue) {
2251 dwc_otg_hc_halt(hcd->core_if, hc, hc->halt_status);
2253 } else if (hc->do_ping) {
2254 if (!hc->xfer_started) {
2255 dwc_otg_hc_start_transfer(hcd->core_if, hc);
/*
 * Slave-mode OUT/SETUP: only queue when the Tx FIFO has room for a
 * whole max-size packet (fifo_dwords_avail is in 4-byte DWORDs).
 */
2258 } else if (!hc->ep_is_in || hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
2259 if ((fifo_dwords_avail * 4) >= hc->max_packet) {
2260 if (!hc->xfer_started) {
2261 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2265 dwc_otg_hc_continue_transfer(hcd->core_if,
/* Slave-mode IN: no FIFO-space check needed to issue the request. */
2272 if (!hc->xfer_started) {
2273 dwc_otg_hc_start_transfer(hcd->core_if, hc);
2276 retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc);
2284 * Processes periodic channels for the next frame and queues transactions for
2285 * these channels to the DWC_otg controller. After queueing transactions, the
2286 * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
2287 * to queue as Periodic Tx FIFO or request queue space becomes available.
2288 * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
2290 static void process_periodic_channels(dwc_otg_hcd_t * hcd)
2292 hptxsts_data_t tx_status;
2293 dwc_list_link_t *qh_ptr;
2296 int no_queue_space = 0;
2297 int no_fifo_space = 0;
2299 dwc_otg_host_global_regs_t *host_regs;
2300 host_regs = hcd->core_if->host_if->host_global_regs;
2302 DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n");
/* Snapshot periodic Tx FIFO / request-queue status for debug output. */
2304 tx_status.d32 = DWC_READ_REG32(&host_regs->hptxsts);
2305 DWC_DEBUGPL(DBG_HCDV,
2306 " P Tx Req Queue Space Avail (before queue): %d\n",
2307 tx_status.b.ptxqspcavail);
2308 DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (before queue): %d\n",
2309 tx_status.b.ptxfspcavail);
/* Walk the assigned periodic schedule, attempting to queue one
 * request per QH. */
2312 qh_ptr = hcd->periodic_sched_assigned.next;
2313 while (qh_ptr != &hcd->periodic_sched_assigned) {
2314 tx_status.d32 = DWC_READ_REG32(&host_regs->hptxsts);
/* Periodic request queue full: can't queue any more this pass. */
2315 if (tx_status.b.ptxqspcavail == 0) {
2320 qh = DWC_LIST_ENTRY(qh_ptr, dwc_otg_qh_t, qh_list_entry);
2322 // Do not send a split start transaction any later than frame .6
2323 // Note, we have to schedule a periodic in .5 to make it go in .6
2324 if(fiq_fsm_enable && qh->do_split && ((dwc_otg_hcd_get_frame_number(hcd) + 1) & 7) > 6)
2326 qh_ptr = qh_ptr->next;
2327 hcd->fiq_state->next_sched_frame = dwc_otg_hcd_get_frame_number(hcd) | 7;
/* Hand suitable transactions to the FIQ state machine instead of
 * queueing them from IRQ context. */
2331 if (fiq_fsm_enable && fiq_fsm_transaction_suitable(hcd, qh)) {
2333 fiq_fsm_queue_split_transaction(hcd, qh);
2335 fiq_fsm_queue_isoc_transaction(hcd, qh);
2339 * Set a flag if we're queueing high-bandwidth in slave mode.
2340 * The flag prevents any halts to get into the request queue in
2341 * the middle of multiple high-bandwidth packets getting queued.
2343 if (!hcd->core_if->dma_enable && qh->channel->multi_count > 1) {
2344 hcd->core_if->queuing_high_bandwidth = 1;
2346 status = queue_transaction(hcd, qh->channel,
2347 tx_status.b.ptxfspcavail);
2355 * In Slave mode, stay on the current transfer until there is
2356 * nothing more to do or the high-bandwidth request count is
2357 * reached. In DMA mode, only need to queue one request. The
2358 * controller automatically handles multiple packets for
2359 * high-bandwidth transfers.
2361 if (hcd->core_if->dma_enable || status == 0 ||
2362 qh->channel->requests == qh->channel->multi_count) {
2363 qh_ptr = qh_ptr->next;
2365 * Move the QH from the periodic assigned schedule to
2366 * the periodic queued schedule.
2368 DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_queued,
2369 &qh->qh_list_entry);
2371 /* done queuing high bandwidth */
2372 hcd->core_if->queuing_high_bandwidth = 0;
/* Slave mode only: manage the periodic Tx FIFO empty interrupt so we
 * are called back when more FIFO/request-queue space opens up. */
2376 if (!hcd->core_if->dma_enable) {
2377 dwc_otg_core_global_regs_t *global_regs;
2378 gintmsk_data_t intr_mask = {.d32 = 0 };
2380 global_regs = hcd->core_if->core_global_regs;
2381 intr_mask.b.ptxfempty = 1;
2383 tx_status.d32 = DWC_READ_REG32(&host_regs->hptxsts);
2384 DWC_DEBUGPL(DBG_HCDV,
2385 " P Tx Req Queue Space Avail (after queue): %d\n",
2386 tx_status.b.ptxqspcavail);
2387 DWC_DEBUGPL(DBG_HCDV,
2388 " P Tx FIFO Space Avail (after queue): %d\n",
2389 tx_status.b.ptxfspcavail);
2391 if (!DWC_LIST_EMPTY(&hcd->periodic_sched_assigned) ||
2392 no_queue_space || no_fifo_space) {
2394 * May need to queue more transactions as the request
2395 * queue or Tx FIFO empties. Enable the periodic Tx
2396 * FIFO empty interrupt. (Always use the half-empty
2397 * level to ensure that new requests are loaded as
2398 * soon as possible.)
2400 DWC_MODIFY_REG32(&global_regs->gintmsk, 0,
2404 * Disable the Tx FIFO empty interrupt since there are
2405 * no more transactions that need to be queued right
2406 * now. This function is called from interrupt
2407 * handlers to queue more transactions as transfer
2410 DWC_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32,
2417 * Processes active non-periodic channels and queues transactions for these
2418 * channels to the DWC_otg controller. After queueing transactions, the NP Tx
2419 * FIFO Empty interrupt is enabled if there are more transactions to queue as
2420 * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
2421 * FIFO Empty interrupt is disabled.
2423 static void process_non_periodic_channels(dwc_otg_hcd_t * hcd)
2425 gnptxsts_data_t tx_status;
2426 dwc_list_link_t *orig_qh_ptr;
2429 int no_queue_space = 0;
2430 int no_fifo_space = 0;
2433 dwc_otg_core_global_regs_t *global_regs =
2434 hcd->core_if->core_global_regs;
2436 DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n");
/* Snapshot NP Tx FIFO / request-queue status for debug output. */
2438 tx_status.d32 = DWC_READ_REG32(&global_regs->gnptxsts);
2439 DWC_DEBUGPL(DBG_HCDV,
2440 " NP Tx Req Queue Space Avail (before queue): %d\n",
2441 tx_status.b.nptxqspcavail);
2442 DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n",
2443 tx_status.b.nptxfspcavail);
2446 * Keep track of the starting point. Skip over the start-of-list
/* The active list is walked round-robin from a persistent cursor
 * (hcd->non_periodic_qh_ptr) so channels are serviced fairly. */
2449 if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) {
2450 hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
2452 orig_qh_ptr = hcd->non_periodic_qh_ptr;
2455 * Process once through the active list or until no more space is
2456 * available in the request queue or the Tx FIFO.
2459 tx_status.d32 = DWC_READ_REG32(&global_regs->gnptxsts);
/* Slave mode: stop when the NP request queue is full. */
2460 if (!hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) {
2465 qh = DWC_LIST_ENTRY(hcd->non_periodic_qh_ptr, dwc_otg_qh_t,
/* Prefer the FIQ state machine for suitable split transactions. */
2468 if(fiq_fsm_enable && fiq_fsm_transaction_suitable(hcd, qh)) {
2469 fiq_fsm_queue_split_transaction(hcd, qh);
2471 status = queue_transaction(hcd, qh->channel,
2472 tx_status.b.nptxfspcavail);
/* status < 0: insufficient Tx FIFO space (see queue_transaction). */
2476 } else if (status < 0) {
2481 /* Advance to next QH, skipping start-of-list entry. */
2482 hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
2483 if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) {
2484 hcd->non_periodic_qh_ptr =
2485 hcd->non_periodic_qh_ptr->next;
2488 } while (hcd->non_periodic_qh_ptr != orig_qh_ptr);
/* Slave mode only: manage the NP Tx FIFO empty interrupt so we are
 * called back when more FIFO/request-queue space opens up. */
2490 if (!hcd->core_if->dma_enable) {
2491 gintmsk_data_t intr_mask = {.d32 = 0 };
2492 intr_mask.b.nptxfempty = 1;
2495 tx_status.d32 = DWC_READ_REG32(&global_regs->gnptxsts);
2496 DWC_DEBUGPL(DBG_HCDV,
2497 " NP Tx Req Queue Space Avail (after queue): %d\n",
2498 tx_status.b.nptxqspcavail);
2499 DWC_DEBUGPL(DBG_HCDV,
2500 " NP Tx FIFO Space Avail (after queue): %d\n",
2501 tx_status.b.nptxfspcavail);
2503 if (more_to_do || no_queue_space || no_fifo_space) {
2505 * May need to queue more transactions as the request
2506 * queue or Tx FIFO empties. Enable the non-periodic
2507 * Tx FIFO empty interrupt. (Always use the half-empty
2508 * level to ensure that new requests are loaded as
2509 * soon as possible.)
2511 DWC_MODIFY_REG32(&global_regs->gintmsk, 0,
2515 * Disable the Tx FIFO empty interrupt since there are
2516 * no more transactions that need to be queued right
2517 * now. This function is called from interrupt
2518 * handlers to queue more transactions as transfer
2521 DWC_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32,
2528 * This function processes the currently active host channels and queues
2529 * transactions for these channels to the DWC_otg controller. It is called
2530 * from HCD interrupt handler functions.
2532 * @param hcd The HCD state structure.
2533 * @param tr_type The type(s) of transactions to queue (non-periodic,
2534 * periodic, or both).
2536 void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t * hcd,
2537 dwc_otg_transaction_type_e tr_type)
2540 DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n");
2542 /* Process host channels associated with periodic transfers. */
2543 if ((tr_type == DWC_OTG_TRANSACTION_PERIODIC ||
2544 tr_type == DWC_OTG_TRANSACTION_ALL) &&
2545 !DWC_LIST_EMPTY(&hcd->periodic_sched_assigned)) {
2547 process_periodic_channels(hcd);
2550 /* Process host channels associated with non-periodic transfers. */
2551 if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC ||
2552 tr_type == DWC_OTG_TRANSACTION_ALL) {
2553 if (!DWC_LIST_EMPTY(&hcd->non_periodic_sched_active)) {
2554 process_non_periodic_channels(hcd);
2557 * Ensure NP Tx FIFO empty interrupt is disabled when
2558 * there are no non-periodic transfers to process.
2560 gintmsk_data_t gintmsk = {.d32 = 0 };
2561 gintmsk.b.nptxfempty = 1;
/* With the FIQ in use, GINTMSK updates must be serialized against
 * the FIQ handler: FIQs off and the FIQ spinlock held. */
2564 local_fiq_disable();
2565 fiq_fsm_spin_lock(&hcd->fiq_state->lock);
2566 DWC_MODIFY_REG32(&hcd->core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
2567 fiq_fsm_spin_unlock(&hcd->fiq_state->lock);
/* Non-FIQ path: plain masked clear of nptxfempty. */
2570 DWC_MODIFY_REG32(&hcd->core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
2576 #ifdef DWC_HS_ELECT_TST
2578 * Quick and dirty hack to implement the HS Electrical Test
2579 * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.
2581 * This code was copied from our userspace app "hset". It sends a
2582 * Get Device Descriptor control sequence in two parts, first the
2583 * Setup packet by itself, followed some time later by the In and
2584 * Ack packets. Rather than trying to figure out how to add this
2585 * functionality to the normal driver code, we just hijack the
2586 * hardware, using these two function to drive the hardware
/* Register-block pointers shared by the HS electrical test helpers
 * (do_setup()/do_in_ack()). They are initialized from core_if in the
 * UHF_PORT_TEST handling of dwc_otg_hcd_hub_control() before either
 * helper runs. */
2590 static dwc_otg_core_global_regs_t *global_regs;
2591 static dwc_otg_host_global_regs_t *hc_global_regs;
2592 static dwc_otg_hc_regs_t *hc_regs;
2593 static uint32_t *data_fifo;
/* HS electrical test helper: drives a single SETUP packet (Get Device
 * Descriptor) on host channel 0 by banging the registers directly,
 * polling GINTSTS rather than using the normal interrupt path. */
2595 static void do_setup(void)
2597 gintsts_data_t gintsts;
2598 hctsiz_data_t hctsiz;
2599 hcchar_data_t hcchar;
/* Unmask channel 0 in HAINTMSK and select the HCINT sources of
 * interest (mask 0x04a3). */
2604 DWC_WRITE_REG32(&hc_global_regs->haintmsk, 0x0001);
2607 DWC_WRITE_REG32(&hc_regs->hcintmsk, 0x04a3);
/* Read-then-write-back GINTSTS/HAINT/HCINT to acknowledge any stale
 * interrupt status before touching the channel. */
2610 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2613 haint.d32 = DWC_READ_REG32(&hc_global_regs->haint);
2616 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2619 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2622 DWC_WRITE_REG32(&hc_regs->hcint, hcint.d32);
2625 DWC_WRITE_REG32(&hc_global_regs->haint, haint.d32);
2628 DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
2631 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2634 * Send Setup packet (Get Device Descriptor)
2637 /* Make sure channel is disabled */
2638 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2639 if (hcchar.b.chen) {
2641 // hcchar.b.chen = 1;
2642 DWC_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
/* Re-acknowledge interrupt status after the disable. */
2647 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2650 haint.d32 = DWC_READ_REG32(&hc_global_regs->haint);
2653 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2656 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2659 DWC_WRITE_REG32(&hc_regs->hcint, hcint.d32);
2662 DWC_WRITE_REG32(&hc_global_regs->haint, haint.d32);
2665 DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
2667 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
/* Program the transfer: one 8-byte SETUP packet on a control EP. */
2672 hctsiz.b.xfersize = 8;
2673 hctsiz.b.pktcnt = 1;
2674 hctsiz.b.pid = DWC_OTG_HC_PID_SETUP;
2675 DWC_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
2678 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2679 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
2684 DWC_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
2686 /* Fill FIFO with Setup data for Get Device Descriptor */
/* Data FIFO for channel 0 sits at offset 0x1000 from the global regs.
 * 0x01000680/0x00080000 is the little-endian image of the 8-byte
 * GET_DESCRIPTOR(DEVICE) setup packet. */
2687 data_fifo = (uint32_t *) ((char *)global_regs + 0x1000);
2688 DWC_WRITE_REG32(data_fifo++, 0x01000680);
2689 DWC_WRITE_REG32(data_fifo++, 0x00080000);
2691 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2693 /* Wait for host channel interrupt */
/* Busy-poll GINTSTS.hcintr; acceptable only in this test-mode path. */
2695 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2696 } while (gintsts.b.hcintr == 0);
2698 /* Disable HCINTs */
2699 DWC_WRITE_REG32(&hc_regs->hcintmsk, 0x0000);
2701 /* Disable HAINTs */
2702 DWC_WRITE_REG32(&hc_global_regs->haintmsk, 0x0000);
/* Final read-and-clear of all interrupt status before returning. */
2705 haint.d32 = DWC_READ_REG32(&hc_global_regs->haint);
2708 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2711 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2714 DWC_WRITE_REG32(&hc_regs->hcint, hcint.d32);
2717 DWC_WRITE_REG32(&hc_global_regs->haint, haint.d32);
2720 DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
2723 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
/* HS electrical test helper: completes the control transfer begun by
 * do_setup() — receives the Control IN data packet, then sends the
 * zero-length DATA1 handshake — again by direct register access with
 * polled status instead of the normal interrupt path. */
2726 static void do_in_ack(void)
2728 gintsts_data_t gintsts;
2729 hctsiz_data_t hctsiz;
2730 hcchar_data_t hcchar;
2733 host_grxsts_data_t grxsts;
/* Unmask channel 0 in HAINTMSK and select HCINT sources (0x04a3). */
2736 DWC_WRITE_REG32(&hc_global_regs->haintmsk, 0x0001);
2739 DWC_WRITE_REG32(&hc_regs->hcintmsk, 0x04a3);
/* Acknowledge any stale GINTSTS/HAINT/HCINT status. */
2742 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2745 haint.d32 = DWC_READ_REG32(&hc_global_regs->haint);
2748 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2751 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2754 DWC_WRITE_REG32(&hc_regs->hcint, hcint.d32);
2757 DWC_WRITE_REG32(&hc_global_regs->haint, haint.d32);
2760 DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
2763 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2766 * Receive Control In packet
2769 /* Make sure channel is disabled */
2770 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2771 if (hcchar.b.chen) {
2774 DWC_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
/* Re-acknowledge interrupt status after the disable. */
2779 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2782 haint.d32 = DWC_READ_REG32(&hc_global_regs->haint);
2785 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2788 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2791 DWC_WRITE_REG32(&hc_regs->hcint, hcint.d32);
2794 DWC_WRITE_REG32(&hc_global_regs->haint, haint.d32);
2797 DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
2799 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
/* Program the IN data stage: one 8-byte packet, DATA1 PID. */
2804 hctsiz.b.xfersize = 8;
2805 hctsiz.b.pktcnt = 1;
2806 hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
2807 DWC_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
2810 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2811 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
2816 DWC_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
2818 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2820 /* Wait for receive status queue interrupt */
/* Busy-poll GINTSTS.rxstsqlvl for the received packet status. */
2822 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2823 } while (gintsts.b.rxstsqlvl == 0);
/* Pop the receive status entry (GRXSTSP read is destructive). */
2826 grxsts.d32 = DWC_READ_REG32(&global_regs->grxstsp);
2828 /* Clear RXSTSQLVL in GINTSTS */
2830 gintsts.b.rxstsqlvl = 1;
2831 DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
2833 switch (grxsts.b.pktsts) {
2834 case DWC_GRXSTS_PKTSTS_IN:
2835 /* Read the data into the host buffer */
/* Drain bcnt bytes from the Rx FIFO in 32-bit words; the data itself
 * is discarded — only the handshake matters for the test. */
2836 if (grxsts.b.bcnt > 0) {
2838 int word_count = (grxsts.b.bcnt + 3) / 4;
2840 data_fifo = (uint32_t *) ((char *)global_regs + 0x1000);
2842 for (i = 0; i < word_count; i++) {
2843 (void)DWC_READ_REG32(data_fifo++);
2852 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2854 /* Wait for receive status queue interrupt */
/* Second status entry: expect IN transfer-complete. */
2856 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2857 } while (gintsts.b.rxstsqlvl == 0);
2860 grxsts.d32 = DWC_READ_REG32(&global_regs->grxstsp);
2862 /* Clear RXSTSQLVL in GINTSTS */
2864 gintsts.b.rxstsqlvl = 1;
2865 DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
2867 switch (grxsts.b.pktsts) {
2868 case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
2875 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2877 /* Wait for host channel interrupt */
2879 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2880 } while (gintsts.b.hcintr == 0);
/* Acknowledge interrupt status for the completed IN stage. */
2883 haint.d32 = DWC_READ_REG32(&hc_global_regs->haint);
2886 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2889 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2892 DWC_WRITE_REG32(&hc_regs->hcint, hcint.d32);
2895 DWC_WRITE_REG32(&hc_global_regs->haint, haint.d32);
2898 DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
2901 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2908 * Send handshake packet
/* Clear status again before driving the status (handshake) stage. */
2912 haint.d32 = DWC_READ_REG32(&hc_global_regs->haint);
2915 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2918 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2921 DWC_WRITE_REG32(&hc_regs->hcint, hcint.d32);
2924 DWC_WRITE_REG32(&hc_global_regs->haint, haint.d32);
2927 DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
2930 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2932 /* Make sure channel is disabled */
2933 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2934 if (hcchar.b.chen) {
2937 DWC_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
2942 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2945 haint.d32 = DWC_READ_REG32(&hc_global_regs->haint);
2948 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
2951 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2954 DWC_WRITE_REG32(&hc_regs->hcint, hcint.d32);
2957 DWC_WRITE_REG32(&hc_global_regs->haint, haint.d32);
2960 DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
2962 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
/* Status stage: zero-length DATA1 packet. */
2967 hctsiz.b.xfersize = 0;
2968 hctsiz.b.pktcnt = 1;
2969 hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
2970 DWC_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
2973 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
2974 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
2979 DWC_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
2981 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2983 /* Wait for host channel interrupt */
2985 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
2986 } while (gintsts.b.hcintr == 0);
2988 /* Disable HCINTs */
2989 DWC_WRITE_REG32(&hc_regs->hcintmsk, 0x0000);
2991 /* Disable HAINTs */
2992 DWC_WRITE_REG32(&hc_global_regs->haintmsk, 0x0000);
/* Final read-and-clear of all interrupt status before returning. */
2995 haint.d32 = DWC_READ_REG32(&hc_global_regs->haint);
2998 hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
3001 hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
3004 DWC_WRITE_REG32(&hc_regs->hcint, hcint.d32);
3007 DWC_WRITE_REG32(&hc_global_regs->haint, haint.d32);
3010 DWC_WRITE_REG32(&global_regs->gintsts, gintsts.d32);
3013 gintsts.d32 = DWC_READ_REG32(&global_regs->gintsts);
3017 /** Handles hub class-specific requests. */
3018 int dwc_otg_hcd_hub_control(dwc_otg_hcd_t * dwc_otg_hcd,
3021 uint16_t wIndex, uint8_t * buf, uint16_t wLength)
3025 dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
3026 usb_hub_descriptor_t *hub_desc;
3027 hprt0_data_t hprt0 = {.d32 = 0 };
3029 uint32_t port_status;
3032 case UCR_CLEAR_HUB_FEATURE:
3033 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3034 "ClearHubFeature 0x%x\n", wValue);
3036 case UHF_C_HUB_LOCAL_POWER:
3037 case UHF_C_HUB_OVER_CURRENT:
3038 /* Nothing required here */
3041 retval = -DWC_E_INVALID;
3042 DWC_ERROR("DWC OTG HCD - "
3043 "ClearHubFeature request %xh unknown\n",
3047 case UCR_CLEAR_PORT_FEATURE:
3048 #ifdef CONFIG_USB_DWC_OTG_LPM
3049 if (wValue != UHF_PORT_L1)
3051 if (!wIndex || wIndex > 1)
3055 case UHF_PORT_ENABLE:
3056 DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB CONTROL - "
3057 "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
3058 hprt0.d32 = dwc_otg_read_hprt0(core_if);
3060 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
3062 case UHF_PORT_SUSPEND:
3063 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3064 "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
3066 if (core_if->power_down == 2) {
3067 dwc_otg_host_hibernation_restore(core_if, 0, 0);
3069 DWC_WRITE_REG32(core_if->pcgcctl, 0);
3072 hprt0.d32 = dwc_otg_read_hprt0(core_if);
3074 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
3075 hprt0.b.prtsusp = 0;
3076 /* Clear Resume bit */
3079 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
3082 #ifdef CONFIG_USB_DWC_OTG_LPM
3085 pcgcctl_data_t pcgcctl = {.d32 = 0 };
3086 glpmcfg_data_t lpmcfg = {.d32 = 0 };
3089 DWC_READ_REG32(&core_if->
3090 core_global_regs->glpmcfg);
3091 lpmcfg.b.en_utmi_sleep = 0;
3092 lpmcfg.b.hird_thres &= (~(1 << 4));
3093 lpmcfg.b.prt_sleep_sts = 1;
3094 DWC_WRITE_REG32(&core_if->
3095 core_global_regs->glpmcfg,
3098 /* Clear Enbl_L1Gating bit. */
3099 pcgcctl.b.enbl_sleep_gating = 1;
3100 DWC_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32,
3105 hprt0.d32 = dwc_otg_read_hprt0(core_if);
3107 DWC_WRITE_REG32(core_if->host_if->hprt0,
3109 /* This bit will be cleared in wakeup interrupt handle */
3113 case UHF_PORT_POWER:
3114 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3115 "ClearPortFeature USB_PORT_FEAT_POWER\n");
3116 hprt0.d32 = dwc_otg_read_hprt0(core_if);
3118 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
3120 case UHF_PORT_INDICATOR:
3121 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3122 "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
3123 /* Port indicator not supported */
3125 case UHF_C_PORT_CONNECTION:
3126 /* Clears drivers internal connect status change
3128 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3129 "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
3130 dwc_otg_hcd->flags.b.port_connect_status_change = 0;
3132 case UHF_C_PORT_RESET:
3133 /* Clears the driver's internal Port Reset Change
3135 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3136 "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
3137 dwc_otg_hcd->flags.b.port_reset_change = 0;
3139 case UHF_C_PORT_ENABLE:
3140 /* Clears the driver's internal Port
3141 * Enable/Disable Change flag */
3142 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3143 "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
3144 dwc_otg_hcd->flags.b.port_enable_change = 0;
3146 case UHF_C_PORT_SUSPEND:
3147 /* Clears the driver's internal Port Suspend
3148 * Change flag, which is set when resume signaling on
3149 * the host port is complete */
3150 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3151 "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
3152 dwc_otg_hcd->flags.b.port_suspend_change = 0;
3154 #ifdef CONFIG_USB_DWC_OTG_LPM
3156 dwc_otg_hcd->flags.b.port_l1_change = 0;
3159 case UHF_C_PORT_OVER_CURRENT:
3160 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3161 "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
3162 dwc_otg_hcd->flags.b.port_over_current_change = 0;
3165 retval = -DWC_E_INVALID;
3166 DWC_ERROR("DWC OTG HCD - "
3167 "ClearPortFeature request %xh "
3168 "unknown or unsupported\n", wValue);
3171 case UCR_GET_HUB_DESCRIPTOR:
3172 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3173 "GetHubDescriptor\n");
3174 hub_desc = (usb_hub_descriptor_t *) buf;
3175 hub_desc->bDescLength = 9;
3176 hub_desc->bDescriptorType = 0x29;
3177 hub_desc->bNbrPorts = 1;
3178 USETW(hub_desc->wHubCharacteristics, 0x08);
3179 hub_desc->bPwrOn2PwrGood = 1;
3180 hub_desc->bHubContrCurrent = 0;
3181 hub_desc->DeviceRemovable[0] = 0;
3182 hub_desc->DeviceRemovable[1] = 0xff;
3184 case UCR_GET_HUB_STATUS:
3185 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3187 DWC_MEMSET(buf, 0, 4);
3189 case UCR_GET_PORT_STATUS:
3190 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3191 "GetPortStatus wIndex = 0x%04x FLAGS=0x%08x\n",
3192 wIndex, dwc_otg_hcd->flags.d32);
3193 if (!wIndex || wIndex > 1)
3198 if (dwc_otg_hcd->flags.b.port_connect_status_change)
3199 port_status |= (1 << UHF_C_PORT_CONNECTION);
3201 if (dwc_otg_hcd->flags.b.port_enable_change)
3202 port_status |= (1 << UHF_C_PORT_ENABLE);
3204 if (dwc_otg_hcd->flags.b.port_suspend_change)
3205 port_status |= (1 << UHF_C_PORT_SUSPEND);
3207 if (dwc_otg_hcd->flags.b.port_l1_change)
3208 port_status |= (1 << UHF_C_PORT_L1);
3210 if (dwc_otg_hcd->flags.b.port_reset_change) {
3211 port_status |= (1 << UHF_C_PORT_RESET);
3214 if (dwc_otg_hcd->flags.b.port_over_current_change) {
3215 DWC_WARN("Overcurrent change detected\n");
3216 port_status |= (1 << UHF_C_PORT_OVER_CURRENT);
3219 if (!dwc_otg_hcd->flags.b.port_connect_status) {
3221 * The port is disconnected, which means the core is
3222 * either in device mode or it soon will be. Just
3223 * return 0's for the remainder of the port status
3224 * since the port register can't be read if the core
3225 * is in device mode.
3227 *((__le32 *) buf) = dwc_cpu_to_le32(&port_status);
3231 hprt0.d32 = DWC_READ_REG32(core_if->host_if->hprt0);
3232 DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32);
3234 if (hprt0.b.prtconnsts)
3235 port_status |= (1 << UHF_PORT_CONNECTION);
3238 port_status |= (1 << UHF_PORT_ENABLE);
3240 if (hprt0.b.prtsusp)
3241 port_status |= (1 << UHF_PORT_SUSPEND);
3243 if (hprt0.b.prtovrcurract)
3244 port_status |= (1 << UHF_PORT_OVER_CURRENT);
3247 port_status |= (1 << UHF_PORT_RESET);
3250 port_status |= (1 << UHF_PORT_POWER);
3252 if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED)
3253 port_status |= (1 << UHF_PORT_HIGH_SPEED);
3254 else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED)
3255 port_status |= (1 << UHF_PORT_LOW_SPEED);
3257 if (hprt0.b.prttstctl)
3258 port_status |= (1 << UHF_PORT_TEST);
3259 if (dwc_otg_get_lpm_portsleepstatus(dwc_otg_hcd->core_if)) {
3260 port_status |= (1 << UHF_PORT_L1);
3263 For Synopsys HW emulation of Power down wkup_control asserts the
3264 hreset_n and prst_n on suspend. This causes the HPRT0 to be zero.
3265 We intentionally tell the software that port is in L2Suspend state.
3268 if ((core_if->power_down == 2)
3269 && (core_if->hibernation_suspend == 1)) {
3270 port_status |= (1 << UHF_PORT_SUSPEND);
3272 /* USB_PORT_FEAT_INDICATOR unsupported always 0 */
3274 *((__le32 *) buf) = dwc_cpu_to_le32(&port_status);
3277 case UCR_SET_HUB_FEATURE:
3278 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3280 /* No HUB features supported */
3282 case UCR_SET_PORT_FEATURE:
3283 if (wValue != UHF_PORT_TEST && (!wIndex || wIndex > 1))
3286 if (!dwc_otg_hcd->flags.b.port_connect_status) {
3288 * The port is disconnected, which means the core is
3289 * either in device mode or it soon will be. Just
3290 * return without doing anything since the port
3291 * register can't be written if the core is in device
3298 case UHF_PORT_SUSPEND:
3299 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3300 "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
3301 if (dwc_otg_hcd_otg_port(dwc_otg_hcd) != wIndex) {
3304 if (core_if->power_down == 2) {
3306 dwc_irqflags_t flags;
3307 pcgcctl_data_t pcgcctl = {.d32 = 0 };
3308 gpwrdn_data_t gpwrdn = {.d32 = 0 };
3309 gusbcfg_data_t gusbcfg = {.d32 = 0 };
3310 #ifdef DWC_DEV_SRPCAP
3311 int32_t otg_cap_param = core_if->core_params->otg_cap;
3313 DWC_PRINTF("Preparing for complete power-off\n");
3315 /* Save registers before hibernation */
3316 dwc_otg_save_global_regs(core_if);
3317 dwc_otg_save_host_regs(core_if);
3319 hprt0.d32 = dwc_otg_read_hprt0(core_if);
3320 hprt0.b.prtsusp = 1;
3322 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
3323 /* Spin until hprt0.b.prtsusp becomes 1 */
3325 hprt0.d32 = dwc_otg_read_hprt0(core_if);
3326 if (hprt0.b.prtsusp) {
3330 } while (--timeout);
3332 DWC_WARN("Suspend wasn't genereted\n");
3337 * We need to disable interrupts to prevent servicing of any IRQ
3338 * during going to hibernation
3340 DWC_SPINLOCK_IRQSAVE(dwc_otg_hcd->lock, &flags);
3341 core_if->lx_state = DWC_OTG_L2;
3342 #ifdef DWC_DEV_SRPCAP
3343 hprt0.d32 = dwc_otg_read_hprt0(core_if);
3346 DWC_WRITE_REG32(core_if->host_if->hprt0,
3350 DWC_READ_REG32(&core_if->core_global_regs->
3352 if (gusbcfg.b.ulpi_utmi_sel == 1) {
3353 /* ULPI interface */
3354 /* Suspend the Phy Clock */
3356 pcgcctl.b.stoppclk = 1;
3357 DWC_MODIFY_REG32(core_if->pcgcctl, 0,
3360 gpwrdn.b.pmuactv = 1;
3361 DWC_MODIFY_REG32(&core_if->
3363 gpwrdn, 0, gpwrdn.d32);
3365 /* UTMI+ Interface */
3366 gpwrdn.b.pmuactv = 1;
3367 DWC_MODIFY_REG32(&core_if->
3369 gpwrdn, 0, gpwrdn.d32);
3371 pcgcctl.b.stoppclk = 1;
3372 DWC_MODIFY_REG32(core_if->pcgcctl, 0, pcgcctl.d32);
3375 #ifdef DWC_DEV_SRPCAP
3377 gpwrdn.b.dis_vbus = 1;
3378 DWC_MODIFY_REG32(&core_if->core_global_regs->
3379 gpwrdn, 0, gpwrdn.d32);
3382 gpwrdn.b.pmuintsel = 1;
3383 DWC_MODIFY_REG32(&core_if->core_global_regs->
3384 gpwrdn, 0, gpwrdn.d32);
3388 #ifdef DWC_DEV_SRPCAP
3389 gpwrdn.b.srp_det_msk = 1;
3391 gpwrdn.b.disconn_det_msk = 1;
3392 gpwrdn.b.lnstchng_msk = 1;
3393 gpwrdn.b.sts_chngint_msk = 1;
3394 DWC_MODIFY_REG32(&core_if->core_global_regs->
3395 gpwrdn, 0, gpwrdn.d32);
3398 /* Enable Power Down Clamp and all interrupts in GPWRDN */
3400 gpwrdn.b.pwrdnclmp = 1;
3401 DWC_MODIFY_REG32(&core_if->core_global_regs->
3402 gpwrdn, 0, gpwrdn.d32);
3405 /* Switch off VDD */
3407 gpwrdn.b.pwrdnswtch = 1;
3408 DWC_MODIFY_REG32(&core_if->core_global_regs->
3409 gpwrdn, 0, gpwrdn.d32);
3411 #ifdef DWC_DEV_SRPCAP
3412 if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE)
3414 core_if->pwron_timer_started = 1;
3415 DWC_TIMER_SCHEDULE(core_if->pwron_timer, 6000 /* 6 secs */ );
3418 /* Save gpwrdn register for further usage if stschng interrupt */
3419 core_if->gr_backup->gpwrdn_local =
3420 DWC_READ_REG32(&core_if->core_global_regs->gpwrdn);
3422 /* Set flag to indicate that we are in hibernation */
3423 core_if->hibernation_suspend = 1;
3424 DWC_SPINUNLOCK_IRQRESTORE(dwc_otg_hcd->lock,flags);
3426 DWC_PRINTF("Host hibernation completed\n");
3427 // Exit from case statement
3431 if (dwc_otg_hcd_otg_port(dwc_otg_hcd) == wIndex &&
3432 dwc_otg_hcd->fops->get_b_hnp_enable(dwc_otg_hcd)) {
3433 gotgctl_data_t gotgctl = {.d32 = 0 };
3434 gotgctl.b.hstsethnpen = 1;
3435 DWC_MODIFY_REG32(&core_if->core_global_regs->
3436 gotgctl, 0, gotgctl.d32);
3437 core_if->op_state = A_SUSPEND;
3439 hprt0.d32 = dwc_otg_read_hprt0(core_if);
3440 hprt0.b.prtsusp = 1;
3441 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
3443 dwc_irqflags_t flags;
3444 /* Update lx_state */
3445 DWC_SPINLOCK_IRQSAVE(dwc_otg_hcd->lock, &flags);
3446 core_if->lx_state = DWC_OTG_L2;
3447 DWC_SPINUNLOCK_IRQRESTORE(dwc_otg_hcd->lock, flags);
3449 /* Suspend the Phy Clock */
3451 pcgcctl_data_t pcgcctl = {.d32 = 0 };
3452 pcgcctl.b.stoppclk = 1;
3453 DWC_MODIFY_REG32(core_if->pcgcctl, 0,
3458 /* For HNP the bus must be suspended for at least 200ms. */
3459 if (dwc_otg_hcd->fops->get_b_hnp_enable(dwc_otg_hcd)) {
3460 pcgcctl_data_t pcgcctl = {.d32 = 0 };
3461 pcgcctl.b.stoppclk = 1;
3462 DWC_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
3466 /** @todo - check how sw can wait for 1 sec to check asesvld??? */
3467 #if 0 //vahrama !!!!!!!!!!!!!!!!!!
3468 if (core_if->adp_enable) {
3469 gotgctl_data_t gotgctl = {.d32 = 0 };
3470 gpwrdn_data_t gpwrdn;
3472 while (gotgctl.b.asesvld == 1) {
3474 DWC_READ_REG32(&core_if->
3480 /* Enable Power Down Logic */
3482 gpwrdn.b.pmuactv = 1;
3483 DWC_MODIFY_REG32(&core_if->core_global_regs->
3484 gpwrdn, 0, gpwrdn.d32);
3486 /* Unmask SRP detected interrupt from Power Down Logic */
3488 gpwrdn.b.srp_det_msk = 1;
3489 DWC_MODIFY_REG32(&core_if->core_global_regs->
3490 gpwrdn, 0, gpwrdn.d32);
3492 dwc_otg_adp_probe_start(core_if);
3496 case UHF_PORT_POWER:
3497 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3498 "SetPortFeature - USB_PORT_FEAT_POWER\n");
3499 hprt0.d32 = dwc_otg_read_hprt0(core_if);
3501 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
3503 case UHF_PORT_RESET:
3504 if ((core_if->power_down == 2)
3505 && (core_if->hibernation_suspend == 1)) {
3506 /* If we are going to exit from Hibernated
3507 * state via USB RESET.
3509 dwc_otg_host_hibernation_restore(core_if, 0, 1);
3511 hprt0.d32 = dwc_otg_read_hprt0(core_if);
3513 DWC_DEBUGPL(DBG_HCD,
3514 "DWC OTG HCD HUB CONTROL - "
3515 "SetPortFeature - USB_PORT_FEAT_RESET\n");
3517 pcgcctl_data_t pcgcctl = {.d32 = 0 };
3518 pcgcctl.b.enbl_sleep_gating = 1;
3519 pcgcctl.b.stoppclk = 1;
3520 DWC_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
3521 DWC_WRITE_REG32(core_if->pcgcctl, 0);
3523 #ifdef CONFIG_USB_DWC_OTG_LPM
3525 glpmcfg_data_t lpmcfg;
3527 DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
3528 if (lpmcfg.b.prt_sleep_sts) {
3529 lpmcfg.b.en_utmi_sleep = 0;
3530 lpmcfg.b.hird_thres &= (~(1 << 4));
3532 (&core_if->core_global_regs->glpmcfg,
3538 hprt0.d32 = dwc_otg_read_hprt0(core_if);
3539 /* Clear suspend bit if resetting from suspended state. */
3540 hprt0.b.prtsusp = 0;
3541 /* When B-Host the Port reset bit is set in
3542 * the Start HCD Callback function, so that
3543 * the reset is started within 1ms of the HNP
3544 * success interrupt. */
3545 if (!dwc_otg_hcd_is_b_host(dwc_otg_hcd)) {
3548 DWC_PRINTF("Indeed it is in host mode hprt0 = %08x\n",hprt0.d32);
3549 DWC_WRITE_REG32(core_if->host_if->hprt0,
3552 /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
3555 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
3556 core_if->lx_state = DWC_OTG_L0; /* Now back to the on state */
3559 #ifdef DWC_HS_ELECT_TST
3563 gintmsk_data_t gintmsk;
3565 t = (wIndex >> 8); /* MSB wIndex USB */
3566 DWC_DEBUGPL(DBG_HCD,
3567 "DWC OTG HCD HUB CONTROL - "
3568 "SetPortFeature - USB_PORT_FEAT_TEST %d\n",
3570 DWC_WARN("USB_PORT_FEAT_TEST %d\n", t);
3572 hprt0.d32 = dwc_otg_read_hprt0(core_if);
3573 hprt0.b.prttstctl = t;
3574 DWC_WRITE_REG32(core_if->host_if->hprt0,
3577 /* Setup global vars with reg addresses (quick and
3578 * dirty hack, should be cleaned up)
3580 global_regs = core_if->core_global_regs;
3582 core_if->host_if->host_global_regs;
3584 (dwc_otg_hc_regs_t *) ((char *)
3588 (uint32_t *) ((char *)global_regs +
3591 if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */
3592 /* Save current interrupt mask */
3595 (&global_regs->gintmsk);
3597 /* Disable all interrupts while we muck with
3598 * the hardware directly
3600 DWC_WRITE_REG32(&global_regs->gintmsk, 0);
3602 /* 15 second delay per the test spec */
3605 /* Drive suspend on the root port */
3607 dwc_otg_read_hprt0(core_if);
3608 hprt0.b.prtsusp = 1;
3610 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
3612 /* 15 second delay per the test spec */
3615 /* Drive resume on the root port */
3617 dwc_otg_read_hprt0(core_if);
3618 hprt0.b.prtsusp = 0;
3620 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
3623 /* Clear the resume bit */
3625 DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
3627 /* Restore interrupts */
3628 DWC_WRITE_REG32(&global_regs->gintmsk, gintmsk.d32);
3629 } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */
3630 /* Save current interrupt mask */
3633 (&global_regs->gintmsk);
3635 /* Disable all interrupts while we muck with
3636 * the hardware directly
3638 DWC_WRITE_REG32(&global_regs->gintmsk, 0);
3640 /* 15 second delay per the test spec */
3643 /* Send the Setup packet */
3646 /* 15 second delay so nothing else happens for awhile */
3649 /* Restore interrupts */
3650 DWC_WRITE_REG32(&global_regs->gintmsk, gintmsk.d32);
3651 } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */
3652 /* Save current interrupt mask */
3655 (&global_regs->gintmsk);
3657 /* Disable all interrupts while we muck with
3658 * the hardware directly
3660 DWC_WRITE_REG32(&global_regs->gintmsk, 0);
3662 /* Send the Setup packet */
3665 /* 15 second delay so nothing else happens for awhile */
3668 /* Send the In and Ack packets */
3671 /* 15 second delay so nothing else happens for awhile */
3674 /* Restore interrupts */
3675 DWC_WRITE_REG32(&global_regs->gintmsk, gintmsk.d32);
3680 #endif /* DWC_HS_ELECT_TST */
3682 case UHF_PORT_INDICATOR:
3683 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
3684 "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
3688 retval = -DWC_E_INVALID;
3689 DWC_ERROR("DWC OTG HCD - "
3690 "SetPortFeature request %xh "
3691 "unknown or unsupported\n", wValue);
3695 #ifdef CONFIG_USB_DWC_OTG_LPM
3696 case UCR_SET_AND_TEST_PORT_FEATURE:
3697 if (wValue != UHF_PORT_L1) {
3701 int portnum, hird, devaddr, remwake;
3702 glpmcfg_data_t lpmcfg;
3703 uint32_t time_usecs;
3704 gintsts_data_t gintsts;
3705 gintmsk_data_t gintmsk;
3707 if (!dwc_otg_get_param_lpm_enable(core_if)) {
3710 if (wValue != UHF_PORT_L1 || wLength != 1) {
3713 /* Check if the port currently is in SLEEP state */
3715 DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
3716 if (lpmcfg.b.prt_sleep_sts) {
3717 DWC_INFO("Port is already in sleep mode\n");
3718 buf[0] = 0; /* Return success */
3722 portnum = wIndex & 0xf;
3723 hird = (wIndex >> 4) & 0xf;
3724 devaddr = (wIndex >> 8) & 0x7f;
3725 remwake = (wIndex >> 15);
3728 retval = -DWC_E_INVALID;
3730 ("Wrong port number(%d) in SetandTestPortFeature request\n",
3736 ("SetandTestPortFeature request: portnum = %d, hird = %d, devaddr = %d, rewake = %d\n",
3737 portnum, hird, devaddr, remwake);
3738 /* Disable LPM interrupt */
3740 gintmsk.b.lpmtranrcvd = 1;
3741 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk,
3744 if (dwc_otg_hcd_send_lpm
3745 (dwc_otg_hcd, devaddr, hird, remwake)) {
3746 retval = -DWC_E_INVALID;
3750 time_usecs = 10 * (lpmcfg.b.retry_count + 1);
3751 /* We will consider timeout if time_usecs microseconds pass,
3752 * and we don't receive LPM transaction status.
3753 * After receiving non-error responce(ACK/NYET/STALL) from device,
3754 * core will set lpmtranrcvd bit.
3758 DWC_READ_REG32(&core_if->core_global_regs->gintsts);
3759 if (gintsts.b.lpmtranrcvd) {
3763 } while (--time_usecs);
3764 /* lpm_int bit will be cleared in LPM interrupt handler */
3771 if (!gintsts.b.lpmtranrcvd) {
3772 buf[0] = 0x3; /* Completion code is Timeout */
3773 dwc_otg_hcd_free_hc_from_lpm(dwc_otg_hcd);
3776 DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
3777 if (lpmcfg.b.lpm_resp == 0x3) {
3778 /* ACK responce from the device */
3779 buf[0] = 0x00; /* Success */
3780 } else if (lpmcfg.b.lpm_resp == 0x2) {
3781 /* NYET responce from the device */
3784 /* Otherwise responce with Timeout */
3788 DWC_PRINTF("Device responce to LPM trans is %x\n",
3790 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0,
3795 #endif /* CONFIG_USB_DWC_OTG_LPM */
3798 retval = -DWC_E_INVALID;
3799 DWC_WARN("DWC OTG HCD - "
3800 "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n",
3801 typeReq, wIndex, wValue);
3808 #ifdef CONFIG_USB_DWC_OTG_LPM
3809 /** Returns index of host channel to perform LPM transaction. */
3810 int dwc_otg_hcd_get_hc_for_lpm_tran(dwc_otg_hcd_t * hcd, uint8_t devaddr)
3812 dwc_otg_core_if_t *core_if = hcd->core_if;
3814 hcchar_data_t hcchar;
3815 gintmsk_data_t gintmsk = {.d32 = 0 };
3817 if (DWC_CIRCLEQ_EMPTY(&hcd->free_hc_list)) {
3818 DWC_PRINTF("No free channel to select for LPM transaction\n");
3822 hc = DWC_CIRCLEQ_FIRST(&hcd->free_hc_list);
3824 /* Mask host channel interrupts. */
3825 gintmsk.b.hcintr = 1;
3826 DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
3828 /* Fill fields that core needs for LPM transaction */
3829 hcchar.b.devaddr = devaddr;
3831 hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
3833 hcchar.b.lspddev = (hc->speed == DWC_OTG_EP_SPEED_LOW);
3834 hcchar.b.epdir = 0; /* OUT */
3835 DWC_WRITE_REG32(&core_if->host_if->hc_regs[hc->hc_num]->hcchar,
3838 /* Remove the host channel from the free list. */
3839 DWC_CIRCLEQ_REMOVE_INIT(&hcd->free_hc_list, hc, hc_list_entry);
3841 DWC_PRINTF("hcnum = %d devaddr = %d\n", hc->hc_num, devaddr);
3846 /** Release hc after performing LPM transaction */
3847 void dwc_otg_hcd_free_hc_from_lpm(dwc_otg_hcd_t * hcd)
3850 glpmcfg_data_t lpmcfg;
3853 lpmcfg.d32 = DWC_READ_REG32(&hcd->core_if->core_global_regs->glpmcfg);
3854 hc_num = lpmcfg.b.lpm_chan_index;
3856 hc = hcd->hc_ptr_array[hc_num];
3858 DWC_PRINTF("Freeing channel %d after LPM\n", hc_num);
3859 /* Return host channel to free list */
3860 DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
3863 int dwc_otg_hcd_send_lpm(dwc_otg_hcd_t * hcd, uint8_t devaddr, uint8_t hird,
3864 uint8_t bRemoteWake)
3866 glpmcfg_data_t lpmcfg;
3867 pcgcctl_data_t pcgcctl = {.d32 = 0 };
3870 channel = dwc_otg_hcd_get_hc_for_lpm_tran(hcd, devaddr);
3875 pcgcctl.b.enbl_sleep_gating = 1;
3876 DWC_MODIFY_REG32(hcd->core_if->pcgcctl, 0, pcgcctl.d32);
3878 /* Read LPM config register */
3879 lpmcfg.d32 = DWC_READ_REG32(&hcd->core_if->core_global_regs->glpmcfg);
3881 /* Program LPM transaction fields */
3882 lpmcfg.b.rem_wkup_en = bRemoteWake;
3883 lpmcfg.b.hird = hird;
3884 lpmcfg.b.hird_thres = 0x1c;
3885 lpmcfg.b.lpm_chan_index = channel;
3886 lpmcfg.b.en_utmi_sleep = 1;
3887 /* Program LPM config register */
3888 DWC_WRITE_REG32(&hcd->core_if->core_global_regs->glpmcfg, lpmcfg.d32);
3890 /* Send LPM transaction */
3891 lpmcfg.b.send_lpm = 1;
3892 DWC_WRITE_REG32(&hcd->core_if->core_global_regs->glpmcfg, lpmcfg.d32);
3897 #endif /* CONFIG_USB_DWC_OTG_LPM */
3899 int dwc_otg_hcd_is_status_changed(dwc_otg_hcd_t * hcd, int port)
3904 return -DWC_E_INVALID;
3907 retval = (hcd->flags.b.port_connect_status_change ||
3908 hcd->flags.b.port_reset_change ||
3909 hcd->flags.b.port_enable_change ||
3910 hcd->flags.b.port_suspend_change ||
3911 hcd->flags.b.port_over_current_change);
3914 DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:"
3915 " Root port status changed\n");
3916 DWC_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n",
3917 hcd->flags.b.port_connect_status_change);
3918 DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n",
3919 hcd->flags.b.port_reset_change);
3920 DWC_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n",
3921 hcd->flags.b.port_enable_change);
3922 DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n",
3923 hcd->flags.b.port_suspend_change);
3924 DWC_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n",
3925 hcd->flags.b.port_over_current_change);
3931 int dwc_otg_hcd_get_frame_number(dwc_otg_hcd_t * dwc_otg_hcd)
3935 DWC_READ_REG32(&dwc_otg_hcd->core_if->host_if->host_global_regs->
3939 DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n",
3942 return hfnum.b.frnum;
3945 int dwc_otg_hcd_start(dwc_otg_hcd_t * hcd,
3946 struct dwc_otg_hcd_function_ops *fops)
3951 if (!dwc_otg_is_device_mode(hcd->core_if) &&
3952 (!hcd->core_if->adp_enable || hcd->core_if->adp.adp_started)) {
3953 dwc_otg_hcd_reinit(hcd);
3955 retval = -DWC_E_NO_DEVICE;
3961 void *dwc_otg_hcd_get_priv_data(dwc_otg_hcd_t * hcd)
3966 void dwc_otg_hcd_set_priv_data(dwc_otg_hcd_t * hcd, void *priv_data)
3968 hcd->priv = priv_data;
3971 uint32_t dwc_otg_hcd_otg_port(dwc_otg_hcd_t * hcd)
3973 return hcd->otg_port;
3976 uint32_t dwc_otg_hcd_is_b_host(dwc_otg_hcd_t * hcd)
3979 if (hcd->core_if->op_state == B_HOST) {
3988 dwc_otg_hcd_urb_t *dwc_otg_hcd_urb_alloc(dwc_otg_hcd_t * hcd,
3989 int iso_desc_count, int atomic_alloc)
3991 dwc_otg_hcd_urb_t *dwc_otg_urb;
3995 sizeof(*dwc_otg_urb) +
3996 iso_desc_count * sizeof(struct dwc_otg_hcd_iso_packet_desc);
3998 dwc_otg_urb = DWC_ALLOC_ATOMIC(size);
4000 dwc_otg_urb = DWC_ALLOC(size);
4003 dwc_otg_urb->packet_count = iso_desc_count;
4005 DWC_ERROR("**** DWC OTG HCD URB alloc - "
4006 "%salloc of %db failed\n",
4007 atomic_alloc?"atomic ":"", size);
4012 void dwc_otg_hcd_urb_set_pipeinfo(dwc_otg_hcd_urb_t * dwc_otg_urb,
4013 uint8_t dev_addr, uint8_t ep_num,
4014 uint8_t ep_type, uint8_t ep_dir, uint16_t mps)
4016 dwc_otg_hcd_fill_pipe(&dwc_otg_urb->pipe_info, dev_addr, ep_num,
4017 ep_type, ep_dir, mps);
4020 ("addr = %d, ep_num = %d, ep_dir = 0x%x, ep_type = 0x%x, mps = %d\n",
4021 dev_addr, ep_num, ep_dir, ep_type, mps);
4025 void dwc_otg_hcd_urb_set_params(dwc_otg_hcd_urb_t * dwc_otg_urb,
4026 void *urb_handle, void *buf, dwc_dma_t dma,
4027 uint32_t buflen, void *setup_packet,
4028 dwc_dma_t setup_dma, uint32_t flags,
4031 dwc_otg_urb->priv = urb_handle;
4032 dwc_otg_urb->buf = buf;
4033 dwc_otg_urb->dma = dma;
4034 dwc_otg_urb->length = buflen;
4035 dwc_otg_urb->setup_packet = setup_packet;
4036 dwc_otg_urb->setup_dma = setup_dma;
4037 dwc_otg_urb->flags = flags;
4038 dwc_otg_urb->interval = interval;
4039 dwc_otg_urb->status = -DWC_E_IN_PROGRESS;
4042 uint32_t dwc_otg_hcd_urb_get_status(dwc_otg_hcd_urb_t * dwc_otg_urb)
4044 return dwc_otg_urb->status;
4047 uint32_t dwc_otg_hcd_urb_get_actual_length(dwc_otg_hcd_urb_t * dwc_otg_urb)
4049 return dwc_otg_urb->actual_length;
4052 uint32_t dwc_otg_hcd_urb_get_error_count(dwc_otg_hcd_urb_t * dwc_otg_urb)
4054 return dwc_otg_urb->error_count;
4057 void dwc_otg_hcd_urb_set_iso_desc_params(dwc_otg_hcd_urb_t * dwc_otg_urb,
4058 int desc_num, uint32_t offset,
4061 dwc_otg_urb->iso_descs[desc_num].offset = offset;
4062 dwc_otg_urb->iso_descs[desc_num].length = length;
4065 uint32_t dwc_otg_hcd_urb_get_iso_desc_status(dwc_otg_hcd_urb_t * dwc_otg_urb,
4068 return dwc_otg_urb->iso_descs[desc_num].status;
4071 uint32_t dwc_otg_hcd_urb_get_iso_desc_actual_length(dwc_otg_hcd_urb_t *
4072 dwc_otg_urb, int desc_num)
4074 return dwc_otg_urb->iso_descs[desc_num].actual_length;
4077 int dwc_otg_hcd_is_bandwidth_allocated(dwc_otg_hcd_t * hcd, void *ep_handle)
4080 dwc_otg_qh_t *qh = (dwc_otg_qh_t *) ep_handle;
4083 if (!DWC_LIST_EMPTY(&qh->qh_list_entry)) {
4090 int dwc_otg_hcd_is_bandwidth_freed(dwc_otg_hcd_t * hcd, void *ep_handle)
4092 dwc_otg_qh_t *qh = (dwc_otg_qh_t *) ep_handle;
4094 DWC_ASSERT(qh, "qh is not allocated\n");
4096 if (DWC_LIST_EMPTY(&qh->qh_list_entry)) {
4103 uint8_t dwc_otg_hcd_get_ep_bandwidth(dwc_otg_hcd_t * hcd, void *ep_handle)
4105 dwc_otg_qh_t *qh = (dwc_otg_qh_t *) ep_handle;
4106 DWC_ASSERT(qh, "qh is not allocated\n");
/* Dumps the full HCD state to the console: per-channel software state,
 * per-channel hardware registers for active channels, the in-flight URB
 * of each active channel, scheduler counters, and the Tx FIFO/queue
 * status registers. Debug aid only — no side effects beyond printing.
 * NOTE(review): this chunk is a lossy extraction; several structural
 * lines (braces, declarations) are missing from the visible text. */
4110 void dwc_otg_hcd_dump_state(dwc_otg_hcd_t * hcd)
4115 gnptxsts_data_t np_tx_status;
4116 hptxsts_data_t p_tx_status;
4118 num_channels = hcd->core_if->core_params->host_channels;
4121 ("************************************************************\n");
4122 DWC_PRINTF("HCD State:\n");
4123 DWC_PRINTF("  Num channels: %d\n", num_channels);
/* Per-channel software bookkeeping kept in hcd->hc_ptr_array. */
4124 for (i = 0; i < num_channels; i++) {
4125 dwc_hc_t *hc = hcd->hc_ptr_array[i];
4126 DWC_PRINTF("  Channel %d:\n", i);
4127 DWC_PRINTF("    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
4128 hc->dev_addr, hc->ep_num, hc->ep_is_in);
4129 DWC_PRINTF("    speed: %d\n", hc->speed);
4130 DWC_PRINTF("    ep_type: %d\n", hc->ep_type);
4131 DWC_PRINTF("    max_packet: %d\n", hc->max_packet);
4132 DWC_PRINTF("    data_pid_start: %d\n", hc->data_pid_start);
4133 DWC_PRINTF("    multi_count: %d\n", hc->multi_count);
4134 DWC_PRINTF("    xfer_started: %d\n", hc->xfer_started);
4135 DWC_PRINTF("    xfer_buff: %p\n", hc->xfer_buff);
4136 DWC_PRINTF("    xfer_len: %d\n", hc->xfer_len);
4137 DWC_PRINTF("    xfer_count: %d\n", hc->xfer_count);
4138 DWC_PRINTF("    halt_on_queue: %d\n", hc->halt_on_queue);
4139 DWC_PRINTF("    halt_pending: %d\n", hc->halt_pending);
4140 DWC_PRINTF("    halt_status: %d\n", hc->halt_status);
4141 DWC_PRINTF("    do_split: %d\n", hc->do_split);
4142 DWC_PRINTF("    complete_split: %d\n", hc->complete_split);
4143 DWC_PRINTF("    hub_addr: %d\n", hc->hub_addr);
4144 DWC_PRINTF("    port_addr: %d\n", hc->port_addr);
4145 DWC_PRINTF("    xact_pos: %d\n", hc->xact_pos);
4146 DWC_PRINTF("    requests: %d\n", hc->requests);
4147 DWC_PRINTF("    qh: %p\n", hc->qh);
/* For channels with a transfer in flight, also dump the live
 * hardware registers (HFNUM, HCCHAR, HCTSIZ, HCINT, HCINTMSK). */
4148 if (hc->xfer_started) {
4150 hcchar_data_t hcchar;
4151 hctsiz_data_t hctsiz;
4153 hcintmsk_data_t hcintmsk;
4155 DWC_READ_REG32(&hcd->core_if->
4156 host_if->host_global_regs->hfnum);
4158 DWC_READ_REG32(&hcd->core_if->host_if->
4159 hc_regs[i]->hcchar);
4161 DWC_READ_REG32(&hcd->core_if->host_if->
4162 hc_regs[i]->hctsiz);
4164 DWC_READ_REG32(&hcd->core_if->host_if->
4167 DWC_READ_REG32(&hcd->core_if->host_if->
4168 hc_regs[i]->hcintmsk);
4169 DWC_PRINTF("    hfnum: 0x%08x\n", hfnum.d32);
4170 DWC_PRINTF("    hcchar: 0x%08x\n", hcchar.d32);
4171 DWC_PRINTF("    hctsiz: 0x%08x\n", hctsiz.d32);
4172 DWC_PRINTF("    hcint: 0x%08x\n", hcint.d32);
4173 DWC_PRINTF("    hcintmsk: 0x%08x\n", hcintmsk.d32);
/* Walk the channel's QTD list to find the in-process QTD and
 * print its URB's pipe, buffer and progress information. */
4175 if (hc->xfer_started && hc->qh) {
4177 dwc_otg_hcd_urb_t *urb;
4179 DWC_CIRCLEQ_FOREACH(qtd, &hc->qh->qtd_list, qtd_list_entry) {
4180 if (!qtd->in_process)
4184 DWC_PRINTF("    URB Info:\n");
4185 DWC_PRINTF("      qtd: %p, urb: %p\n", qtd, urb);
4187 DWC_PRINTF("      Dev: %d, EP: %d %s\n",
4188 dwc_otg_hcd_get_dev_addr(&urb->
4190 dwc_otg_hcd_get_ep_num(&urb->
4192 dwc_otg_hcd_is_pipe_in(&urb->
4195 DWC_PRINTF("      Max packet size: %d\n",
4196 dwc_otg_hcd_get_mps(&urb->
4198 DWC_PRINTF("      transfer_buffer: %p\n",
4200 DWC_PRINTF("      transfer_dma: %p\n",
4202 DWC_PRINTF("      transfer_buffer_length: %d\n",
4204 DWC_PRINTF("      actual_length: %d\n",
4205 urb->actual_length);
/* Scheduler counters and FIFO status, then delegate to the
 * frame-remaining and register dump helpers. */
4210 DWC_PRINTF("  non_periodic_channels: %d\n", hcd->non_periodic_channels);
4211 DWC_PRINTF("  periodic_channels: %d\n", hcd->periodic_channels);
4212 DWC_PRINTF("  periodic_usecs: %d\n", hcd->periodic_usecs);
4214 DWC_READ_REG32(&hcd->core_if->core_global_regs->gnptxsts);
4215 DWC_PRINTF("  NP Tx Req Queue Space Avail: %d\n",
4216 np_tx_status.b.nptxqspcavail);
4217 DWC_PRINTF("  NP Tx FIFO Space Avail: %d\n",
4218 np_tx_status.b.nptxfspcavail);
4220 DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hptxsts);
4221 DWC_PRINTF("  P Tx Req Queue Space Avail: %d\n",
4222 p_tx_status.b.ptxqspcavail);
4223 DWC_PRINTF("  P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail);
4224 dwc_otg_hcd_dump_frrem(hcd);
4225 dwc_otg_dump_global_registers(hcd->core_if);
4226 dwc_otg_dump_host_registers(hcd->core_if);
4228 ("************************************************************\n");
4234 void dwc_print_setup_data(uint8_t * setup)
4237 if (CHK_DEBUG_LEVEL(DBG_HCD)) {
4238 DWC_PRINTF("Setup Data = MSB ");
4239 for (i = 7; i >= 0; i--)
4240 DWC_PRINTF("%02x ", setup[i]);
4242 DWC_PRINTF(" bmRequestType Tranfer = %s\n",
4243 (setup[0] & 0x80) ? "Device-to-Host" :
4245 DWC_PRINTF(" bmRequestType Type = ");
4246 switch ((setup[0] & 0x60) >> 5) {
4248 DWC_PRINTF("Standard\n");
4251 DWC_PRINTF("Class\n");
4254 DWC_PRINTF("Vendor\n");
4257 DWC_PRINTF("Reserved\n");
4260 DWC_PRINTF(" bmRequestType Recipient = ");
4261 switch (setup[0] & 0x1f) {
4263 DWC_PRINTF("Device\n");
4266 DWC_PRINTF("Interface\n");
4269 DWC_PRINTF("Endpoint\n");
4272 DWC_PRINTF("Other\n");
4275 DWC_PRINTF("Reserved\n");
4278 DWC_PRINTF(" bRequest = 0x%0x\n", setup[1]);
4279 DWC_PRINTF(" wValue = 0x%0x\n", *((uint16_t *) & setup[2]));
4280 DWC_PRINTF(" wIndex = 0x%0x\n", *((uint16_t *) & setup[4]));
4281 DWC_PRINTF(" wLength = 0x%0x\n\n", *((uint16_t *) & setup[6]));
/* Dumps "frame remaining" statistics gathered at SOF, at transfer
 * start, and at two internal sample points (A and B), bucketed by
 * microframe (7, 0, and 1-6). Each bucket prints sample count,
 * accumulated value and average, guarding every division against a
 * zero sample count. Debug aid only — no side effects beyond printing.
 * NOTE(review): this chunk is a lossy extraction; the enclosing braces
 * and the compile-time guard around the body are not visible here. */
4286 void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t * hcd)
4289 DWC_PRINTF("Frame remaining at SOF:\n");
4290 DWC_PRINTF("  samples %u, accum %llu, avg %llu\n",
4291 hcd->frrem_samples, hcd->frrem_accum,
4292 (hcd->frrem_samples > 0) ?
4293 hcd->frrem_accum / hcd->frrem_samples : 0);
/* Frame remaining measured when each transfer is started, per
 * microframe bucket (counters live on the core_if). */
4296 DWC_PRINTF("Frame remaining at start_transfer (uframe 7):\n");
4297 DWC_PRINTF("  samples %u, accum %llu, avg %llu\n",
4298 hcd->core_if->hfnum_7_samples,
4299 hcd->core_if->hfnum_7_frrem_accum,
4300 (hcd->core_if->hfnum_7_samples >
4301 0) ? hcd->core_if->hfnum_7_frrem_accum /
4302 hcd->core_if->hfnum_7_samples : 0);
4303 DWC_PRINTF("Frame remaining at start_transfer (uframe 0):\n");
4304 DWC_PRINTF("  samples %u, accum %llu, avg %llu\n",
4305 hcd->core_if->hfnum_0_samples,
4306 hcd->core_if->hfnum_0_frrem_accum,
4307 (hcd->core_if->hfnum_0_samples >
4308 0) ? hcd->core_if->hfnum_0_frrem_accum /
4309 hcd->core_if->hfnum_0_samples : 0);
4310 DWC_PRINTF("Frame remaining at start_transfer (uframe 1-6):\n");
4311 DWC_PRINTF("  samples %u, accum %llu, avg %llu\n",
4312 hcd->core_if->hfnum_other_samples,
4313 hcd->core_if->hfnum_other_frrem_accum,
4314 (hcd->core_if->hfnum_other_samples >
4315 0) ? hcd->core_if->hfnum_other_frrem_accum /
4316 hcd->core_if->hfnum_other_samples : 0);
/* Sample point A counters (kept on the hcd itself). */
4319 DWC_PRINTF("Frame remaining at sample point A (uframe 7):\n");
4320 DWC_PRINTF("  samples %u, accum %llu, avg %llu\n",
4321 hcd->hfnum_7_samples_a, hcd->hfnum_7_frrem_accum_a,
4322 (hcd->hfnum_7_samples_a > 0) ?
4323 hcd->hfnum_7_frrem_accum_a / hcd->hfnum_7_samples_a : 0);
4324 DWC_PRINTF("Frame remaining at sample point A (uframe 0):\n");
4325 DWC_PRINTF("  samples %u, accum %llu, avg %llu\n",
4326 hcd->hfnum_0_samples_a, hcd->hfnum_0_frrem_accum_a,
4327 (hcd->hfnum_0_samples_a > 0) ?
4328 hcd->hfnum_0_frrem_accum_a / hcd->hfnum_0_samples_a : 0);
4329 DWC_PRINTF("Frame remaining at sample point A (uframe 1-6):\n");
4330 DWC_PRINTF("  samples %u, accum %llu, avg %llu\n",
4331 hcd->hfnum_other_samples_a, hcd->hfnum_other_frrem_accum_a,
4332 (hcd->hfnum_other_samples_a > 0) ?
4333 hcd->hfnum_other_frrem_accum_a /
4334 hcd->hfnum_other_samples_a : 0);
/* Sample point B counters. */
4337 DWC_PRINTF("Frame remaining at sample point B (uframe 7):\n");
4338 DWC_PRINTF("  samples %u, accum %llu, avg %llu\n",
4339 hcd->hfnum_7_samples_b, hcd->hfnum_7_frrem_accum_b,
4340 (hcd->hfnum_7_samples_b > 0) ?
4341 hcd->hfnum_7_frrem_accum_b / hcd->hfnum_7_samples_b : 0);
4342 DWC_PRINTF("Frame remaining at sample point B (uframe 0):\n");
4343 DWC_PRINTF("  samples %u, accum %llu, avg %llu\n",
4344 hcd->hfnum_0_samples_b, hcd->hfnum_0_frrem_accum_b,
4345 (hcd->hfnum_0_samples_b > 0) ?
4346 hcd->hfnum_0_frrem_accum_b / hcd->hfnum_0_samples_b : 0);
4347 DWC_PRINTF("Frame remaining at sample point B (uframe 1-6):\n");
4348 DWC_PRINTF("  samples %u, accum %llu, avg %llu\n",
4349 hcd->hfnum_other_samples_b, hcd->hfnum_other_frrem_accum_b,
4350 (hcd->hfnum_other_samples_b > 0) ?
4351 hcd->hfnum_other_frrem_accum_b /
4352 hcd->hfnum_other_samples_b : 0);
4356 #endif /* DWC_DEVICE_ONLY */