/*
 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
 * Author: Chao Xie <chao.xie@marvell.com>
 *	   Neil Zhang <zhangwm@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/platform_data/mv_usb.h>
#include <asm/system.h>
#include <asm/unaligned.h>
#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"
#define DRIVER_VERSION		"8 Nov 2010"

#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
			((ep)->udc->ep0_dir) : ((ep)->direction))

/* timeout value -- usec */
#define RESET_TIMEOUT		10000
#define FLUSH_TIMEOUT		10000
#define EPSTATUS_TIMEOUT	10000
#define PRIME_TIMEOUT		10000
#define READSAFE_TIMEOUT	1000
#define DTD_TIMEOUT		1000

#define LOOPS_USEC_SHIFT	4
#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
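
/*
 * Illustration only (not part of the driver): every busy-wait below
 * follows one pattern.  LOOPS() converts a microsecond budget into an
 * iteration count, and each iteration burns LOOPS_USEC (16) microseconds,
 * so LOOPS(RESET_TIMEOUT) = 10000 >> 4 = 625 iterations, i.e. roughly
 * 10 ms.  A minimal sketch, with "reg" and "mask" standing in for a
 * real register and bit:
 *
 *	loops = LOOPS(RESET_TIMEOUT);
 *	while (readl(reg) & mask) {
 *		if (loops == 0)
 *			return -ETIME;
 *		udelay(LOOPS_USEC);
 *		loops--;
 *	}
 */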
static DECLARE_COMPLETION(release_done);

static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;

/* controller device global variable */
static struct mv_udc *the_controller;

static void nuke(struct mv_ep *ep, int status);

/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength		= USB_DT_ENDPOINT_SIZE,
	.bDescriptorType	= USB_DT_ENDPOINT,
	.bEndpointAddress	= 0,
	.bmAttributes		= USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize		= EP0_MAX_PKT_SIZE,
};
static void ep0_reset(struct mv_udc *udc)
	for (i = 0; i < 2; i++) {
		ep->dqh = &udc->ep_dqh[i];

		/* configure ep0 endpoint capabilities in dQH */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)

		epctrlx = readl(&udc->op_regs->epctrlx[0]);
			epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);
			epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[0]);
/* protocol ep0 stall; it is automatically cleared on a new transaction */
static void ep0_stall(struct mv_udc *udc)
	/* set TX and RX to stall */
	epctrlx = readl(&udc->op_regs->epctrlx[0]);
	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
	writel(epctrlx, &udc->op_regs->epctrlx[0]);

	/* update ep0 state */
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;
static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
	struct mv_dtd	*curr_dtd;
	struct mv_dqh	*curr_dqh;
	int td_complete, actual, remaining_length;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;

	curr_dtd = curr_req->head;

	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		remaining_length =
			(curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
				>> DTD_LENGTH_BIT_POS;
		actual -= remaining_length;

			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {

		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;

	curr_req->req.actual = actual;
/*
 * done() - retire a request; caller blocked irqs
 * @status : request status to be set, only works when
 *	     the request is still in progress.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;

	udc = (struct mv_udc *)ep->udc;
	/* remove the req from ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set to -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;

	status = req->req.status;

	/* Free the dtds for the request */
	for (j = 0; j < req->dtd_count; j++) {
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);

		dma_unmap_single(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));
		req->req.dma = DMA_ADDR_INVALID;

		dma_sync_single_for_cpu(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,
			((ep_dir(ep) == EP_DIR_IN) ?
				DMA_TO_DEVICE : DMA_FROM_DEVICE));

	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	spin_unlock(&ep->udc->lock);
	/*
	 * complete() is from the gadget layer,
	 * e.g. fsg->bulk_in_complete()
	 */
	if (req->req.complete)
		req->req.complete(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
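
/*
 * Locking note (illustration, inferred from the visible code above):
 * done() deliberately drops udc->lock around req->req.complete().  The
 * gadget driver's completion callback is allowed to call back into this
 * driver, e.g. to queue the next request, which would deadlock if the
 * lock were still held.  ep->stopped is saved and restored so a
 * completion handler that toggles endpoint state does not leak it.
 */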
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
	u32 tmp, epstatus, bit_pos, direction;
	int readsafe, retval = 0;

	direction = ep_dir(ep);
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
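
	/*
	 * Layout recap (illustration): in the ENDPTPRIME, ENDPTSTATUS,
	 * ENDPTCOMPLETE and ENDPTFLUSH registers the low 16 bits index
	 * the RX (OUT) endpoints and the high 16 bits the TX (IN)
	 * endpoints, so for example ep2-IN maps to:
	 *
	 *	bit_pos = 1 << (16 + 2);	which is 0x00040000
	 */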
	/* check if the pipe is empty */
	if (!(list_empty(&ep->queue))) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
		if (readl(&udc->op_regs->epprime) & bit_pos) {
			loops = LOOPS(PRIME_TIMEOUT);
			while (readl(&udc->op_regs->epprime) & bit_pos) {
			if (readl(&udc->op_regs->epstatus) & bit_pos)

		loops = LOOPS(READSAFE_TIMEOUT);
		while (readsafe == 0) {
			/* start with setting the semaphores */
			tmp = readl(&udc->op_regs->usbcmd);
			tmp |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(tmp, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared.  If the hardware saw a hazard it will have
			 * cleared the bit; otherwise it remains set and we
			 * can proceed with priming the endpoint if it is not
			 * already primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET) {

		/* Clear the semaphore */
		tmp = readl(&udc->op_regs->usbcmd);
		tmp &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(tmp, &udc->op_regs->usbcmd);
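
		/*
		 * ATDTW ("add dTD tripwire") recap, for illustration:
		 * the semaphore guards the ENDPTSTATUS read against the
		 * controller retiring the dTD concurrently:
		 *   1. set ATDTW in USBCMD
		 *   2. read ENDPTSTATUS
		 *   3. if ATDTW is still set the read was consistent;
		 *      if the controller cleared it, a hazard occurred,
		 *      so retry
		 *   4. clear ATDTW
		 */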
		/* If the endpoint is not active, we activate it now. */
			if (direction == EP_DIR_IN) {
				struct mv_dtd *curr_dtd = dma_to_virt(
					&udc->dev->dev, dqh->curr_dtd_ptr);

				loops = LOOPS(DTD_TIMEOUT);
				while (curr_dtd->size_ioc_sts
						& DTD_STATUS_ACTIVE) {

			/* No other transfers on the queue */

			/* Write dQH next pointer and terminate bit to 0 */
			dqh->next_dtd_ptr = req->head->td_dma
				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
			dqh->size_ioc_int_sts = 0;

			/*
			 * Ensure that updates to the QH will
			 * occur before priming.
			 */

			/* Prime the Endpoint */
			writel(bit_pos, &udc->op_regs->epprime);
	} else {
		/* Write dQH next pointer and terminate bit to 0 */
		dqh->next_dtd_ptr = req->head->td_dma
			& EP_QUEUE_HEAD_NEXT_POINTER_MASK;
		dqh->size_ioc_int_sts = 0;

		/* Ensure that updates to the QH will occur before priming. */

		/* Prime the Endpoint */
		writel(bit_pos, &udc->op_regs->epprime);

		if (direction == EP_DIR_IN) {
			/* FIXME: add a status check after priming the IN ep */
			u32 curr_dtd_ptr = dqh->curr_dtd_ptr;

			loops = LOOPS(DTD_TIMEOUT);
			while (curr_dtd_ptr != req->head->td_dma) {
				curr_dtd_ptr = dqh->curr_dtd_ptr;
					dev_err(&udc->dev->dev,
						"failed to prime %s\n",

				if (loops == (LOOPS(DTD_TIMEOUT) >> 2)) {
					dev_info(&udc->dev->dev,
					writel(bit_pos,
						&udc->op_regs->epprime);
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
	/* how big will this transfer be? */
	*length = min(req->req.length - req->req.actual,
		(unsigned)EP_MAX_LENGTH_TRANSFER);

	/*
	 * Be careful that no __GFP_HIGHMEM is set,
	 * or we cannot use dma_to_virt.
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_KERNEL, dma);

	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

	req->req.actual += *length;

	/* a zlp is needed if req->req.zero is set */
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
	} else if (req->req.length == req->req.actual)

	/* Fill in the transfer size; set the active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)

	dtd->size_ioc_sts = temp;

/* generate the dTD linked list for a request */
static int req_to_dtd(struct mv_req *req)
	int is_last, is_first = 1;
	struct mv_dtd *dtd, *last_dtd = NULL;

		dtd = build_dtd(req, &count, &dma, &is_last);

			last_dtd->dtd_next = dma;
			last_dtd->next_dtd_virt = dtd;

	/* set terminate bit to 1 for the last dTD */
	dtd->dtd_next = DTD_NEXT_TERMINATE;
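
/*
 * dTD sizing recap (illustration, assuming the usual
 * EP_MAX_LENGTH_TRANSFER of 0x4000): one dTD carries at most 16 KiB,
 * described by the five 4 KiB page pointers filled in above, so e.g.
 * a 20 KiB request becomes a two-dTD chain:
 *
 *	dqh->next_dtd_ptr -> dTD0 (16 KiB)
 *			  -> dTD1 (4 KiB, IOC set,
 *				   dtd_next = DTD_NEXT_TERMINATE)
 *
 * build_dtd() advances req->req.actual per dTD, and req_to_dtd() links
 * the chain twice: dtd_next holds the DMA address for the controller,
 * next_dtd_virt the kernel pointer for the CPU-side walk.
 */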
static int mv_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
	u32 bit_pos, epctrlx, direction;
	unsigned char zlt = 0, ios = 0, mult = 0;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!_ep || !desc || ep->desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)

	direction = ep_dir(ep);
	max = usb_endpoint_maxp(desc);

	/*
	 * disable HW zero length termination select;
	 * the driver handles zero length packets through req->req.zero
	 */

	/* Get the endpoint queue head address */
	dqh = (struct mv_dqh *)ep->dqh;

	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Check if the Endpoint is Primed */
	if ((readl(&udc->op_regs->epprime) & bit_pos)
		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
		dev_info(&udc->dev->dev,
			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
			(unsigned)readl(&udc->op_regs->epprime),
			(unsigned)readl(&udc->op_regs->epstatus),

	/* Set the max packet length, interrupt on Setup and Mult fields */
	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_ISOC:
		/* Calculate transactions needed for high bandwidth iso */
		mult = (unsigned char)(1 + ((max >> 11) & 0x03));
		max = max & 0x7ff;	/* bits 0-10 */
		/* 3 transactions at most */

	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
		| (mult << EP_QUEUE_HEAD_MULT_POS)
		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
		| (ios ? EP_QUEUE_HEAD_IOS : 0);
	dqh->next_dtd_ptr = 1;
	dqh->size_ioc_int_sts = 0;
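
	/*
	 * dQH field recap (illustration): max_packet_length packs the
	 * MPS, the high-bandwidth Mult count, the ZLT select and IOS
	 * into one word, and next_dtd_ptr = 1 sets only the Terminate
	 * bit (no dTD linked yet).  Worked example for a high-bandwidth
	 * isochronous endpoint with wMaxPacketSize 0x0c00 (2
	 * transactions of 1024 bytes per microframe):
	 *
	 *	mult = 1 + ((0x0c00 >> 11) & 0x03);	yields 2
	 *	max  = 0x0c00 & 0x7ff;			yields 0x400
	 */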
	ep->ep.maxpacket = max;

	/* Enable the endpoint for Rx or Tx and set the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (direction == EP_DIR_IN) {
		epctrlx &= ~EPCTRL_TX_ALL_MASK;
		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_TX_EP_TYPE_SHIFT);
		epctrlx &= ~EPCTRL_RX_ALL_MASK;
		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_RX_EP_TYPE_SHIFT);
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/*
	 * Implement Guideline (GL# USB-7): the unused endpoint type must
	 * be programmed to bulk.
	 */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
		epctrlx |= ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
			<< EPCTRL_RX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
		epctrlx |= ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
			<< EPCTRL_TX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
static int mv_ep_disable(struct usb_ep *_ep)
	u32 bit_pos, epctrlx, direction;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->desc)

	/* Get the endpoint queue head address */

	direction = ep_dir(ep);
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);
static struct usb_request *
mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
	struct mv_req *req = NULL;

	req = kzalloc(sizeof *req, gfp_flags);

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);

static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
	struct mv_req *req = NULL;

	req = container_of(_req, struct mv_req, req);
static void mv_ep_fifo_flush(struct usb_ep *_ep)
	u32 bit_pos, direction;
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);

	direction = ep_dir(ep);
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
	/*
	 * Flushing will halt the pipe.
	 * Write 1 to the Flush register.
	 */
	writel(bit_pos, &udc->op_regs->epflush);

	/* Wait until flushing completed */
	loops = LOOPS(FLUSH_TIMEOUT);
	while (readl(&udc->op_regs->epflush) & bit_pos) {
		/*
		 * The ENDPTFLUSH bit should be cleared to indicate this
		 * operation is complete.
		 */
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTFLUSH=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epflush),

	loops = LOOPS(EPSTATUS_TIMEOUT);
	while (readl(&udc->op_regs->epstatus) & bit_pos) {
		unsigned int inter_loops;

			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),

		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush) & bit_pos) {
			/*
			 * The ENDPTFLUSH bit should be cleared to indicate
			 * this operation is complete.
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					(unsigned)readl(&udc->op_regs->epflush),
/* queues (submits) an I/O request to an endpoint */
mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req = container_of(_req, struct mv_req, req);
	struct mv_udc *udc = ep->udc;

	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_err(&udc->dev->dev, "%s, bad params", __func__);
	if (unlikely(!_ep || !ep->desc)) {
		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
	if ((ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
			== USB_ENDPOINT_XFER_ISOC) {
		if (req->req.length > ep->ep.maxpacket)

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)

	/* map virtual address to hardware */
	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
					req->req.length, ep_dir(ep)

		dma_sync_single_for_device(ep->udc->gadget.dev.parent,
			req->req.dma, req->req.length,

	req->req.status = -EINPROGRESS;

	spin_lock_irqsave(&udc->lock, flags);

	/* build the dtds and push them to the device queue */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
			spin_unlock_irqrestore(&udc->lock, flags);

		spin_unlock_irqrestore(&udc->lock, flags);

	/* Update ep0 state */
		udc->ep0_state = DATA_STATE_XMIT;

	/* irq handler advances the queue */
	list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_udc *udc = ep->udc;
	int stopped, ret = 0;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
	if (&req->req != _req) {

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next, struct mv_req,

			/* Point the QH to the first dTD of the next request */
			writel((u32)next_req->head->td_dma, &qh->curr_dtd_ptr);

			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;

	/* The request hasn't been processed, patch up the TD chain */
		struct mv_req *prev_req;

		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
			&prev_req->tail->dtd_next);

	done(ep, req, -ECONNRESET);

	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

		if (direction == EP_DIR_IN)
			epctrlx |= EPCTRL_TX_EP_STALL;
			epctrlx |= EPCTRL_RX_EP_STALL;
		if (direction == EP_DIR_IN) {
			epctrlx &= ~EPCTRL_TX_EP_STALL;
			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
			epctrlx &= ~EPCTRL_RX_EP_STALL;
			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;

	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);

static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (direction == EP_DIR_OUT)
		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
	unsigned long flags = 0;

	ep = container_of(_ep, struct mv_ep, ep);

	if (!_ep || !ep->desc) {

	if ((ep->desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
			== USB_ENDPOINT_XFER_ISOC) {
		status = -EOPNOTSUPP;

	/*
	 * An attempt to halt an IN ep will fail if any transfer requests
	 * are still queued.
	 */
	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);

	spin_unlock_irqrestore(&ep->udc->lock, flags);

	if (ep->ep_num == 0) {
		udc->ep0_state = WAIT_FOR_SETUP;
		udc->ep0_dir = EP_DIR_OUT;

static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
	return mv_ep_set_halt_wedge(_ep, halt, 0);

static int mv_ep_set_wedge(struct usb_ep *_ep)
	return mv_ep_set_halt_wedge(_ep, 1, 1);
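
/*
 * halt vs. wedge (illustration, per the generic gadget API semantics):
 * set_halt stalls the endpoint, and the host may clear the stall with
 * ClearFeature(ENDPOINT_HALT).  set_wedge applies the same stall but
 * the endpoint must stay halted, ignoring the host's ClearFeature,
 * until the function driver itself clears it (usb_ep_clear_halt), as
 * e.g. mass-storage does after a protocol error.  Hence both paths
 * funnel into mv_ep_set_halt_wedge() above with a wedge flag.
 */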
static struct usb_ep_ops mv_ep_ops = {
	.enable		= mv_ep_enable,
	.disable	= mv_ep_disable,

	.alloc_request	= mv_alloc_request,
	.free_request	= mv_free_request,

	.queue		= mv_ep_queue,
	.dequeue	= mv_ep_dequeue,

	.set_wedge	= mv_ep_set_wedge,
	.set_halt	= mv_ep_set_halt,
	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};

static void udc_clock_enable(struct mv_udc *udc)
	for (i = 0; i < udc->clknum; i++)
		clk_enable(udc->clk[i]);

static void udc_clock_disable(struct mv_udc *udc)
	for (i = 0; i < udc->clknum; i++)
		clk_disable(udc->clk[i]);
static void udc_stop(struct mv_udc *udc)
	/* Disable interrupts */
	tmp = readl(&udc->op_regs->usbintr);
	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
	writel(tmp, &udc->op_regs->usbintr);

	/* Clear the Run/Stop bit in the command register to stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

static void udc_start(struct mv_udc *udc)
	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
		| USBINTR_PORT_CHANGE_DETECT_EN
		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
	/* Enable interrupts */
	writel(usbintr, &udc->op_regs->usbintr);

	/* Set the Run bit in the command register */
	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
static int udc_reset(struct mv_udc *udc)
	/* Stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

	/* Reset the controller to get default values */
	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

	/* wait for the reset to complete */
	loops = LOOPS(RESET_TIMEOUT);
	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
			dev_err(&udc->dev->dev,
				"timeout waiting for reset to complete\n");

	/* set the controller to device mode */
	tmp = readl(&udc->op_regs->usbmode);
	tmp |= USBMODE_CTRL_MODE_DEVICE;

	/* turn setup lockout off, require the setup tripwire in usbcmd */
	tmp |= USBMODE_SETUP_LOCK_OFF | USBMODE_STREAM_DISABLE;

	writel(tmp, &udc->op_regs->usbmode);

	writel(0x0, &udc->op_regs->epsetupstat);

	/* Configure the Endpoint List Address */
	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
		&udc->op_regs->eplistaddr);

	portsc = readl(&udc->op_regs->portsc[0]);
	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
		portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);

		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
		portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);

	writel(portsc, &udc->op_regs->portsc[0]);

	tmp = readl(&udc->op_regs->epctrlx[0]);
	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
	writel(tmp, &udc->op_regs->epctrlx[0]);
static int mv_udc_get_frame(struct usb_gadget *gadget)
	udc = container_of(gadget, struct mv_udc, gadget);

	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;

/* Tries to wake up the host connected to this gadget */
static int mv_udc_wakeup(struct usb_gadget *gadget)
	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);

	/* Remote wakeup feature not enabled by the host */
	if (!udc->remote_wakeup)

	portsc = readl(&udc->op_regs->portsc[0]);
	/* not suspended? */
	if (!(portsc & PORTSCX_PORT_SUSPEND))
	/* trigger force resume */
	portsc |= PORTSCX_PORT_FORCE_RESUME;
	writel(portsc, &udc->op_regs->portsc[0]);

static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);
	if (udc->driver && udc->softconnect)

	spin_unlock_irqrestore(&udc->lock, flags);
static int mv_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *));
static int mv_udc_stop(struct usb_gadget_driver *driver);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,
	.start		= mv_udc_start,
	.stop		= mv_udc_stop,
};

static void mv_udc_testmode(struct mv_udc *udc, u16 index, bool enter)
	dev_info(&udc->dev->dev, "Test Mode is not supported yet\n");
static int eps_init(struct mv_udc *udc)
	/* initialize ep0 */
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &mv_ep_ops;
	ep->ep.maxpacket = EP0_MAX_PKT_SIZE;
	ep->desc = &mv_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

	/* initialize the other endpoints */
	for (i = 2; i < udc->max_eps * 2; i++) {
			snprintf(name, sizeof(name), "ep%din", i / 2);
			ep->direction = EP_DIR_IN;
			snprintf(name, sizeof(name), "ep%dout", i / 2);
			ep->direction = EP_DIR_OUT;

		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;

		ep->ep.ops = &mv_ep_ops;

		ep->ep.maxpacket = (unsigned short) ~0;

		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

		ep->dqh = &udc->ep_dqh[i];
/* delete all endpoint requests, called with spinlock held */
static void nuke(struct mv_ep *ep, int status)
	/* called with spinlock held */

	/* endpoint fifo flush */
	mv_ep_fifo_flush(&ep->ep);

	while (!list_empty(&ep->queue)) {
		struct mv_req *req = NULL;
		req = list_entry(ep->queue.next, struct mv_req, queue);
		done(ep, req, status);

/* stop all USB activities */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);

	/* report disconnect; the driver is already quiesced */
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
static int mv_udc_start(struct usb_gadget_driver *driver,
		int (*bind)(struct usb_gadget *))
	struct mv_udc *udc = the_controller;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	udc->driver = driver;
	udc->gadget.dev.driver = &driver->driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = USB_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	retval = bind(&udc->gadget);
		dev_err(&udc->dev->dev, "bind to driver %s --> %d\n",
			driver->driver.name, retval);
		udc->gadget.dev.driver = NULL;

static int mv_udc_stop(struct usb_gadget_driver *driver)
	struct mv_udc *udc = the_controller;
	unsigned long flags;

	spin_lock_irqsave(&udc->lock, flags);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, driver);
	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind the gadget driver */
	driver->unbind(&udc->gadget);
	udc->gadget.dev.driver = NULL;
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
	udc->ep0_dir = direction;

	req = udc->status_req;

	/* fill in the request structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
		req->req.length = 0;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->req.complete = NULL;

	/* prime the data phase */
	if (!req_to_dtd(req))
		retval = queue_dtd(ep, req);

		dev_err(&udc->dev->dev, "response error on GET_STATUS request\n");

	list_add_tail(&req->queue, &ep->queue);
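
/*
 * Example (illustration): a GET_STATUS reply is a 2-byte little-endian
 * bitmap returned on ep0-IN.  For a self-powered device with remote
 * wakeup armed, ch9getstatus() below computes status = 0x0003
 * (bit 0 = SELF_POWERED, bit 1 = REMOTE_WAKEUP), which
 * udc_prime_status() stores via cpu_to_le16() into the small
 * status_req buffer allocated at probe time.
 */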
static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
	udc->dev_addr = (u8)setup->wValue;

	/* update usb state */
	udc->usb_state = USB_STATE_ADDRESS;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))

static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
			!= (USB_DIR_IN | USB_TYPE_STANDARD))

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		status = 1 << USB_DEVICE_SELF_POWERED;
		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_INTERFACE) {
		/* get interface status */
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_ENDPOINT) {
		u8 ep_num, direction;

		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
		status = ep_is_stall(udc, ep_num, direction)
				<< USB_ENDPOINT_HALT;

	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
			== (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
		case USB_DEVICE_TEST_MODE:
			mv_udc_testmode(udc, 0, false);
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
			== (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
					? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
					|| ep_num > udc->max_eps)
			ep = &udc->eps[ep_num * 2 + direction];

				spin_unlock(&udc->lock);
				ep_set_stall(udc, ep_num, direction, 0);
				spin_lock(&udc->lock);

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))

	udc->ep0_state = DATA_STATE_XMIT;
static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
			== (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
		case USB_DEVICE_TEST_MODE:
			if (setup->wIndex & 0xFF
				&& udc->gadget.speed != USB_SPEED_HIGH)
			if (udc->usb_state == USB_STATE_CONFIGURED
				|| udc->usb_state == USB_STATE_ADDRESS
				|| udc->usb_state == USB_STATE_DEFAULT)
				mv_udc_testmode(udc,
					setup->wIndex & 0xFF00, true);
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
			== (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
					? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
					|| ep_num > udc->max_eps)
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
	bool delegate = false;

	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
		setup->bRequestType, setup->bRequest,
		setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by the gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
					&udc->local_setup_buff) < 0)
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					? DATA_STATE_XMIT : DATA_STATE_RECV;
			/* no DATA phase, IN STATUS phase from the gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
					&udc->local_setup_buff) < 0)
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
/* complete the DATA or STATUS phase of ep0; prime the status phase if needed */
static void ep0_req_complete(struct mv_udc *udc,
	struct mv_ep *ep0, struct mv_req *req)
	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* set the new address */
		new_addr = (u32)udc->dev_addr;
		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
			&udc->op_regs->deviceaddr);

	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
	case DATA_STATE_RECV:
		/* send status phase */
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
	case WAIT_FOR_OUT_STATUS:
		udc->ep0_state = WAIT_FOR_SETUP;
	case WAIT_FOR_SETUP:
		dev_err(&udc->dev->dev, "unexpected ep0 packet\n");
static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear the bit in ENDPTSETUPSTAT */
	temp = readl(&udc->op_regs->epsetupstat);
	writel(temp | (1 << ep_num), &udc->op_regs->epsetupstat);

	/* loop while a hazard exists, i.e. a new setup packet arrives mid-copy */
		/* Set the Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to the local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear the Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
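
/*
 * Why the tripwire (illustration): the controller DMA-writes incoming
 * SETUP packets straight into dqh->setup_buffer, so a new SETUP could
 * land mid-memcpy.  SUTW makes that race detectable:
 *   1. set SUTW in USBCMD
 *   2. copy the 8 setup bytes out of the dQH
 *   3. if SUTW is still set the copy was atomic; if the controller
 *      cleared it, a new packet arrived during the copy -- copy again
 *   4. clear SUTW
 */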
static void irq_process_tr_complete(struct mv_udc *udc)
	int i, ep_num = 0, direction = 0;
	struct mv_ep	*curr_ep;
	struct mv_req	*curr_req, *temp_req;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP.
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);

	/*
	 * Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer.
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))

			curr_ep = &udc->eps[0];
			curr_ep = &udc->eps[i];
		/* process the req queue until the first incomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
				&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);

			/* write the status back to the req */
			curr_req->req.status = status;

			/* ep0 request completion */
				ep0_req_complete(udc, curr_ep, curr_req);
				done(curr_ep, curr_req, status);
static void irq_process_reset(struct mv_udc *udc)
	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* The device address occupies bits 25-31; clear it */
	tmp = readl(&udc->op_regs->deviceaddr);
	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
	writel(tmp, &udc->op_regs->deviceaddr);

	/* Clear all the setup token semaphores */
	tmp = readl(&udc->op_regs->epsetupstat);
	writel(tmp, &udc->op_regs->epsetupstat);

	/* Clear all the endpoint complete status bits */
	tmp = readl(&udc->op_regs->epcomplete);
	writel(tmp, &udc->op_regs->epcomplete);

	/* wait until all endptprime bits are cleared */
	loops = LOOPS(PRIME_TIMEOUT);
	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
			dev_err(&udc->dev->dev,
				"Timeout for ENDPTPRIME = 0x%x\n",
				readl(&udc->op_regs->epprime));

	/* Write 1s to the Flush register */
	writel((u32)~0, &udc->op_regs->epflush);

	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
		dev_info(&udc->dev->dev, "usb bus reset\n");
		udc->usb_state = USB_STATE_DEFAULT;
		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);
		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
			readl(&udc->op_regs->portsc[0]));

		/* reset all the queues, stop all USB activities */
		stop_activity(udc, udc->driver);

		/* reset ep0 dQH and endptctrl */

		/* enable interrupts and set the controller to the run state */

		udc->usb_state = USB_STATE_ATTACHED;
static void handle_bus_resume(struct mv_udc *udc)
	udc->usb_state = udc->resume_state;
	udc->resume_state = 0;

	/* report resume to the driver */
		if (udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);

static void irq_process_suspend(struct mv_udc *udc)
	udc->resume_state = udc->usb_state;
	udc->usb_state = USB_STATE_SUSPENDED;

	if (udc->driver->suspend) {
		spin_unlock(&udc->lock);
		udc->driver->suspend(&udc->gadget);
		spin_lock(&udc->lock);

static void irq_process_port_change(struct mv_udc *udc)
	portsc = readl(&udc->op_regs->portsc[0]);
	if (!(portsc & PORTSCX_PORT_RESET)) {
		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
		case PORTSCX_PORT_SPEED_HIGH:
			udc->gadget.speed = USB_SPEED_HIGH;
		case PORTSCX_PORT_SPEED_FULL:
			udc->gadget.speed = USB_SPEED_FULL;
		case PORTSCX_PORT_SPEED_LOW:
			udc->gadget.speed = USB_SPEED_LOW;
			udc->gadget.speed = USB_SPEED_UNKNOWN;

	if (portsc & PORTSCX_PORT_SUSPEND) {
		udc->resume_state = udc->usb_state;
		udc->usb_state = USB_STATE_SUSPENDED;
		if (udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);

	if (!(portsc & PORTSCX_PORT_SUSPEND)
		&& udc->usb_state == USB_STATE_SUSPENDED) {
		handle_bus_resume(udc);

	if (!udc->resume_state)
		udc->usb_state = USB_STATE_DEFAULT;
static void irq_process_error(struct mv_udc *udc)
	/* Increment the error count */

static irqreturn_t mv_udc_irq(int irq, void *dev)
	struct mv_udc *udc = (struct mv_udc *)dev;

	spin_lock(&udc->lock);

	status = readl(&udc->op_regs->usbsts);
	intr = readl(&udc->op_regs->usbintr);
		spin_unlock(&udc->lock);

	/* Clear all pending interrupts */
	writel(status, &udc->op_regs->usbsts);

	if (status & USBSTS_ERR)
		irq_process_error(udc);

	if (status & USBSTS_RESET)
		irq_process_reset(udc);

	if (status & USBSTS_PORT_CHANGE)
		irq_process_port_change(udc);

	if (status & USBSTS_INT)
		irq_process_tr_complete(udc);

	if (status & USBSTS_SUSPEND)
		irq_process_suspend(udc);

	spin_unlock(&udc->lock);
/* release the device structure */
static void gadget_release(struct device *_dev)
	struct mv_udc *udc = the_controller;

	complete(udc->done);

static int __devexit mv_udc_remove(struct platform_device *dev)
	struct mv_udc *udc = the_controller;

	usb_del_gadget_udc(&udc->gadget);

	/* free memory allocated in probe */
	dma_pool_destroy(udc->dtd_pool);

	dma_free_coherent(&dev->dev, udc->ep_dqh_size,
		udc->ep_dqh, udc->ep_dqh_dma);

	free_irq(udc->irq, udc);

	iounmap(udc->cap_regs);
	udc->cap_regs = NULL;

	iounmap((void *)udc->phy_regs);

	if (udc->status_req) {
		kfree(udc->status_req->req.buf);
		kfree(udc->status_req);

	for (clk_i = 0; clk_i < udc->clknum; clk_i++)
		clk_put(udc->clk[clk_i]);

	device_unregister(&udc->gadget.dev);

	/* free dev, wait for release() to finish */
	wait_for_completion(udc->done);

	the_controller = NULL;
static int __devinit mv_udc_probe(struct platform_device *dev)
	struct mv_usb_platform_data *pdata = dev->dev.platform_data;

	if (pdata == NULL) {
		dev_err(&dev->dev, "missing platform_data\n");

	size = sizeof(*udc) + sizeof(struct clk *) * pdata->clknum;
	udc = kzalloc(size, GFP_KERNEL);
		dev_err(&dev->dev, "failed to allocate memory for udc\n");

	the_controller = udc;
	udc->done = &release_done;
	udc->pdata = dev->dev.platform_data;
	spin_lock_init(&udc->lock);

	udc->clknum = pdata->clknum;
	for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
		udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
		if (IS_ERR(udc->clk[clk_i])) {
			retval = PTR_ERR(udc->clk[clk_i]);

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
		dev_err(&dev->dev, "no I/O memory resource defined\n");

	udc->cap_regs = (struct mv_cap_regs __iomem *)
		ioremap(r->start, resource_size(r));
	if (udc->cap_regs == NULL) {
		dev_err(&dev->dev, "failed to map I/O memory\n");

	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
		dev_err(&dev->dev, "no phy I/O memory resource defined\n");
		goto err_iounmap_capreg;

	udc->phy_regs = (unsigned int)ioremap(r->start, resource_size(r));
	if (udc->phy_regs == 0) {
		dev_err(&dev->dev, "failed to map phy I/O memory\n");
		goto err_iounmap_capreg;

	/* we are going to access the controller registers, so enable the clocks */
	udc_clock_enable(udc);
	if (pdata->phy_init) {
		retval = pdata->phy_init(udc->phy_regs);
			dev_err(&dev->dev, "phy init error %d\n", retval);
			goto err_iounmap_phyreg;

	udc->op_regs = (struct mv_op_regs __iomem *)((u32)udc->cap_regs
		+ (readl(&udc->cap_regs->caplength_hciversion)
	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;

	/*
	 * Some platforms use USB to download the image and may not
	 * disconnect the USB gadget before loading the kernel, so stop
	 * the udc here first.
	 */
	writel(0xFFFFFFFF, &udc->op_regs->usbsts);

	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
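
	/*
	 * Rounding recap (illustration): "(size + align - 1) & ~(align - 1)"
	 * rounds size up to the next multiple of a power-of-two alignment.
	 * Assuming the usual 64-byte dQH and a DQH_ALIGNMENT of 2048:
	 * with 16 endpoints, size = 16 * 64 * 2 = 2048, already aligned,
	 * so it is unchanged; 2049 would round up to 4096.
	 */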
	udc->ep_dqh = dma_alloc_coherent(&dev->dev, size,
		&udc->ep_dqh_dma, GFP_KERNEL);

	if (udc->ep_dqh == NULL) {
		dev_err(&dev->dev, "allocate dQH memory failed\n");
		goto err_disable_clock;
	udc->ep_dqh_size = size;

	/* create the dTD dma_pool resource */
	udc->dtd_pool = dma_pool_create("mv_dtd",
			sizeof(struct mv_dtd),

	if (!udc->dtd_pool) {

	size = udc->max_eps * sizeof(struct mv_ep) * 2;
	udc->eps = kzalloc(size, GFP_KERNEL);
	if (udc->eps == NULL) {
		dev_err(&dev->dev, "allocate ep memory failed\n");
		goto err_destroy_dma;

	/* initialize the ep0 status request structure */
	udc->status_req = kzalloc(sizeof(struct mv_req), GFP_KERNEL);
	if (!udc->status_req) {
		dev_err(&dev->dev, "allocate status_req memory failed\n");
	INIT_LIST_HEAD(&udc->status_req->queue);

	/* allocate a small amount of memory to get a valid address */
	udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
	udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);

	udc->resume_state = USB_STATE_NOTATTACHED;
	udc->usb_state = USB_STATE_POWERED;
	udc->ep0_dir = EP_DIR_OUT;
	udc->remote_wakeup = 0;

	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
		dev_err(&dev->dev, "no IRQ resource defined\n");
		goto err_free_status_req;
	udc->irq = r->start;
	if (request_irq(udc->irq, mv_udc_irq,
		IRQF_SHARED, driver_name, udc)) {
		dev_err(&dev->dev, "Request irq %d for UDC failed\n",
		goto err_free_status_req;

	/* initialize the gadget structure */
	udc->gadget.ops = &mv_ops;		/* usb_gadget_ops */
	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
	udc->gadget.is_dualspeed = 1;		/* support dual speed */

	/* the "gadget" abstracts/virtualizes the controller */
	dev_set_name(&udc->gadget.dev, "gadget");
	udc->gadget.dev.parent = &dev->dev;
	udc->gadget.dev.dma_mask = dev->dev.dma_mask;
	udc->gadget.dev.release = gadget_release;
	udc->gadget.name = driver_name;		/* gadget name */

	retval = device_register(&udc->gadget.dev);

	retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
		goto err_unregister;
err_unregister:
	device_unregister(&udc->gadget.dev);

	free_irq(udc->irq, udc);
err_free_status_req:
	kfree(udc->status_req->req.buf);
	kfree(udc->status_req);
err_destroy_dma:
	dma_pool_destroy(udc->dtd_pool);

	dma_free_coherent(&dev->dev, udc->ep_dqh_size,
		udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
	if (udc->pdata->phy_deinit)
		udc->pdata->phy_deinit(udc->phy_regs);
	udc_clock_disable(udc);
err_iounmap_phyreg:
	iounmap((void *)udc->phy_regs);
err_iounmap_capreg:
	iounmap(udc->cap_regs);

	for (clk_i--; clk_i >= 0; clk_i--)
		clk_put(udc->clk[clk_i]);
	the_controller = NULL;
static int mv_udc_suspend(struct device *_dev)
	struct mv_udc *udc = the_controller;

static int mv_udc_resume(struct device *_dev)
	struct mv_udc *udc = the_controller;

	if (udc->pdata->phy_init) {
		retval = udc->pdata->phy_init(udc->phy_regs);
			dev_err(&udc->dev->dev,
				"phy init error %d on resume\n",

static const struct dev_pm_ops mv_udc_pm_ops = {
	.suspend	= mv_udc_suspend,
	.resume		= mv_udc_resume,
};
static struct platform_driver udc_driver = {
	.probe		= mv_udc_probe,
	.remove		= __devexit_p(mv_udc_remove),
		.owner	= THIS_MODULE,
		.pm	= &mv_udc_pm_ops,

MODULE_ALIAS("platform:pxa-u2o");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int __init init(void)
	return platform_driver_register(&udc_driver);
module_init(init);

static void __exit cleanup(void)
	platform_driver_unregister(&udc_driver);
module_exit(cleanup);