drivers/usb/chipidea/udc.c [platform/kernel/linux-rpi.git]
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * udc.c - ChipIdea UDC driver
4  *
5  * Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
6  *
7  * Author: David Lopo
8  */
9
10 #include <linux/delay.h>
11 #include <linux/device.h>
12 #include <linux/dmapool.h>
13 #include <linux/err.h>
14 #include <linux/irqreturn.h>
15 #include <linux/kernel.h>
16 #include <linux/slab.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/pinctrl/consumer.h>
19 #include <linux/usb/ch9.h>
20 #include <linux/usb/gadget.h>
21 #include <linux/usb/otg-fsm.h>
22 #include <linux/usb/chipidea.h>
23
24 #include "ci.h"
25 #include "udc.h"
26 #include "bits.h"
27 #include "otg.h"
28 #include "otg_fsm.h"
29
30 /* control endpoint description */
31 static const struct usb_endpoint_descriptor
32 ctrl_endpt_out_desc = {
33         .bLength         = USB_DT_ENDPOINT_SIZE,
34         .bDescriptorType = USB_DT_ENDPOINT,
35
36         .bEndpointAddress = USB_DIR_OUT,
37         .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
38         .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
39 };
40
41 static const struct usb_endpoint_descriptor
42 ctrl_endpt_in_desc = {
43         .bLength         = USB_DT_ENDPOINT_SIZE,
44         .bDescriptorType = USB_DT_ENDPOINT,
45
46         .bEndpointAddress = USB_DIR_IN,
47         .bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
48         .wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
49 };
50
51 /**
52  * hw_ep_bit: calculates the bit number
53  * @num: endpoint number
54  * @dir: endpoint direction
55  *
56  * This function returns the bit number
57  */
58 static inline int hw_ep_bit(int num, int dir)
59 {
60         return num + ((dir == TX) ? 16 : 0);
61 }
62
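/**
 * ep_to_bit: map an endpoint index to its register bit
 * @ci: the controller
 * @n:  endpoint index (RX endpoints first, then TX, in ci->ci_hw_ep[])
 *
 * The ENDPT* registers keep RX endpoints in bits 0..15 and TX endpoints in
 * bits 16..31, so TX indices are padded up to bit 16 when fewer than 16
 * endpoints per direction are implemented.
 */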
63 static inline int ep_to_bit(struct ci_hdrc *ci, int n)
64 {
65         int fill = 16 - ci->hw_ep_max / 2;
66
67         if (n >= ci->hw_ep_max / 2)
68                 n += fill;
69
70         return n;
71 }
72
73 /**
74  * hw_device_state: enables/disables interrupts and sets the endpoint list address (execute without interruption)
75  * @dma: 0 => disable, !0 => enable and set dma engine
76  *
77  * This function returns an error code
78  */
79 static int hw_device_state(struct ci_hdrc *ci, u32 dma)
80 {
81         if (dma) {
82                 hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
83                 /* interrupt, error, port change, reset, sleep/suspend */
84                 hw_write(ci, OP_USBINTR, ~0,
85                              USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
86         } else {
87                 hw_write(ci, OP_USBINTR, ~0, 0);
88         }
89         return 0;
90 }
91
92 /**
93  * hw_ep_flush: flush endpoint fifo (execute without interruption)
94  * @num: endpoint number
95  * @dir: endpoint direction
96  *
97  * This function returns an error code
98  */
99 static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
100 {
101         int n = hw_ep_bit(num, dir);
102
103         do {
104                 /* flush any pending transfer */
105                 hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
106                 while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
107                         cpu_relax();
108         } while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
109
110         return 0;
111 }
112
113 /**
114  * hw_ep_disable: disables endpoint (execute without interruption)
115  * @num: endpoint number
116  * @dir: endpoint direction
117  *
118  * This function returns an error code
119  */
120 static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
121 {
122         hw_write(ci, OP_ENDPTCTRL + num,
123                  (dir == TX) ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
124         return 0;
125 }
126
127 /**
128  * hw_ep_enable: enables endpoint (execute without interruption)
129  * @num:  endpoint number
130  * @dir:  endpoint direction
131  * @type: endpoint type
132  *
133  * This function returns an error code
134  */
135 static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
136 {
137         u32 mask, data;
138
139         if (dir == TX) {
140                 mask  = ENDPTCTRL_TXT;  /* type    */
141                 data  = type << __ffs(mask);
142
143                 mask |= ENDPTCTRL_TXS;  /* unstall */
144                 mask |= ENDPTCTRL_TXR;  /* reset data toggle */
145                 data |= ENDPTCTRL_TXR;
146                 mask |= ENDPTCTRL_TXE;  /* enable  */
147                 data |= ENDPTCTRL_TXE;
148         } else {
149                 mask  = ENDPTCTRL_RXT;  /* type    */
150                 data  = type << __ffs(mask);
151
152                 mask |= ENDPTCTRL_RXS;  /* unstall */
153                 mask |= ENDPTCTRL_RXR;  /* reset data toggle */
154                 data |= ENDPTCTRL_RXR;
155                 mask |= ENDPTCTRL_RXE;  /* enable  */
156                 data |= ENDPTCTRL_RXE;
157         }
158         hw_write(ci, OP_ENDPTCTRL + num, mask, data);
159         return 0;
160 }
161
162 /**
163  * hw_ep_get_halt: return endpoint halt status
164  * @num: endpoint number
165  * @dir: endpoint direction
166  *
167  * This function returns 1 if endpoint halted
168  */
169 static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
170 {
171         u32 mask = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
172
173         return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
174 }
175
176 /**
177  * hw_ep_prime: primes endpoint (execute without interruption)
178  * @num:     endpoint number
179  * @dir:     endpoint direction
180  * @is_ctrl: true if control endpoint
181  *
182  * This function returns an error code
183  */
184 static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
185 {
186         int n = hw_ep_bit(num, dir);
187
188         /* Synchronize before ep prime */
189         wmb();
190
191         if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
192                 return -EAGAIN;
193
194         hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
195
196         while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
197                 cpu_relax();
198         if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
199                 return -EAGAIN;
200
201         /* status should be tested according to the manual but it doesn't work */
202         return 0;
203 }
204
205 /**
206  * hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
207  *                 without interruption)
208  * @num:   endpoint number
209  * @dir:   endpoint direction
210  * @value: true => stall, false => unstall
211  *
212  * This function returns an error code
213  */
214 static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
215 {
216         if (value != 0 && value != 1)
217                 return -EINVAL;
218
219         do {
220                 enum ci_hw_regs reg = OP_ENDPTCTRL + num;
221                 u32 mask_xs = (dir == TX) ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
222                 u32 mask_xr = (dir == TX) ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
223
224                 /* data toggle - reserved for EP0 but it's in ESS */
225                 hw_write(ci, reg, mask_xs|mask_xr,
226                           value ? mask_xs : mask_xr);
227         } while (value != hw_ep_get_halt(ci, num, dir));
228
229         return 0;
230 }
231
232 /**
233  * hw_port_is_high_speed: test if port is high speed
234  *
235  * This function returns true if the port is high speed
236  */
237 static int hw_port_is_high_speed(struct ci_hdrc *ci)
238 {
239         return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
240                 hw_read(ci, OP_PORTSC, PORTSC_HSP);
241 }
242
243 /**
244  * hw_test_and_clear_complete: test & clear complete status (execute without
245  *                             interruption)
246  * @n: endpoint number
247  *
248  * This function returns complete status
249  */
250 static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
251 {
252         n = ep_to_bit(ci, n);
253         return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
254 }
255
256 /**
257  * hw_test_and_clear_intr_active: test & clear active interrupts (execute
258  *                                without interruption)
259  *
260  * This function returns the active interrupts
261  */
262 static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
263 {
264         u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);
265
266         hw_write(ci, OP_USBSTS, ~0, reg);
267         return reg;
268 }
269
270 /**
271  * hw_test_and_clear_setup_guard: test & clear setup guard (execute without
272  *                                interruption)
273  *
274  * This function returns guard value
275  */
276 static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
277 {
278         return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
279 }
280
281 /**
282  * hw_test_and_set_setup_guard: test & set setup guard (execute without
283  *                              interruption)
284  *
285  * This function returns guard value
286  */
287 static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
288 {
289         return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
290 }
291
292 /**
293  * hw_usb_set_address: configures USB address (execute without interruption)
294  * @value: new USB address
295  *
296  * This function explicitly sets the address, without the "USBADRA" (advance)
297  * feature, which is not supported by older versions of the controller.
298  */
299 static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
300 {
301         hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
302                  value << __ffs(DEVICEADDR_USBADR));
303 }
304
305 /**
306  * hw_usb_reset: restart device after a bus reset (execute without
307  *               interruption)
308  *
309  * This function returns an error code
310  */
311 static int hw_usb_reset(struct ci_hdrc *ci)
312 {
313         hw_usb_set_address(ci, 0);
314
315         /* ESS flushes only at end?!? */
316         hw_write(ci, OP_ENDPTFLUSH,    ~0, ~0);
317
318         /* clear setup token semaphores */
319         hw_write(ci, OP_ENDPTSETUPSTAT, 0,  0);
320
321         /* clear complete status */
322         hw_write(ci, OP_ENDPTCOMPLETE,  0,  0);
323
324         /* wait until all bits cleared */
325         while (hw_read(ci, OP_ENDPTPRIME, ~0))
326                 udelay(10);             /* not RTOS friendly */
327
328         /* reset all endpoints ? */
329
330         /* reset internal status and wait for further instructions
331            no need to verify the port reset status (ESS does it) */
332
333         return 0;
334 }
335
336 /******************************************************************************
337  * UTIL block
338  *****************************************************************************/
339
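/**
 * add_td_to_list: allocate and fill one TD, then append it to the request
 * @hwep:   endpoint
 * @hwreq:  request
 * @length: number of bytes this TD should transfer
 * @s:      scatterlist entry backing the TD, or NULL for a linear buffer
 *
 * Allocates a td_node and its dTD from the endpoint's DMA pool, programs the
 * token (total bytes, active status, MultO for isochronous TX) and the buffer
 * page pointers, advances hwreq->req.actual, and links the node at the tail
 * of hwreq->tds.
 *
 * This function returns 0 or -ENOMEM.
 */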
340 static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
341                         unsigned int length, struct scatterlist *s)
342 {
343         int i;
344         u32 temp;
345         struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
346                                                   GFP_ATOMIC);
347
348         if (node == NULL)
349                 return -ENOMEM;
350
351         node->ptr = dma_pool_zalloc(hwep->td_pool, GFP_ATOMIC, &node->dma);
352         if (node->ptr == NULL) {
353                 kfree(node);
354                 return -ENOMEM;
355         }
356
357         node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
358         node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
359         node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
360         if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
361                 u32 mul = hwreq->req.length / hwep->ep.maxpacket;
362
363                 if (hwreq->req.length == 0
364                                 || hwreq->req.length % hwep->ep.maxpacket)
365                         mul++;
366                 node->ptr->token |= cpu_to_le32(mul << __ffs(TD_MULTO));
367         }
368
369         if (s) {
370                 temp = (u32) (sg_dma_address(s) + hwreq->req.actual);
371                 node->td_remaining_size = CI_MAX_BUF_SIZE - length;
372         } else {
373                 temp = (u32) (hwreq->req.dma + hwreq->req.actual);
374         }
375
376         if (length) {
377                 node->ptr->page[0] = cpu_to_le32(temp);
378                 for (i = 1; i < TD_PAGE_COUNT; i++) {
379                         u32 page = temp + i * CI_HDRC_PAGE_SIZE;
380                         page &= ~TD_RESERVED_MASK;
381                         node->ptr->page[i] = cpu_to_le32(page);
382                 }
383         }
384
385         hwreq->req.actual += length;
386
387         if (!list_empty(&hwreq->tds)) {
388                 /* get the last entry */
389                 lastnode = list_entry(hwreq->tds.prev,
390                                 struct td_node, td);
391                 lastnode->ptr->next = cpu_to_le32(node->dma);
392         }
393
394         INIT_LIST_HEAD(&node->td);
395         list_add_tail(&node->td, &hwreq->tds);
396
397         return 0;
398 }
399
400 /**
401  * _usb_addr: calculates endpoint address from direction & number
402  * @ep:  endpoint
403  */
404 static inline u8 _usb_addr(struct ci_hw_ep *ep)
405 {
406         return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
407 }
408
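/**
 * prepare_td_for_non_sg: build the TD chain for a linearly mapped request
 * @hwep:  endpoint
 * @hwreq: request
 *
 * Queues a zero-length TD for an empty request, otherwise splits the buffer
 * into TDs covering up to TD_PAGE_COUNT pages each (one page less if the
 * buffer is not page aligned), and appends a final zero-length TD when a ZLP
 * has to be sent on TX.
 *
 * This function returns an error code
 */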
409 static int prepare_td_for_non_sg(struct ci_hw_ep *hwep,
410                 struct ci_hw_req *hwreq)
411 {
412         unsigned int rest = hwreq->req.length;
413         int pages = TD_PAGE_COUNT;
414         int ret = 0;
415
416         if (rest == 0) {
417                 ret = add_td_to_list(hwep, hwreq, 0, NULL);
418                 if (ret < 0)
419                         return ret;
420         }
421
422         /*
423          * The first buffer may not be page aligned.
424          * In that case the request has to span one extra TD.
425          */
426         if (hwreq->req.dma % PAGE_SIZE)
427                 pages--;
428
429         while (rest > 0) {
430                 unsigned int count = min(hwreq->req.length - hwreq->req.actual,
431                         (unsigned int)(pages * CI_HDRC_PAGE_SIZE));
432
433                 ret = add_td_to_list(hwep, hwreq, count, NULL);
434                 if (ret < 0)
435                         return ret;
436
437                 rest -= count;
438         }
439
440         if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
441             && (hwreq->req.length % hwep->ep.maxpacket == 0)) {
442                 ret = add_td_to_list(hwep, hwreq, 0, NULL);
443                 if (ret < 0)
444                         return ret;
445         }
446
447         return ret;
448 }
449
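/**
 * prepare_td_per_sg: build TDs for one scatterlist entry
 * @hwep:  endpoint
 * @hwreq: request
 * @s:     scatterlist entry
 *
 * Splits the entry into TDs of at most CI_MAX_BUF_SIZE bytes each.
 *
 * This function returns an error code
 */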
450 static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
451                 struct scatterlist *s)
452 {
453         unsigned int rest = sg_dma_len(s);
454         int ret = 0;
455
456         hwreq->req.actual = 0;
457         while (rest > 0) {
458                 unsigned int count = min_t(unsigned int, rest,
459                                 CI_MAX_BUF_SIZE);
460
461                 ret = add_td_to_list(hwep, hwreq, count, s);
462                 if (ret < 0)
463                         return ret;
464
465                 rest -= count;
466         }
467
468         return ret;
469 }
470
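/**
 * ci_add_buffer_entry: append a scatterlist entry to a partially filled TD
 * @node: TD with remaining page slots
 * @s:    scatterlist entry to add
 *
 * Adds the entry's length to the TD's total-bytes field and fills the unused
 * buffer page pointers with the entry's pages.
 */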
471 static void ci_add_buffer_entry(struct td_node *node, struct scatterlist *s)
472 {
473         int empty_td_slot_index = (CI_MAX_BUF_SIZE - node->td_remaining_size)
474                         / CI_HDRC_PAGE_SIZE;
475         int i;
476
477         node->ptr->token +=
478                 cpu_to_le32(sg_dma_len(s) << __ffs(TD_TOTAL_BYTES));
479
480         for (i = empty_td_slot_index; i < TD_PAGE_COUNT; i++) {
481                 u32 page = (u32) sg_dma_address(s) +
482                         (i - empty_td_slot_index) * CI_HDRC_PAGE_SIZE;
483
484                 page &= ~TD_RESERVED_MASK;
485                 node->ptr->page[i] = cpu_to_le32(page);
486         }
487 }
488
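/**
 * prepare_td_for_sg: build the TD chain for a scatter-gather request
 * @hwep:  endpoint
 * @hwreq: request
 *
 * Walks the mapped scatterlist; each entry is packed into the previous TD
 * when it still has room, otherwise new TDs are prepared for it. Requests
 * with the zero flag set, zero length, or buffers that are not page aligned
 * are rejected.
 *
 * This function returns an error code
 */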
489 static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
490 {
491         struct usb_request *req = &hwreq->req;
492         struct scatterlist *s = req->sg;
493         int ret = 0, i = 0;
494         struct td_node *node = NULL;
495
496         if (!s || req->zero || req->length == 0) {
497                 dev_err(hwep->ci->dev, "not supported operation for sg\n");
498                 return -EINVAL;
499         }
500
501         while (i++ < req->num_mapped_sgs) {
502                 if (sg_dma_address(s) % PAGE_SIZE) {
503                         dev_err(hwep->ci->dev, "not page aligned sg buffer\n");
504                         return -EINVAL;
505                 }
506
507                 if (node && (node->td_remaining_size >= sg_dma_len(s))) {
508                         ci_add_buffer_entry(node, s);
509                         node->td_remaining_size -= sg_dma_len(s);
510                 } else {
511                         ret = prepare_td_per_sg(hwep, hwreq, s);
512                         if (ret)
513                                 return ret;
514
515                         node = list_entry(hwreq->tds.prev,
516                                 struct td_node, td);
517                 }
518
519                 s = sg_next(s);
520         }
521
522         return ret;
523 }
524
525 /**
526  * _hardware_enqueue: configures a request at hardware level
527  * @hwep:   endpoint
528  * @hwreq:  request
529  *
530  * This function returns an error code
531  */
532 static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
533 {
534         struct ci_hdrc *ci = hwep->ci;
535         int ret = 0;
536         struct td_node *firstnode, *lastnode;
537
538         /* don't queue twice */
539         if (hwreq->req.status == -EALREADY)
540                 return -EALREADY;
541
542         hwreq->req.status = -EALREADY;
543
544         ret = usb_gadget_map_request_by_dev(ci->dev->parent,
545                                             &hwreq->req, hwep->dir);
546         if (ret)
547                 return ret;
548
549         if (hwreq->req.num_mapped_sgs)
550                 ret = prepare_td_for_sg(hwep, hwreq);
551         else
552                 ret = prepare_td_for_non_sg(hwep, hwreq);
553
554         if (ret)
555                 return ret;
556
557         firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
558
559         lastnode = list_entry(hwreq->tds.prev,
560                 struct td_node, td);
561
562         lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
563         if (!hwreq->req.no_interrupt)
564                 lastnode->ptr->token |= cpu_to_le32(TD_IOC);
565         wmb();
566
567         hwreq->req.actual = 0;
568         if (!list_empty(&hwep->qh.queue)) {
569                 struct ci_hw_req *hwreqprev;
570                 int n = hw_ep_bit(hwep->num, hwep->dir);
571                 int tmp_stat;
572                 struct td_node *prevlastnode;
573                 u32 next = firstnode->dma & TD_ADDR_MASK;
574
575                 hwreqprev = list_entry(hwep->qh.queue.prev,
576                                 struct ci_hw_req, queue);
577                 prevlastnode = list_entry(hwreqprev->tds.prev,
578                                 struct td_node, td);
579
580                 prevlastnode->ptr->next = cpu_to_le32(next);
581                 wmb();
582                 if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
583                         goto done;
584                 do {
585                         hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
586                         tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
587                 } while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
588                 hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
589                 if (tmp_stat)
590                         goto done;
591         }
592
593         /*  QH configuration */
594         hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
595         hwep->qh.ptr->td.token &=
596                 cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
597
598         if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
599                 u32 mul = hwreq->req.length / hwep->ep.maxpacket;
600
601                 if (hwreq->req.length == 0
602                                 || hwreq->req.length % hwep->ep.maxpacket)
603                         mul++;
604                 hwep->qh.ptr->cap |= cpu_to_le32(mul << __ffs(QH_MULT));
605         }
606
607         ret = hw_ep_prime(ci, hwep->num, hwep->dir,
608                            hwep->type == USB_ENDPOINT_XFER_CONTROL);
609 done:
610         return ret;
611 }
612
613 /*
614  * free_pending_td: remove a pending request for the endpoint
615  * @hwep: endpoint
616  */
617 static void free_pending_td(struct ci_hw_ep *hwep)
618 {
619         struct td_node *pending = hwep->pending_td;
620
621         dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
622         hwep->pending_td = NULL;
623         kfree(pending);
624 }
625
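/*
 * reprime_dtd: point the queue head back at @node and prime the endpoint
 * again; used as a workaround when a revision 2.4 controller has dropped the
 * prime for a still-active TD.
 */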
626 static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
627                                            struct td_node *node)
628 {
629         hwep->qh.ptr->td.next = cpu_to_le32(node->dma);
630         hwep->qh.ptr->td.token &=
631                 cpu_to_le32(~(TD_STATUS_HALTED | TD_STATUS_ACTIVE));
632
633         return hw_ep_prime(ci, hwep->num, hwep->dir,
634                                 hwep->type == USB_ENDPOINT_XFER_CONTROL);
635 }
636
637 /**
638  * _hardware_dequeue: handles a request at hardware level
639  * @hwep:   endpoint
640  * @hwreq:  request
641  *
642  * This function returns an error code
643  */
644 static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
645 {
646         u32 tmptoken;
647         struct td_node *node, *tmpnode;
648         unsigned remaining_length;
649         unsigned actual = hwreq->req.length;
650         struct ci_hdrc *ci = hwep->ci;
651
652         if (hwreq->req.status != -EALREADY)
653                 return -EINVAL;
654
655         hwreq->req.status = 0;
656
657         list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
658                 tmptoken = le32_to_cpu(node->ptr->token);
659                 if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
660                         int n = hw_ep_bit(hwep->num, hwep->dir);
661
662                         if (ci->rev == CI_REVISION_24)
663                                 if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
664                                         reprime_dtd(ci, hwep, node);
665                         hwreq->req.status = -EALREADY;
666                         return -EBUSY;
667                 }
668
669                 remaining_length = (tmptoken & TD_TOTAL_BYTES);
670                 remaining_length >>= __ffs(TD_TOTAL_BYTES);
671                 actual -= remaining_length;
672
673                 hwreq->req.status = tmptoken & TD_STATUS;
674                 if ((TD_STATUS_HALTED & hwreq->req.status)) {
675                         hwreq->req.status = -EPIPE;
676                         break;
677                 } else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
678                         hwreq->req.status = -EPROTO;
679                         break;
680                 } else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
681                         hwreq->req.status = -EILSEQ;
682                         break;
683                 }
684
685                 if (remaining_length) {
686                         if (hwep->dir == TX) {
687                                 hwreq->req.status = -EPROTO;
688                                 break;
689                         }
690                 }
691                 /*
692                  * As the hardware could still address the freed td,
693                  * which would render the udc unusable, the cleanup of the
694                  * td has to be delayed by one.
695                  */
696                 if (hwep->pending_td)
697                         free_pending_td(hwep);
698
699                 hwep->pending_td = node;
700                 list_del_init(&node->td);
701         }
702
703         usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
704                                         &hwreq->req, hwep->dir);
705
706         hwreq->req.actual += actual;
707
708         if (hwreq->req.status)
709                 return hwreq->req.status;
710
711         return hwreq->req.actual;
712 }
713
714 /**
715  * _ep_nuke: dequeues all endpoint requests
716  * @hwep: endpoint
717  *
718  * This function returns an error code
719  * Caller must hold lock
720  */
721 static int _ep_nuke(struct ci_hw_ep *hwep)
722 __releases(hwep->lock)
723 __acquires(hwep->lock)
724 {
725         struct td_node *node, *tmpnode;
726         if (hwep == NULL)
727                 return -EINVAL;
728
729         hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
730
731         while (!list_empty(&hwep->qh.queue)) {
732
733                 /* pop oldest request */
734                 struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
735                                                      struct ci_hw_req, queue);
736
737                 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
738                         dma_pool_free(hwep->td_pool, node->ptr, node->dma);
739                         list_del_init(&node->td);
740                         node->ptr = NULL;
741                         kfree(node);
742                 }
743
744                 list_del_init(&hwreq->queue);
745                 hwreq->req.status = -ESHUTDOWN;
746
747                 if (hwreq->req.complete != NULL) {
748                         spin_unlock(hwep->lock);
749                         usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
750                         spin_lock(hwep->lock);
751                 }
752         }
753
754         if (hwep->pending_td)
755                 free_pending_td(hwep);
756
757         return 0;
758 }
759
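/**
 * _ep_set_halt: stall/unstall an endpoint
 * @ep:             endpoint
 * @value:          true => stall, false => unstall
 * @check_transfer: when true, refuse to stall a non-control TX endpoint that
 *                  still has requests queued
 *
 * For control endpoints the (un)stall is applied to both directions.
 *
 * This function returns an error code
 */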
760 static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
761 {
762         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
763         int direction, retval = 0;
764         unsigned long flags;
765
766         if (ep == NULL || hwep->ep.desc == NULL)
767                 return -EINVAL;
768
769         if (usb_endpoint_xfer_isoc(hwep->ep.desc))
770                 return -EOPNOTSUPP;
771
772         spin_lock_irqsave(hwep->lock, flags);
773
774         if (value && hwep->dir == TX && check_transfer &&
775                 !list_empty(&hwep->qh.queue) &&
776                         !usb_endpoint_xfer_control(hwep->ep.desc)) {
777                 spin_unlock_irqrestore(hwep->lock, flags);
778                 return -EAGAIN;
779         }
780
781         direction = hwep->dir;
782         do {
783                 retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
784
785                 if (!value)
786                         hwep->wedge = 0;
787
788                 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
789                         hwep->dir = (hwep->dir == TX) ? RX : TX;
790
791         } while (hwep->dir != direction);
792
793         spin_unlock_irqrestore(hwep->lock, flags);
794         return retval;
795 }
796
797
798 /**
799  * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
800  * @gadget: gadget
801  *
802  * This function returns an error code
803  */
804 static int _gadget_stop_activity(struct usb_gadget *gadget)
805 {
806         struct usb_ep *ep;
807         struct ci_hdrc    *ci = container_of(gadget, struct ci_hdrc, gadget);
808         unsigned long flags;
809
810         /* flush all endpoints */
811         gadget_for_each_ep(ep, gadget) {
812                 usb_ep_fifo_flush(ep);
813         }
814         usb_ep_fifo_flush(&ci->ep0out->ep);
815         usb_ep_fifo_flush(&ci->ep0in->ep);
816
817         /* make sure to disable all endpoints */
818         gadget_for_each_ep(ep, gadget) {
819                 usb_ep_disable(ep);
820         }
821
822         if (ci->status != NULL) {
823                 usb_ep_free_request(&ci->ep0in->ep, ci->status);
824                 ci->status = NULL;
825         }
826
827         spin_lock_irqsave(&ci->lock, flags);
828         ci->gadget.speed = USB_SPEED_UNKNOWN;
829         ci->remote_wakeup = 0;
830         ci->suspended = 0;
831         spin_unlock_irqrestore(&ci->lock, flags);
832
833         return 0;
834 }
835
836 /******************************************************************************
837  * ISR block
838  *****************************************************************************/
839 /**
840  * isr_reset_handler: USB reset interrupt handler
841  * @ci: UDC device
842  *
843  * This function resets USB engine after a bus reset occurred
844  */
845 static void isr_reset_handler(struct ci_hdrc *ci)
846 __releases(ci->lock)
847 __acquires(ci->lock)
848 {
849         int retval;
850
851         spin_unlock(&ci->lock);
852         if (ci->gadget.speed != USB_SPEED_UNKNOWN)
853                 usb_gadget_udc_reset(&ci->gadget, ci->driver);
854
855         retval = _gadget_stop_activity(&ci->gadget);
856         if (retval)
857                 goto done;
858
859         retval = hw_usb_reset(ci);
860         if (retval)
861                 goto done;
862
863         ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
864         if (ci->status == NULL)
865                 retval = -ENOMEM;
866
867 done:
868         spin_lock(&ci->lock);
869
870         if (retval)
871                 dev_err(ci->dev, "error: %i\n", retval);
872 }
873
874 /**
875  * isr_get_status_complete: get_status request complete function
876  * @ep:  endpoint
877  * @req: request handled
878  *
879  * Caller must release lock
880  */
881 static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
882 {
883         if (ep == NULL || req == NULL)
884                 return;
885
886         kfree(req->buf);
887         usb_ep_free_request(ep, req);
888 }
889
890 /**
891  * _ep_queue: queues (submits) an I/O request to an endpoint
892  * @ep:        endpoint
893  * @req:       request
894  * @gfp_flags: GFP flags (not used)
895  *
896  * Caller must hold lock
897  * This function returns an error code
898  */
899 static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
900                     gfp_t __maybe_unused gfp_flags)
901 {
902         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
903         struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
904         struct ci_hdrc *ci = hwep->ci;
905         int retval = 0;
906
907         if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
908                 return -EINVAL;
909
910         if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
911                 if (req->length)
912                         hwep = (ci->ep0_dir == RX) ?
913                                ci->ep0out : ci->ep0in;
914                 if (!list_empty(&hwep->qh.queue)) {
915                         _ep_nuke(hwep);
916                         dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
917                                  _usb_addr(hwep));
918                 }
919         }
920
921         if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
922             hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) {
923                 dev_err(hwep->ci->dev, "request length too big for isochronous\n");
924                 return -EMSGSIZE;
925         }
926
927         /* first nuke then test link, e.g. previous status has not been sent */
928         if (!list_empty(&hwreq->queue)) {
929                 dev_err(hwep->ci->dev, "request already in queue\n");
930                 return -EBUSY;
931         }
932
933         /* push request */
934         hwreq->req.status = -EINPROGRESS;
935         hwreq->req.actual = 0;
936
937         retval = _hardware_enqueue(hwep, hwreq);
938
939         if (retval == -EALREADY)
940                 retval = 0;
941         if (!retval)
942                 list_add_tail(&hwreq->queue, &hwep->qh.queue);
943
944         return retval;
945 }
946
947 /**
948  * isr_get_status_response: get_status request response
949  * @ci: ci struct
950  * @setup: setup request packet
951  *
952  * This function returns an error code
953  */
954 static int isr_get_status_response(struct ci_hdrc *ci,
955                                    struct usb_ctrlrequest *setup)
956 __releases(hwep->lock)
957 __acquires(hwep->lock)
958 {
959         struct ci_hw_ep *hwep = ci->ep0in;
960         struct usb_request *req = NULL;
961         gfp_t gfp_flags = GFP_ATOMIC;
962         int dir, num, retval;
963
964         if (hwep == NULL || setup == NULL)
965                 return -EINVAL;
966
967         spin_unlock(hwep->lock);
968         req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
969         spin_lock(hwep->lock);
970         if (req == NULL)
971                 return -ENOMEM;
972
973         req->complete = isr_get_status_complete;
974         req->length   = 2;
975         req->buf      = kzalloc(req->length, gfp_flags);
976         if (req->buf == NULL) {
977                 retval = -ENOMEM;
978                 goto err_free_req;
979         }
980
981         if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
982                 *(u16 *)req->buf = (ci->remote_wakeup << 1) |
983                         ci->gadget.is_selfpowered;
984         } else if ((setup->bRequestType & USB_RECIP_MASK) \
985                    == USB_RECIP_ENDPOINT) {
986                 dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
987                         TX : RX;
988                 num =  le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
989                 *(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
990         }
991         /* else do nothing; reserved for future use */
992
993         retval = _ep_queue(&hwep->ep, req, gfp_flags);
994         if (retval)
995                 goto err_free_buf;
996
997         return 0;
998
999  err_free_buf:
1000         kfree(req->buf);
1001  err_free_req:
1002         spin_unlock(hwep->lock);
1003         usb_ep_free_request(&hwep->ep, req);
1004         spin_lock(hwep->lock);
1005         return retval;
1006 }
1007
1008 /**
1009  * isr_setup_status_complete: setup_status request complete function
1010  * @ep:  endpoint
1011  * @req: request handled
1012  *
1013  * Caller must release lock. Put the port in test mode if test mode
1014  * feature is selected.
1015  */
1016 static void
1017 isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
1018 {
1019         struct ci_hdrc *ci = req->context;
1020         unsigned long flags;
1021
1022         if (ci->setaddr) {
1023                 hw_usb_set_address(ci, ci->address);
1024                 ci->setaddr = false;
1025                 if (ci->address)
1026                         usb_gadget_set_state(&ci->gadget, USB_STATE_ADDRESS);
1027         }
1028
1029         spin_lock_irqsave(&ci->lock, flags);
1030         if (ci->test_mode)
1031                 hw_port_test_set(ci, ci->test_mode);
1032         spin_unlock_irqrestore(&ci->lock, flags);
1033 }
1034
1035 /**
1036  * isr_setup_status_phase: queues the status phase of a setup transaction
1037  * @ci: ci struct
1038  *
1039  * This function returns an error code
1040  */
1041 static int isr_setup_status_phase(struct ci_hdrc *ci)
1042 {
1043         struct ci_hw_ep *hwep;
1044
1045         /*
1046          * Unexpected USB controller behavior, caused by bad signal integrity
1047          * or ground reference problems, can lead to isr_setup_status_phase
1048          * being called with ci->status equal to NULL.
1049          * If this situation occurs, you should review your USB hardware design.
1050          */
1051         if (WARN_ON_ONCE(!ci->status))
1052                 return -EPIPE;
1053
1054         hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
1055         ci->status->context = ci;
1056         ci->status->complete = isr_setup_status_complete;
1057
1058         return _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
1059 }
1060
1061 /**
1062  * isr_tr_complete_low: transaction complete low level handler
1063  * @hwep: endpoint
1064  *
1065  * This function returns an error code
1066  * Caller must hold lock
1067  */
1068 static int isr_tr_complete_low(struct ci_hw_ep *hwep)
1069 __releases(hwep->lock)
1070 __acquires(hwep->lock)
1071 {
1072         struct ci_hw_req *hwreq, *hwreqtemp;
1073         struct ci_hw_ep *hweptemp = hwep;
1074         int retval = 0;
1075
1076         list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
1077                         queue) {
1078                 retval = _hardware_dequeue(hwep, hwreq);
1079                 if (retval < 0)
1080                         break;
1081                 list_del_init(&hwreq->queue);
1082                 if (hwreq->req.complete != NULL) {
1083                         spin_unlock(hwep->lock);
1084                         if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
1085                                         hwreq->req.length)
1086                                 hweptemp = hwep->ci->ep0in;
1087                         usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
1088                         spin_lock(hwep->lock);
1089                 }
1090         }
1091
1092         if (retval == -EBUSY)
1093                 retval = 0;
1094
1095         return retval;
1096 }
1097
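/*
 * otg_a_alt_hnp_support: handle the A_ALT_HNP_SUPPORT feature request by
 * warning the user to connect the device to an alternate port and completing
 * the status phase.
 */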
1098 static int otg_a_alt_hnp_support(struct ci_hdrc *ci)
1099 {
1100         dev_warn(&ci->gadget.dev,
1101                 "connect the device to an alternate port if you want HNP\n");
1102         return isr_setup_status_phase(ci);
1103 }
1104
1105 /**
1106  * isr_setup_packet_handler: setup packet handler
1107  * @ci: UDC descriptor
1108  *
1109  * This function handles the setup packet
1110  */
1111 static void isr_setup_packet_handler(struct ci_hdrc *ci)
1112 __releases(ci->lock)
1113 __acquires(ci->lock)
1114 {
1115         struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
1116         struct usb_ctrlrequest req;
1117         int type, num, dir, err = -EINVAL;
1118         u8 tmode = 0;
1119
1120         /*
1121          * Flush data and handshake transactions of previous
1122          * setup packet.
1123          */
1124         _ep_nuke(ci->ep0out);
1125         _ep_nuke(ci->ep0in);
1126
1127         /* read_setup_packet */
1128         do {
1129                 hw_test_and_set_setup_guard(ci);
1130                 memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
1131         } while (!hw_test_and_clear_setup_guard(ci));
1132
1133         type = req.bRequestType;
1134
1135         ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
1136
1137         switch (req.bRequest) {
1138         case USB_REQ_CLEAR_FEATURE:
1139                 if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1140                                 le16_to_cpu(req.wValue) ==
1141                                 USB_ENDPOINT_HALT) {
1142                         if (req.wLength != 0)
1143                                 break;
1144                         num  = le16_to_cpu(req.wIndex);
1145                         dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
1146                         num &= USB_ENDPOINT_NUMBER_MASK;
1147                         if (dir == TX)
1148                                 num += ci->hw_ep_max / 2;
1149                         if (!ci->ci_hw_ep[num].wedge) {
1150                                 spin_unlock(&ci->lock);
1151                                 err = usb_ep_clear_halt(
1152                                         &ci->ci_hw_ep[num].ep);
1153                                 spin_lock(&ci->lock);
1154                                 if (err)
1155                                         break;
1156                         }
1157                         err = isr_setup_status_phase(ci);
1158                 } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
1159                                 le16_to_cpu(req.wValue) ==
1160                                 USB_DEVICE_REMOTE_WAKEUP) {
1161                         if (req.wLength != 0)
1162                                 break;
1163                         ci->remote_wakeup = 0;
1164                         err = isr_setup_status_phase(ci);
1165                 } else {
1166                         goto delegate;
1167                 }
1168                 break;
1169         case USB_REQ_GET_STATUS:
1170                 if ((type != (USB_DIR_IN|USB_RECIP_DEVICE) ||
1171                         le16_to_cpu(req.wIndex) == OTG_STS_SELECTOR) &&
1172                     type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
1173                     type != (USB_DIR_IN|USB_RECIP_INTERFACE))
1174                         goto delegate;
1175                 if (le16_to_cpu(req.wLength) != 2 ||
1176                     le16_to_cpu(req.wValue)  != 0)
1177                         break;
1178                 err = isr_get_status_response(ci, &req);
1179                 break;
1180         case USB_REQ_SET_ADDRESS:
1181                 if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
1182                         goto delegate;
1183                 if (le16_to_cpu(req.wLength) != 0 ||
1184                     le16_to_cpu(req.wIndex)  != 0)
1185                         break;
1186                 ci->address = (u8)le16_to_cpu(req.wValue);
1187                 ci->setaddr = true;
1188                 err = isr_setup_status_phase(ci);
1189                 break;
1190         case USB_REQ_SET_FEATURE:
1191                 if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1192                                 le16_to_cpu(req.wValue) ==
1193                                 USB_ENDPOINT_HALT) {
1194                         if (req.wLength != 0)
1195                                 break;
1196                         num  = le16_to_cpu(req.wIndex);
1197                         dir = (num & USB_ENDPOINT_DIR_MASK) ? TX : RX;
1198                         num &= USB_ENDPOINT_NUMBER_MASK;
1199                         if (dir == TX)
1200                                 num += ci->hw_ep_max / 2;
1201
1202                         spin_unlock(&ci->lock);
1203                         err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
1204                         spin_lock(&ci->lock);
1205                         if (!err)
1206                                 isr_setup_status_phase(ci);
1207                 } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
1208                         if (req.wLength != 0)
1209                                 break;
1210                         switch (le16_to_cpu(req.wValue)) {
1211                         case USB_DEVICE_REMOTE_WAKEUP:
1212                                 ci->remote_wakeup = 1;
1213                                 err = isr_setup_status_phase(ci);
1214                                 break;
1215                         case USB_DEVICE_TEST_MODE:
1216                                 tmode = le16_to_cpu(req.wIndex) >> 8;
1217                                 switch (tmode) {
1218                                 case TEST_J:
1219                                 case TEST_K:
1220                                 case TEST_SE0_NAK:
1221                                 case TEST_PACKET:
1222                                 case TEST_FORCE_EN:
1223                                         ci->test_mode = tmode;
1224                                         err = isr_setup_status_phase(
1225                                                         ci);
1226                                         break;
1227                                 default:
1228                                         break;
1229                                 }
1230                                 break;
1231                         case USB_DEVICE_B_HNP_ENABLE:
1232                                 if (ci_otg_is_fsm_mode(ci)) {
1233                                         ci->gadget.b_hnp_enable = 1;
1234                                         err = isr_setup_status_phase(
1235                                                         ci);
1236                                 }
1237                                 break;
1238                         case USB_DEVICE_A_ALT_HNP_SUPPORT:
1239                                 if (ci_otg_is_fsm_mode(ci))
1240                                         err = otg_a_alt_hnp_support(ci);
1241                                 break;
1242                         case USB_DEVICE_A_HNP_SUPPORT:
1243                                 if (ci_otg_is_fsm_mode(ci)) {
1244                                         ci->gadget.a_hnp_support = 1;
1245                                         err = isr_setup_status_phase(
1246                                                         ci);
1247                                 }
1248                                 break;
1249                         default:
1250                                 goto delegate;
1251                         }
1252                 } else {
1253                         goto delegate;
1254                 }
1255                 break;
1256         default:
1257 delegate:
1258                 if (req.wLength == 0)   /* no data phase */
1259                         ci->ep0_dir = TX;
1260
1261                 spin_unlock(&ci->lock);
1262                 err = ci->driver->setup(&ci->gadget, &req);
1263                 spin_lock(&ci->lock);
1264                 break;
1265         }
1266
1267         if (err < 0) {
1268                 spin_unlock(&ci->lock);
1269                 if (_ep_set_halt(&hwep->ep, 1, false))
1270                         dev_err(ci->dev, "error: _ep_set_halt\n");
1271                 spin_lock(&ci->lock);
1272         }
1273 }
1274
1275 /**
1276  * isr_tr_complete_handler: transaction complete interrupt handler
1277  * @ci: UDC descriptor
1278  *
1279  * This function handles traffic events
1280  */
1281 static void isr_tr_complete_handler(struct ci_hdrc *ci)
1282 __releases(ci->lock)
1283 __acquires(ci->lock)
1284 {
1285         unsigned i;
1286         int err;
1287
1288         for (i = 0; i < ci->hw_ep_max; i++) {
1289                 struct ci_hw_ep *hwep  = &ci->ci_hw_ep[i];
1290
1291                 if (hwep->ep.desc == NULL)
1292                         continue;   /* not configured */
1293
1294                 if (hw_test_and_clear_complete(ci, i)) {
1295                         err = isr_tr_complete_low(hwep);
1296                         if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
1297                                 if (err > 0)   /* needs status phase */
1298                                         err = isr_setup_status_phase(ci);
1299                                 if (err < 0) {
1300                                         spin_unlock(&ci->lock);
1301                                         if (_ep_set_halt(&hwep->ep, 1, false))
1302                                                 dev_err(ci->dev,
1303                                                 "error: _ep_set_halt\n");
1304                                         spin_lock(&ci->lock);
1305                                 }
1306                         }
1307                 }
1308
1309                 /* Only handle setup packet below */
1310                 if (i == 0 &&
1311                         hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(0)))
1312                         isr_setup_packet_handler(ci);
1313         }
1314 }
1315
1316 /******************************************************************************
1317  * ENDPT block
1318  *****************************************************************************/
1319 /**
1320  * ep_enable: configure endpoint, making it usable
1321  *
1322  * Check usb_ep_enable() at "usb_gadget.h" for details
1323  */
1324 static int ep_enable(struct usb_ep *ep,
1325                      const struct usb_endpoint_descriptor *desc)
1326 {
1327         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1328         int retval = 0;
1329         unsigned long flags;
1330         u32 cap = 0;
1331
1332         if (ep == NULL || desc == NULL)
1333                 return -EINVAL;
1334
1335         spin_lock_irqsave(hwep->lock, flags);
1336
1337         /* only internal SW should enable ctrl endpts */
1338
1339         if (!list_empty(&hwep->qh.queue)) {
1340                 dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
1341                 spin_unlock_irqrestore(hwep->lock, flags);
1342                 return -EBUSY;
1343         }
1344
1345         hwep->ep.desc = desc;
1346
1347         hwep->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
1348         hwep->num  = usb_endpoint_num(desc);
1349         hwep->type = usb_endpoint_type(desc);
1350
1351         hwep->ep.maxpacket = usb_endpoint_maxp(desc);
1352         hwep->ep.mult = usb_endpoint_maxp_mult(desc);
1353
1354         if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1355                 cap |= QH_IOS;
1356
1357         cap |= QH_ZLT;
1358         cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1359         /*
1360          * For ISO-TX, we set mult at QH as the largest value, and use
1361          * MultO at TD as real mult value.
1362          */
1363         if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
1364                 cap |= 3 << __ffs(QH_MULT);
1365
1366         hwep->qh.ptr->cap = cpu_to_le32(cap);
1367
1368         hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE);   /* needed? */
1369
1370         if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
1371                 dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
1372                 retval = -EINVAL;
1373         }
1374
1375         /*
1376          * Enable endpoints in the HW other than ep0 as ep0
1377          * is always enabled
1378          */
1379         if (hwep->num)
1380                 retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
1381                                        hwep->type);
1382
1383         spin_unlock_irqrestore(hwep->lock, flags);
1384         return retval;
1385 }
1386
1387 /**
1388  * ep_disable: endpoint is no longer usable
1389  *
1390  * Check usb_ep_disable() at "usb_gadget.h" for details
1391  */
1392 static int ep_disable(struct usb_ep *ep)
1393 {
1394         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1395         int direction, retval = 0;
1396         unsigned long flags;
1397
1398         if (ep == NULL)
1399                 return -EINVAL;
1400         else if (hwep->ep.desc == NULL)
1401                 return -EBUSY;
1402
1403         spin_lock_irqsave(hwep->lock, flags);
1404         if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1405                 spin_unlock_irqrestore(hwep->lock, flags);
1406                 return 0;
1407         }
1408
1409         /* only internal SW should disable ctrl endpts */
1410
1411         direction = hwep->dir;
1412         do {
1413                 retval |= _ep_nuke(hwep);
1414                 retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
1415
1416                 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1417                         hwep->dir = (hwep->dir == TX) ? RX : TX;
1418
1419         } while (hwep->dir != direction);
1420
1421         hwep->ep.desc = NULL;
1422
1423         spin_unlock_irqrestore(hwep->lock, flags);
1424         return retval;
1425 }
1426
1427 /**
1428  * ep_alloc_request: allocate a request object to use with this endpoint
1429  *
1430  * Check usb_ep_alloc_request() at "usb_gadget.h" for details
1431  */
1432 static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1433 {
1434         struct ci_hw_req *hwreq = NULL;
1435
1436         if (ep == NULL)
1437                 return NULL;
1438
1439         hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
1440         if (hwreq != NULL) {
1441                 INIT_LIST_HEAD(&hwreq->queue);
1442                 INIT_LIST_HEAD(&hwreq->tds);
1443         }
1444
1445         return (hwreq == NULL) ? NULL : &hwreq->req;
1446 }
1447
1448 /**
1449  * ep_free_request: frees a request object
1450  *
1451  * Check usb_ep_free_request() at "usb_gadget.h" for details
1452  */
1453 static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
1454 {
1455         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1456         struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1457         struct td_node *node, *tmpnode;
1458         unsigned long flags;
1459
1460         if (ep == NULL || req == NULL) {
1461                 return;
1462         } else if (!list_empty(&hwreq->queue)) {
1463                 dev_err(hwep->ci->dev, "freeing queued request\n");
1464                 return;
1465         }
1466
1467         spin_lock_irqsave(hwep->lock, flags);
1468
1469         list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1470                 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1471                 list_del_init(&node->td);
1472                 node->ptr = NULL;
1473                 kfree(node);
1474         }
1475
1476         kfree(hwreq);
1477
1478         spin_unlock_irqrestore(hwep->lock, flags);
1479 }
1480
1481 /**
1482  * ep_queue: queues (submits) an I/O request to an endpoint
1483  *
1484  * Check usb_ep_queue() at "usb_gadget.h" for details
1485  */
1486 static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1487                     gfp_t __maybe_unused gfp_flags)
1488 {
1489         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1490         int retval = 0;
1491         unsigned long flags;
1492
1493         if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
1494                 return -EINVAL;
1495
1496         spin_lock_irqsave(hwep->lock, flags);
1497         if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1498                 spin_unlock_irqrestore(hwep->lock, flags);
1499                 return 0;
1500         }
1501         retval = _ep_queue(ep, req, gfp_flags);
1502         spin_unlock_irqrestore(hwep->lock, flags);
1503         return retval;
1504 }
1505
1506 /**
1507  * ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
1508  *
1509  * Check usb_ep_dequeue() at "usb_gadget.h" for details
1510  */
1511 static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1512 {
1513         struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
1514         struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1515         unsigned long flags;
1516         struct td_node *node, *tmpnode;
1517
1518         if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
1519                 hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
1520                 list_empty(&hwep->qh.queue))
1521                 return -EINVAL;
1522
1523         spin_lock_irqsave(hwep->lock, flags);
1524         if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
1525                 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1526
1527         list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1528                 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1529                 list_del(&node->td);
1530                 kfree(node);
1531         }
1532
1533         /* pop request */
1534         list_del_init(&hwreq->queue);
1535
1536         usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
1537
1538         req->status = -ECONNRESET;
1539
1540         if (hwreq->req.complete != NULL) {
1541                 spin_unlock(hwep->lock);
1542                 usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
1543                 spin_lock(hwep->lock);
1544         }
1545
1546         spin_unlock_irqrestore(hwep->lock, flags);
1547         return 0;
1548 }
1549
1550 /**
1551  * ep_set_halt: sets the endpoint halt feature
1552  *
1553  * Check usb_ep_set_halt() at "usb_gadget.h" for details
1554  */
1555 static int ep_set_halt(struct usb_ep *ep, int value)
1556 {
1557         return _ep_set_halt(ep, value, true);
1558 }
1559
1560 /**
1561  * ep_set_wedge: sets the halt feature and ignores clear requests
1562  *
1563  * Check usb_ep_set_wedge() at "usb_gadget.h" for details
1564  */
1565 static int ep_set_wedge(struct usb_ep *ep)
1566 {
1567         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1568         unsigned long flags;
1569
1570         if (ep == NULL || hwep->ep.desc == NULL)
1571                 return -EINVAL;
1572
1573         spin_lock_irqsave(hwep->lock, flags);
1574         hwep->wedge = 1;
1575         spin_unlock_irqrestore(hwep->lock, flags);
1576
1577         return usb_ep_set_halt(ep);
1578 }
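/*
 * Illustrative sketch (not part of this driver): a function driver that hits
 * a fatal protocol error on a bulk endpoint can wedge it so the host cannot
 * simply clear the halt; "bulk_in" is a hypothetical endpoint pointer.
 *
 *	if (usb_ep_set_wedge(bulk_in))
 *		pr_debug("failed to wedge endpoint\n");
 */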
1579
1580 /**
1581  * ep_fifo_flush: flushes contents of a fifo
1582  *
1583  * Check usb_ep_fifo_flush() at "usb_gadget.h" for details
1584  */
1585 static void ep_fifo_flush(struct usb_ep *ep)
1586 {
1587         struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1588         unsigned long flags;
1589
1590         if (ep == NULL) {
1591                 dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
1592                 return;
1593         }
1594
1595         spin_lock_irqsave(hwep->lock, flags);
1596         if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1597                 spin_unlock_irqrestore(hwep->lock, flags);
1598                 return;
1599         }
1600
1601         hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1602
1603         spin_unlock_irqrestore(hwep->lock, flags);
1604 }
1605
1606 /**
1607  * Endpoint-specific part of the API to the USB controller hardware
1608  * Check "usb_gadget.h" for details
1609  */
1610 static const struct usb_ep_ops usb_ep_ops = {
1611         .enable        = ep_enable,
1612         .disable       = ep_disable,
1613         .alloc_request = ep_alloc_request,
1614         .free_request  = ep_free_request,
1615         .queue         = ep_queue,
1616         .dequeue       = ep_dequeue,
1617         .set_halt      = ep_set_halt,
1618         .set_wedge     = ep_set_wedge,
1619         .fifo_flush    = ep_fifo_flush,
1620 };
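/*
 * These ops are installed on every hardware endpoint in init_eps() below
 * (hwep->ep.ops = &usb_ep_ops), so all usb_ep_*() calls coming from the
 * gadget core and from function drivers are routed to the handlers above.
 */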
1621
1622 /******************************************************************************
1623  * GADGET block
1624  *****************************************************************************/
1625 /**
1626  * ci_hdrc_gadget_connect: caller makes sure gadget driver is bound
1627  */
1628 static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
1629 {
1630         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1631
1632         if (is_active) {
1633                 pm_runtime_get_sync(ci->dev);
1634                 hw_device_reset(ci);
1635                 spin_lock_irq(&ci->lock);
1636                 if (ci->driver) {
1637                         hw_device_state(ci, ci->ep0out->qh.dma);
1638                         usb_gadget_set_state(_gadget, USB_STATE_POWERED);
1639                         spin_unlock_irq(&ci->lock);
1640                         usb_udc_vbus_handler(_gadget, true);
1641                 } else {
1642                         spin_unlock_irq(&ci->lock);
1643                 }
1644         } else {
1645                 usb_udc_vbus_handler(_gadget, false);
1646                 if (ci->driver)
1647                         ci->driver->disconnect(&ci->gadget);
1648                 hw_device_state(ci, 0);
1649                 if (ci->platdata->notify_event)
1650                         ci->platdata->notify_event(ci,
1651                         CI_HDRC_CONTROLLER_STOPPED_EVENT);
1652                 _gadget_stop_activity(&ci->gadget);
1653                 pm_runtime_put_sync(ci->dev);
1654                 usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
1655         }
1656 }
1657
1658 static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1659 {
1660         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1661         unsigned long flags;
1662         int ret = 0;
1663
1664         spin_lock_irqsave(&ci->lock, flags);
1665         ci->vbus_active = is_active;
1666         spin_unlock_irqrestore(&ci->lock, flags);
1667
1668         if (ci->usb_phy)
1669                 usb_phy_set_charger_state(ci->usb_phy, is_active ?
1670                         USB_CHARGER_PRESENT : USB_CHARGER_ABSENT);
1671
1672         if (ci->platdata->notify_event)
1673                 ret = ci->platdata->notify_event(ci,
1674                                 CI_HDRC_CONTROLLER_VBUS_EVENT);
1675
1676         if (ci->driver)
1677                 ci_hdrc_gadget_connect(_gadget, is_active);
1678
1679         return ret;
1680 }
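/*
 * Illustrative sketch (not part of this driver): VBUS detection glue (for
 * example an extcon or PHY notifier in the platform layer) reports the
 * session state through the gadget core, which invokes the ->vbus_session
 * operation above.  The "vbus_present" flag and the surrounding notifier
 * are hypothetical.
 *
 *	if (vbus_present)
 *		usb_gadget_vbus_connect(&ci->gadget);
 *	else
 *		usb_gadget_vbus_disconnect(&ci->gadget);
 */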
1681
1682 static int ci_udc_wakeup(struct usb_gadget *_gadget)
1683 {
1684         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1685         unsigned long flags;
1686         int ret = 0;
1687
1688         spin_lock_irqsave(&ci->lock, flags);
1689         if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
1690                 spin_unlock_irqrestore(&ci->lock, flags);
1691                 return 0;
1692         }
1693         if (!ci->remote_wakeup) {
1694                 ret = -EOPNOTSUPP;
1695                 goto out;
1696         }
1697         if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
1698                 ret = -EINVAL;
1699                 goto out;
1700         }
1701         hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
1702 out:
1703         spin_unlock_irqrestore(&ci->lock, flags);
1704         return ret;
1705 }
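/*
 * Illustrative sketch (not part of this driver): a function driver whose
 * remote wakeup was enabled by the host asks for resume signalling with
 * usb_gadget_wakeup(), which calls ci_udc_wakeup() above.  "g" is a
 * hypothetical gadget pointer.
 *
 *	int ret = usb_gadget_wakeup(g);
 *
 *	if (ret == -EOPNOTSUPP)
 *		pr_debug("host did not enable remote wakeup\n");
 */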
1706
1707 static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
1708 {
1709         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1710
1711         if (ci->usb_phy)
1712                 return usb_phy_set_power(ci->usb_phy, ma);
1713         return -ENOTSUPP;
1714 }
1715
1716 static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
1717 {
1718         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1719         struct ci_hw_ep *hwep = ci->ep0in;
1720         unsigned long flags;
1721
1722         spin_lock_irqsave(hwep->lock, flags);
1723         _gadget->is_selfpowered = (is_on != 0);
1724         spin_unlock_irqrestore(hwep->lock, flags);
1725
1726         return 0;
1727 }
1728
1729 /* Change Data+ pullup status
1730  * This function is used by usb_gadget_connect/disconnect
1731  */
1732 static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
1733 {
1734         struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
1735
1736         /*
1737          * The Data+ pullup is controlled by the OTG state machine in OTG
1738          * fsm mode; don't touch Data+ in host mode for a dual-role config.
1739          */
1740         if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
1741                 return 0;
1742
1743         pm_runtime_get_sync(ci->dev);
1744         if (is_on)
1745                 hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
1746         else
1747                 hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
1748         pm_runtime_put_sync(ci->dev);
1749
1750         return 0;
1751 }
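/*
 * Illustrative sketch (not part of this driver): the gadget core toggles the
 * Data+ pullup through this operation, typically from usb_gadget_connect()/
 * usb_gadget_disconnect() or when userspace writes the UDC's soft_connect
 * attribute.  "g" is a hypothetical gadget pointer.
 *
 *	usb_gadget_connect(g);		// -> ci_udc_pullup(g, 1), sets USBCMD_RS
 *	usb_gadget_disconnect(g);	// -> ci_udc_pullup(g, 0), clears USBCMD_RS
 */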
1752
1753 static int ci_udc_start(struct usb_gadget *gadget,
1754                          struct usb_gadget_driver *driver);
1755 static int ci_udc_stop(struct usb_gadget *gadget);
1756
1757 /* Match ISOC IN from the highest endpoint */
1758 static struct usb_ep *ci_udc_match_ep(struct usb_gadget *gadget,
1759                               struct usb_endpoint_descriptor *desc,
1760                               struct usb_ss_ep_comp_descriptor *comp_desc)
1761 {
1762         struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1763         struct usb_ep *ep;
1764
1765         if (usb_endpoint_xfer_isoc(desc) && usb_endpoint_dir_in(desc)) {
1766                 list_for_each_entry_reverse(ep, &ci->gadget.ep_list, ep_list) {
1767                         if (ep->caps.dir_in && !ep->claimed)
1768                                 return ep;
1769                 }
1770         }
1771
1772         return NULL;
1773 }
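/*
 * Returning NULL here lets the gadget core's endpoint autoconfiguration fall
 * back to its default matching; only ISOC IN descriptors get the special
 * "highest available IN endpoint" treatment above.
 */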
1774
1775 /**
1776  * Device operations part of the API to the USB controller hardware,
1777  * which don't involve endpoints (or I/O)
1778  * Check "usb_gadget.h" for details
1779  */
1780 static const struct usb_gadget_ops usb_gadget_ops = {
1781         .vbus_session   = ci_udc_vbus_session,
1782         .wakeup         = ci_udc_wakeup,
1783         .set_selfpowered        = ci_udc_selfpowered,
1784         .pullup         = ci_udc_pullup,
1785         .vbus_draw      = ci_udc_vbus_draw,
1786         .udc_start      = ci_udc_start,
1787         .udc_stop       = ci_udc_stop,
1788         .match_ep       = ci_udc_match_ep,
1789 };
1790
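/*
 * The ci_hw_ep array is laid out with all OUT (RX) endpoints in the first
 * half and all IN (TX) endpoints in the second half, which is why the k
 * index below is computed as i + j * hw_ep_max/2.  ep0 gets its shorthand
 * pointers (ci->ep0out/ci->ep0in) and a fixed CTRL_PAYLOAD_MAX packet
 * limit, and is not added to the gadget's ep_list.
 */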
1791 static int init_eps(struct ci_hdrc *ci)
1792 {
1793         int retval = 0, i, j;
1794
1795         for (i = 0; i < ci->hw_ep_max/2; i++)
1796                 for (j = RX; j <= TX; j++) {
1797                         int k = i + j * ci->hw_ep_max/2;
1798                         struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
1799
1800                         scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
1801                                         (j == TX)  ? "in" : "out");
1802
1803                         hwep->ci          = ci;
1804                         hwep->lock         = &ci->lock;
1805                         hwep->td_pool      = ci->td_pool;
1806
1807                         hwep->ep.name      = hwep->name;
1808                         hwep->ep.ops       = &usb_ep_ops;
1809
1810                         if (i == 0) {
1811                                 hwep->ep.caps.type_control = true;
1812                         } else {
1813                                 hwep->ep.caps.type_iso = true;
1814                                 hwep->ep.caps.type_bulk = true;
1815                                 hwep->ep.caps.type_int = true;
1816                         }
1817
1818                         if (j == TX)
1819                                 hwep->ep.caps.dir_in = true;
1820                         else
1821                                 hwep->ep.caps.dir_out = true;
1822
1823                         /*
1824                          * for ep0: maxP defined in desc, for other
1825                          * eps, maxP is set by epautoconfig() called
1826                          * by gadget layer
1827                          */
1828                         usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);
1829
1830                         INIT_LIST_HEAD(&hwep->qh.queue);
1831                         hwep->qh.ptr = dma_pool_zalloc(ci->qh_pool, GFP_KERNEL,
1832                                                        &hwep->qh.dma);
1833                         if (hwep->qh.ptr == NULL)
1834                                 retval = -ENOMEM;
1835
1836                         /*
1837                          * set up shorthands for ep0 out and in endpoints,
1838                          * don't add to gadget's ep_list
1839                          */
1840                         if (i == 0) {
1841                                 if (j == RX)
1842                                         ci->ep0out = hwep;
1843                                 else
1844                                         ci->ep0in = hwep;
1845
1846                                 usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
1847                                 continue;
1848                         }
1849
1850                         list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
1851                 }
1852
1853         return retval;
1854 }
1855
1856 static void destroy_eps(struct ci_hdrc *ci)
1857 {
1858         int i;
1859
1860         for (i = 0; i < ci->hw_ep_max; i++) {
1861                 struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
1862
1863                 if (hwep->pending_td)
1864                         free_pending_td(hwep);
1865                 dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
1866         }
1867 }
1868
1869 /**
1870  * ci_udc_start: register a gadget driver
1871  * @gadget: our gadget
1872  * @driver: the driver being registered
1873  *
1874  * Interrupts are enabled here.
1875  */
1876 static int ci_udc_start(struct usb_gadget *gadget,
1877                          struct usb_gadget_driver *driver)
1878 {
1879         struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1880         int retval;
1881
1882         if (driver->disconnect == NULL)
1883                 return -EINVAL;
1884
1885         ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
1886         retval = usb_ep_enable(&ci->ep0out->ep);
1887         if (retval)
1888                 return retval;
1889
1890         ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
1891         retval = usb_ep_enable(&ci->ep0in->ep);
1892         if (retval)
1893                 return retval;
1894
1895         ci->driver = driver;
1896
1897         /* Start otg fsm for B-device */
1898         if (ci_otg_is_fsm_mode(ci) && ci->fsm.id) {
1899                 ci_hdrc_otg_fsm_start(ci);
1900                 return retval;
1901         }
1902
1903         if (ci->vbus_active)
1904                 ci_hdrc_gadget_connect(gadget, 1);
1905         else
1906                 usb_udc_vbus_handler(&ci->gadget, false);
1907
1908         return retval;
1909 }
1910
1911 static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
1912 {
1913         if (!ci_otg_is_fsm_mode(ci))
1914                 return;
1915
1916         mutex_lock(&ci->fsm.lock);
1917         if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
1918                 ci->fsm.a_bidl_adis_tmout = 1;
1919                 ci_hdrc_otg_fsm_start(ci);
1920         } else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
1921                 ci->fsm.protocol = PROTO_UNDEF;
1922                 ci->fsm.otg->state = OTG_STATE_UNDEFINED;
1923         }
1924         mutex_unlock(&ci->fsm.lock);
1925 }
1926
1927 /**
1928  * ci_udc_stop: unregister a gadget driver
1929  */
1930 static int ci_udc_stop(struct usb_gadget *gadget)
1931 {
1932         struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1933         unsigned long flags;
1934
1935         spin_lock_irqsave(&ci->lock, flags);
1936         ci->driver = NULL;
1937
1938         if (ci->vbus_active) {
1939                 hw_device_state(ci, 0);
1940                 spin_unlock_irqrestore(&ci->lock, flags);
1941                 if (ci->platdata->notify_event)
1942                         ci->platdata->notify_event(ci,
1943                         CI_HDRC_CONTROLLER_STOPPED_EVENT);
1944                 _gadget_stop_activity(&ci->gadget);
1945                 spin_lock_irqsave(&ci->lock, flags);
1946                 pm_runtime_put(ci->dev);
1947         }
1948
1949         spin_unlock_irqrestore(&ci->lock, flags);
1950
1951         ci_udc_stop_for_otg_fsm(ci);
1952         return 0;
1953 }
1954
1955 /******************************************************************************
1956  * BUS block
1957  *****************************************************************************/
1958 /**
1959  * udc_irq: ci interrupt handler
1960  *
1961  * This function returns IRQ_HANDLED if the IRQ has been handled.
1962  * It locks access to registers.
1963  */
1964 static irqreturn_t udc_irq(struct ci_hdrc *ci)
1965 {
1966         irqreturn_t retval;
1967         u32 intr;
1968
1969         if (ci == NULL)
1970                 return IRQ_HANDLED;
1971
1972         spin_lock(&ci->lock);
1973
1974         if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
1975                 if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
1976                                 USBMODE_CM_DC) {
1977                         spin_unlock(&ci->lock);
1978                         return IRQ_NONE;
1979                 }
1980         }
1981         intr = hw_test_and_clear_intr_active(ci);
1982
1983         if (intr) {
1984                 /* order defines priority - do NOT change it */
1985                 if (USBi_URI & intr)
1986                         isr_reset_handler(ci);
1987
1988                 if (USBi_PCI & intr) {
1989                         ci->gadget.speed = hw_port_is_high_speed(ci) ?
1990                                 USB_SPEED_HIGH : USB_SPEED_FULL;
1991                         if (ci->suspended) {
1992                                 if (ci->driver->resume) {
1993                                         spin_unlock(&ci->lock);
1994                                         ci->driver->resume(&ci->gadget);
1995                                         spin_lock(&ci->lock);
1996                                 }
1997                                 ci->suspended = 0;
1998                                 usb_gadget_set_state(&ci->gadget,
1999                                                 ci->resume_state);
2000                         }
2001                 }
2002
2003                 if (USBi_UI  & intr)
2004                         isr_tr_complete_handler(ci);
2005
2006                 if ((USBi_SLI & intr) && !(ci->suspended)) {
2007                         ci->suspended = 1;
2008                         ci->resume_state = ci->gadget.state;
2009                         if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
2010                             ci->driver->suspend) {
2011                                 spin_unlock(&ci->lock);
2012                                 ci->driver->suspend(&ci->gadget);
2013                                 spin_lock(&ci->lock);
2014                         }
2015                         usb_gadget_set_state(&ci->gadget,
2016                                         USB_STATE_SUSPENDED);
2017                 }
2018                 retval = IRQ_HANDLED;
2019         } else {
2020                 retval = IRQ_NONE;
2021         }
2022         spin_unlock(&ci->lock);
2023
2024         return retval;
2025 }
2026
2027 /**
2028  * udc_start: initialize gadget role
2029  * @ci: chipidea controller
2030  */
2031 static int udc_start(struct ci_hdrc *ci)
2032 {
2033         struct device *dev = ci->dev;
2034         struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
2035         int retval = 0;
2036
2037         ci->gadget.ops          = &usb_gadget_ops;
2038         ci->gadget.speed        = USB_SPEED_UNKNOWN;
2039         ci->gadget.max_speed    = USB_SPEED_HIGH;
2040         ci->gadget.name         = ci->platdata->name;
2041         ci->gadget.otg_caps     = otg_caps;
2042         ci->gadget.sg_supported = 1;
2043
2044         if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA)
2045                 ci->gadget.quirk_avoids_skb_reserve = 1;
2046
2047         if (ci->is_otg && (otg_caps->hnp_support || otg_caps->srp_support ||
2048                                                 otg_caps->adp_support))
2049                 ci->gadget.is_otg = 1;
2050
2051         INIT_LIST_HEAD(&ci->gadget.ep_list);
2052
2053         /* alloc resources */
2054         ci->qh_pool = dma_pool_create("ci_hw_qh", dev->parent,
2055                                        sizeof(struct ci_hw_qh),
2056                                        64, CI_HDRC_PAGE_SIZE);
2057         if (ci->qh_pool == NULL)
2058                 return -ENOMEM;
2059
2060         ci->td_pool = dma_pool_create("ci_hw_td", dev->parent,
2061                                        sizeof(struct ci_hw_td),
2062                                        64, CI_HDRC_PAGE_SIZE);
2063         if (ci->td_pool == NULL) {
2064                 retval = -ENOMEM;
2065                 goto free_qh_pool;
2066         }
2067
2068         retval = init_eps(ci);
2069         if (retval)
2070                 goto free_pools;
2071
2072         ci->gadget.ep0 = &ci->ep0in->ep;
2073
2074         retval = usb_add_gadget_udc(dev, &ci->gadget);
2075         if (retval)
2076                 goto destroy_eps;
2077
2078         return retval;
2079
2080 destroy_eps:
2081         destroy_eps(ci);
2082 free_pools:
2083         dma_pool_destroy(ci->td_pool);
2084 free_qh_pool:
2085         dma_pool_destroy(ci->qh_pool);
2086         return retval;
2087 }
2088
2089 /**
2090  * ci_hdrc_gadget_destroy: parent remove must call this to remove UDC
2091  *
2092  * No interrupts active, the IRQ has been released
2093  */
2094 void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
2095 {
2096         if (!ci->roles[CI_ROLE_GADGET])
2097                 return;
2098
2099         usb_del_gadget_udc(&ci->gadget);
2100
2101         destroy_eps(ci);
2102
2103         dma_pool_destroy(ci->td_pool);
2104         dma_pool_destroy(ci->qh_pool);
2105 }
2106
2107 static int udc_id_switch_for_device(struct ci_hdrc *ci)
2108 {
2109         if (ci->platdata->pins_device)
2110                 pinctrl_select_state(ci->platdata->pctl,
2111                                      ci->platdata->pins_device);
2112
2113         if (ci->is_otg)
2114                 /* Clear and enable BSV irq */
2115                 hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
2116                                         OTGSC_BSVIS | OTGSC_BSVIE);
2117
2118         return 0;
2119 }
2120
2121 static void udc_id_switch_for_host(struct ci_hdrc *ci)
2122 {
2123         /*
2124          * host doesn't care about the B_SESSION_VALID event,
2125          * so clear and disable the BSV irq
2126          */
2127         if (ci->is_otg)
2128                 hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);
2129
2130         ci->vbus_active = 0;
2131
2132         if (ci->platdata->pins_device && ci->platdata->pins_default)
2133                 pinctrl_select_state(ci->platdata->pctl,
2134                                      ci->platdata->pins_default);
2135 }
2136
2137 /**
2138  * ci_hdrc_gadget_init - initialize device-related bits
2139  * @ci: the controller
2140  *
2141  * This function initializes the gadget, if the device is "device capable".
2142  */
2143 int ci_hdrc_gadget_init(struct ci_hdrc *ci)
2144 {
2145         struct ci_role_driver *rdrv;
2146         int ret;
2147
2148         if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
2149                 return -ENXIO;
2150
2151         rdrv = devm_kzalloc(ci->dev, sizeof(*rdrv), GFP_KERNEL);
2152         if (!rdrv)
2153                 return -ENOMEM;
2154
2155         rdrv->start     = udc_id_switch_for_device;
2156         rdrv->stop      = udc_id_switch_for_host;
2157         rdrv->irq       = udc_irq;
2158         rdrv->name      = "gadget";
2159
2160         ret = udc_start(ci);
2161         if (!ret)
2162                 ci->roles[CI_ROLE_GADGET] = rdrv;
2163
2164         return ret;
2165 }
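/*
 * On success the chipidea core can switch to CI_ROLE_GADGET: the role
 * framework then calls rdrv->start (udc_id_switch_for_device) and
 * rdrv->stop (udc_id_switch_for_host) on role changes, and dispatches the
 * controller interrupt to rdrv->irq (udc_irq) while the gadget role is
 * active; see "ci.h" for the ci_role_driver wiring.
 */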