drivers/usb/dwc3/gadget.c (platform/kernel/linux-rpi.git, commit e534b98205ca6525b8bcde744f8691c6be513695)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
4  *
5  * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
6  *
7  * Authors: Felipe Balbi <balbi@ti.com>,
8  *          Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9  */
10
11 #include <linux/kernel.h>
12 #include <linux/delay.h>
13 #include <linux/slab.h>
14 #include <linux/spinlock.h>
15 #include <linux/platform_device.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/list.h>
20 #include <linux/dma-mapping.h>
21
22 #include <linux/usb/ch9.h>
23 #include <linux/usb/gadget.h>
24
25 #include "debug.h"
26 #include "core.h"
27 #include "gadget.h"
28 #include "io.h"
29
30 #define DWC3_ALIGN_FRAME(d, n)  (((d)->frame_number + ((d)->interval * (n))) \
31                                         & ~((d)->interval - 1))
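/*
 * Illustrative example (values chosen for illustration, not from the
 * databook): with dep->interval = 8 microframes and dep->frame_number =
 * 0x3705, DWC3_ALIGN_FRAME(dep, 1) yields (0x3705 + 8) & ~7 = 0x3708,
 * i.e. the next frame number after the current one that is aligned to the
 * endpoint's interval. The mask works as an alignment because, for HS/SS
 * endpoints, dep->interval is a power of two (1 << (bInterval - 1)).
 */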
32
33 /**
34  * dwc3_gadget_set_test_mode - enables usb2 test modes
35  * @dwc: pointer to our context structure
36  * @mode: the mode to set (J, K, SE0 NAK, Test Packet, Force Enable)
37  *
38  * Caller should take care of locking. This function will return 0 on
39  * success or -EINVAL if wrong Test Selector is passed.
40  */
41 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
42 {
43         u32             reg;
44
45         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
46         reg &= ~DWC3_DCTL_TSTCTRL_MASK;
47
48         switch (mode) {
49         case USB_TEST_J:
50         case USB_TEST_K:
51         case USB_TEST_SE0_NAK:
52         case USB_TEST_PACKET:
53         case USB_TEST_FORCE_ENABLE:
54                 reg |= mode << 1;
55                 break;
56         default:
57                 return -EINVAL;
58         }
59
60         dwc3_gadget_dctl_write_safe(dwc, reg);
61
62         return 0;
63 }
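/*
 * Usage sketch (illustrative only, showing a hypothetical caller): per the
 * kerneldoc above, the caller is expected to hold dwc->lock, e.g.:
 *
 *	spin_lock_irqsave(&dwc->lock, flags);
 *	ret = dwc3_gadget_set_test_mode(dwc, USB_TEST_PACKET);
 *	spin_unlock_irqrestore(&dwc->lock, flags);
 */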
64
65 /**
66  * dwc3_gadget_get_link_state - gets current state of usb link
67  * @dwc: pointer to our context structure
68  *
69  * Caller should take care of locking. This function will
70  * return the link state on success (>= 0) or -ETIMEDOUT.
71  */
72 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
73 {
74         u32             reg;
75
76         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
77
78         return DWC3_DSTS_USBLNKST(reg);
79 }
80
81 /**
82  * dwc3_gadget_set_link_state - sets usb link to a particular state
83  * @dwc: pointer to our context structure
84  * @state: the state to put link into
85  *
86  * Caller should take care of locking. This function will
87  * return 0 on success or -ETIMEDOUT.
88  */
89 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
90 {
91         int             retries = 10000;
92         u32             reg;
93
94         /*
95          * Wait until device controller is ready. Only applies to 1.94a and
96          * later RTL.
97          */
98         if (!DWC3_VER_IS_PRIOR(DWC3, 194A)) {
99                 while (--retries) {
100                         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
101                         if (reg & DWC3_DSTS_DCNRD)
102                                 udelay(5);
103                         else
104                                 break;
105                 }
106
107                 if (retries <= 0)
108                         return -ETIMEDOUT;
109         }
110
111         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
112         reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
113
114         /* set no action before sending new link state change */
115         dwc3_writel(dwc->regs, DWC3_DCTL, reg);
116
117         /* set requested state */
118         reg |= DWC3_DCTL_ULSTCHNGREQ(state);
119         dwc3_writel(dwc->regs, DWC3_DCTL, reg);
120
121         /*
122          * The following code is racy when called from dwc3_gadget_wakeup,
123          * and is not needed, at least on newer versions
124          */
125         if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
126                 return 0;
127
128         /* wait for a change in DSTS */
129         retries = 10000;
130         while (--retries) {
131                 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
132
133                 if (DWC3_DSTS_USBLNKST(reg) == state)
134                         return 0;
135
136                 udelay(5);
137         }
138
139         return -ETIMEDOUT;
140 }
141
142 /**
143  * dwc3_ep_inc_trb - increment a trb index.
144  * @index: Pointer to the TRB index to increment.
145  *
146  * The index should never point to the link TRB. After incrementing,
147  * if it points to the link TRB, wrap around to the beginning. The
148  * link TRB is always at the last TRB entry.
149  */
150 static void dwc3_ep_inc_trb(u8 *index)
151 {
152         (*index)++;
153         if (*index == (DWC3_TRB_NUM - 1))
154                 *index = 0;
155 }
156
157 /**
158  * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
159  * @dep: The endpoint whose enqueue pointer we're incrementing
160  */
161 static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
162 {
163         dwc3_ep_inc_trb(&dep->trb_enqueue);
164 }
165
166 /**
167  * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
168  * @dep: The endpoint whose dequeue pointer we're incrementing
169  */
170 static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
171 {
172         dwc3_ep_inc_trb(&dep->trb_dequeue);
173 }
174
175 static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
176                 struct dwc3_request *req, int status)
177 {
178         struct dwc3                     *dwc = dep->dwc;
179
180         list_del(&req->list);
181         req->remaining = 0;
182         req->needs_extra_trb = false;
183
184         if (req->request.status == -EINPROGRESS)
185                 req->request.status = status;
186
187         if (req->trb)
188                 usb_gadget_unmap_request_by_dev(dwc->sysdev,
189                                 &req->request, req->direction);
190
191         req->trb = NULL;
192         trace_dwc3_gadget_giveback(req);
193
194         if (dep->number > 1)
195                 pm_runtime_put(dwc->dev);
196 }
197
198 /**
199  * dwc3_gadget_giveback - call struct usb_request's ->complete callback
200  * @dep: The endpoint to which the request belongs
201  * @req: The request we're giving back
202  * @status: completion code for the request
203  *
204  * Must be called with controller's lock held and interrupts disabled. This
205  * function will unmap @req and call its ->complete() callback to notify upper
206  * layers that it has completed.
207  */
208 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
209                 int status)
210 {
211         struct dwc3                     *dwc = dep->dwc;
212
213         dwc3_gadget_del_and_unmap_request(dep, req, status);
214         req->status = DWC3_REQUEST_STATUS_COMPLETED;
215
216         spin_unlock(&dwc->lock);
217         usb_gadget_giveback_request(&dep->endpoint, &req->request);
218         spin_lock(&dwc->lock);
219 }
220
221 /**
222  * dwc3_send_gadget_generic_command - issue a generic command for the controller
223  * @dwc: pointer to the controller context
224  * @cmd: the command to be issued
225  * @param: command parameter
226  *
227  * Caller should take care of locking. Issue @cmd with a given @param to @dwc
228  * and wait for its completion.
229  */
230 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
231                 u32 param)
232 {
233         u32             timeout = 500;
234         int             status = 0;
235         int             ret = 0;
236         u32             reg;
237
238         dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
239         dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
240
241         do {
242                 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
243                 if (!(reg & DWC3_DGCMD_CMDACT)) {
244                         status = DWC3_DGCMD_STATUS(reg);
245                         if (status)
246                                 ret = -EINVAL;
247                         break;
248                 }
249         } while (--timeout);
250
251         if (!timeout) {
252                 ret = -ETIMEDOUT;
253                 status = -ETIMEDOUT;
254         }
255
256         trace_dwc3_gadget_generic_cmd(cmd, param, status);
257
258         return ret;
259 }
260
261 static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
262
263 /**
264  * dwc3_send_gadget_ep_cmd - issue an endpoint command
265  * @dep: the endpoint to which the command is going to be issued
266  * @cmd: the command to be issued
267  * @params: parameters to the command
268  *
269  * Caller should handle locking. This function will issue @cmd with given
270  * @params to @dep and wait for its completion.
271  */
272 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
273                 struct dwc3_gadget_ep_cmd_params *params)
274 {
275         const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
276         struct dwc3             *dwc = dep->dwc;
277         u32                     timeout = 5000;
278         u32                     saved_config = 0;
279         u32                     reg;
280
281         int                     cmd_status = 0;
282         int                     ret = -EINVAL;
283
284         /*
285          * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
286          * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
287          * endpoint command.
288          *
289          * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
290          * settings. Restore them after the command is completed.
291          *
292          * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
293          */
294         if (dwc->gadget->speed <= USB_SPEED_HIGH) {
295                 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
296                 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
297                         saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
298                         reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
299                 }
300
301                 if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
302                         saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
303                         reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
304                 }
305
306                 if (saved_config)
307                         dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
308         }
309
310         if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
311                 int link_state;
312
313                 /*
314                  * Initiate remote wakeup if the link state is in U3 when
315                  * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
316                  * link state is in U1/U2, no remote wakeup is needed. The Start
317                  * Transfer command will initiate the link recovery.
318                  */
319                 link_state = dwc3_gadget_get_link_state(dwc);
320                 switch (link_state) {
321                 case DWC3_LINK_STATE_U2:
322                         if (dwc->gadget->speed >= USB_SPEED_SUPER)
323                                 break;
324
325                         fallthrough;
326                 case DWC3_LINK_STATE_U3:
327                         ret = __dwc3_gadget_wakeup(dwc);
328                         dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
329                                         ret);
330                         break;
331                 }
332         }
333
334         dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
335         dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
336         dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
337
338         /*
339  * Synopsys Databook 2.60a states, in section 6.3.2.5.6, that if we're
340          * not relying on XferNotReady, we can make use of a special "No
341          * Response Update Transfer" command where we should clear both CmdAct
342          * and CmdIOC bits.
343          *
344          * With this, we don't need to wait for command completion and can
345          * straight away issue further commands to the endpoint.
346          *
347          * NOTICE: We're making an assumption that control endpoints will never
348          * make use of Update Transfer command. This is a safe assumption
349          * because we can never have more than one request at a time with
350          * Control Endpoints. If anybody changes that assumption, this chunk
351          * needs to be updated accordingly.
352          */
353         if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
354                         !usb_endpoint_xfer_isoc(desc))
355                 cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
356         else
357                 cmd |= DWC3_DEPCMD_CMDACT;
358
359         dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
360         do {
361                 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
362                 if (!(reg & DWC3_DEPCMD_CMDACT)) {
363                         cmd_status = DWC3_DEPCMD_STATUS(reg);
364
365                         switch (cmd_status) {
366                         case 0:
367                                 ret = 0;
368                                 break;
369                         case DEPEVT_TRANSFER_NO_RESOURCE:
370                                 dev_WARN(dwc->dev, "No resource for %s\n",
371                                          dep->name);
372                                 ret = -EINVAL;
373                                 break;
374                         case DEPEVT_TRANSFER_BUS_EXPIRY:
375                                 /*
376                                  * SW issues START TRANSFER command to
377                                  * isochronous ep with future frame interval. If
378                                  * future interval time has already passed when
379                                  * core receives the command, it will respond
380                                  * with an error status of 'Bus Expiry'.
381                                  *
382                                  * Instead of always returning -EINVAL, let's
383                                  * give a hint to the gadget driver that this is
384                                  * the case by returning -EAGAIN.
385                                  */
386                                 ret = -EAGAIN;
387                                 break;
388                         default:
389                                 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
390                         }
391
392                         break;
393                 }
394         } while (--timeout);
395
396         if (timeout == 0) {
397                 ret = -ETIMEDOUT;
398                 cmd_status = -ETIMEDOUT;
399         }
400
401         trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
402
403         if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
404                 if (ret == 0)
405                         dep->flags |= DWC3_EP_TRANSFER_STARTED;
406
407                 if (ret != -ETIMEDOUT)
408                         dwc3_gadget_ep_get_transfer_index(dep);
409         }
410
411         if (saved_config) {
412                 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
413                 reg |= saved_config;
414                 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
415         }
416
417         return ret;
418 }
419
420 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
421 {
422         struct dwc3 *dwc = dep->dwc;
423         struct dwc3_gadget_ep_cmd_params params;
424         u32 cmd = DWC3_DEPCMD_CLEARSTALL;
425
426         /*
427          * As of core revision 2.60a the recommended programming model
428          * is to set the ClearPendIN bit when issuing a Clear Stall EP
429          * command for IN endpoints. This is to prevent an issue where
430          * some (non-compliant) hosts may not send ACK TPs for pending
431          * IN transfers due to a mishandled error condition. Synopsys
432          * STAR 9000614252.
433          */
434         if (dep->direction &&
435             !DWC3_VER_IS_PRIOR(DWC3, 260A) &&
436             (dwc->gadget->speed >= USB_SPEED_SUPER))
437                 cmd |= DWC3_DEPCMD_CLEARPENDIN;
438
439         memset(&params, 0, sizeof(params));
440
441         return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
442 }
443
444 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
445                 struct dwc3_trb *trb)
446 {
447         u32             offset = (char *) trb - (char *) dep->trb_pool;
448
449         return dep->trb_pool_dma + offset;
450 }
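/*
 * Example of the arithmetic above (assuming sizeof(struct dwc3_trb) is 16
 * bytes, i.e. four 32-bit fields): the TRB at index 5 of the pool maps to
 * dep->trb_pool_dma + 5 * 16 = dep->trb_pool_dma + 80.
 */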
451
452 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
453 {
454         struct dwc3             *dwc = dep->dwc;
455
456         if (dep->trb_pool)
457                 return 0;
458
459         dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
460                         sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
461                         &dep->trb_pool_dma, GFP_KERNEL);
462         if (!dep->trb_pool) {
463                 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
464                                 dep->name);
465                 return -ENOMEM;
466         }
467
468         return 0;
469 }
470
471 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
472 {
473         struct dwc3             *dwc = dep->dwc;
474
475         dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
476                         dep->trb_pool, dep->trb_pool_dma);
477
478         dep->trb_pool = NULL;
479         dep->trb_pool_dma = 0;
480 }
481
482 static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
483 {
484         struct dwc3_gadget_ep_cmd_params params;
485
486         memset(&params, 0x00, sizeof(params));
487
488         params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
489
490         return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
491                         &params);
492 }
493
494 /**
495  * dwc3_gadget_start_config - configure ep resources
496  * @dep: endpoint that is being enabled
497  *
498  * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
499  * completion, it will set Transfer Resource for all available endpoints.
500  *
501  * The assignment of transfer resources cannot perfectly follow the data book
502  * because the controller driver does not know the full configuration in
503  * advance. It is given this information piecemeal by the
504  * composite gadget framework after every SET_CONFIGURATION and
505  * SET_INTERFACE. Trying to follow the databook programming model in this
506  * scenario can cause errors, for two reasons:
507  *
508  * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
509  * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
510  * incorrect in the scenario of multiple interfaces.
511  *
512  * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
513  * endpoint on alt setting (8.1.6).
514  *
515  * The following simplified method is used instead:
516  *
517  * All hardware endpoints can be assigned a transfer resource and this setting
518  * will stay persistent until either a core reset or hibernation. So whenever we
519  * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
520  * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
521  * guaranteed that there are as many transfer resources as endpoints.
522  *
523  * This function is called for each endpoint when it is being enabled but is
524  * triggered only when called for EP0-out, which always happens first, and which
525  * should only happen in one of the above conditions.
526  */
527 static int dwc3_gadget_start_config(struct dwc3_ep *dep)
528 {
529         struct dwc3_gadget_ep_cmd_params params;
530         struct dwc3             *dwc;
531         u32                     cmd;
532         int                     i;
533         int                     ret;
534
535         if (dep->number)
536                 return 0;
537
538         memset(&params, 0x00, sizeof(params));
539         cmd = DWC3_DEPCMD_DEPSTARTCFG;
540         dwc = dep->dwc;
541
542         ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
543         if (ret)
544                 return ret;
545
546         for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
547                 struct dwc3_ep *dep = dwc->eps[i];
548
549                 if (!dep)
550                         continue;
551
552                 ret = dwc3_gadget_set_xfer_resource(dep);
553                 if (ret)
554                         return ret;
555         }
556
557         return 0;
558 }
559
560 static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
561 {
562         const struct usb_ss_ep_comp_descriptor *comp_desc;
563         const struct usb_endpoint_descriptor *desc;
564         struct dwc3_gadget_ep_cmd_params params;
565         struct dwc3 *dwc = dep->dwc;
566
567         comp_desc = dep->endpoint.comp_desc;
568         desc = dep->endpoint.desc;
569
570         memset(&params, 0x00, sizeof(params));
571
572         params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
573                 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
574
575         /* Burst size is only needed in SuperSpeed mode */
576         if (dwc->gadget->speed >= USB_SPEED_SUPER) {
577                 u32 burst = dep->endpoint.maxburst;
578
579                 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
580         }
581
582         params.param0 |= action;
583         if (action == DWC3_DEPCFG_ACTION_RESTORE)
584                 params.param2 |= dep->saved_state;
585
586         if (usb_endpoint_xfer_control(desc))
587                 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
588
589         if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
590                 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
591
592         if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
593                 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
594                         | DWC3_DEPCFG_XFER_COMPLETE_EN
595                         | DWC3_DEPCFG_STREAM_EVENT_EN;
596                 dep->stream_capable = true;
597         }
598
599         if (!usb_endpoint_xfer_control(desc))
600                 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
601
602         /*
603          * We are doing 1:1 mapping for endpoints, meaning
604  * Physical Endpoint 2 maps to Logical Endpoint 2 and
605          * so on. We consider the direction bit as part of the physical
606          * endpoint number. So USB endpoint 0x81 is 0x03.
607          */
608         params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
609
610         /*
611          * We must use the lower 16 TX FIFOs even though
612          * HW might have more
613          */
614         if (dep->direction)
615                 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
616
617         if (desc->bInterval) {
618                 u8 bInterval_m1;
619
620                 /*
621                  * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
622                  *
623                  * NOTE: The programming guide incorrectly stated bInterval_m1
624                  * must be set to 0 when operating in fullspeed. Internally the
625                  * controller does not have this limitation. See DWC_usb3x
626                  * programming guide section 3.2.2.1.
627                  */
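                /*
                 * Worked example (illustrative): a high-speed interrupt
                 * endpoint with bInterval = 4 gets bInterval_m1 = 3 here
                 * and, below, dep->interval = 1 << (4 - 1) = 8 microframes.
                 */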
628                 bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
629
630                 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
631                     dwc->gadget->speed == USB_SPEED_FULL)
632                         dep->interval = desc->bInterval;
633                 else
634                         dep->interval = 1 << (desc->bInterval - 1);
635
636                 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
637         }
638
639         return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
640 }
641
642 static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
643                 bool interrupt);
644
645 /**
646  * dwc3_gadget_calc_tx_fifo_size - calculates the txfifo size value
647  * @dwc: pointer to the DWC3 context
648  * @mult: multiplier used when calculating the fifo size
649  *
650  * Calculates the size value based on the equation below:
651  *
652  * DWC3 revision 280A and prior:
653  * fifo_size = mult * (max_packet / mdwidth) + 1;
654  *
655  * DWC3 revision 290A and onwards:
656  * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
657  *
658  * The max packet size is set to 1024, as the txfifo requirements mainly apply
659  * to super speed USB use cases.  However, it is safe to overestimate the fifo
660  * allocations for other scenarios, e.g. high speed USB.
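 *
 * As an illustrative example (bus width assumed, not from the databook): on a
 * 290A or later core with a 64-bit master bus (mdwidth = 8 bytes) and mult = 3,
 * the second equation gives fifo_size = 3 * ((1024 + 8) / 8 + 1) + 1 = 391
 * mdwidth-sized words.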
661  */
662 static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
663 {
664         int max_packet = 1024;
665         int fifo_size;
666         int mdwidth;
667
668         mdwidth = dwc3_mdwidth(dwc);
669
670         /* MDWIDTH is represented in bits, we need it in bytes */
671         mdwidth >>= 3;
672
673         if (DWC3_VER_IS_PRIOR(DWC3, 290A))
674                 fifo_size = mult * (max_packet / mdwidth) + 1;
675         else
676                 fifo_size = mult * ((max_packet + mdwidth) / mdwidth) + 1;
677         return fifo_size;
678 }
679
680 /**
681  * dwc3_gadget_clear_tx_fifos - Clears txfifo allocations
682  * @dwc: pointer to the DWC3 context
683  *
684  * Iterates through all the endpoint registers and clears the previous txfifo
685  * allocations.
686  */
687 void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
688 {
689         struct dwc3_ep *dep;
690         int fifo_depth;
691         int size;
692         int num;
693
694         if (!dwc->do_fifo_resize)
695                 return;
696
697         /* Read ep0IN related TXFIFO size */
698         dep = dwc->eps[1];
699         size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
700         if (DWC3_IP_IS(DWC3))
701                 fifo_depth = DWC3_GTXFIFOSIZ_TXFDEP(size);
702         else
703                 fifo_depth = DWC31_GTXFIFOSIZ_TXFDEP(size);
704
705         dwc->last_fifo_depth = fifo_depth;
706         /* Clear existing TXFIFO for all IN eps except ep0 */
707         for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
708              num += 2) {
709                 dep = dwc->eps[num];
710                 /* Don't change TXFRAMNUM on usb31 version */
711                 size = DWC3_IP_IS(DWC3) ? 0 :
712                         dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
713                                    DWC31_GTXFIFOSIZ_TXFRAMNUM;
714
715                 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1), size);
716                 dep->flags &= ~DWC3_EP_TXFIFO_RESIZED;
717         }
718         dwc->num_ep_resized = 0;
719 }
720
721 /*
722  * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
723  * @dep: the endpoint whose TXFIFO is being resized
724  *
725  * This function makes a best-effort FIFO allocation in order
726  * to improve FIFO usage and throughput, while still allowing
727  * us to enable as many endpoints as possible.
728  *
729  * Keep in mind that this operation will be highly dependent
730  * on the configured size for RAM1 (which contains the TxFIFOs),
731  * the number of endpoints enabled in the coreConsultant tool, and
732  * the width of the Master Bus.
733  *
734  * In general, FIFO depths are represented with the following equation:
735  *
736  * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
737  *
738  * In conjunction with dwc3_gadget_check_config(), this resizing logic will
739  * ensure that all endpoints will have enough internal memory for one max
740  * packet per endpoint.
741  */
742 static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
743 {
744         struct dwc3 *dwc = dep->dwc;
745         int fifo_0_start;
746         int ram1_depth;
747         int fifo_size;
748         int min_depth;
749         int num_in_ep;
750         int remaining;
751         int num_fifos = 1;
752         int fifo;
753         int tmp;
754
755         if (!dwc->do_fifo_resize)
756                 return 0;
757
758         /* resize IN endpoints except ep0 */
759         if (!usb_endpoint_dir_in(dep->endpoint.desc) || dep->number <= 1)
760                 return 0;
761
762         /* bail if already resized */
763         if (dep->flags & DWC3_EP_TXFIFO_RESIZED)
764                 return 0;
765
766         ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
767
768         if ((dep->endpoint.maxburst > 1 &&
769              usb_endpoint_xfer_bulk(dep->endpoint.desc)) ||
770             usb_endpoint_xfer_isoc(dep->endpoint.desc))
771                 num_fifos = 3;
772
773         if (dep->endpoint.maxburst > 6 &&
774             usb_endpoint_xfer_bulk(dep->endpoint.desc) && DWC3_IP_IS(DWC31))
775                 num_fifos = dwc->tx_fifo_resize_max_num;
776
777         /* FIFO size for a single buffer */
778         fifo = dwc3_gadget_calc_tx_fifo_size(dwc, 1);
779
780         /* Calculate the number of remaining EPs w/o any FIFO */
781         num_in_ep = dwc->max_cfg_eps;
782         num_in_ep -= dwc->num_ep_resized;
783
784         /* Reserve at least one FIFO for the number of IN EPs */
785         min_depth = num_in_ep * (fifo + 1);
786         remaining = ram1_depth - min_depth - dwc->last_fifo_depth;
787         remaining = max_t(int, 0, remaining);
788         /*
789          * We've already reserved 1 FIFO per EP, so check what we can fit in
790          * addition to it.  If there is not enough remaining space, allocate
791          * all the remaining space to the EP.
792          */
793         fifo_size = (num_fifos - 1) * fifo;
794         if (remaining < fifo_size)
795                 fifo_size = remaining;
796
797         fifo_size += fifo;
798         /* Last increment according to the TX FIFO size equation */
799         fifo_size++;
800
801         /* Check if TXFIFOs start at non-zero addr */
802         tmp = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
803         fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(tmp);
804
805         fifo_size |= (fifo_0_start + (dwc->last_fifo_depth << 16));
806         if (DWC3_IP_IS(DWC3))
807                 dwc->last_fifo_depth += DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
808         else
809                 dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
810
811         /* Check fifo size allocation doesn't exceed available RAM size. */
812         if (dwc->last_fifo_depth >= ram1_depth) {
813                 dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
814                         dwc->last_fifo_depth, ram1_depth,
815                         dep->endpoint.name, fifo_size);
816                 if (DWC3_IP_IS(DWC3))
817                         fifo_size = DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
818                 else
819                         fifo_size = DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
820
821                 dwc->last_fifo_depth -= fifo_size;
822                 return -ENOMEM;
823         }
824
825         dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1), fifo_size);
826         dep->flags |= DWC3_EP_TXFIFO_RESIZED;
827         dwc->num_ep_resized++;
828
829         return 0;
830 }
831
832 /**
833  * __dwc3_gadget_ep_enable - initializes a hw endpoint
834  * @dep: endpoint to be initialized
835  * @action: one of INIT, MODIFY or RESTORE
836  *
837  * Caller should take care of locking. Execute all necessary commands to
838  * initialize a HW endpoint so it can be used by a gadget driver.
839  */
840 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
841 {
842         const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
843         struct dwc3             *dwc = dep->dwc;
844
845         u32                     reg;
846         int                     ret;
847
848         if (!(dep->flags & DWC3_EP_ENABLED)) {
849                 ret = dwc3_gadget_resize_tx_fifos(dep);
850                 if (ret)
851                         return ret;
852
853                 ret = dwc3_gadget_start_config(dep);
854                 if (ret)
855                         return ret;
856         }
857
858         ret = dwc3_gadget_set_ep_config(dep, action);
859         if (ret)
860                 return ret;
861
862         if (!(dep->flags & DWC3_EP_ENABLED)) {
863                 struct dwc3_trb *trb_st_hw;
864                 struct dwc3_trb *trb_link;
865
866                 dep->type = usb_endpoint_type(desc);
867                 dep->flags |= DWC3_EP_ENABLED;
868
869                 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
870                 reg |= DWC3_DALEPENA_EP(dep->number);
871                 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
872
873                 if (usb_endpoint_xfer_control(desc))
874                         goto out;
875
876                 /* Initialize the TRB ring */
877                 dep->trb_dequeue = 0;
878                 dep->trb_enqueue = 0;
879                 memset(dep->trb_pool, 0,
880                        sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
881
882                 /* Link TRB. The HWO bit is never reset */
883                 trb_st_hw = &dep->trb_pool[0];
884
885                 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
886                 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
887                 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
888                 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
889                 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
890         }
891
892         /*
893          * Issue StartTransfer here with no-op TRB so we can always rely on No
894          * Response Update Transfer command.
895          */
896         if (usb_endpoint_xfer_bulk(desc) ||
897                         usb_endpoint_xfer_int(desc)) {
898                 struct dwc3_gadget_ep_cmd_params params;
899                 struct dwc3_trb *trb;
900                 dma_addr_t trb_dma;
901                 u32 cmd;
902
903                 memset(&params, 0, sizeof(params));
904                 trb = &dep->trb_pool[0];
905                 trb_dma = dwc3_trb_dma_offset(dep, trb);
906
907                 params.param0 = upper_32_bits(trb_dma);
908                 params.param1 = lower_32_bits(trb_dma);
909
910                 cmd = DWC3_DEPCMD_STARTTRANSFER;
911
912                 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
913                 if (ret < 0)
914                         return ret;
915
916                 if (dep->stream_capable) {
917                         /*
918                          * For streams, at start, there may be a race where the
919                          * host primes the endpoint before the function driver
920                          * queues a request to initiate a stream. In that case,
921                          * the controller will not see the prime to generate the
922                          * ERDY and start the stream. To work around this, issue a
923                          * no-op TRB as normal, but end it immediately. As a
924                          * result, when the function driver queues the request,
925                          * the next START_TRANSFER command will cause the
926                          * controller to generate an ERDY to initiate the
927                          * stream.
928                          */
929                         dwc3_stop_active_transfer(dep, true, true);
930
931                         /*
932                          * All stream eps will reinitiate stream on NoStream
933                          * rejection until we can determine that the host can
934                          * prime after the first transfer.
935                          *
936                          * However, if the controller is capable of
937                          * TXF_FLUSH_BYPASS, then IN direction endpoints will
938                          * automatically restart the stream without the driver
939                          * initiation.
940                          */
941                         if (!dep->direction ||
942                             !(dwc->hwparams.hwparams9 &
943                               DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS))
944                                 dep->flags |= DWC3_EP_FORCE_RESTART_STREAM;
945                 }
946         }
947
948 out:
949         trace_dwc3_gadget_ep_enable(dep);
950
951         return 0;
952 }
953
954 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
955 {
956         struct dwc3_request             *req;
957
958         dwc3_stop_active_transfer(dep, true, false);
959
960         /* give back all requests to the gadget driver */
961         while (!list_empty(&dep->started_list)) {
962                 req = next_request(&dep->started_list);
963
964                 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
965         }
966
967         while (!list_empty(&dep->pending_list)) {
968                 req = next_request(&dep->pending_list);
969
970                 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
971         }
972
973         while (!list_empty(&dep->cancelled_list)) {
974                 req = next_request(&dep->cancelled_list);
975
976                 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
977         }
978 }
979
980 /**
981  * __dwc3_gadget_ep_disable - disables a hw endpoint
982  * @dep: the endpoint to disable
983  *
984  * This function undoes what __dwc3_gadget_ep_enable did and also removes
985  * requests which are currently being processed by the hardware and those which
986  * are not yet scheduled.
987  *
988  * Caller should take care of locking.
989  */
990 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
991 {
992         struct dwc3             *dwc = dep->dwc;
993         u32                     reg;
994
995         trace_dwc3_gadget_ep_disable(dep);
996
997         /* make sure HW endpoint isn't stalled */
998         if (dep->flags & DWC3_EP_STALL)
999                 __dwc3_gadget_ep_set_halt(dep, 0, false);
1000
1001         reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1002         reg &= ~DWC3_DALEPENA_EP(dep->number);
1003         dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1004
1005         /* Clear out the ep descriptors for non-ep0 */
1006         if (dep->number > 1) {
1007                 dep->endpoint.comp_desc = NULL;
1008                 dep->endpoint.desc = NULL;
1009         }
1010
1011         dwc3_remove_requests(dwc, dep);
1012
1013         dep->stream_capable = false;
1014         dep->type = 0;
1015         dep->flags &= DWC3_EP_TXFIFO_RESIZED;
1016
1017         return 0;
1018 }
1019
1020 /* -------------------------------------------------------------------------- */
1021
1022 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
1023                 const struct usb_endpoint_descriptor *desc)
1024 {
1025         return -EINVAL;
1026 }
1027
1028 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
1029 {
1030         return -EINVAL;
1031 }
1032
1033 /* -------------------------------------------------------------------------- */
1034
1035 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
1036                 const struct usb_endpoint_descriptor *desc)
1037 {
1038         struct dwc3_ep                  *dep;
1039         struct dwc3                     *dwc;
1040         unsigned long                   flags;
1041         int                             ret;
1042
1043         if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
1044                 pr_debug("dwc3: invalid parameters\n");
1045                 return -EINVAL;
1046         }
1047
1048         if (!desc->wMaxPacketSize) {
1049                 pr_debug("dwc3: missing wMaxPacketSize\n");
1050                 return -EINVAL;
1051         }
1052
1053         dep = to_dwc3_ep(ep);
1054         dwc = dep->dwc;
1055
1056         if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
1057                                         "%s is already enabled\n",
1058                                         dep->name))
1059                 return 0;
1060
1061         spin_lock_irqsave(&dwc->lock, flags);
1062         ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
1063         spin_unlock_irqrestore(&dwc->lock, flags);
1064
1065         return ret;
1066 }
1067
1068 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
1069 {
1070         struct dwc3_ep                  *dep;
1071         struct dwc3                     *dwc;
1072         unsigned long                   flags;
1073         int                             ret;
1074
1075         if (!ep) {
1076                 pr_debug("dwc3: invalid parameters\n");
1077                 return -EINVAL;
1078         }
1079
1080         dep = to_dwc3_ep(ep);
1081         dwc = dep->dwc;
1082
1083         if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
1084                                         "%s is already disabled\n",
1085                                         dep->name))
1086                 return 0;
1087
1088         spin_lock_irqsave(&dwc->lock, flags);
1089         ret = __dwc3_gadget_ep_disable(dep);
1090         spin_unlock_irqrestore(&dwc->lock, flags);
1091
1092         return ret;
1093 }
1094
1095 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
1096                 gfp_t gfp_flags)
1097 {
1098         struct dwc3_request             *req;
1099         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
1100
1101         req = kzalloc(sizeof(*req), gfp_flags);
1102         if (!req)
1103                 return NULL;
1104
1105         req->direction  = dep->direction;
1106         req->epnum      = dep->number;
1107         req->dep        = dep;
1108         req->status     = DWC3_REQUEST_STATUS_UNKNOWN;
1109
1110         trace_dwc3_alloc_request(req);
1111
1112         return &req->request;
1113 }
1114
1115 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
1116                 struct usb_request *request)
1117 {
1118         struct dwc3_request             *req = to_dwc3_request(request);
1119
1120         trace_dwc3_free_request(req);
1121         kfree(req);
1122 }
1123
1124 /**
1125  * dwc3_ep_prev_trb - returns the previous TRB in the ring
1126  * @dep: The endpoint with the TRB ring
1127  * @index: The index of the current TRB in the ring
1128  *
1129  * Returns the TRB prior to the one pointed to by the index. If the
1130  * index is 0, we will wrap backwards, skip the link TRB, and return
1131  * the one just before that.
1132  */
1133 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
1134 {
1135         u8 tmp = index;
1136
1137         if (!tmp)
1138                 tmp = DWC3_TRB_NUM - 1;
1139
1140         return &dep->trb_pool[tmp - 1];
1141 }
1142
1143 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
1144 {
1145         u8                      trbs_left;
1146
1147         /*
1148          * If the enqueue & dequeue are equal then the TRB ring is either full
1149          * or empty. It's considered full when there are DWC3_TRB_NUM-1 TRBs
1150          * pending to be processed by the driver.
1151          */
1152         if (dep->trb_enqueue == dep->trb_dequeue) {
1153                 /*
1154                  * If any request remains in the started_list at this
1155                  * point, it means there is no TRB available.
1156                  */
1157                 if (!list_empty(&dep->started_list))
1158                         return 0;
1159
1160                 return DWC3_TRB_NUM - 1;
1161         }
1162
1163         trbs_left = dep->trb_dequeue - dep->trb_enqueue;
1164         trbs_left &= (DWC3_TRB_NUM - 1);
1165
1166         if (dep->trb_dequeue < dep->trb_enqueue)
1167                 trbs_left--;
1168
1169         return trbs_left;
1170 }
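/*
 * Worked example of the ring arithmetic above (assuming DWC3_TRB_NUM == 256,
 * i.e. 255 usable TRBs plus one link TRB): with trb_dequeue = 3 and
 * trb_enqueue = 10 there are 7 TRBs in flight, so (3 - 10) & 255 = 249 and,
 * because dequeue < enqueue, one more slot is subtracted, leaving 248 free
 * TRBs.
 */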
1171
1172 static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
1173                 dma_addr_t dma, unsigned int length, unsigned int chain,
1174                 unsigned int node, unsigned int stream_id,
1175                 unsigned int short_not_ok, unsigned int no_interrupt,
1176                 unsigned int is_last, bool must_interrupt)
1177 {
1178         struct dwc3             *dwc = dep->dwc;
1179         struct usb_gadget       *gadget = dwc->gadget;
1180         enum usb_device_speed   speed = gadget->speed;
1181
1182         trb->size = DWC3_TRB_SIZE_LENGTH(length);
1183         trb->bpl = lower_32_bits(dma);
1184         trb->bph = upper_32_bits(dma);
1185
1186         switch (usb_endpoint_type(dep->endpoint.desc)) {
1187         case USB_ENDPOINT_XFER_CONTROL:
1188                 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
1189                 break;
1190
1191         case USB_ENDPOINT_XFER_ISOC:
1192                 if (!node) {
1193                         trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
1194
1195                         /*
1196                          * USB Specification 2.0 Section 5.9.2 states that: "If
1197                          * there is only a single transaction in the microframe,
1198                          * only a DATA0 data packet PID is used.  If there are
1199                          * two transactions per microframe, DATA1 is used for
1200                          * the first transaction data packet and DATA0 is used
1201                          * for the second transaction data packet.  If there are
1202                          * three transactions per microframe, DATA2 is used for
1203                          * the first transaction data packet, DATA1 is used for
1204                          * the second, and DATA0 is used for the third."
1205                          *
1206                          * IOW, we should satisfy the following cases:
1207                          *
1208                          * 1) length <= maxpacket
1209                          *      - DATA0
1210                          *
1211                          * 2) maxpacket < length <= (2 * maxpacket)
1212                          *      - DATA1, DATA0
1213                          *
1214                          * 3) (2 * maxpacket) < length <= (3 * maxpacket)
1215                          *      - DATA2, DATA1, DATA0
1216                          */
1217                         if (speed == USB_SPEED_HIGH) {
1218                                 struct usb_ep *ep = &dep->endpoint;
1219                                 unsigned int mult = 2;
1220                                 unsigned int maxp = usb_endpoint_maxp(ep->desc);
1221
1222                                 if (length <= (2 * maxp))
1223                                         mult--;
1224
1225                                 if (length <= maxp)
1226                                         mult--;
1227
1228                                 trb->size |= DWC3_TRB_SIZE_PCM1(mult);
1229                         }
1230                 } else {
1231                         trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
1232                 }
1233
1234                 /* always enable Interrupt on Missed ISOC */
1235                 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
1236                 break;
1237
1238         case USB_ENDPOINT_XFER_BULK:
1239         case USB_ENDPOINT_XFER_INT:
1240                 trb->ctrl = DWC3_TRBCTL_NORMAL;
1241                 break;
1242         default:
1243                 /*
1244                  * This is only possible with faulty memory because we
1245                  * checked it already :)
1246                  */
1247                 dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
1248                                 usb_endpoint_type(dep->endpoint.desc));
1249         }
1250
1251         /*
1252          * Enable Continue on Short Packet
1253          * when the endpoint is not stream capable
1254          */
1255         if (usb_endpoint_dir_out(dep->endpoint.desc)) {
1256                 if (!dep->stream_capable)
1257                         trb->ctrl |= DWC3_TRB_CTRL_CSP;
1258
1259                 if (short_not_ok)
1260                         trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
1261         }
1262
1263         if ((!no_interrupt && !chain) || must_interrupt)
1264                 trb->ctrl |= DWC3_TRB_CTRL_IOC;
1265
1266         if (chain)
1267                 trb->ctrl |= DWC3_TRB_CTRL_CHN;
1268         else if (dep->stream_capable && is_last)
1269                 trb->ctrl |= DWC3_TRB_CTRL_LST;
1270
1271         if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
1272                 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
1273
1274         /*
1275          * As per the data book, section 4.2.3.2 (TRB Control Bit Rules):
1276          *
1277          * The controller autonomously checks the HWO field of a TRB to determine if the
1278          * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
1279          * is valid before setting the HWO field to '1'. In most systems, this means that
1280          * software must update the fourth DWORD of a TRB last.
1281          *
1282          * However there is a possibility of CPU re-ordering here which can cause
1283          * the controller to observe the HWO bit set prematurely.
1284          * Add a write memory barrier to prevent CPU re-ordering.
1285          */
1286         wmb();
1287         trb->ctrl |= DWC3_TRB_CTRL_HWO;
1288
1289         dwc3_ep_inc_enq(dep);
1290
1291         trace_dwc3_prepare_trb(dep, trb);
1292 }
1293
1294 /**
1295  * dwc3_prepare_one_trb - setup one TRB from one request
1296  * @dep: endpoint for which this request is prepared
1297  * @req: dwc3_request pointer
1298  * @trb_length: buffer size of the TRB
1299  * @chain: should this TRB be chained to the next?
1300  * @node: only for isochronous endpoints. First TRB needs different type.
1301  * @use_bounce_buffer: set to use bounce buffer
1302  * @must_interrupt: set to interrupt on TRB completion
1303  */
1304 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
1305                 struct dwc3_request *req, unsigned int trb_length,
1306                 unsigned int chain, unsigned int node, bool use_bounce_buffer,
1307                 bool must_interrupt)
1308 {
1309         struct dwc3_trb         *trb;
1310         dma_addr_t              dma;
1311         unsigned int            stream_id = req->request.stream_id;
1312         unsigned int            short_not_ok = req->request.short_not_ok;
1313         unsigned int            no_interrupt = req->request.no_interrupt;
1314         unsigned int            is_last = req->request.is_last;
1315
1316         if (use_bounce_buffer)
1317                 dma = dep->dwc->bounce_addr;
1318         else if (req->request.num_sgs > 0)
1319                 dma = sg_dma_address(req->start_sg);
1320         else
1321                 dma = req->request.dma;
1322
1323         trb = &dep->trb_pool[dep->trb_enqueue];
1324
1325         if (!req->trb) {
1326                 dwc3_gadget_move_started_request(req);
1327                 req->trb = trb;
1328                 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
1329         }
1330
1331         req->num_trbs++;
1332
1333         __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
1334                         stream_id, short_not_ok, no_interrupt, is_last,
1335                         must_interrupt);
1336 }
1337
1338 static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req)
1339 {
1340         unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1341         unsigned int rem = req->request.length % maxp;
1342
1343         if ((req->request.length && req->request.zero && !rem &&
1344                         !usb_endpoint_xfer_isoc(dep->endpoint.desc)) ||
1345                         (!req->direction && rem))
1346                 return true;
1347
1348         return false;
1349 }
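/*
 * Illustrative cases for the check above: an IN request of 1024 bytes with
 * request.zero set on a 512-byte maxp endpoint (rem == 0) needs an extra TRB
 * for the terminating ZLP; an OUT request of 1000 bytes on the same endpoint
 * (rem == 488) needs an extra TRB, pointed at the bounce buffer in
 * dwc3_prepare_last_sg(), to pad the TRB size up to a maxp multiple.
 */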
1350
1351 /**
1352  * dwc3_prepare_last_sg - prepare TRBs for the last SG entry
1353  * @dep: The endpoint that the request belongs to
1354  * @req: The request to prepare
1355  * @entry_length: The last SG entry size
1356  * @node: Indicates whether this is not the first entry (for isoc only)
1357  *
1358  * Return the number of TRBs prepared.
1359  */
1360 static int dwc3_prepare_last_sg(struct dwc3_ep *dep,
1361                 struct dwc3_request *req, unsigned int entry_length,
1362                 unsigned int node)
1363 {
1364         unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1365         unsigned int rem = req->request.length % maxp;
1366         unsigned int num_trbs = 1;
1367
1368         if (dwc3_needs_extra_trb(dep, req))
1369                 num_trbs++;
1370
1371         if (dwc3_calc_trbs_left(dep) < num_trbs)
1372                 return 0;
1373
1374         req->needs_extra_trb = num_trbs > 1;
1375
1376         /* Prepare a normal TRB */
1377         if (req->direction || req->request.length)
1378                 dwc3_prepare_one_trb(dep, req, entry_length,
1379                                 req->needs_extra_trb, node, false, false);
1380
1381         /* Prepare extra TRBs for ZLP and MPS OUT transfer alignment */
1382         if ((!req->direction && !req->request.length) || req->needs_extra_trb)
1383                 dwc3_prepare_one_trb(dep, req,
1384                                 req->direction ? 0 : maxp - rem,
1385                                 false, 1, true, false);
1386
1387         return num_trbs;
1388 }
1389
1390 static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
1391                 struct dwc3_request *req)
1392 {
1393         struct scatterlist *sg = req->start_sg;
1394         struct scatterlist *s;
1395         int             i;
1396         unsigned int length = req->request.length;
1397         unsigned int remaining = req->request.num_mapped_sgs
1398                 - req->num_queued_sgs;
1399         unsigned int num_trbs = req->num_trbs;
1400         bool needs_extra_trb = dwc3_needs_extra_trb(dep, req);
1401
1402         /*
1403          * If we resume preparing the request, then get the remaining length of
1404          * the request and resume where we left off.
1405          */
1406         for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
1407                 length -= sg_dma_len(s);
1408
1409         for_each_sg(sg, s, remaining, i) {
1410                 unsigned int num_trbs_left = dwc3_calc_trbs_left(dep);
1411                 unsigned int trb_length;
1412                 bool must_interrupt = false;
1413                 bool last_sg = false;
1414
1415                 trb_length = min_t(unsigned int, length, sg_dma_len(s));
1416
1417                 length -= trb_length;
1418
1419                 /*
1420                  * The IOMMU driver may coalesce SG entries that share a
1421                  * page boundary into one before handing the list to the USB
1422                  * driver, so the number of mapped sgs may not equal the
1423                  * number of sgs passed in. Mark the chain bit false if this
1424                  * is the last mapped sg.
1425                  */
1426                 if ((i == remaining - 1) || !length)
1427                         last_sg = true;
1428
1429                 if (!num_trbs_left)
1430                         break;
1431
1432                 if (last_sg) {
1433                         if (!dwc3_prepare_last_sg(dep, req, trb_length, i))
1434                                 break;
1435                 } else {
1436                         /*
1437                          * Look ahead to check if we have enough TRBs for the
1438                          * next SG entry. If not, set interrupt on this TRB to
1439                          * resume preparing the next SG entry when more TRBs are
1440                          * free.
1441                          */
1442                         if (num_trbs_left == 1 || (needs_extra_trb &&
1443                                         num_trbs_left <= 2 &&
1444                                         sg_dma_len(sg_next(s)) >= length))
1445                                 must_interrupt = true;
1446
1447                         dwc3_prepare_one_trb(dep, req, trb_length, 1, i, false,
1448                                         must_interrupt);
1449                 }
1450
1451                 /*
1452                  * There can be a situation where not all sgs in the sglist
1453                  * are queued because of an insufficient number of TRBs. To
1454                  * handle this case, update start_sg to the next sg to be
1455                  * queued, so that once TRBs are freed we can continue
1456                  * queuing from where we previously stopped.
1457                  */
1458                 if (!last_sg)
1459                         req->start_sg = sg_next(s);
1460
1461                 req->num_queued_sgs++;
1462                 req->num_pending_sgs--;
1463
1464                 /*
1465                  * The number of pending SG entries may not correspond to the
1466                  * number of mapped SG entries. If all the data has been queued,
1467                  * don't count the unused SG entries.
1468                  */
1469                 if (length == 0) {
1470                         req->num_pending_sgs = 0;
1471                         break;
1472                 }
1473
1474                 if (must_interrupt)
1475                         break;
1476         }
1477
1478         return req->num_trbs - num_trbs;
1479 }
1480
1481 static int dwc3_prepare_trbs_linear(struct dwc3_ep *dep,
1482                 struct dwc3_request *req)
1483 {
1484         return dwc3_prepare_last_sg(dep, req, req->request.length, 0);
1485 }
1486
1487 /*
1488  * dwc3_prepare_trbs - setup TRBs from requests
1489  * @dep: endpoint for which requests are being prepared
1490  *
1491  * The function goes through the requests list and sets up TRBs for the
1492  * transfers. It returns once there are no more TRBs available or
1493  * it runs out of requests.
1494  *
1495  * Returns the number of TRBs prepared or negative errno.
1496  */
1497 static int dwc3_prepare_trbs(struct dwc3_ep *dep)
1498 {
1499         struct dwc3_request     *req, *n;
1500         int                     ret = 0;
1501
1502         BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
1503
1504         /*
1505          * We can get in a situation where there's a request in the started list
1506          * but there weren't enough TRBs to fully kick it in the first time
1507          * around, so it has been waiting for more TRBs to be freed up.
1508          *
1509          * In that case, we should check if we have a request with pending_sgs
1510          * in the started list and prepare TRBs for that request first,
1511          * otherwise we will prepare TRBs completely out of order and that will
1512          * break things.
1513          */
1514         list_for_each_entry(req, &dep->started_list, list) {
1515                 if (req->num_pending_sgs > 0) {
1516                         ret = dwc3_prepare_trbs_sg(dep, req);
1517                         if (!ret || req->num_pending_sgs)
1518                                 return ret;
1519                 }
1520
1521                 if (!dwc3_calc_trbs_left(dep))
1522                         return ret;
1523
1524                 /*
1525                  * Don't prepare beyond a transfer. In DWC_usb32, its transfer
1526                  * burst capability may try to read and use TRBs beyond the
1527                  * active transfer instead of stopping.
1528                  */
1529                 if (dep->stream_capable && req->request.is_last)
1530                         return ret;
1531         }
1532
1533         list_for_each_entry_safe(req, n, &dep->pending_list, list) {
1534                 struct dwc3     *dwc = dep->dwc;
1535
1536                 ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
1537                                                     dep->direction);
1538                 if (ret)
1539                         return ret;
1540
1541                 req->sg                 = req->request.sg;
1542                 req->start_sg           = req->sg;
1543                 req->num_queued_sgs     = 0;
1544                 req->num_pending_sgs    = req->request.num_mapped_sgs;
1545
1546                 if (req->num_pending_sgs > 0) {
1547                         ret = dwc3_prepare_trbs_sg(dep, req);
1548                         if (req->num_pending_sgs)
1549                                 return ret;
1550                 } else {
1551                         ret = dwc3_prepare_trbs_linear(dep, req);
1552                 }
1553
1554                 if (!ret || !dwc3_calc_trbs_left(dep))
1555                         return ret;
1556
1557                 /*
1558                  * Don't prepare beyond a transfer. In DWC_usb32, its transfer
1559                  * burst capability may try to read and use TRBs beyond the
1560                  * active transfer instead of stopping.
1561                  */
1562                 if (dep->stream_capable && req->request.is_last)
1563                         return ret;
1564         }
1565
1566         return ret;
1567 }
1568
1569 static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
1570
1571 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
1572 {
1573         struct dwc3_gadget_ep_cmd_params params;
1574         struct dwc3_request             *req;
1575         int                             starting;
1576         int                             ret;
1577         u32                             cmd;
1578
1579         /*
1580          * Note that it's normal to have no new TRBs prepared (i.e. ret == 0).
1581          * This happens when we need to stop and restart a transfer such as in
1582          * the case of reinitiating a stream or retrying an isoc transfer.
1583          */
1584         ret = dwc3_prepare_trbs(dep);
1585         if (ret < 0)
1586                 return ret;
1587
1588         starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);
1589
1590         /*
1591          * If there's no new TRB prepared and we don't need to restart a
1592          * transfer, there's no need to update the transfer.
1593          */
1594         if (!ret && !starting)
1595                 return ret;
1596
1597         req = next_request(&dep->started_list);
1598         if (!req) {
1599                 dep->flags |= DWC3_EP_PENDING_REQUEST;
1600                 return 0;
1601         }
1602
1603         memset(&params, 0, sizeof(params));
1604
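        /*
         * For a transfer that hasn't started yet, issue START TRANSFER with
         * the address of the first TRB; for one already in progress, issue
         * UPDATE TRANSFER against its resource index instead.
         */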
1605         if (starting) {
1606                 params.param0 = upper_32_bits(req->trb_dma);
1607                 params.param1 = lower_32_bits(req->trb_dma);
1608                 cmd = DWC3_DEPCMD_STARTTRANSFER;
1609
1610                 if (dep->stream_capable)
1611                         cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);
1612
1613                 if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
1614                         cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
1615         } else {
1616                 cmd = DWC3_DEPCMD_UPDATETRANSFER |
1617                         DWC3_DEPCMD_PARAM(dep->resource_index);
1618         }
1619
1620         ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1621         if (ret < 0) {
1622                 struct dwc3_request *tmp;
1623
1624                 if (ret == -EAGAIN)
1625                         return ret;
1626
1627                 dwc3_stop_active_transfer(dep, true, true);
1628
1629                 list_for_each_entry_safe(req, tmp, &dep->started_list, list)
1630                         dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_DEQUEUED);
1631
1632                 /* If ep isn't started, then there's no end transfer pending */
1633                 if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
1634                         dwc3_gadget_ep_cleanup_cancelled_requests(dep);
1635
1636                 return ret;
1637         }
1638
1639         if (dep->stream_capable && req->request.is_last)
1640                 dep->flags |= DWC3_EP_WAIT_TRANSFER_COMPLETE;
1641
1642         return 0;
1643 }
1644
1645 static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
1646 {
1647         u32                     reg;
1648
1649         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1650         return DWC3_DSTS_SOFFN(reg);
1651 }
1652
1653 /**
1654  * dwc3_gadget_start_isoc_quirk - workaround invalid frame number
1655  * @dep: isoc endpoint
1656  *
1657  * This function tests for the correct combination of BIT[15:14] from the 16-bit
1658  * microframe number reported by the XferNotReady event for the future frame
1659  * number to start the isoc transfer.
1660  *
1661  * In DWC_usb31 version 1.70a-ea06 and prior, for high-speed and full-speed
1662  * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the
1663  * XferNotReady event are invalid. The driver uses this number to schedule the
1664  * isochronous transfer and passes it to the START TRANSFER command. Because
1665  * this number is invalid, the command may fail. If BIT[15:14] matches the
1666  * internal 16-bit microframe, the START TRANSFER command will pass and the
1667  * transfer will start at the scheduled time. If it is off by 1, the command
1668  * will still pass, but the transfer will start 2 seconds in the future. For all
1669  * other conditions, the START TRANSFER command will fail with bus-expiry.
1670  *
1671  * To work around this issue, we can test for the correct combination of
1672  * BIT[15:14] by sending START TRANSFER commands with different values of
1673  * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframes
1674  * (or 2 seconds) apart. Scheduling 4 seconds into the future results in a
1675  * bus-expiry status. As a result, within the 4 possible combinations for
1676  * BIT[15:14], there will be 2 successful and 2 failing START TRANSFER command
1677  * statuses. One of the 2 successful command statuses will result in a 2-second
1678  * delayed start. The smaller BIT[15:14] value is the correct combination.
1679  *
1680  * Since there are only 4 outcomes and the results are ordered, we can simply
1681  * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to
1682  * deduce the smaller successful combination.
1683  *
1684  * Let test0 = test status for combination 'b00 and test1 = test status for 'b01
1685  * of BIT[15:14]. The correct combination is as follows:
1686  *
1687  * if test0 fails and test1 passes, BIT[15:14] is 'b01
1688  * if test0 fails and test1 fails, BIT[15:14] is 'b10
1689  * if test0 passes and test1 fails, BIT[15:14] is 'b11
1690  * if test0 passes and test1 passes, BIT[15:14] is 'b00
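 *
 * As an illustrative example: if the command sent with 'b00 fails with
 * bus-expiry and the one sent with 'b01 succeeds, the internal BIT[15:14]
 * must be 'b01.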
1691  *
1692  * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN
1693  * endpoints.
1694  */
1695 static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep)
1696 {
1697         int cmd_status = 0;
1698         bool test0;
1699         bool test1;
1700
1701         while (dep->combo_num < 2) {
1702                 struct dwc3_gadget_ep_cmd_params params;
1703                 u32 test_frame_number;
1704                 u32 cmd;
1705
1706                 /*
1707                  * Check if we can start isoc transfer on the next interval or
1708                  * 4 uframes in the future with BIT[15:14] as dep->combo_num
1709                  */
1710                 test_frame_number = dep->frame_number & DWC3_FRNUMBER_MASK;
1711                 test_frame_number |= dep->combo_num << 14;
1712                 test_frame_number += max_t(u32, 4, dep->interval);
1713
1714                 params.param0 = upper_32_bits(dep->dwc->bounce_addr);
1715                 params.param1 = lower_32_bits(dep->dwc->bounce_addr);
1716
1717                 cmd = DWC3_DEPCMD_STARTTRANSFER;
1718                 cmd |= DWC3_DEPCMD_PARAM(test_frame_number);
1719                 cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1720
1721                 /* Redo if some failure other than bus-expiry is received */
1722                 if (cmd_status && cmd_status != -EAGAIN) {
1723                         dep->start_cmd_status = 0;
1724                         dep->combo_num = 0;
1725                         return 0;
1726                 }
1727
1728                 /* Store the first test status */
1729                 if (dep->combo_num == 0)
1730                         dep->start_cmd_status = cmd_status;
1731
1732                 dep->combo_num++;
1733
1734                 /*
1735                  * If the START_TRANSFER command succeeds, end the transfer
1736                  * and wait for the next XferNotReady to test the command again
1737                  */
1738                 if (cmd_status == 0) {
1739                         dwc3_stop_active_transfer(dep, true, true);
1740                         return 0;
1741                 }
1742         }
1743
1744         /* test0 and test1 are both completed at this point */
1745         test0 = (dep->start_cmd_status == 0);
1746         test1 = (cmd_status == 0);
1747
1748         if (!test0 && test1)
1749                 dep->combo_num = 1;
1750         else if (!test0 && !test1)
1751                 dep->combo_num = 2;
1752         else if (test0 && !test1)
1753                 dep->combo_num = 3;
1754         else if (test0 && test1)
1755                 dep->combo_num = 0;
1756
1757         dep->frame_number &= DWC3_FRNUMBER_MASK;
1758         dep->frame_number |= dep->combo_num << 14;
1759         dep->frame_number += max_t(u32, 4, dep->interval);
1760
1761         /* Reinitialize test variables */
1762         dep->start_cmd_status = 0;
1763         dep->combo_num = 0;
1764
1765         return __dwc3_gadget_kick_transfer(dep);
1766 }
1767
1768 static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
1769 {
1770         const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
1771         struct dwc3 *dwc = dep->dwc;
1772         int ret;
1773         int i;
1774
1775         if (list_empty(&dep->pending_list) &&
1776             list_empty(&dep->started_list)) {
1777                 dep->flags |= DWC3_EP_PENDING_REQUEST;
1778                 return -EAGAIN;
1779         }
1780
1781         if (!dwc->dis_start_transfer_quirk &&
1782             (DWC3_VER_IS_PRIOR(DWC31, 170A) ||
1783              DWC3_VER_TYPE_IS_WITHIN(DWC31, 170A, EA01, EA06))) {
1784                 if (dwc->gadget->speed <= USB_SPEED_HIGH && dep->direction)
1785                         return dwc3_gadget_start_isoc_quirk(dep);
1786         }
1787
1788         if (desc->bInterval <= 14 &&
1789             dwc->gadget->speed >= USB_SPEED_HIGH) {
1790                 u32 frame = __dwc3_gadget_get_frame(dwc);
1791                 bool rollover = frame <
1792                                 (dep->frame_number & DWC3_FRNUMBER_MASK);
1793
1794                 /*
1795                  * frame_number is set from XferNotReady and may already be
1796                  * out of date. DSTS only provides the lower 14 bits of the
1797                  * current frame number. So add the upper two bits of
1798                  * frame_number and handle a possible rollover.
1799                  * This will provide the correct frame_number unless more than
1800                  * one rollover has happened since XferNotReady.
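                 *
                 * Illustrative example: if frame_number's lower 14 bits were
                 * 0x3ffe at XferNotReady and DSTS now reports 0x0001, the
                 * 14-bit counter has wrapped, so BIT(14) is added to keep the
                 * full 16-bit frame number advancing.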
1801                  */
1802
1803                 dep->frame_number = (dep->frame_number & ~DWC3_FRNUMBER_MASK) |
1804                                      frame;
1805                 if (rollover)
1806                         dep->frame_number += BIT(14);
1807         }
1808
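        /*
         * Try the next aligned (micro-)frame and, on each bus-expiry
         * (-EAGAIN) failure, push the start one more interval into the
         * future, up to DWC3_ISOC_MAX_RETRIES attempts.
         */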
1809         for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) {
1810                 dep->frame_number = DWC3_ALIGN_FRAME(dep, i + 1);
1811
1812                 ret = __dwc3_gadget_kick_transfer(dep);
1813                 if (ret != -EAGAIN)
1814                         break;
1815         }
1816
1817         /*
1818          * After a number of unsuccessful start attempts due to bus-expiry
1819          * status, issue an END_TRANSFER command and retry on the next XferNotReady
1820          * event.
1821          */
1822         if (ret == -EAGAIN) {
1823                 struct dwc3_gadget_ep_cmd_params params;
1824                 u32 cmd;
1825
1826                 cmd = DWC3_DEPCMD_ENDTRANSFER |
1827                         DWC3_DEPCMD_CMDIOC |
1828                         DWC3_DEPCMD_PARAM(dep->resource_index);
1829
1830                 dep->resource_index = 0;
1831                 memset(&params, 0, sizeof(params));
1832
1833                 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1834                 if (!ret)
1835                         dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
1836         }
1837
1838         return ret;
1839 }
1840
1841 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1842 {
1843         struct dwc3             *dwc = dep->dwc;
1844
1845         if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
1846                 dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
1847                                 dep->name);
1848                 return -ESHUTDOWN;
1849         }
1850
1851         if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
1852                                 &req->request, req->dep->name))
1853                 return -EINVAL;
1854
1855         if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
1856                                 "%s: request %pK already in flight\n",
1857                                 dep->name, &req->request))
1858                 return -EINVAL;
1859
1860         pm_runtime_get(dwc->dev);
1861
1862         req->request.actual     = 0;
1863         req->request.status     = -EINPROGRESS;
1864
1865         trace_dwc3_ep_queue(req);
1866
1867         list_add_tail(&req->list, &dep->pending_list);
1868         req->status = DWC3_REQUEST_STATUS_QUEUED;
1869
1870         if (dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)
1871                 return 0;
1872
1873         /*
1874          * Start the transfer only after the END_TRANSFER is completed
1875          * and endpoint STALL is cleared.
1876          */
1877         if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
1878             (dep->flags & DWC3_EP_WEDGE) ||
1879             (dep->flags & DWC3_EP_STALL)) {
1880                 dep->flags |= DWC3_EP_DELAY_START;
1881                 return 0;
1882         }
1883
1884         /*
1885          * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
1886          * wait for an XferNotReady event so we know the current
1887          * (micro-)frame number.
1888          *
1889          * Without this trick, we are very likely to get Bus Expiry
1890          * errors which will force us to issue an EndTransfer command.
1891          */
1892         if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1893                 if (!(dep->flags & DWC3_EP_PENDING_REQUEST) &&
1894                                 !(dep->flags & DWC3_EP_TRANSFER_STARTED))
1895                         return 0;
1896
1897                 if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
1898                         if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
1899                                 return __dwc3_gadget_start_isoc(dep);
1900                 }
1901         }
1902
1903         __dwc3_gadget_kick_transfer(dep);
1904
1905         return 0;
1906 }
1907
1908 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1909         gfp_t gfp_flags)
1910 {
1911         struct dwc3_request             *req = to_dwc3_request(request);
1912         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
1913         struct dwc3                     *dwc = dep->dwc;
1914
1915         unsigned long                   flags;
1916
1917         int                             ret;
1918
1919         spin_lock_irqsave(&dwc->lock, flags);
1920         ret = __dwc3_gadget_ep_queue(dep, req);
1921         spin_unlock_irqrestore(&dwc->lock, flags);
1922
1923         return ret;
1924 }
1925
1926 static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req)
1927 {
1928         int i;
1929
1930         /* If req->trb is not set, then the request has not started */
1931         if (!req->trb)
1932                 return;
1933
1934         /*
1935          * If the request was already started, this means we had to
1936          * stop the transfer. With that we also need to ignore
1937          * all TRBs used by the request; however, TRBs can only
1938          * be modified after completion of the END_TRANSFER
1939          * command. So what we do here is wait for END_TRANSFER
1940          * completion and, only after that, jump over the TRBs by
1941          * clearing HWO and incrementing the dequeue
1942          * pointer.
1943          */
1944         for (i = 0; i < req->num_trbs; i++) {
1945                 struct dwc3_trb *trb;
1946
1947                 trb = &dep->trb_pool[dep->trb_dequeue];
1948                 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1949                 dwc3_ep_inc_deq(dep);
1950         }
1951
1952         req->num_trbs = 0;
1953 }
1954
1955 static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
1956 {
1957         struct dwc3_request             *req;
1958         struct dwc3                     *dwc = dep->dwc;
1959
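        /*
         * Give back each cancelled request with an error code that matches
         * why it was cancelled: disconnect (-ESHUTDOWN), dequeue
         * (-ECONNRESET) or stall (-EPIPE).
         */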
1960         while (!list_empty(&dep->cancelled_list)) {
1961                 req = next_request(&dep->cancelled_list);
1962                 dwc3_gadget_ep_skip_trbs(dep, req);
1963                 switch (req->status) {
1964                 case DWC3_REQUEST_STATUS_DISCONNECTED:
1965                         dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
1966                         break;
1967                 case DWC3_REQUEST_STATUS_DEQUEUED:
1968                         dwc3_gadget_giveback(dep, req, -ECONNRESET);
1969                         break;
1970                 case DWC3_REQUEST_STATUS_STALLED:
1971                         dwc3_gadget_giveback(dep, req, -EPIPE);
1972                         break;
1973                 default:
1974                         dev_err(dwc->dev, "request cancelled with wrong reason:%d\n", req->status);
1975                         dwc3_gadget_giveback(dep, req, -ECONNRESET);
1976                         break;
1977                 }
1978                 /*
1979                  * If the endpoint is disabled, let dwc3_remove_requests()
1980                  * handle the cleanup.
1981                  */
1982                 if (!dep->endpoint.desc)
1983                         break;
1984         }
1985 }
1986
1987 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1988                 struct usb_request *request)
1989 {
1990         struct dwc3_request             *req = to_dwc3_request(request);
1991         struct dwc3_request             *r = NULL;
1992
1993         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
1994         struct dwc3                     *dwc = dep->dwc;
1995
1996         unsigned long                   flags;
1997         int                             ret = 0;
1998
1999         trace_dwc3_ep_dequeue(req);
2000
2001         spin_lock_irqsave(&dwc->lock, flags);
2002
2003         list_for_each_entry(r, &dep->cancelled_list, list) {
2004                 if (r == req)
2005                         goto out;
2006         }
2007
2008         list_for_each_entry(r, &dep->pending_list, list) {
2009                 if (r == req) {
2010                         dwc3_gadget_giveback(dep, req, -ECONNRESET);
2011                         goto out;
2012                 }
2013         }
2014
2015         list_for_each_entry(r, &dep->started_list, list) {
2016                 if (r == req) {
2017                         struct dwc3_request *t;
2018
2019                         /* wait until it is processed */
2020                         dwc3_stop_active_transfer(dep, true, true);
2021
2022                         /*
2023                          * Remove any started request if the transfer is
2024                          * cancelled.
2025                          */
2026                         list_for_each_entry_safe(r, t, &dep->started_list, list)
2027                                 dwc3_gadget_move_cancelled_request(r,
2028                                                 DWC3_REQUEST_STATUS_DEQUEUED);
2029
2030                         dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
2031
2032                         goto out;
2033                 }
2034         }
2035
2036         dev_err(dwc->dev, "request %pK was not queued to %s\n",
2037                 request, ep->name);
2038         ret = -EINVAL;
2039 out:
2040         spin_unlock_irqrestore(&dwc->lock, flags);
2041
2042         return ret;
2043 }
2044
2045 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
2046 {
2047         struct dwc3_gadget_ep_cmd_params        params;
2048         struct dwc3                             *dwc = dep->dwc;
2049         struct dwc3_request                     *req;
2050         struct dwc3_request                     *tmp;
2051         int                                     ret;
2052
2053         if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2054                 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
2055                 return -EINVAL;
2056         }
2057
2058         memset(&params, 0x00, sizeof(params));
2059
2060         if (value) {
2061                 struct dwc3_trb *trb;
2062
2063                 unsigned int transfer_in_flight;
2064                 unsigned int started;
2065
2066                 if (dep->number > 1)
2067                         trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
2068                 else
2069                         trb = &dwc->ep0_trb[dep->trb_enqueue];
2070
2071                 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
2072                 started = !list_empty(&dep->started_list);
2073
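                /*
                 * Reject a functional (non-protocol) halt with -EAGAIN while
                 * a transfer is still in flight; the caller may retry once
                 * the endpoint is idle.
                 */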
2074                 if (!protocol && ((dep->direction && transfer_in_flight) ||
2075                                 (!dep->direction && started))) {
2076                         return -EAGAIN;
2077                 }
2078
2079                 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
2080                                 &params);
2081                 if (ret)
2082                         dev_err(dwc->dev, "failed to set STALL on %s\n",
2083                                         dep->name);
2084                 else
2085                         dep->flags |= DWC3_EP_STALL;
2086         } else {
2087                 /*
2088                  * Don't issue CLEAR_STALL command to control endpoints. The
2089                  * controller automatically clears the STALL when it receives
2090                  * the SETUP token.
2091                  */
2092                 if (dep->number <= 1) {
2093                         dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
2094                         return 0;
2095                 }
2096
2097                 dwc3_stop_active_transfer(dep, true, true);
2098
2099                 list_for_each_entry_safe(req, tmp, &dep->started_list, list)
2100                         dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_STALLED);
2101
2102                 if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
2103                         dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
2104                         return 0;
2105                 }
2106
2107                 dwc3_gadget_ep_cleanup_cancelled_requests(dep);
2108
2109                 ret = dwc3_send_clear_stall_ep_cmd(dep);
2110                 if (ret) {
2111                         dev_err(dwc->dev, "failed to clear STALL on %s\n",
2112                                         dep->name);
2113                         return ret;
2114                 }
2115
2116                 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
2117
2118                 if ((dep->flags & DWC3_EP_DELAY_START) &&
2119                     !usb_endpoint_xfer_isoc(dep->endpoint.desc))
2120                         __dwc3_gadget_kick_transfer(dep);
2121
2122                 dep->flags &= ~DWC3_EP_DELAY_START;
2123         }
2124
2125         return ret;
2126 }
2127
2128 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
2129 {
2130         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
2131         struct dwc3                     *dwc = dep->dwc;
2132
2133         unsigned long                   flags;
2134
2135         int                             ret;
2136
2137         spin_lock_irqsave(&dwc->lock, flags);
2138         ret = __dwc3_gadget_ep_set_halt(dep, value, false);
2139         spin_unlock_irqrestore(&dwc->lock, flags);
2140
2141         return ret;
2142 }
2143
2144 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
2145 {
2146         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
2147         struct dwc3                     *dwc = dep->dwc;
2148         unsigned long                   flags;
2149         int                             ret;
2150
2151         spin_lock_irqsave(&dwc->lock, flags);
2152         dep->flags |= DWC3_EP_WEDGE;
2153
2154         if (dep->number == 0 || dep->number == 1)
2155                 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
2156         else
2157                 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
2158         spin_unlock_irqrestore(&dwc->lock, flags);
2159
2160         return ret;
2161 }
2162
2163 /* -------------------------------------------------------------------------- */
2164
2165 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
2166         .bLength        = USB_DT_ENDPOINT_SIZE,
2167         .bDescriptorType = USB_DT_ENDPOINT,
2168         .bmAttributes   = USB_ENDPOINT_XFER_CONTROL,
2169 };
2170
2171 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
2172         .enable         = dwc3_gadget_ep0_enable,
2173         .disable        = dwc3_gadget_ep0_disable,
2174         .alloc_request  = dwc3_gadget_ep_alloc_request,
2175         .free_request   = dwc3_gadget_ep_free_request,
2176         .queue          = dwc3_gadget_ep0_queue,
2177         .dequeue        = dwc3_gadget_ep_dequeue,
2178         .set_halt       = dwc3_gadget_ep0_set_halt,
2179         .set_wedge      = dwc3_gadget_ep_set_wedge,
2180 };
2181
2182 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
2183         .enable         = dwc3_gadget_ep_enable,
2184         .disable        = dwc3_gadget_ep_disable,
2185         .alloc_request  = dwc3_gadget_ep_alloc_request,
2186         .free_request   = dwc3_gadget_ep_free_request,
2187         .queue          = dwc3_gadget_ep_queue,
2188         .dequeue        = dwc3_gadget_ep_dequeue,
2189         .set_halt       = dwc3_gadget_ep_set_halt,
2190         .set_wedge      = dwc3_gadget_ep_set_wedge,
2191 };
2192
2193 /* -------------------------------------------------------------------------- */
2194
2195 static int dwc3_gadget_get_frame(struct usb_gadget *g)
2196 {
2197         struct dwc3             *dwc = gadget_to_dwc(g);
2198
2199         return __dwc3_gadget_get_frame(dwc);
2200 }
2201
2202 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
2203 {
2204         int                     retries;
2205
2206         int                     ret;
2207         u32                     reg;
2208
2209         u8                      link_state;
2210
2211         /*
2212          * According to the Databook, a remote wakeup request should
2213          * be issued only when the device is in the early suspend state.
2214          *
2215          * We can check that via the USB Link State bits in the DSTS register.
2216          */
2217         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2218
2219         link_state = DWC3_DSTS_USBLNKST(reg);
2220
2221         switch (link_state) {
2222         case DWC3_LINK_STATE_RESET:
2223         case DWC3_LINK_STATE_RX_DET:    /* in HS, means Early Suspend */
2224         case DWC3_LINK_STATE_U3:        /* in HS, means SUSPEND */
2225         case DWC3_LINK_STATE_U2:        /* in HS, means Sleep (L1) */
2226         case DWC3_LINK_STATE_U1:
2227         case DWC3_LINK_STATE_RESUME:
2228                 break;
2229         default:
2230                 return -EINVAL;
2231         }
2232
2233         ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
2234         if (ret < 0) {
2235                 dev_err(dwc->dev, "failed to put link in Recovery\n");
2236                 return ret;
2237         }
2238
2239         /* Recent versions do this automatically */
2240         if (DWC3_VER_IS_PRIOR(DWC3, 194A)) {
2241                 /* write zeroes to Link Change Request */
2242                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2243                 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
2244                 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2245         }
2246
2247         /* poll until Link State changes to ON */
2248         retries = 20000;
2249
2250         while (retries--) {
2251                 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2252
2253                 /* in HS, means ON */
2254                 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
2255                         break;
2256         }
2257
2258         if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
2259                 dev_err(dwc->dev, "failed to send remote wakeup\n");
2260                 return -EINVAL;
2261         }
2262
2263         return 0;
2264 }
2265
2266 static int dwc3_gadget_wakeup(struct usb_gadget *g)
2267 {
2268         struct dwc3             *dwc = gadget_to_dwc(g);
2269         unsigned long           flags;
2270         int                     ret;
2271
2272         spin_lock_irqsave(&dwc->lock, flags);
2273         ret = __dwc3_gadget_wakeup(dwc);
2274         spin_unlock_irqrestore(&dwc->lock, flags);
2275
2276         return ret;
2277 }
2278
2279 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
2280                 int is_selfpowered)
2281 {
2282         struct dwc3             *dwc = gadget_to_dwc(g);
2283         unsigned long           flags;
2284
2285         spin_lock_irqsave(&dwc->lock, flags);
2286         g->is_selfpowered = !!is_selfpowered;
2287         spin_unlock_irqrestore(&dwc->lock, flags);
2288
2289         return 0;
2290 }
2291
2292 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2293 {
2294         u32 epnum;
2295
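        /* eps[0] and eps[1] are ep0 OUT/IN; only stop the other endpoints */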
2296         for (epnum = 2; epnum < dwc->num_eps; epnum++) {
2297                 struct dwc3_ep *dep;
2298
2299                 dep = dwc->eps[epnum];
2300                 if (!dep)
2301                         continue;
2302
2303                 dwc3_remove_requests(dwc, dep);
2304         }
2305 }
2306
2307 static void __dwc3_gadget_set_ssp_rate(struct dwc3 *dwc)
2308 {
2309         enum usb_ssp_rate       ssp_rate = dwc->gadget_ssp_rate;
2310         u32                     reg;
2311
2312         if (ssp_rate == USB_SSP_GEN_UNKNOWN)
2313                 ssp_rate = dwc->max_ssp_rate;
2314
2315         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2316         reg &= ~DWC3_DCFG_SPEED_MASK;
2317         reg &= ~DWC3_DCFG_NUMLANES(~0);
2318
2319         if (ssp_rate == USB_SSP_GEN_1x2)
2320                 reg |= DWC3_DCFG_SUPERSPEED;
2321         else if (dwc->max_ssp_rate != USB_SSP_GEN_1x2)
2322                 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
2323
2324         if (ssp_rate != USB_SSP_GEN_2x1 &&
2325             dwc->max_ssp_rate != USB_SSP_GEN_2x1)
2326                 reg |= DWC3_DCFG_NUMLANES(1);
2327
2328         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2329 }
2330
2331 static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
2332 {
2333         enum usb_device_speed   speed;
2334         u32                     reg;
2335
2336         speed = dwc->gadget_max_speed;
2337         if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed)
2338                 speed = dwc->maximum_speed;
2339
2340         if (speed == USB_SPEED_SUPER_PLUS &&
2341             DWC3_IP_IS(DWC32)) {
2342                 __dwc3_gadget_set_ssp_rate(dwc);
2343                 return;
2344         }
2345
2346         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2347         reg &= ~(DWC3_DCFG_SPEED_MASK);
2348
2349         /*
2350          * WORKAROUND: DWC3 revisions < 2.20a have an issue
2351          * which can cause a metastable state on the Run/Stop
2352          * bit if we try to force the IP into USB2-only mode.
2353          *
2354          * Because of that, we cannot configure the IP to any
2355          * speed other than SuperSpeed.
2356          *
2357          * Refer to:
2358          *
2359          * STAR#9000525659: Clock Domain Crossing on DCTL in
2360          * USB 2.0 Mode
2361          */
2362         if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
2363             !dwc->dis_metastability_quirk) {
2364                 reg |= DWC3_DCFG_SUPERSPEED;
2365         } else {
2366                 switch (speed) {
2367                 case USB_SPEED_FULL:
2368                         reg |= DWC3_DCFG_FULLSPEED;
2369                         break;
2370                 case USB_SPEED_HIGH:
2371                         reg |= DWC3_DCFG_HIGHSPEED;
2372                         break;
2373                 case USB_SPEED_SUPER:
2374                         reg |= DWC3_DCFG_SUPERSPEED;
2375                         break;
2376                 case USB_SPEED_SUPER_PLUS:
2377                         if (DWC3_IP_IS(DWC3))
2378                                 reg |= DWC3_DCFG_SUPERSPEED;
2379                         else
2380                                 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
2381                         break;
2382                 default:
2383                         dev_err(dwc->dev, "invalid speed (%d)\n", speed);
2384
2385                         if (DWC3_IP_IS(DWC3))
2386                                 reg |= DWC3_DCFG_SUPERSPEED;
2387                         else
2388                                 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
2389                 }
2390         }
2391
2392         if (DWC3_IP_IS(DWC32) &&
2393             speed > USB_SPEED_UNKNOWN &&
2394             speed < USB_SPEED_SUPER_PLUS)
2395                 reg &= ~DWC3_DCFG_NUMLANES(~0);
2396
2397         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2398 }
2399
2400 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
2401 {
2402         u32                     reg;
2403         u32                     timeout = 500;
2404
2405         if (pm_runtime_suspended(dwc->dev))
2406                 return 0;
2407
2408         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2409         if (is_on) {
2410                 if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
2411                         reg &= ~DWC3_DCTL_TRGTULST_MASK;
2412                         reg |= DWC3_DCTL_TRGTULST_RX_DET;
2413                 }
2414
2415                 if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
2416                         reg &= ~DWC3_DCTL_KEEP_CONNECT;
2417                 reg |= DWC3_DCTL_RUN_STOP;
2418
2419                 if (dwc->has_hibernation)
2420                         reg |= DWC3_DCTL_KEEP_CONNECT;
2421
2422                 __dwc3_gadget_set_speed(dwc);
2423                 dwc->pullups_connected = true;
2424         } else {
2425                 reg &= ~DWC3_DCTL_RUN_STOP;
2426
2427                 if (dwc->has_hibernation && !suspend)
2428                         reg &= ~DWC3_DCTL_KEEP_CONNECT;
2429
2430                 dwc->pullups_connected = false;
2431         }
2432
2433         dwc3_gadget_dctl_write_safe(dwc, reg);
2434
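        /*
         * Poll DSTS until DEVCTRLHLT reflects the requested state: cleared
         * when starting the controller, set once it has halted.
         */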
2435         do {
2436                 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2437                 reg &= DWC3_DSTS_DEVCTRLHLT;
2438         } while (--timeout && !(!is_on ^ !reg));
2439
2440         if (!timeout)
2441                 return -ETIMEDOUT;
2442
2443         return 0;
2444 }
2445
2446 static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
2447 static void __dwc3_gadget_stop(struct dwc3 *dwc);
2448 static int __dwc3_gadget_start(struct dwc3 *dwc);
2449
2450 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
2451 {
2452         struct dwc3             *dwc = gadget_to_dwc(g);
2453         unsigned long           flags;
2454         int                     ret;
2455
2456         is_on = !!is_on;
2457
2458         /*
2459          * Per the databook, when we want to stop the gadget, if a control transfer
2460          * is still in progress, complete it and get the core into the setup phase.
2461          */
2462         if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
2463                 reinit_completion(&dwc->ep0_in_setup);
2464
2465                 ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
2466                                 msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
2467                 if (ret == 0)
2468                         dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
2469         }
2470
2471         /*
2472          * Avoid issuing a runtime resume if the device is already in the
2473          * suspended state during gadget disconnect.  DWC3 gadget was already
2474          * halted/stopped during runtime suspend.
2475          */
2476         if (!is_on) {
2477                 pm_runtime_barrier(dwc->dev);
2478                 if (pm_runtime_suspended(dwc->dev))
2479                         return 0;
2480         }
2481
2482         /*
2483          * Check the return value for successful resume, or error.  For a
2484          * successful resume, the DWC3 runtime PM resume routine will handle
2485          * the run stop sequence, so avoid duplicate operations here.
2486          */
2487         ret = pm_runtime_get_sync(dwc->dev);
2488         if (!ret || ret < 0) {
2489                 pm_runtime_put(dwc->dev);
2490                 return 0;
2491         }
2492
2493         /*
2494          * Synchronize and disable any further event handling while controller
2495          * is being enabled/disabled.
2496          */
2497         disable_irq(dwc->irq_gadget);
2498
2499         spin_lock_irqsave(&dwc->lock, flags);
2500
2501         if (!is_on) {
2502                 u32 count;
2503
2504                 dwc->connected = false;
2505                 /*
2506                  * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
2507                  * Section 4.1.8 Table 4-7, it states that for a device-initiated
2508                  * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
2509                  * command for any active transfers" before clearing the RunStop
2510                  * bit.
2511                  */
2512                 dwc3_stop_active_transfers(dwc);
2513                 __dwc3_gadget_stop(dwc);
2514
2515                 /*
2516                  * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
2517                  * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
2518                  * "software needs to acknowledge the events that are generated
2519                  * (by writing to GEVNTCOUNTn) while it is waiting for this bit
2520                  * to be set to '1'."
2521                  */
2522                 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
2523                 count &= DWC3_GEVNTCOUNT_MASK;
2524                 if (count > 0) {
2525                         dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
2526                         dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
2527                                                 dwc->ev_buf->length;
2528                 }
2529         } else {
2530                 __dwc3_gadget_start(dwc);
2531         }
2532
2533         ret = dwc3_gadget_run_stop(dwc, is_on, false);
2534         spin_unlock_irqrestore(&dwc->lock, flags);
2535         enable_irq(dwc->irq_gadget);
2536
2537         pm_runtime_put(dwc->dev);
2538
2539         return ret;
2540 }
2541
2542 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
2543 {
2544         u32                     reg;
2545
2546         /* Enable all but Start and End of Frame IRQs */
2547         reg = (DWC3_DEVTEN_EVNTOVERFLOWEN |
2548                         DWC3_DEVTEN_CMDCMPLTEN |
2549                         DWC3_DEVTEN_ERRTICERREN |
2550                         DWC3_DEVTEN_WKUPEVTEN |
2551                         DWC3_DEVTEN_CONNECTDONEEN |
2552                         DWC3_DEVTEN_USBRSTEN |
2553                         DWC3_DEVTEN_DISCONNEVTEN);
2554
2555         if (DWC3_VER_IS_PRIOR(DWC3, 250A))
2556                 reg |= DWC3_DEVTEN_ULSTCNGEN;
2557
2558         /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
2559         if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
2560                 reg |= DWC3_DEVTEN_U3L2L1SUSPEN;
2561
2562         dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2563 }
2564
2565 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
2566 {
2567         /* mask all interrupts */
2568         dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2569 }
2570
2571 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
2572 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
2573
2574 /**
2575  * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
2576  * @dwc: pointer to our context structure
2577  *
2578  * The following looks complex but is actually very simple. To calculate the
2579  * number of packets we can burst at once on OUT transfers, we use the
2580  * RxFIFO size.
2581  *
2582  * To calculate RxFIFO size we need two numbers:
2583  * MDWIDTH = size, in bits, of the internal memory bus
2584  * RAM2_DEPTH = depth, in MDWIDTH-sized words, of internal RAM2 (where the RxFIFO sits)
2585  *
2586  * Given these two numbers, the formula is simple:
2587  *
2588  * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
2589  *
2590  * 24 bytes is for 3x SETUP packets
2591  * 16 bytes is a clock domain crossing tolerance
2592  *
2593  * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
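 *
 * As an illustrative example (values assumed, not taken from any particular
 * core): with MDWIDTH = 64 bits and RAM2_DEPTH = 2048, RxFIFO Size =
 * (2048 * 64 / 8) - 24 - 16 = 16344 bytes, and NUMP = 16344 / 1024 = 15.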
2594  */
2595 static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
2596 {
2597         u32 ram2_depth;
2598         u32 mdwidth;
2599         u32 nump;
2600         u32 reg;
2601
2602         ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
2603         mdwidth = dwc3_mdwidth(dwc);
2604
2605         nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
2606         nump = min_t(u32, nump, 16);
2607
2608         /* update NumP */
2609         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2610         reg &= ~DWC3_DCFG_NUMP_MASK;
2611         reg |= nump << DWC3_DCFG_NUMP_SHIFT;
2612         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2613 }
2614
2615 static int __dwc3_gadget_start(struct dwc3 *dwc)
2616 {
2617         struct dwc3_ep          *dep;
2618         int                     ret = 0;
2619         u32                     reg;
2620
2621         /*
2622          * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
2623          * the core supports IMOD, disable it.
2624          */
2625         if (dwc->imod_interval) {
2626                 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
2627                 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
2628         } else if (dwc3_has_imod(dwc)) {
2629                 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
2630         }
2631
2632         /*
2633          * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
2634          * field instead of letting dwc3 itself calculate that automatically.
2635          *
2636          * This way, we maximize the chances that we'll be able to get several
2637          * bursts of data without going through any sort of endpoint throttling.
2638          */
2639         reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
2640         if (DWC3_IP_IS(DWC3))
2641                 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
2642         else
2643                 reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;
2644
2645         dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
2646
2647         dwc3_gadget_setup_nump(dwc);
2648
2649         /*
2650          * Currently the controller handles a single stream only. So, ignore
2651          * the Packet Pending bit for stream selection and don't search for another
2652          * stream if the host sends a Data Packet with PP=0 (for the OUT direction) or
2653          * an ACK with NumP=0 and PP=0 (for the IN direction). This slightly improves
2654          * stream performance.
2655          */
2656         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2657         reg |= DWC3_DCFG_IGNSTRMPP;
2658         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2659
2660         /* Start with SuperSpeed Default */
2661         dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2662
2663         dep = dwc->eps[0];
2664         ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
2665         if (ret) {
2666                 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2667                 goto err0;
2668         }
2669
2670         dep = dwc->eps[1];
2671         ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
2672         if (ret) {
2673                 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2674                 goto err1;
2675         }
2676
2677         /* begin to receive SETUP packets */
2678         dwc->ep0state = EP0_SETUP_PHASE;
2679         dwc->link_state = DWC3_LINK_STATE_SS_DIS;
2680         dwc->delayed_status = false;
2681         dwc3_ep0_out_start(dwc);
2682
2683         dwc3_gadget_enable_irq(dwc);
2684
2685         return 0;
2686
2687 err1:
2688         __dwc3_gadget_ep_disable(dwc->eps[0]);
2689
2690 err0:
2691         return ret;
2692 }
2693
2694 static int dwc3_gadget_start(struct usb_gadget *g,
2695                 struct usb_gadget_driver *driver)
2696 {
2697         struct dwc3             *dwc = gadget_to_dwc(g);
2698         unsigned long           flags;
2699         int                     ret;
2700         int                     irq;
2701
2702         irq = dwc->irq_gadget;
2703         ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
2704                         IRQF_SHARED, "dwc3", dwc->ev_buf);
2705         if (ret) {
2706                 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2707                                 irq, ret);
2708                 return ret;
2709         }
2710
2711         spin_lock_irqsave(&dwc->lock, flags);
2712         dwc->gadget_driver      = driver;
2713         spin_unlock_irqrestore(&dwc->lock, flags);
2714
2715         return 0;
2716 }
2717
2718 static void __dwc3_gadget_stop(struct dwc3 *dwc)
2719 {
2720         dwc3_gadget_disable_irq(dwc);
2721         __dwc3_gadget_ep_disable(dwc->eps[0]);
2722         __dwc3_gadget_ep_disable(dwc->eps[1]);
2723 }
2724
2725 static int dwc3_gadget_stop(struct usb_gadget *g)
2726 {
2727         struct dwc3             *dwc = gadget_to_dwc(g);
2728         unsigned long           flags;
2729
2730         spin_lock_irqsave(&dwc->lock, flags);
2731         dwc->gadget_driver      = NULL;
2732         dwc->max_cfg_eps = 0;
2733         spin_unlock_irqrestore(&dwc->lock, flags);
2734
2735         free_irq(dwc->irq_gadget, dwc->ev_buf);
2736
2737         return 0;
2738 }
2739
2740 static void dwc3_gadget_config_params(struct usb_gadget *g,
2741                                       struct usb_dcd_config_params *params)
2742 {
2743         struct dwc3             *dwc = gadget_to_dwc(g);
2744
2745         params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED;
2746         params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED;
2747
2748         /* Recommended BESL */
2749         if (!dwc->dis_enblslpm_quirk) {
2750                 /*
2751                  * If the recommended BESL baseline is 0 or if the BESL deep is
2752                  * less than 2, Microsoft's Windows 10 host usb stack will issue
2753                  * a usb reset immediately after it receives the extended BOS
2754                  * descriptor and the enumeration will fail. To maintain
2755                  * compatibility with the Windows USB stack, let's set the
2756                  * recommended BESL baseline to 1 and clamp the BESL deep to be
2757                  * within 2 to 15.
2758                  */
2759                 params->besl_baseline = 1;
2760                 if (dwc->is_utmi_l1_suspend)
2761                         params->besl_deep =
2762                                 clamp_t(u8, dwc->hird_threshold, 2, 15);
2763         }
2764
2765         /* U1 Device exit Latency */
2766         if (dwc->dis_u1_entry_quirk)
2767                 params->bU1devExitLat = 0;
2768         else
2769                 params->bU1devExitLat = DWC3_DEFAULT_U1_DEV_EXIT_LAT;
2770
2771         /* U2 Device exit Latency */
2772         if (dwc->dis_u2_entry_quirk)
2773                 params->bU2DevExitLat = 0;
2774         else
2775                 params->bU2DevExitLat =
2776                                 cpu_to_le16(DWC3_DEFAULT_U2_DEV_EXIT_LAT);
2777 }
2778
2779 static void dwc3_gadget_set_speed(struct usb_gadget *g,
2780                                   enum usb_device_speed speed)
2781 {
2782         struct dwc3             *dwc = gadget_to_dwc(g);
2783         unsigned long           flags;
2784
2785         spin_lock_irqsave(&dwc->lock, flags);
2786         dwc->gadget_max_speed = speed;
2787         spin_unlock_irqrestore(&dwc->lock, flags);
2788 }
2789
2790 static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
2791                                      enum usb_ssp_rate rate)
2792 {
2793         struct dwc3             *dwc = gadget_to_dwc(g);
2794         unsigned long           flags;
2795
2796         spin_lock_irqsave(&dwc->lock, flags);
2797         dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS;
2798         dwc->gadget_ssp_rate = rate;
2799         spin_unlock_irqrestore(&dwc->lock, flags);
2800 }
2801
2802 static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
2803 {
2804         struct dwc3             *dwc = gadget_to_dwc(g);
2805         union power_supply_propval      val = {0};
2806         int                             ret;
2807
2808         if (dwc->usb2_phy)
2809                 return usb_phy_set_power(dwc->usb2_phy, mA);
2810
2811         if (!dwc->usb_psy)
2812                 return -EOPNOTSUPP;
2813
2814         val.intval = 1000 * mA;
2815         ret = power_supply_set_property(dwc->usb_psy, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val);
2816
2817         return ret;
2818 }
2819
2820 /**
2821  * dwc3_gadget_check_config - ensure dwc3 can support the USB configuration
2822  * @g: pointer to the USB gadget
2823  *
2824  * Used to record the maximum number of endpoints being used, across all
2825  * configurations, in a USB composite device. This is used in the calculation
2826  * of the TXFIFO sizes when resizing internal memory for individual endpoints.
2827  * It helps ensure that the resizing logic reserves enough space for at
2828  * least one max packet.
2829  */
2830 static int dwc3_gadget_check_config(struct usb_gadget *g)
2831 {
2832         struct dwc3 *dwc = gadget_to_dwc(g);
2833         struct usb_ep *ep;
2834         int fifo_size = 0;
2835         int ram1_depth;
2836         int ep_num = 0;
2837
2838         if (!dwc->do_fifo_resize)
2839                 return 0;
2840
2841         list_for_each_entry(ep, &g->ep_list, ep_list) {
2842                 /* Only interested in the IN endpoints */
2843                 if (ep->claimed && (ep->address & USB_DIR_IN))
2844                         ep_num++;
2845         }
2846
2847         if (ep_num <= dwc->max_cfg_eps)
2848                 return 0;
2849
2850         /* Update the max number of eps in the composition */
2851         dwc->max_cfg_eps = ep_num;
2852
2853         fifo_size = dwc3_gadget_calc_tx_fifo_size(dwc, dwc->max_cfg_eps);
2854         /* Based on the equation, increment by one for every ep */
2855         fifo_size += dwc->max_cfg_eps;
2856
2857         /* Check if we can fit a single fifo per endpoint */
2858         ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
2859         if (fifo_size > ram1_depth)
2860                 return -ENOMEM;
2861
2862         return 0;
2863 }
2864
2865 static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
2866 {
2867         struct dwc3             *dwc = gadget_to_dwc(g);
2868         unsigned long           flags;
2869
2870         spin_lock_irqsave(&dwc->lock, flags);
2871         dwc->async_callbacks = enable;
2872         spin_unlock_irqrestore(&dwc->lock, flags);
2873 }
2874
2875 static const struct usb_gadget_ops dwc3_gadget_ops = {
2876         .get_frame              = dwc3_gadget_get_frame,
2877         .wakeup                 = dwc3_gadget_wakeup,
2878         .set_selfpowered        = dwc3_gadget_set_selfpowered,
2879         .pullup                 = dwc3_gadget_pullup,
2880         .udc_start              = dwc3_gadget_start,
2881         .udc_stop               = dwc3_gadget_stop,
2882         .udc_set_speed          = dwc3_gadget_set_speed,
2883         .udc_set_ssp_rate       = dwc3_gadget_set_ssp_rate,
2884         .get_config_params      = dwc3_gadget_config_params,
2885         .vbus_draw              = dwc3_gadget_vbus_draw,
2886         .check_config           = dwc3_gadget_check_config,
2887         .udc_async_callbacks    = dwc3_gadget_async_callbacks,
2888 };
2889
2890 /* -------------------------------------------------------------------------- */
2891
2892 static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep)
2893 {
2894         struct dwc3 *dwc = dep->dwc;
2895
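        /*
         * Advertise the largest possible ep0 maxpacket (512 bytes, the
         * SuperSpeed value); the ConnectDone handler later trims ep0's
         * maxpacket to match the negotiated speed.
         */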
2896         usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
2897         dep->endpoint.maxburst = 1;
2898         dep->endpoint.ops = &dwc3_gadget_ep0_ops;
2899         if (!dep->direction)
2900                 dwc->gadget->ep0 = &dep->endpoint;
2901
2902         dep->endpoint.caps.type_control = true;
2903
2904         return 0;
2905 }
2906
2907 static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
2908 {
2909         struct dwc3 *dwc = dep->dwc;
2910         u32 mdwidth;
2911         int size;
2912
2913         mdwidth = dwc3_mdwidth(dwc);
2914
2915         /* MDWIDTH is represented in bits; convert to bytes */
2916         mdwidth /= 8;
2917
2918         size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
2919         if (DWC3_IP_IS(DWC3))
2920                 size = DWC3_GTXFIFOSIZ_TXFDEP(size);
2921         else
2922                 size = DWC31_GTXFIFOSIZ_TXFDEP(size);
2923
2924         /* FIFO depth is in MDWIDTH-byte units; multiply to get bytes */
2925         size *= mdwidth;
2926
2927         /*
2928          * To meet performance requirements, a minimum TxFIFO size of 3x
2929          * MaxPacketSize is recommended for endpoints that support burst, and a
2930          * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't
2931          * support burst. Use those numbers to calculate the max packet
2932          * limit as below.
2933          */
2934         if (dwc->maximum_speed >= USB_SPEED_SUPER)
2935                 size /= 3;
2936         else
2937                 size /= 2;
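        /*
         * Illustrative example (hypothetical numbers): a 4096-byte TxFIFO on a
         * SuperSpeed-capable core yields a 1365-byte limit, comfortably above
         * the 1024-byte SuperSpeed maximum packet size.
         */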
2938
2939         usb_ep_set_maxpacket_limit(&dep->endpoint, size);
2940
2941         dep->endpoint.max_streams = 16;
2942         dep->endpoint.ops = &dwc3_gadget_ep_ops;
2943         list_add_tail(&dep->endpoint.ep_list,
2944                         &dwc->gadget->ep_list);
2945         dep->endpoint.caps.type_iso = true;
2946         dep->endpoint.caps.type_bulk = true;
2947         dep->endpoint.caps.type_int = true;
2948
2949         return dwc3_alloc_trb_pool(dep);
2950 }
2951
2952 static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
2953 {
2954         struct dwc3 *dwc = dep->dwc;
2955         u32 mdwidth;
2956         int size;
2957
2958         mdwidth = dwc3_mdwidth(dwc);
2959
2960         /* MDWIDTH is represented in bits, convert to bytes */
2961         mdwidth /= 8;
2962
2963         /* All OUT endpoints share a single RxFIFO space */
2964         size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
2965         if (DWC3_IP_IS(DWC3))
2966                 size = DWC3_GRXFIFOSIZ_RXFDEP(size);
2967         else
2968                 size = DWC31_GRXFIFOSIZ_RXFDEP(size);
2969
2970         /* FIFO depth is in MDWIDTH-byte units; multiply to get bytes */
2971         size *= mdwidth;
2972
2973         /*
2974          * To meet performance requirements, a minimum recommended RxFIFO size
2975          * is defined as follows:
2976          * RxFIFO size >= (3 x MaxPacketSize) +
2977          * (3 x 8-byte setup packet size) + (16 bytes clock crossing margin)
2978          *
2979          * Then calculate the max packet limit as below.
2980          */
2981         size -= (3 * 8) + 16;
2982         if (size < 0)
2983                 size = 0;
2984         else
2985                 size /= 3;
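        /*
         * Illustrative example (hypothetical numbers): a 4096-byte RxFIFO
         * leaves (4096 - 24 - 16) / 3 = 1352 bytes per packet after reserving
         * room for setup packets and the clock crossing margin.
         */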
2986
2987         usb_ep_set_maxpacket_limit(&dep->endpoint, size);
2988         dep->endpoint.max_streams = 16;
2989         dep->endpoint.ops = &dwc3_gadget_ep_ops;
2990         list_add_tail(&dep->endpoint.ep_list,
2991                         &dwc->gadget->ep_list);
2992         dep->endpoint.caps.type_iso = true;
2993         dep->endpoint.caps.type_bulk = true;
2994         dep->endpoint.caps.type_int = true;
2995
2996         return dwc3_alloc_trb_pool(dep);
2997 }
2998
2999 static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
3000 {
3001         struct dwc3_ep                  *dep;
3002         bool                            direction = epnum & 1;
3003         int                             ret;
3004         u8                              num = epnum >> 1;
3005
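        /*
         * Physical endpoints come in pairs: even numbers are OUT, odd numbers
         * are IN, so logical endpoint <num> is backed by physical endpoints
         * 2 * num (OUT) and 2 * num + 1 (IN).
         */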
3006         dep = kzalloc(sizeof(*dep), GFP_KERNEL);
3007         if (!dep)
3008                 return -ENOMEM;
3009
3010         dep->dwc = dwc;
3011         dep->number = epnum;
3012         dep->direction = direction;
3013         dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
3014         dwc->eps[epnum] = dep;
3015         dep->combo_num = 0;
3016         dep->start_cmd_status = 0;
3017
3018         snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
3019                         direction ? "in" : "out");
3020
3021         dep->endpoint.name = dep->name;
3022
3023         if (!(dep->number > 1)) {
3024                 dep->endpoint.desc = &dwc3_gadget_ep0_desc;
3025                 dep->endpoint.comp_desc = NULL;
3026         }
3027
3028         if (num == 0)
3029                 ret = dwc3_gadget_init_control_endpoint(dep);
3030         else if (direction)
3031                 ret = dwc3_gadget_init_in_endpoint(dep);
3032         else
3033                 ret = dwc3_gadget_init_out_endpoint(dep);
3034
3035         if (ret)
3036                 return ret;
3037
3038         dep->endpoint.caps.dir_in = direction;
3039         dep->endpoint.caps.dir_out = !direction;
3040
3041         INIT_LIST_HEAD(&dep->pending_list);
3042         INIT_LIST_HEAD(&dep->started_list);
3043         INIT_LIST_HEAD(&dep->cancelled_list);
3044
3045         dwc3_debugfs_create_endpoint_dir(dep);
3046
3047         return 0;
3048 }
3049
3050 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
3051 {
3052         u8                              epnum;
3053
3054         INIT_LIST_HEAD(&dwc->gadget->ep_list);
3055
3056         for (epnum = 0; epnum < total; epnum++) {
3057                 int                     ret;
3058
3059                 ret = dwc3_gadget_init_endpoint(dwc, epnum);
3060                 if (ret)
3061                         return ret;
3062         }
3063
3064         return 0;
3065 }
3066
3067 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
3068 {
3069         struct dwc3_ep                  *dep;
3070         u8                              epnum;
3071
3072         for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
3073                 dep = dwc->eps[epnum];
3074                 if (!dep)
3075                         continue;
3076                 /*
3077                  * Physical endpoints 0 and 1 are special; they form the
3078                  * bi-directional USB endpoint 0.
3079                  *
3080                  * For those two physical endpoints, we don't allocate a TRB
3081                  * pool nor add them to the endpoint list. Because of that, we
3082                  * must not undo those two operations here; otherwise we would
3083                  * end up with all sorts of bugs when removing dwc3.ko.
3084                  */
3085                 if (epnum != 0 && epnum != 1) {
3086                         dwc3_free_trb_pool(dep);
3087                         list_del(&dep->endpoint.ep_list);
3088                 }
3089
3090                 debugfs_remove_recursive(debugfs_lookup(dep->name,
3091                                 debugfs_lookup(dev_name(dep->dwc->dev),
3092                                                usb_debug_root)));
3093                 kfree(dep);
3094         }
3095 }
3096
3097 /* -------------------------------------------------------------------------- */
3098
3099 static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
3100                 struct dwc3_request *req, struct dwc3_trb *trb,
3101                 const struct dwc3_event_depevt *event, int status, int chain)
3102 {
3103         unsigned int            count;
3104
3105         dwc3_ep_inc_deq(dep);
3106
3107         trace_dwc3_complete_trb(dep, trb);
3108         req->num_trbs--;
3109
3110         /*
3111          * If we're in the middle of a series of chained TRBs and we
3112          * receive a short transfer along the way, DWC3 will skip
3113          * through all TRBs including the last TRB in the chain (the
3114          * one where the CHN bit is zero). DWC3 will also avoid clearing
3115          * the HWO bit, so SW has to do it manually.
3116          *
3117          * We're going to do that here to avoid problems of HW trying
3118          * to use bogus TRBs for transfers.
3119          */
3120         if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
3121                 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
3122
3123         /*
3124          * For isochronous transfers, the first TRB in a service interval must
3125          * have the Isoc-First type. Track and report its interval frame number.
3126          */
3127         if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
3128             (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
3129                 unsigned int frame_number;
3130
3131                 frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
3132                 frame_number &= ~(dep->interval - 1);
3133                 req->request.frame_number = frame_number;
3134         }
3135
3136          * We use the bounce buffer for requests that need an extra TRB or an
3137          * OUT ZLP. If this TRB points to the bounce buffer address, it's an MPS
3138          * alignment TRB; don't add it to the req->remaining calculation.
3139          * TRB. Don't add it to req->remaining calculation.
3140          */
3141         if (trb->bpl == lower_32_bits(dep->dwc->bounce_addr) &&
3142             trb->bph == upper_32_bits(dep->dwc->bounce_addr)) {
3143                 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
3144                 return 1;
3145         }
3146
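        /*
         * On completion, the TRB's BUFSIZ field holds the number of bytes that
         * were not transferred; accumulate it so that request.actual can be
         * computed as length - remaining.
         */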
3147         count = trb->size & DWC3_TRB_SIZE_MASK;
3148         req->remaining += count;
3149
3150         if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
3151                 return 1;
3152
3153         if (event->status & DEPEVT_STATUS_SHORT && !chain)
3154                 return 1;
3155
3156         if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
3157             (trb->ctrl & DWC3_TRB_CTRL_LST))
3158                 return 1;
3159
3160         return 0;
3161 }
3162
3163 static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
3164                 struct dwc3_request *req, const struct dwc3_event_depevt *event,
3165                 int status)
3166 {
3167         struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
3168         struct scatterlist *sg = req->sg;
3169         struct scatterlist *s;
3170         unsigned int num_queued = req->num_queued_sgs;
3171         unsigned int i;
3172         int ret = 0;
3173
3174         for_each_sg(sg, s, num_queued, i) {
3175                 trb = &dep->trb_pool[dep->trb_dequeue];
3176
3177                 req->sg = sg_next(s);
3178                 req->num_queued_sgs--;
3179
3180                 ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
3181                                 trb, event, status, true);
3182                 if (ret)
3183                         break;
3184         }
3185
3186         return ret;
3187 }
3188
3189 static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
3190                 struct dwc3_request *req, const struct dwc3_event_depevt *event,
3191                 int status)
3192 {
3193         struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
3194
3195         return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb,
3196                         event, status, false);
3197 }
3198
3199 static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
3200 {
3201         return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
3202 }
3203
3204 static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
3205                 const struct dwc3_event_depevt *event,
3206                 struct dwc3_request *req, int status)
3207 {
3208         int request_status;
3209         int ret;
3210
3211         if (req->request.num_mapped_sgs)
3212                 ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
3213                                 status);
3214         else
3215                 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
3216                                 status);
3217
3218         req->request.actual = req->request.length - req->remaining;
3219
3220         if (!dwc3_gadget_ep_request_completed(req))
3221                 goto out;
3222
3223         if (req->needs_extra_trb) {
3224                 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
3225                                 status);
3226                 req->needs_extra_trb = false;
3227         }
3228
3229         /*
3230          * The event status only reflects the status of the TRB with IOC set.
3231          * For the requests that don't set interrupt on completion, the driver
3232          * needs to check and return the status of the completed TRBs associated
3233          * with the request. Use the status of the last TRB of the request.
3234          */
3235         if (req->request.no_interrupt) {
3236                 struct dwc3_trb *trb;
3237
3238                 trb = dwc3_ep_prev_trb(dep, dep->trb_dequeue);
3239                 switch (DWC3_TRB_SIZE_TRBSTS(trb->size)) {
3240                 case DWC3_TRBSTS_MISSED_ISOC:
3241                         /* Isoc endpoint only */
3242                         request_status = -EXDEV;
3243                         break;
3244                 case DWC3_TRB_STS_XFER_IN_PROG:
3245                         /* Applicable when End Transfer with ForceRM=0 */
3246                 case DWC3_TRBSTS_SETUP_PENDING:
3247                         /* Control endpoint only */
3248                 case DWC3_TRBSTS_OK:
3249                 default:
3250                         request_status = 0;
3251                         break;
3252                 }
3253         } else {
3254                 request_status = status;
3255         }
3256
3257         dwc3_gadget_giveback(dep, req, request_status);
3258
3259 out:
3260         return ret;
3261 }
3262
3263 static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
3264                 const struct dwc3_event_depevt *event, int status)
3265 {
3266         struct dwc3_request     *req;
3267
3268         while (!list_empty(&dep->started_list)) {
3269                 int ret;
3270
3271                 req = next_request(&dep->started_list);
3272                 ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
3273                                 req, status);
3274                 if (ret)
3275                         break;
3276                 /*
3277                  * The endpoint is disabled, let the dwc3_remove_requests()
3278                  * handle the cleanup.
3279                  */
3280                 if (!dep->endpoint.desc)
3281                         break;
3282         }
3283 }
3284
3285 static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
3286 {
3287         struct dwc3_request     *req;
3288         struct dwc3             *dwc = dep->dwc;
3289
3290         if (!dep->endpoint.desc || !dwc->pullups_connected ||
3291             !dwc->connected)
3292                 return false;
3293
3294         if (!list_empty(&dep->pending_list))
3295                 return true;
3296
3297         /*
3298          * We only need to check the first entry of the started list. We can
3299          * assume the completed requests are removed from the started list.
3300          */
3301         req = next_request(&dep->started_list);
3302         if (!req)
3303                 return false;
3304
3305         return !dwc3_gadget_ep_request_completed(req);
3306 }
3307
3308 static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
3309                 const struct dwc3_event_depevt *event)
3310 {
3311         dep->frame_number = event->parameters;
3312 }
3313
3314 static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
3315                 const struct dwc3_event_depevt *event, int status)
3316 {
3317         struct dwc3             *dwc = dep->dwc;
3318         bool                    no_started_trb = true;
3319
3320         dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
3321
3322         if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
3323                 goto out;
3324
3325         if (!dep->endpoint.desc)
3326                 return no_started_trb;
3327
3328         if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
3329                 list_empty(&dep->started_list) &&
3330                 (list_empty(&dep->pending_list) || status == -EXDEV))
3331                 dwc3_stop_active_transfer(dep, true, true);
3332         else if (dwc3_gadget_ep_should_continue(dep))
3333                 if (__dwc3_gadget_kick_transfer(dep) == 0)
3334                         no_started_trb = false;
3335
3336 out:
3337         /*
3338          * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
3339          * See dwc3_gadget_linksts_change_interrupt() for 1st half.
3340          */
3341         if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
3342                 u32             reg;
3343                 int             i;
3344
3345                 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
3346                         dep = dwc->eps[i];
3347
3348                         if (!(dep->flags & DWC3_EP_ENABLED))
3349                                 continue;
3350
3351                         if (!list_empty(&dep->started_list))
3352                                 return no_started_trb;
3353                 }
3354
3355                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3356                 reg |= dwc->u1u2;
3357                 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3358
3359                 dwc->u1u2 = 0;
3360         }
3361
3362         return no_started_trb;
3363 }
3364
3365 static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
3366                 const struct dwc3_event_depevt *event)
3367 {
3368         int status = 0;
3369
3370         if (!dep->endpoint.desc)
3371                 return;
3372
3373         if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
3374                 dwc3_gadget_endpoint_frame_from_event(dep, event);
3375
3376         if (event->status & DEPEVT_STATUS_BUSERR)
3377                 status = -ECONNRESET;
3378
3379         if (event->status & DEPEVT_STATUS_MISSED_ISOC)
3380                 status = -EXDEV;
3381
3382         dwc3_gadget_endpoint_trbs_complete(dep, event, status);
3383 }
3384
3385 static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep,
3386                 const struct dwc3_event_depevt *event)
3387 {
3388         int status = 0;
3389
3390         dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
3391
3392         if (event->status & DEPEVT_STATUS_BUSERR)
3393                 status = -ECONNRESET;
3394
3395         if (dwc3_gadget_endpoint_trbs_complete(dep, event, status))
3396                 dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
3397 }
3398
3399 static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
3400                 const struct dwc3_event_depevt *event)
3401 {
3402         dwc3_gadget_endpoint_frame_from_event(dep, event);
3403
3404         /*
3405          * The XferNotReady event is generated only once before the endpoint
3406          * starts. It will be generated again when the END_TRANSFER command is
3407          * issued. For some controller versions, the XferNotReady event may be
3408          * generated while the END_TRANSFER command is still in progress. Ignore
3409          * it and wait for the next XferNotReady event after the command is
3410          * completed.
3411          */
3412         if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
3413                 return;
3414
3415         (void) __dwc3_gadget_start_isoc(dep);
3416 }
3417
3418 static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
3419                 const struct dwc3_event_depevt *event)
3420 {
3421         u8 cmd = DEPEVT_PARAMETER_CMD(event->parameters);
3422
3423         if (cmd != DWC3_DEPCMD_ENDTRANSFER)
3424                 return;
3425
3426         /*
3427          * The END_TRANSFER command will cause the controller to generate a
3428          * NoStream event that is not due to a host DP NoStream rejection.
3429          * Ignore the next NoStream event.
3430          */
3431         if (dep->stream_capable)
3432                 dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
3433
3434         dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
3435         dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
3436         dwc3_gadget_ep_cleanup_cancelled_requests(dep);
3437
3438         if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) {
3439                 struct dwc3 *dwc = dep->dwc;
3440
3441                 dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL;
3442                 if (dwc3_send_clear_stall_ep_cmd(dep)) {
3443                         struct usb_ep *ep0 = &dwc->eps[0]->endpoint;
3444
3445                         dev_err(dwc->dev, "failed to clear STALL on %s\n", dep->name);
3446                         if (dwc->delayed_status)
3447                                 __dwc3_gadget_ep0_set_halt(ep0, 1);
3448                         return;
3449                 }
3450
3451                 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
3452                 if (dwc->delayed_status)
3453                         dwc3_ep0_send_delayed_status(dwc);
3454         }
3455
3456         if ((dep->flags & DWC3_EP_DELAY_START) &&
3457             !usb_endpoint_xfer_isoc(dep->endpoint.desc))
3458                 __dwc3_gadget_kick_transfer(dep);
3459
3460         dep->flags &= ~DWC3_EP_DELAY_START;
3461 }
3462
3463 static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
3464                 const struct dwc3_event_depevt *event)
3465 {
3466         struct dwc3 *dwc = dep->dwc;
3467
3468         if (event->status == DEPEVT_STREAMEVT_FOUND) {
3469                 dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
3470                 goto out;
3471         }
3472
3473         /* Note: NoStream rejection event param value is 0 and not 0xFFFF */
3474         switch (event->parameters) {
3475         case DEPEVT_STREAM_PRIME:
3476                 /*
3477                  * If the host can properly transition the endpoint state from
3478                  * idle to prime after a NoStream rejection, there's no need to
3479                  * force restarting the endpoint to reinitiate the stream. To
3480                  * simplify the check, assume the host follows the USB spec if
3481                  * it primed the endpoint more than once.
3482                  */
3483                 if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM) {
3484                         if (dep->flags & DWC3_EP_FIRST_STREAM_PRIMED)
3485                                 dep->flags &= ~DWC3_EP_FORCE_RESTART_STREAM;
3486                         else
3487                                 dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
3488                 }
3489
3490                 break;
3491         case DEPEVT_STREAM_NOSTREAM:
3492                 if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
3493                     !(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
3494                     !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE))
3495                         break;
3496
3497                 /*
3498                  * If the host rejects a stream due to no active stream, by the
3499                  * USB and xHCI spec, the endpoint will be put back to idle
3500                  * state. When the host is ready (buffer added/updated), it will
3501                  * prime the endpoint to inform the usb device controller. This
3502                  * triggers the device controller to issue ERDY to restart the
3503                  * stream. However, some hosts don't follow this and keep the
3504                  * endpoint in the idle state. No prime will come even though the
3505                  * host streams are updated, and the device controller will not be
3506                  * triggered to generate ERDY to move the next stream data. To
3507                  * work around this and maintain compatibility with various
3508                  * hosts, force reinitiation of the stream until the host is ready
3509                  * instead of waiting for the host to prime the endpoint.
3510                  */
3511                 if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
3512                         unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;
3513
3514                         dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
3515                 } else {
3516                         dep->flags |= DWC3_EP_DELAY_START;
3517                         dwc3_stop_active_transfer(dep, true, true);
3518                         return;
3519                 }
3520                 break;
3521         }
3522
3523 out:
3524         dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
3525 }
3526
3527 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
3528                 const struct dwc3_event_depevt *event)
3529 {
3530         struct dwc3_ep          *dep;
3531         u8                      epnum = event->endpoint_number;
3532
3533         dep = dwc->eps[epnum];
3534
3535         if (!(dep->flags & DWC3_EP_ENABLED)) {
3536                 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
3537                         return;
3538
3539                 /* Handle only EPCMDCMPLT when EP disabled */
3540                 if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
3541                         return;
3542         }
3543
3544         if (epnum == 0 || epnum == 1) {
3545                 dwc3_ep0_interrupt(dwc, event);
3546                 return;
3547         }
3548
3549         switch (event->endpoint_event) {
3550         case DWC3_DEPEVT_XFERINPROGRESS:
3551                 dwc3_gadget_endpoint_transfer_in_progress(dep, event);
3552                 break;
3553         case DWC3_DEPEVT_XFERNOTREADY:
3554                 dwc3_gadget_endpoint_transfer_not_ready(dep, event);
3555                 break;
3556         case DWC3_DEPEVT_EPCMDCMPLT:
3557                 dwc3_gadget_endpoint_command_complete(dep, event);
3558                 break;
3559         case DWC3_DEPEVT_XFERCOMPLETE:
3560                 dwc3_gadget_endpoint_transfer_complete(dep, event);
3561                 break;
3562         case DWC3_DEPEVT_STREAMEVT:
3563                 dwc3_gadget_endpoint_stream_event(dep, event);
3564                 break;
3565         case DWC3_DEPEVT_RXTXFIFOEVT:
3566                 break;
3567         }
3568 }
3569
3570 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
3571 {
3572         if (dwc->async_callbacks && dwc->gadget_driver->disconnect) {
3573                 spin_unlock(&dwc->lock);
3574                 dwc->gadget_driver->disconnect(dwc->gadget);
3575                 spin_lock(&dwc->lock);
3576         }
3577 }
3578
3579 static void dwc3_suspend_gadget(struct dwc3 *dwc)
3580 {
3581         if (dwc->async_callbacks && dwc->gadget_driver->suspend) {
3582                 spin_unlock(&dwc->lock);
3583                 dwc->gadget_driver->suspend(dwc->gadget);
3584                 spin_lock(&dwc->lock);
3585         }
3586 }
3587
3588 static void dwc3_resume_gadget(struct dwc3 *dwc)
3589 {
3590         if (dwc->async_callbacks && dwc->gadget_driver->resume) {
3591                 spin_unlock(&dwc->lock);
3592                 dwc->gadget_driver->resume(dwc->gadget);
3593                 spin_lock(&dwc->lock);
3594         }
3595 }
3596
3597 static void dwc3_reset_gadget(struct dwc3 *dwc)
3598 {
3599         if (!dwc->gadget_driver)
3600                 return;
3601
3602         if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) {
3603                 spin_unlock(&dwc->lock);
3604                 usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
3605                 spin_lock(&dwc->lock);
3606         }
3607 }
3608
3609 static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
3610         bool interrupt)
3611 {
3612         struct dwc3_gadget_ep_cmd_params params;
3613         u32 cmd;
3614         int ret;
3615
3616         if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
3617             (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
3618                 return;
3619
3620         /*
3621          * NOTICE: We are violating what the Databook says about the
3622          * EndTransfer command. Ideally we would _always_ wait for the
3623          * EndTransfer Command Completion IRQ, but that's causing too
3624          * much trouble synchronizing between us and the gadget driver.
3625          *
3626          * We have discussed this with the IP Provider and it was
3627          * suggested to giveback all requests here.
3628          *
3629          * Note also that a similar handling was tested by Synopsys
3630          * (thanks a lot Paul) and nothing bad has come out of it.
3631          * In short, what we're doing is issuing EndTransfer with the
3632          * CMDIOC bit set and delaying kicking the transfer until the
3633          * EndTransfer command has completed.
3634          *
3635          * As of IP version 3.10a of the DWC_usb3 IP, the controller
3636          * supports a mode to work around the above limitation. The
3637          * software can poll the CMDACT bit in the DEPCMD register
3638          * after issuing an EndTransfer command. This mode is enabled
3639          * by writing GUCTL2[14]. This polling is already done in the
3640          * dwc3_send_gadget_ep_cmd() function so if the mode is
3641          * enabled, the EndTransfer command will have completed upon
3642          * returning from this function.
3643          *
3644          * This mode is NOT available on the DWC_usb31 IP.
3645          */
3646
3647         cmd = DWC3_DEPCMD_ENDTRANSFER;
3648         cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
3649         cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0;
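        /*
         * The resource index identifies which active transfer to end; it was
         * saved from the Start Transfer command completion.
         */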
3650         cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
3651         memset(&params, 0, sizeof(params));
3652         ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
3653         WARN_ON_ONCE(ret);
3654         dep->resource_index = 0;
3655
3656         if (!interrupt)
3657                 dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
3658         else
3659                 dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
3660 }
3661
3662 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
3663 {
3664         u32 epnum;
3665
3666         for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
3667                 struct dwc3_ep *dep;
3668                 int ret;
3669
3670                 dep = dwc->eps[epnum];
3671                 if (!dep)
3672                         continue;
3673
3674                 if (!(dep->flags & DWC3_EP_STALL))
3675                         continue;
3676
3677                 dep->flags &= ~DWC3_EP_STALL;
3678
3679                 ret = dwc3_send_clear_stall_ep_cmd(dep);
3680                 WARN_ON_ONCE(ret);
3681         }
3682 }
3683
3684 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
3685 {
3686         int                     reg;
3687
3688         dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
3689
3690         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3691         reg &= ~DWC3_DCTL_INITU1ENA;
3692         reg &= ~DWC3_DCTL_INITU2ENA;
3693         dwc3_gadget_dctl_write_safe(dwc, reg);
3694
3695         dwc3_disconnect_gadget(dwc);
3696
3697         dwc->gadget->speed = USB_SPEED_UNKNOWN;
3698         dwc->setup_packet_pending = false;
3699         usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
3700
3701         dwc->connected = false;
3702 }
3703
3704 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
3705 {
3706         u32                     reg;
3707
3708         /*
3709          * Ideally, dwc3_reset_gadget() would trigger the function
3710          * drivers to stop any active transfers through ep disable.
3711          * However, for functions which defer ep disable, such as mass
3712          * storage, we will need to rely on the call to stop active
3713          * transfers here, and avoid allowing request queuing.
3714          */
3715         dwc->connected = false;
3716
3717         /*
3718          * WORKAROUND: DWC3 revisions <1.88a have an issue which
3719          * would cause a missing Disconnect Event if there's a
3720          * pending Setup Packet in the FIFO.
3721          *
3722          * There's no suggested workaround on the official Bug
3723          * report, which states that "unless the driver/application
3724          * is doing any special handling of a disconnect event,
3725          * there is no functional issue".
3726          *
3727          * Unfortunately, it turns out that we _do_ some special
3728          * handling of a disconnect event, namely complete all
3729          * pending transfers, notify gadget driver of the
3730          * disconnection, and so on.
3731          *
3732          * Our suggested workaround is to follow the Disconnect
3733          * Event steps here, instead, based on a setup_packet_pending
3734          * flag. This flag gets set whenever we have a SETUP_PENDING
3735          * status for EP0 TRBs and gets cleared on XferComplete for the
3736          * same endpoint.
3737          *
3738          * Refers to:
3739          *
3740          * STAR#9000466709: RTL: Device : Disconnect event not
3741          * generated if setup packet pending in FIFO
3742          */
3743         if (DWC3_VER_IS_PRIOR(DWC3, 188A)) {
3744                 if (dwc->setup_packet_pending)
3745                         dwc3_gadget_disconnect_interrupt(dwc);
3746         }
3747
3748         dwc3_reset_gadget(dwc);
3749         /*
3750          * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
3751          * Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
3752          * needs to ensure that it sends "a DEPENDXFER command for any active
3753          * transfers."
3754          */
3755         dwc3_stop_active_transfers(dwc);
3756         dwc->connected = true;
3757
3758         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3759         reg &= ~DWC3_DCTL_TSTCTRL_MASK;
3760         dwc3_gadget_dctl_write_safe(dwc, reg);
3761         dwc->test_mode = false;
3762         dwc3_clear_stall_all_ep(dwc);
3763
3764         /* Reset device address to zero */
3765         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
3766         reg &= ~(DWC3_DCFG_DEVADDR_MASK);
3767         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
3768 }
3769
3770 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
3771 {
3772         struct dwc3_ep          *dep;
3773         int                     ret;
3774         u32                     reg;
3775         u8                      lanes = 1;
3776         u8                      speed;
3777
3778         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
3779         speed = reg & DWC3_DSTS_CONNECTSPD;
3780         dwc->speed = speed;
3781
3782         if (DWC3_IP_IS(DWC32))
3783                 lanes = DWC3_DSTS_CONNLANES(reg) + 1;
3784
3785         dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;
3786
3787         /*
3788          * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
3789          * each time on Connect Done.
3790          *
3791          * Currently we always use the reset value. If any platform
3792          * wants to set this to a different value, we need to add a
3793          * setting and update GCTL.RAMCLKSEL here.
3794          */
3795
3796         switch (speed) {
3797         case DWC3_DSTS_SUPERSPEED_PLUS:
3798                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
3799                 dwc->gadget->ep0->maxpacket = 512;
3800                 dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
3801
3802                 if (lanes > 1)
3803                         dwc->gadget->ssp_rate = USB_SSP_GEN_2x2;
3804                 else
3805                         dwc->gadget->ssp_rate = USB_SSP_GEN_2x1;
3806                 break;
3807         case DWC3_DSTS_SUPERSPEED:
3808                 /*
3809                  * WORKAROUND: DWC3 revisions <1.90a have an issue which
3810                  * would cause a missing USB3 Reset event.
3811                  *
3812                  * In such situations, we should force a USB3 Reset
3813                  * event by calling our dwc3_gadget_reset_interrupt()
3814                  * routine.
3815                  *
3816                  * Refers to:
3817                  *
3818                  * STAR#9000483510: RTL: SS : USB3 reset event may
3819                  * not be generated always when the link enters poll
3820                  */
3821                 if (DWC3_VER_IS_PRIOR(DWC3, 190A))
3822                         dwc3_gadget_reset_interrupt(dwc);
3823
3824                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
3825                 dwc->gadget->ep0->maxpacket = 512;
3826                 dwc->gadget->speed = USB_SPEED_SUPER;
3827
3828                 if (lanes > 1) {
3829                         dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
3830                         dwc->gadget->ssp_rate = USB_SSP_GEN_1x2;
3831                 }
3832                 break;
3833         case DWC3_DSTS_HIGHSPEED:
3834                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
3835                 dwc->gadget->ep0->maxpacket = 64;
3836                 dwc->gadget->speed = USB_SPEED_HIGH;
3837                 break;
3838         case DWC3_DSTS_FULLSPEED:
3839                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
3840                 dwc->gadget->ep0->maxpacket = 64;
3841                 dwc->gadget->speed = USB_SPEED_FULL;
3842                 break;
3843         }
3844
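        /* Physical endpoints 0 and 1 both back usb ep0; mirror the new maxpacket on the IN half */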
3845         dwc->eps[1]->endpoint.maxpacket = dwc->gadget->ep0->maxpacket;
3846
3847         /* Enable USB2 LPM Capability */
3848
3849         if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
3850             !dwc->usb2_gadget_lpm_disable &&
3851             (speed != DWC3_DSTS_SUPERSPEED) &&
3852             (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
3853                 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
3854                 reg |= DWC3_DCFG_LPM_CAP;
3855                 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
3856
3857                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3858                 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
3859
3860                 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold |
3861                                             (dwc->is_utmi_l1_suspend << 4));
3862
3863                 /*
3864                  * On dwc3 revisions >= 2.40a, when the LPM Erratum is enabled
3865                  * and DCFG.LPMCap is set, the core responds with an ACK if the
3866                  * BESL value in the LPM token is less than or equal to the LPM
3867                  * NYET threshold.
3868                  */
3869                 WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
3870                                 "LPM Erratum not available on dwc3 revisions < 2.40a\n");
3871
3872                 if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
3873                         reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
3874
3875                 dwc3_gadget_dctl_write_safe(dwc, reg);
3876         } else {
3877                 if (dwc->usb2_gadget_lpm_disable) {
3878                         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
3879                         reg &= ~DWC3_DCFG_LPM_CAP;
3880                         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
3881                 }
3882
3883                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3884                 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
3885                 dwc3_gadget_dctl_write_safe(dwc, reg);
3886         }
3887
3888         dep = dwc->eps[0];
3889         ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
3890         if (ret) {
3891                 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
3892                 return;
3893         }
3894
3895         dep = dwc->eps[1];
3896         ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
3897         if (ret) {
3898                 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
3899                 return;
3900         }
3901
3902         /*
3903          * Configure PHY via GUSB3PIPECTLn if required.
3904          *
3905          * Update GTXFIFOSIZn
3906          *
3907          * In both cases reset values should be sufficient.
3908          */
3909 }
3910
3911 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
3912 {
3913         /*
3914          * TODO take core out of low power mode when that's
3915          * implemented.
3916          */
3917
3918         if (dwc->async_callbacks && dwc->gadget_driver->resume) {
3919                 spin_unlock(&dwc->lock);
3920                 dwc->gadget_driver->resume(dwc->gadget);
3921                 spin_lock(&dwc->lock);
3922         }
3923 }
3924
3925 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
3926                 unsigned int evtinfo)
3927 {
3928         enum dwc3_link_state    next = evtinfo & DWC3_LINK_STATE_MASK;
3929         unsigned int            pwropt;
3930
3931          * WORKAROUND: DWC3 < 2.50a has an issue when configured without
3932          * Hibernation mode enabled which would show up when the device
3933          * detects a host-initiated U3 exit.
3934          *
3935          * In that case, the device will generate a Link State Change Interrupt
3936          * In that case, device will generate a Link State Change Interrupt
3937          * from U3 to RESUME which is only necessary if Hibernation is
3938          * configured in.
3939          *
3940          * There are no functional changes due to such spurious event and we
3941          * just need to ignore it.
3942          *
3943          * Refers to:
3944          *
3945          * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
3946          * operational mode
3947          */
3948         pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
3949         if (DWC3_VER_IS_PRIOR(DWC3, 250A) &&
3950                         (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
3951                 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
3952                                 (next == DWC3_LINK_STATE_RESUME)) {
3953                         return;
3954                 }
3955         }
3956
3957         /*
3958          * WORKAROUND: DWC3 revisions <1.83a have an issue where, depending
3959          * on the link partner, the USB session might do multiple entries into
3960          * and exits from low power states before a transfer takes place.
3961          *
3962          * Due to this problem, we might experience lower throughput. The
3963          * suggested workaround is to disable DCTL[12:9] bits if we're
3964          * transitioning from U1/U2 to U0 and enable those bits again
3965          * after a transfer completes and there are no pending transfers
3966          * on any of the enabled endpoints.
3967          *
3968          * This is the first half of that workaround.
3969          *
3970          * Refers to:
3971          *
3972          * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
3973          * core send LGO_Ux entering U0
3974          */
3975         if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
3976                 if (next == DWC3_LINK_STATE_U0) {
3977                         u32     u1u2;
3978                         u32     reg;
3979
3980                         switch (dwc->link_state) {
3981                         case DWC3_LINK_STATE_U1:
3982                         case DWC3_LINK_STATE_U2:
3983                                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3984                                 u1u2 = reg & (DWC3_DCTL_INITU2ENA
3985                                                 | DWC3_DCTL_ACCEPTU2ENA
3986                                                 | DWC3_DCTL_INITU1ENA
3987                                                 | DWC3_DCTL_ACCEPTU1ENA);
3988
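                                /*
                                 * Remember which U1/U2 enable bits were set so
                                 * the second half of this workaround (see
                                 * dwc3_gadget_endpoint_trbs_complete()) can
                                 * restore them once all transfers complete.
                                 */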
3989                                 if (!dwc->u1u2)
3990                                         dwc->u1u2 = reg & u1u2;
3991
3992                                 reg &= ~u1u2;
3993
3994                                 dwc3_gadget_dctl_write_safe(dwc, reg);
3995                                 break;
3996                         default:
3997                                 /* do nothing */
3998                                 break;
3999                         }
4000                 }
4001         }
4002
4003         switch (next) {
4004         case DWC3_LINK_STATE_U1:
4005                 if (dwc->speed == USB_SPEED_SUPER)
4006                         dwc3_suspend_gadget(dwc);
4007                 break;
4008         case DWC3_LINK_STATE_U2:
4009         case DWC3_LINK_STATE_U3:
4010                 dwc3_suspend_gadget(dwc);
4011                 break;
4012         case DWC3_LINK_STATE_RESUME:
4013                 dwc3_resume_gadget(dwc);
4014                 break;
4015         default:
4016                 /* do nothing */
4017                 break;
4018         }
4019
4020         dwc->link_state = next;
4021 }
4022
4023 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
4024                                           unsigned int evtinfo)
4025 {
4026         enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
4027
4028         if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
4029                 dwc3_suspend_gadget(dwc);
4030
4031         dwc->link_state = next;
4032 }
4033
4034 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
4035                 unsigned int evtinfo)
4036 {
4037         unsigned int is_ss = evtinfo & BIT(4);
4038
4039         /*
4040          * WORKAROUND: DWC3 revision 2.20a with hibernation support
4041          * has a known issue which can cause USB CV TD.9.23 to fail
4042          * randomly.
4043          *
4044          * Because of this issue, core could generate bogus hibernation
4045          * events which SW needs to ignore.
4046          *
4047          * Refers to:
4048          *
4049          * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
4050          * Device Fallback from SuperSpeed
4051          */
4052         if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
4053                 return;
4054
4055         /* enter hibernation here */
4056 }
4057
4058 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
4059                 const struct dwc3_event_devt *event)
4060 {
4061         switch (event->type) {
4062         case DWC3_DEVICE_EVENT_DISCONNECT:
4063                 dwc3_gadget_disconnect_interrupt(dwc);
4064                 break;
4065         case DWC3_DEVICE_EVENT_RESET:
4066                 dwc3_gadget_reset_interrupt(dwc);
4067                 break;
4068         case DWC3_DEVICE_EVENT_CONNECT_DONE:
4069                 dwc3_gadget_conndone_interrupt(dwc);
4070                 break;
4071         case DWC3_DEVICE_EVENT_WAKEUP:
4072                 dwc3_gadget_wakeup_interrupt(dwc);
4073                 break;
4074         case DWC3_DEVICE_EVENT_HIBER_REQ:
4075                 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
4076                                         "unexpected hibernation event\n"))
4077                         break;
4078
4079                 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
4080                 break;
4081         case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
4082                 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
4083                 break;
4084         case DWC3_DEVICE_EVENT_SUSPEND:
4085                 /* This became the suspend event for version 2.30a and above */
4086                 if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
4087                         /*
4088                          * Ignore suspend event until the gadget enters into
4089                          * USB_STATE_CONFIGURED state.
4090                          */
4091                         if (dwc->gadget->state >= USB_STATE_CONFIGURED)
4092                                 dwc3_gadget_suspend_interrupt(dwc,
4093                                                 event->event_info);
4094                 }
4095                 break;
4096         case DWC3_DEVICE_EVENT_SOF:
4097         case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
4098         case DWC3_DEVICE_EVENT_CMD_CMPL:
4099         case DWC3_DEVICE_EVENT_OVERFLOW:
4100                 break;
4101         default:
4102                 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
4103         }
4104 }
4105
4106 static void dwc3_process_event_entry(struct dwc3 *dwc,
4107                 const union dwc3_event *event)
4108 {
4109         trace_dwc3_event(event->raw, dwc);
4110
4111         if (!event->type.is_devspec)
4112                 dwc3_endpoint_interrupt(dwc, &event->depevt);
4113         else if (event->type.type == DWC3_EVENT_TYPE_DEV)
4114                 dwc3_gadget_interrupt(dwc, &event->devt);
4115         else
4116                 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
4117 }
4118
4119 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
4120 {
4121         struct dwc3 *dwc = evt->dwc;
4122         irqreturn_t ret = IRQ_NONE;
4123         int left;
4124         u32 reg;
4125
4126         left = evt->count;
4127
4128         if (!(evt->flags & DWC3_EVENT_PENDING))
4129                 return IRQ_NONE;
4130
4131         while (left > 0) {
4132                 union dwc3_event event;
4133
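                /*
                 * Consume events from the driver's cached copy rather than the
                 * DMA buffer, so the hardware can keep writing new events while
                 * these are processed.
                 */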
4134                 event.raw = *(u32 *) (evt->cache + evt->lpos);
4135
4136                 dwc3_process_event_entry(dwc, &event);
4137
4138                 /*
4139                  * FIXME we wrap around correctly to the next entry as
4140                  * almost all entries are 4 bytes in size. There is one
4141                  * entry which has 12 bytes, which is a regular entry
4142                  * followed by 8 bytes of data. ATM I don't know how
4143                  * things are organized if we get next to a boundary,
4144                  * so I'll worry about that once we try to handle
4145                  * it.
4146                  */
4147                 evt->lpos = (evt->lpos + 4) % evt->length;
4148                 left -= 4;
4149         }
4150
4151         evt->count = 0;
4152         ret = IRQ_HANDLED;
4153
4154         /* Unmask interrupt */
4155         reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
4156         reg &= ~DWC3_GEVNTSIZ_INTMASK;
4157         dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
4158
4159         if (dwc->imod_interval) {
4160                 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
4161                 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
4162         }
4163
4164         /* Keep the clearing of DWC3_EVENT_PENDING at the end */
4165         evt->flags &= ~DWC3_EVENT_PENDING;
4166
4167         return ret;
4168 }
4169
4170 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
4171 {
4172         struct dwc3_event_buffer *evt = _evt;
4173         struct dwc3 *dwc = evt->dwc;
4174         unsigned long flags;
4175         irqreturn_t ret = IRQ_NONE;
4176
4177         local_bh_disable();
4178         spin_lock_irqsave(&dwc->lock, flags);
4179         ret = dwc3_process_event_buf(evt);
4180         spin_unlock_irqrestore(&dwc->lock, flags);
4181         local_bh_enable();
4182
4183         return ret;
4184 }
4185
4186 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
4187 {
4188         struct dwc3 *dwc = evt->dwc;
4189         u32 amount;
4190         u32 count;
4191         u32 reg;
4192
4193         if (pm_runtime_suspended(dwc->dev)) {
4194                 pm_runtime_get(dwc->dev);
4195                 disable_irq_nosync(dwc->irq_gadget);
4196                 dwc->pending_events = true;
4197                 return IRQ_HANDLED;
4198         }
4199
4200         /*
4201          * With PCIe legacy interrupts, tests show that the top-half irq
4202          * handler can be called again after HW interrupt deassertion. Check
4203          * that the bottom-half irq event handler has completed before caching
4204          * new events, to prevent losing events.
4205          */
4206         if (evt->flags & DWC3_EVENT_PENDING)
4207                 return IRQ_HANDLED;
4208
4209         count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
4210         count &= DWC3_GEVNTCOUNT_MASK;
4211         if (!count)
4212                 return IRQ_NONE;
4213
4214         evt->count = count;
4215         evt->flags |= DWC3_EVENT_PENDING;
4216
4217         /* Mask interrupt */
4218         reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
4219         reg |= DWC3_GEVNTSIZ_INTMASK;
4220         dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
4221
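        /*
         * The event buffer is a ring: copy up to the end of the buffer first,
         * then wrap around and copy any remainder from the start.
         */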
4222         amount = min(count, evt->length - evt->lpos);
4223         memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
4224
4225         if (amount < count)
4226                 memcpy(evt->cache, evt->buf, count - amount);
4227
4228         dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
4229
4230         return IRQ_WAKE_THREAD;
4231 }
4232
4233 static irqreturn_t dwc3_interrupt(int irq, void *_evt)
4234 {
4235         struct dwc3_event_buffer        *evt = _evt;
4236
4237         return dwc3_check_event_buf(evt);
4238 }
4239
4240 static int dwc3_gadget_get_irq(struct dwc3 *dwc)
4241 {
4242         struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
4243         int irq;
4244
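        /*
         * Prefer the dedicated "peripheral" IRQ, fall back to the legacy
         * "dwc_usb3" name, and finally to the first unnamed IRQ.
         */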
4245         irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
4246         if (irq > 0)
4247                 goto out;
4248
4249         if (irq == -EPROBE_DEFER)
4250                 goto out;
4251
4252         irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
4253         if (irq > 0)
4254                 goto out;
4255
4256         if (irq == -EPROBE_DEFER)
4257                 goto out;
4258
4259         irq = platform_get_irq(dwc3_pdev, 0);
4260         if (irq > 0)
4261                 goto out;
4262
4263         if (!irq)
4264                 irq = -EINVAL;
4265
4266 out:
4267         return irq;
4268 }
4269
4270 static void dwc_gadget_release(struct device *dev)
4271 {
4272         struct usb_gadget *gadget = container_of(dev, struct usb_gadget, dev);
4273
4274         kfree(gadget);
4275 }
4276
4277 /**
4278  * dwc3_gadget_init - initializes gadget related registers
4279  * @dwc: pointer to our controller context structure
4280  *
4281  * Returns 0 on success otherwise negative errno.
4282  */
int dwc3_gadget_init(struct dwc3 *dwc)
{
        int ret;
        int irq;
        struct device *dev;

        irq = dwc3_gadget_get_irq(dwc);
        if (irq < 0) {
                ret = irq;
                goto err0;
        }

        dwc->irq_gadget = irq;

        dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
                                          sizeof(*dwc->ep0_trb) * 2,
                                          &dwc->ep0_trb_addr, GFP_KERNEL);
        if (!dwc->ep0_trb) {
                dev_err(dwc->dev, "failed to allocate ep0 trb\n");
                ret = -ENOMEM;
                goto err0;
        }

        dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL);
        if (!dwc->setup_buf) {
                ret = -ENOMEM;
                goto err1;
        }

        dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
                        &dwc->bounce_addr, GFP_KERNEL);
        if (!dwc->bounce) {
                ret = -ENOMEM;
                goto err2;
        }

        init_completion(&dwc->ep0_in_setup);
        dwc->gadget = kzalloc(sizeof(struct usb_gadget), GFP_KERNEL);
        if (!dwc->gadget) {
                ret = -ENOMEM;
                goto err3;
        }

        usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
        dev                             = &dwc->gadget->dev;
        dev->platform_data              = dwc;
        dwc->gadget->ops                = &dwc3_gadget_ops;
        dwc->gadget->speed              = USB_SPEED_UNKNOWN;
        dwc->gadget->ssp_rate           = USB_SSP_GEN_UNKNOWN;
        dwc->gadget->sg_supported       = true;
        dwc->gadget->name               = "dwc3-gadget";
        dwc->gadget->lpm_capable        = !dwc->usb2_gadget_lpm_disable;

        /*
         * FIXME: We might be setting max_speed to a value lower than SUPER,
         * even though versions of dwc3 earlier than 2.20a have a
         * metastability issue (documented elsewhere in this driver) which
         * means max speed must not be set to anything lower than SUPER.
         *
         * Because gadget.max_speed is only used by composite.c and function
         * drivers (i.e. it is never written into dwc3's registers), we allow
         * this to happen so that we avoid sending a SuperSpeed Capability
         * descriptor together with our BOS descriptor, which could confuse
         * the host into thinking we can handle SuperSpeed.
         *
         * Note that, in fact, we won't even support GetBOS requests when the
         * speed is lower than SuperSpeed, because we don't yet have a way to
         * tell composite.c that we are USB 2.0 + LPM ECN.
         */
        if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
            !dwc->dis_metastability_quirk)
                dev_info(dwc->dev, "changing max_speed on rev %08x\n",
                                dwc->revision);

        dwc->gadget->max_speed          = dwc->maximum_speed;
        dwc->gadget->max_ssp_rate       = dwc->max_ssp_rate;

        /*
         * REVISIT: Here we should clear all pending IRQs to be
         * sure we're starting from a well known location.
         */

        ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
        if (ret)
                goto err4;

        ret = usb_add_gadget(dwc->gadget);
        if (ret) {
                dev_err(dwc->dev, "failed to add gadget\n");
                goto err5;
        }

        if (DWC3_IP_IS(DWC32) && dwc->maximum_speed == USB_SPEED_SUPER_PLUS)
                dwc3_gadget_set_ssp_rate(dwc->gadget, dwc->max_ssp_rate);
        else
                dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);

        return 0;

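        /* Error unwind: release resources in the reverse order of allocation. */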
err5:
        dwc3_gadget_free_endpoints(dwc);
err4:
        usb_put_gadget(dwc->gadget);
        dwc->gadget = NULL;
err3:
        dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
                        dwc->bounce_addr);

err2:
        kfree(dwc->setup_buf);

err1:
        dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
                        dwc->ep0_trb, dwc->ep0_trb_addr);

err0:
        return ret;
}

/* -------------------------------------------------------------------------- */

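/*
 * Tear down everything set up by dwc3_gadget_init(), mirroring its error
 * unwind path: unregister the gadget, free the endpoints, drop the gadget
 * reference and release the DMA and setup buffers.
 */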
void dwc3_gadget_exit(struct dwc3 *dwc)
{
        if (!dwc->gadget)
                return;

        usb_del_gadget(dwc->gadget);
        dwc3_gadget_free_endpoints(dwc);
        usb_put_gadget(dwc->gadget);
        dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
                          dwc->bounce_addr);
        kfree(dwc->setup_buf);
        dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
                          dwc->ep0_trb, dwc->ep0_trb_addr);
}

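/*
 * Suspend handler for the gadget side: when a gadget driver is bound, clear
 * the run/stop bit, report the disconnect to the gadget driver and stop the
 * controller-side gadget setup via __dwc3_gadget_stop().
 */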
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
        if (!dwc->gadget_driver)
                return 0;

        dwc3_gadget_run_stop(dwc, false, false);
        dwc3_disconnect_gadget(dwc);
        __dwc3_gadget_stop(dwc);

        return 0;
}

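/*
 * Resume handler for the gadget side: restart the controller-side gadget
 * setup and set the run/stop bit again, undoing dwc3_gadget_suspend().
 */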
int dwc3_gadget_resume(struct dwc3 *dwc)
{
        int                     ret;

        if (!dwc->gadget_driver)
                return 0;

        ret = __dwc3_gadget_start(dwc);
        if (ret < 0)
                goto err0;

        ret = dwc3_gadget_run_stop(dwc, true, false);
        if (ret < 0)
                goto err1;

        return 0;

err1:
        __dwc3_gadget_stop(dwc);

err0:
        return ret;
}

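/*
 * Replay any events that were flagged as pending while the gadget IRQ was
 * disabled, then re-enable the IRQ.
 */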
void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
{
        if (dwc->pending_events) {
                dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
                dwc->pending_events = false;
                enable_irq(dwc->irq_gadget);
        }
}