drivers/usb/cdns3/gadget.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cadence USBSS DRD Driver - gadget side.
4  *
5  * Copyright (C) 2018-2019 Cadence Design Systems.
6  * Copyright (C) 2017-2018 NXP
7  *
8  * Authors: Pawel Jez <pjez@cadence.com>,
9  *          Pawel Laszczak <pawell@cadence.com>
10  *          Peter Chen <peter.chen@nxp.com>
11  */
12
13 /*
14  * Workaround 1:
15  * In some situations the controller may get a stale data address in a TRB
16  * in the following sequence:
17  * 1. Controller reads a TRB, including the data address
18  * 2. Software updates TRBs, including the data address and Cycle bit
19  * 3. Controller reads the TRB again, which includes the Cycle bit
20  * 4. DMA runs with the stale data address
21  *
22  * To fix this problem, the driver needs to make the first TRB in a TD
23  * invalid. After preparing all TRBs the driver needs to check the position
24  * of DMA and, if DMA points to the first just-added TRB and the doorbell
25  * is 1, then the driver must defer making this TRB valid. This TRB will
26  * be made valid while adding the next TRB, only if DMA is stopped, or at
27  * the TRBERR interrupt.
28  *
29  * The issue has been fixed in the DEV_VER_V3 version of the controller.
30  *
31  * Workaround 2:
32  * The controller has shared on-chip buffers for all incoming packets on
33  * OUT endpoints, including ep0out. It is a FIFO buffer, so packets must be
34  * handled by DMA in the correct order. If the first packet in the buffer
35  * is not handled, then the following packets directed to other endpoints
36  * and functions will be blocked.
37  * Additionally, packets directed to one endpoint can occupy the entire
38  * on-chip buffers. In this case transfers to other endpoints will also be
39  * blocked.
40  *
41  * To resolve this issue, after raising the descriptor missing interrupt
42  * the driver prepares an internal usb_request object and uses it to arm a
43  * DMA transfer.
44  *
45  * The problematic situation was observed when an endpoint had been enabled
46  * but no usb_request was queued. The driver tries to detect such endpoints
47  * and uses this workaround only for them. The driver uses a limited number
48  * of buffers; this number can be set by the macro CDNS3_WA2_NUM_BUFFERS.
49  *
50  * Such a blocking situation was observed with the ACM gadget: the host
51  * sends an OUT data packet but the ACM function is not prepared for it.
52  * The buffer placed in on-chip memory then blocks transfers to other
53  * endpoints.
54  *
55  * The issue has been fixed in the DEV_VER_V2 version of the controller.
56  *
57  */
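
/*
 * A rough sketch of the Workaround 1 flow described above. The helper names
 * used here (dma_points_at(), wa1_set_guard()) are purely illustrative; the
 * real logic lives in cdns3_wa1_update_guard() and cdns3_ep_run_transfer()
 * below:
 *
 *	first_trb->control = ... ^ TRB_CYCLE;	// inverted cycle bit, so DMA
 *						// ignores this TRB for now
 *	// ... prepare the remaining TRBs of the TD ...
 *	if (doorbell_set && dma_points_at(first_trb))
 *		wa1_set_guard(first_trb);	// defer: restored on DMA stop
 *						// or at the TRBERR interrupt
 *	else
 *		first_trb->control ^= TRB_CYCLE; // hand the TD to DMA now
 */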
58
59 #include <dm.h>
60 #include <dm/device_compat.h>
61 #include <dm/devres.h>
62 #include <linux/err.h>
63 #include <linux/usb/gadget.h>
64 #include <linux/compat.h>
65 #include <linux/iopoll.h>
66 #include <linux/dma-mapping.h>
67 #include <linux/bitmap.h>
68 #include <linux/bug.h>
69
70 #include "core.h"
71 #include "gadget-export.h"
72 #include "gadget.h"
73 #include "trace.h"
74 #include "drd.h"
75
76 #define readl_poll_timeout_atomic readl_poll_timeout
77 #define usleep_range(a, b) udelay((b))
78
79 static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
80                                    struct usb_request *request,
81                                    gfp_t gfp_flags);
82
83 /**
84  * cdns3_set_register_bit - set bit in given register.
85  * @ptr: address of device controller register to be read and changed
86  * @mask: bits requested to set
87  */
88 void cdns3_set_register_bit(void __iomem *ptr, u32 mask)
89 {
90         mask = readl(ptr) | mask;
91         writel(mask, ptr);
92 }
93
94 /**
95  * cdns3_ep_addr_to_index - converts an endpoint address to the
96  * index of the endpoint object in the cdns3_device.eps[] container
97  * @ep_addr: endpoint address for which endpoint object is required
98  *
99  */
100 u8 cdns3_ep_addr_to_index(u8 ep_addr)
101 {
102         return (((ep_addr & 0x7F)) + ((ep_addr & USB_DIR_IN) ? 16 : 0));
103 }
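
/*
 * Example of the resulting mapping: OUT endpoints land at indices 0..15 and
 * IN endpoints at 16..31, e.g. ep0out (0x00) -> 0, ep2out (0x02) -> 2,
 * ep0in (0x80) -> 16, ep1in (0x81) -> 17.
 */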
104
105 static int cdns3_get_dma_pos(struct cdns3_device *priv_dev,
106                              struct cdns3_endpoint *priv_ep)
107 {
108         int dma_index;
109
110         dma_index = readl(&priv_dev->regs->ep_traddr) - priv_ep->trb_pool_dma;
111
112         return dma_index / TRB_SIZE;
113 }
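
/*
 * For illustration: ep_traddr holds the DMA address of the TRB currently
 * being processed, so if ep_traddr - trb_pool_dma happens to be
 * 3 * TRB_SIZE, the returned position is 3 (the fourth TRB in the pool).
 */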
114
115 /**
116  * cdns3_next_request - returns next request from list
117  * @list: list containing requests
118  *
119  * Returns request or NULL if no requests in list
120  */
121 struct usb_request *cdns3_next_request(struct list_head *list)
122 {
123         return list_first_entry_or_null(list, struct usb_request, list);
124 }
125
126 /**
127  * cdns3_next_align_buf - returns next buffer from list
128  * @list: list containing buffers
129  *
130  * Returns buffer or NULL if no buffers in list
131  */
132 struct cdns3_aligned_buf *cdns3_next_align_buf(struct list_head *list)
133 {
134         return list_first_entry_or_null(list, struct cdns3_aligned_buf, list);
135 }
136
137 /**
138  * cdns3_next_priv_request - returns next request from list
139  * @list: list containing requests
140  *
141  * Returns request or NULL if no requests in list
142  */
143 struct cdns3_request *cdns3_next_priv_request(struct list_head *list)
144 {
145         return list_first_entry_or_null(list, struct cdns3_request, list);
146 }
147
148 /**
149  * cdns3_select_ep - selects endpoint
150  * @priv_dev:  extended gadget object
151  * @ep: endpoint address
152  */
153 void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep)
154 {
155         if (priv_dev->selected_ep == ep)
156                 return;
157
158         priv_dev->selected_ep = ep;
159         writel(ep, &priv_dev->regs->ep_sel);
160 }
161
162 dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep,
163                                  struct cdns3_trb *trb)
164 {
165         u32 offset = (char *)trb - (char *)priv_ep->trb_pool;
166
167         return priv_ep->trb_pool_dma + offset;
168 }
169
170 int cdns3_ring_size(struct cdns3_endpoint *priv_ep)
171 {
172         switch (priv_ep->type) {
173         case USB_ENDPOINT_XFER_ISOC:
174                 return TRB_ISO_RING_SIZE;
175         case USB_ENDPOINT_XFER_CONTROL:
176                 return TRB_CTRL_RING_SIZE;
177         default:
178                 return TRB_RING_SIZE;
179         }
180 }
181
182 /**
183  * cdns3_allocate_trb_pool - Allocates TRB pool for selected endpoint
184  * @priv_ep:  endpoint object
185  *
186  * Function will return 0 on success or -ENOMEM on allocation error
187  */
188 int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
189 {
190         int ring_size = cdns3_ring_size(priv_ep);
191         struct cdns3_trb *link_trb;
192
193         if (!priv_ep->trb_pool) {
194                 priv_ep->trb_pool =
195                 dma_alloc_coherent(ring_size,
196                                    (unsigned long *)&priv_ep->trb_pool_dma);
197                 if (!priv_ep->trb_pool)
198                         return -ENOMEM;
199         } else {
200                 memset(priv_ep->trb_pool, 0, ring_size);
201         }
202
203         if (!priv_ep->num)
204                 return 0;
205
206         priv_ep->num_trbs = ring_size / TRB_SIZE;
207         /* Initialize the last TRB as Link TRB. */
208         link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1));
209         link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma);
210         link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE;
211
212         return 0;
213 }
214
215 static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep)
216 {
217         if (priv_ep->trb_pool) {
218                 dma_free_coherent(priv_ep->trb_pool);
219                 priv_ep->trb_pool = NULL;
220         }
221 }
222
223 /**
224  * cdns3_ep_stall_flush - Stalls and flushes selected endpoint
225  * @priv_ep: endpoint object
226  *
227  * Endpoint must be selected before calling this function
228  */
229 static void cdns3_ep_stall_flush(struct cdns3_endpoint *priv_ep)
230 {
231         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
232         int val;
233
234         trace_cdns3_halt(priv_ep, 1, 1);
235
236         writel(EP_CMD_DFLUSH | EP_CMD_ERDY | EP_CMD_SSTALL,
237                &priv_dev->regs->ep_cmd);
238
239         /* wait for DFLUSH cleared */
240         readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
241                                   !(val & EP_CMD_DFLUSH), 1000);
242         priv_ep->flags |= EP_STALLED;
243         priv_ep->flags &= ~EP_STALL_PENDING;
244 }
245
246 /**
247  * cdns3_hw_reset_eps_config - reset endpoints configuration kept by controller.
248  * @priv_dev: extended gadget object
249  */
250 void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev)
251 {
252         writel(USB_CONF_CFGRST, &priv_dev->regs->usb_conf);
253
254         cdns3_allow_enable_l1(priv_dev, 0);
255         priv_dev->hw_configured_flag = 0;
256         priv_dev->onchip_used_size = 0;
257         priv_dev->out_mem_is_allocated = 0;
258         priv_dev->wait_for_setup = 0;
259 }
260
261 /**
262  * cdns3_ep_inc_trb - increment a trb index.
263  * @index: Pointer to the TRB index to increment.
264  * @cs: Cycle state
265  * @trb_in_seg: number of TRBs in segment
266  *
267  * The index should never point to the link TRB. After incrementing,
268  * if it points to the link TRB, wrap around to the beginning and toggle
269  * the cycle state bit.
270  * The link TRB is always at the last TRB entry.
271  */
272 static void cdns3_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
273 {
274         (*index)++;
275         if (*index == (trb_in_seg - 1)) {
276                 *index = 0;
277                 *cs ^=  1;
278         }
279 }
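
/*
 * Example with hypothetical values: for trb_in_seg == 8 the usable slots
 * are 0..6 and slot 7 holds the link TRB. Incrementing index 5 gives 6;
 * incrementing 6 would land on the link TRB slot (7), so the index wraps
 * to 0 and the cycle state *cs is toggled instead.
 */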
280
281 /**
282  * cdns3_ep_inc_enq - increment endpoint's enqueue pointer
283  * @priv_ep: The endpoint whose enqueue pointer we're incrementing
284  */
285 static void cdns3_ep_inc_enq(struct cdns3_endpoint *priv_ep)
286 {
287         priv_ep->free_trbs--;
288         cdns3_ep_inc_trb(&priv_ep->enqueue, &priv_ep->pcs, priv_ep->num_trbs);
289 }
290
291 /**
292  * cdns3_ep_inc_deq - increment endpoint's dequeue pointer
293  * @priv_ep: The endpoint whose dequeue pointer we're incrementing
294  */
295 static void cdns3_ep_inc_deq(struct cdns3_endpoint *priv_ep)
296 {
297         priv_ep->free_trbs++;
298         cdns3_ep_inc_trb(&priv_ep->dequeue, &priv_ep->ccs, priv_ep->num_trbs);
299 }
300
301 void cdns3_move_deq_to_next_trb(struct cdns3_request *priv_req)
302 {
303         struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
304         int current_trb = priv_req->start_trb;
305
306         while (current_trb != priv_req->end_trb) {
307                 cdns3_ep_inc_deq(priv_ep);
308                 current_trb = priv_ep->dequeue;
309         }
310
311         cdns3_ep_inc_deq(priv_ep);
312 }
313
314 /**
315  * cdns3_allow_enable_l1 - enable/disable permits to transition to L1.
316  * @priv_dev: Extended gadget object
317  * @enable: Enable/disable permit to transition to L1.
318  *
319  * If bit USB_CONF_L1EN is set and the device receives an Extended Token
320  * packet, then the controller answers with an ACK handshake.
321  * If bit USB_CONF_L1DS is set and the device receives an Extended Token
322  * packet, then the controller answers with a NYET handshake.
323  */
324 void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable)
325 {
326         if (enable)
327                 writel(USB_CONF_L1EN, &priv_dev->regs->usb_conf);
328         else
329                 writel(USB_CONF_L1DS, &priv_dev->regs->usb_conf);
330 }
331
332 enum usb_device_speed cdns3_get_speed(struct cdns3_device *priv_dev)
333 {
334         u32 reg;
335
336         reg = readl(&priv_dev->regs->usb_sts);
337
338         if (DEV_SUPERSPEED(reg))
339                 return USB_SPEED_SUPER;
340         else if (DEV_HIGHSPEED(reg))
341                 return USB_SPEED_HIGH;
342         else if (DEV_FULLSPEED(reg))
343                 return USB_SPEED_FULL;
344         else if (DEV_LOWSPEED(reg))
345                 return USB_SPEED_LOW;
346         return USB_SPEED_UNKNOWN;
347 }
348
349 /**
350  * cdns3_start_all_request - add to the ring all requests not started yet
351  * @priv_dev: Extended gadget object
352  * @priv_ep: The endpoint for which requests will be started.
353  *
354  * Returns -ENOMEM if the transfer ring does not have enough TRBs to start
355  *         all requests.
356  */
357 static int cdns3_start_all_request(struct cdns3_device *priv_dev,
358                                    struct cdns3_endpoint *priv_ep)
359 {
360         struct usb_request *request;
361         int ret = 0;
362
363         while (!list_empty(&priv_ep->deferred_req_list)) {
364                 request = cdns3_next_request(&priv_ep->deferred_req_list);
365
366                 ret = cdns3_ep_run_transfer(priv_ep, request);
367                 if (ret)
368                         return ret;
369
370                 list_del(&request->list);
371                 list_add_tail(&request->list,
372                               &priv_ep->pending_req_list);
373         }
374
375         priv_ep->flags &= ~EP_RING_FULL;
376         return ret;
377 }
378
379 /*
380  * WA2: Set flag for all non-ISOC OUT endpoints. If this flag is set,
381  * the driver tries to detect whether the endpoint needs an additional
382  * internal buffer for unblocking the on-chip FIFO buffer. This flag will
383  * be cleared if DMA is armed before the first DESCMISS interrupt.
384  */
385 #define cdns3_wa2_enable_detection(priv_dev, ep_priv, reg) do { \
386         if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \
387                 priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \
388                 (reg) |= EP_STS_EN_DESCMISEN; \
389         } } while (0)
390
391 /**
392  * cdns3_wa2_descmiss_copy_data - copy data from internal requests to the
393  * request queued by the class driver.
394  * @priv_ep: extended endpoint object
395  * @request: request object
396  */
397 static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep,
398                                          struct usb_request *request)
399 {
400         struct usb_request *descmiss_req;
401         struct cdns3_request *descmiss_priv_req;
402
403         while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
404                 int chunk_end;
405                 int length;
406
407                 descmiss_priv_req =
408                         cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
409                 descmiss_req = &descmiss_priv_req->request;
410
411                 /* driver can't touch pending request */
412                 if (descmiss_priv_req->flags & REQUEST_PENDING)
413                         break;
414
415                 chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH;
416                 length = request->actual + descmiss_req->actual;
417
418                 request->status = descmiss_req->status;
419
420                 if (length <= request->length) {
421                         memcpy(&((u8 *)request->buf)[request->actual],
422                                descmiss_req->buf,
423                                descmiss_req->actual);
424                         request->actual = length;
425                 } else {
426                         /* It should never occur */
427                         request->status = -ENOMEM;
428                 }
429
430                 list_del_init(&descmiss_priv_req->list);
431
432                 kfree(descmiss_req->buf);
433                 cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req);
434                 --priv_ep->wa2_counter;
435
436                 if (!chunk_end)
437                         break;
438         }
439 }
440
441 struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev,
442                                               struct cdns3_endpoint *priv_ep,
443                                               struct cdns3_request *priv_req)
444 {
445         if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN &&
446             priv_req->flags & REQUEST_INTERNAL) {
447                 struct usb_request *req;
448
449                 req = cdns3_next_request(&priv_ep->deferred_req_list);
450
451                 priv_ep->descmis_req = NULL;
452
453                 if (!req)
454                         return NULL;
455
456                 cdns3_wa2_descmiss_copy_data(priv_ep, req);
457                 if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) &&
458                     req->length != req->actual) {
459                         /* wait for next part of transfer */
460                         return NULL;
461                 }
462
463                 if (req->status == -EINPROGRESS)
464                         req->status = 0;
465
466                 list_del_init(&req->list);
467                 cdns3_start_all_request(priv_dev, priv_ep);
468                 return req;
469         }
470
471         return &priv_req->request;
472 }
473
474 int cdns3_wa2_gadget_ep_queue(struct cdns3_device *priv_dev,
475                               struct cdns3_endpoint *priv_ep,
476                               struct cdns3_request *priv_req)
477 {
478         int deferred = 0;
479
480         /*
481          * If a transfer was queued before DESCMISS appeared, then we
482          * can disable handling of the DESCMISS interrupt. The driver
483          * assumes that it can disable special treatment for this endpoint.
484          */
485         if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
486                 u32 reg;
487
488                 cdns3_select_ep(priv_dev, priv_ep->num | priv_ep->dir);
489                 priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
490                 reg = readl(&priv_dev->regs->ep_sts_en);
491                 reg &= ~EP_STS_EN_DESCMISEN;
492                 trace_cdns3_wa2(priv_ep, "workaround disabled\n");
493                 writel(reg, &priv_dev->regs->ep_sts_en);
494         }
495
496         if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
497                 u8 pending_empty = list_empty(&priv_ep->pending_req_list);
498                 u8 descmiss_empty = list_empty(&priv_ep->wa2_descmiss_req_list);
499
500                 /*
501                  *  DESCMISS transfer has been finished, so data will be
502                  *  directly copied from internally allocated usb_request
503                  *  objects.
504                  */
505                 if (pending_empty && !descmiss_empty &&
506                     !(priv_req->flags & REQUEST_INTERNAL)) {
507                         cdns3_wa2_descmiss_copy_data(priv_ep,
508                                                      &priv_req->request);
509
510                         trace_cdns3_wa2(priv_ep, "get internal stored data");
511
512                         list_add_tail(&priv_req->request.list,
513                                       &priv_ep->pending_req_list);
514                         cdns3_gadget_giveback(priv_ep, priv_req,
515                                               priv_req->request.status);
516
517                         /*
518                          * The driver intentionally returns a positive value
519                          * here to inform the caller that the transfer has
520                          * already been finished.
521                          */
522                         return EINPROGRESS;
523                 }
524
525                 /*
526                  * The driver will wait for the DESCMISS transfer to finish
527                  * before starting a new, non-DESCMISS transfer.
528                  */
529                 if (!pending_empty && !descmiss_empty) {
530                         trace_cdns3_wa2(priv_ep, "wait for pending transfer\n");
531                         deferred = 1;
532                 }
533
534                 if (priv_req->flags & REQUEST_INTERNAL)
535                         list_add_tail(&priv_req->list,
536                                       &priv_ep->wa2_descmiss_req_list);
537         }
538
539         return deferred;
540 }
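
/*
 * Sketch of how a caller may interpret the return value of
 * cdns3_wa2_gadget_ep_queue() (names below are illustrative, not the
 * driver's exact code):
 *
 *	ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep, priv_req);
 *	if (ret == EINPROGRESS)
 *		return 0;		// already completed from WA2 buffers
 *	else if (ret > 0)
 *		defer_request();	// wait for pending DESCMISS transfer
 *	else
 *		start_transfer();	// normal path
 */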
541
542 static void cdns3_wa2_remove_old_request(struct cdns3_endpoint *priv_ep)
543 {
544         struct cdns3_request *priv_req;
545
546         while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
547                 u8 chain;
548
549                 priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
550                 chain = !!(priv_req->flags & REQUEST_INTERNAL_CH);
551
552                 trace_cdns3_wa2(priv_ep, "removes eldest request");
553
554                 kfree(priv_req->request.buf);
555                 cdns3_gadget_ep_free_request(&priv_ep->endpoint,
556                                              &priv_req->request);
557                 list_del_init(&priv_req->list);
558                 --priv_ep->wa2_counter;
559
560                 if (!chain)
561                         break;
562         }
563 }
564
565 /**
566  * cdns3_wa2_descmissing_packet - handles descriptor missing event.
567  * @priv_ep: extended endpoint object
568  *
569  * This function is used only for WA2. For more information see the
570  * Workaround 2 description.
571  */
572 static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep)
573 {
574         struct cdns3_request *priv_req;
575         struct usb_request *request;
576
577         if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) {
578                 priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET;
579                 priv_ep->flags |= EP_QUIRK_EXTRA_BUF_EN;
580         }
581
582         trace_cdns3_wa2(priv_ep, "Description Missing detected\n");
583
584         if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS)
585                 cdns3_wa2_remove_old_request(priv_ep);
586
587         request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint,
588                                                 GFP_ATOMIC);
589         if (!request)
590                 goto err;
591
592         priv_req = to_cdns3_request(request);
593         priv_req->flags |= REQUEST_INTERNAL;
594
595         /* If this field is still assigned, it indicates that the transfer
596          * related to this request has not been finished yet. In this case
597          * the driver simply allocates the next request and sets the
598          * REQUEST_INTERNAL_CH flag on the previous one, indicating that the
599          * current request is part of the previous one.
600          */
601         if (priv_ep->descmis_req)
602                 priv_ep->descmis_req->flags |= REQUEST_INTERNAL_CH;
603
604         priv_req->request.buf = kzalloc(CDNS3_DESCMIS_BUF_SIZE,
605                                         GFP_ATOMIC);
606         priv_ep->wa2_counter++;
607
608         if (!priv_req->request.buf) {
609                 cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
610                 goto err;
611         }
612
613         priv_req->request.length = CDNS3_DESCMIS_BUF_SIZE;
614         priv_ep->descmis_req = priv_req;
615
616         __cdns3_gadget_ep_queue(&priv_ep->endpoint,
617                                 &priv_ep->descmis_req->request,
618                                 GFP_ATOMIC);
619
620         return;
621
622 err:
623         dev_err(priv_ep->cdns3_dev->dev,
624                 "Failed: No sufficient memory for DESCMIS\n");
625 }
626
627 /**
628  * cdns3_gadget_giveback - call struct usb_request's ->complete callback
629  * @priv_ep: The endpoint to which the request belongs
630  * @priv_req: The request we're giving back
631  * @status: completion code for the request
632  *
633  * Must be called with controller's lock held and interrupts disabled. This
634  * function will unmap @req and call its ->complete() callback to notify upper
635  * layers that it has completed.
636  */
637 void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep,
638                            struct cdns3_request *priv_req,
639                            int status)
640 {
641         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
642         struct usb_request *request = &priv_req->request;
643
644         list_del_init(&request->list);
645
646         if (request->status == -EINPROGRESS)
647                 request->status = status;
648
649         usb_gadget_unmap_request(&priv_dev->gadget, request,
650                                  priv_ep->dir);
651
652         if ((priv_req->flags & REQUEST_UNALIGNED) &&
653             priv_ep->dir == USB_DIR_OUT && !request->status)
654                 memcpy(request->buf, priv_req->aligned_buf->buf,
655                        request->length);
656
657         priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED);
658         trace_cdns3_gadget_giveback(priv_req);
659
660         if (priv_dev->dev_ver < DEV_VER_V2) {
661                 request = cdns3_wa2_gadget_giveback(priv_dev, priv_ep,
662                                                     priv_req);
663                 if (!request)
664                         return;
665         }
666
667         if (request->complete) {
668                 spin_unlock(&priv_dev->lock);
669                 usb_gadget_giveback_request(&priv_ep->endpoint,
670                                             request);
671                 spin_lock(&priv_dev->lock);
672         }
673
674         if (request->buf == priv_dev->zlp_buf)
675                 cdns3_gadget_ep_free_request(&priv_ep->endpoint, request);
676 }
677
678 void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep)
679 {
680         /* Workaround for stale data address in TRB */
681         if (priv_ep->wa1_set) {
682                 trace_cdns3_wa1(priv_ep, "restore cycle bit");
683
684                 priv_ep->wa1_set = 0;
685                 priv_ep->wa1_trb_index = 0xFFFF;
686                 if (priv_ep->wa1_cycle_bit) {
687                         priv_ep->wa1_trb->control =
688                                 priv_ep->wa1_trb->control | 0x1;
689                 } else {
690                         priv_ep->wa1_trb->control =
691                                 priv_ep->wa1_trb->control & ~0x1;
692                 }
693         }
694 }
695
696 static void cdns3_free_aligned_request_buf(struct cdns3_device *priv_dev)
697 {
698         struct cdns3_aligned_buf *buf, *tmp;
699         unsigned long flags;
700
701         spin_lock_irqsave(&priv_dev->lock, flags);
702
703         list_for_each_entry_safe(buf, tmp, &priv_dev->aligned_buf_list, list) {
704                 if (!buf->in_use) {
705                         list_del(&buf->list);
706
707                         /*
708                          * Re-enable interrupts to free DMA capable memory.
709                          * Driver can't free this memory with disabled
710                          * interrupts.
711                          */
712                         spin_unlock_irqrestore(&priv_dev->lock, flags);
713                         dma_free_coherent(buf->buf);
714                         kfree(buf);
715                         spin_lock_irqsave(&priv_dev->lock, flags);
716                 }
717         }
718
719         spin_unlock_irqrestore(&priv_dev->lock, flags);
720 }
721
722 static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req)
723 {
724         struct cdns3_endpoint *priv_ep = priv_req->priv_ep;
725         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
726         struct cdns3_aligned_buf *buf;
727
728         /* check if buffer is aligned to 8. */
729         if (!((uintptr_t)priv_req->request.buf & 0x7))
730                 return 0;
731
732         buf = priv_req->aligned_buf;
733
734         if (!buf || priv_req->request.length > buf->size) {
735                 buf = kzalloc(sizeof(*buf), GFP_ATOMIC);
736                 if (!buf)
737                         return -ENOMEM;
738
739                 buf->size = priv_req->request.length;
740
741                 buf->buf = dma_alloc_coherent(buf->size,
742                                               (unsigned long *)&buf->dma);
743                 if (!buf->buf) {
744                         kfree(buf);
745                         return -ENOMEM;
746                 }
747
748                 if (priv_req->aligned_buf) {
749                         trace_cdns3_free_aligned_request(priv_req);
750                         priv_req->aligned_buf->in_use = 0;
751 #ifndef __UBOOT__
752                         queue_work(system_freezable_wq,
753                                    &priv_dev->aligned_buf_wq);
754 #else
755                         cdns3_free_aligned_request_buf(priv_dev);
756 #endif
757                 }
758
759                 buf->in_use = 1;
760                 priv_req->aligned_buf = buf;
761
762                 list_add_tail(&buf->list,
763                               &priv_dev->aligned_buf_list);
764         }
765
766         if (priv_ep->dir == USB_DIR_IN) {
767                 memcpy(buf->buf, priv_req->request.buf,
768                        priv_req->request.length);
769         }
770
771         priv_req->flags |= REQUEST_UNALIGNED;
772         trace_cdns3_prepare_aligned_request(priv_req);
773
774         return 0;
775 }
776
777 static int cdns3_wa1_update_guard(struct cdns3_endpoint *priv_ep,
778                                   struct cdns3_trb *trb)
779 {
780         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
781
782         if (!priv_ep->wa1_set) {
783                 u32 doorbell;
784
785                 doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
786
787                 if (doorbell) {
788                         priv_ep->wa1_cycle_bit = priv_ep->pcs ? TRB_CYCLE : 0;
789                         priv_ep->wa1_set = 1;
790                         priv_ep->wa1_trb = trb;
791                         priv_ep->wa1_trb_index = priv_ep->enqueue;
792                         trace_cdns3_wa1(priv_ep, "set guard");
793                         return 0;
794                 }
795         }
796         return 1;
797 }
798
799 static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev,
800                                              struct cdns3_endpoint *priv_ep)
801 {
802         int dma_index;
803         u32 doorbell;
804
805         doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
806         dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);
807
808         if (!doorbell || dma_index != priv_ep->wa1_trb_index)
809                 cdns3_wa1_restore_cycle_bit(priv_ep);
810 }
811
812 /**
813  * cdns3_ep_run_transfer - start transfer on non-default endpoint hardware
814  * @priv_ep: endpoint object
815  *
816  * Returns zero on success or negative value on failure
817  */
818 int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
819                           struct usb_request *request)
820 {
821         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
822         struct cdns3_request *priv_req;
823         struct cdns3_trb *trb;
824         dma_addr_t trb_dma;
825         u32 togle_pcs = 1;
826         int sg_iter = 0;
827         int num_trb = 1;
828         int address;
829         u32 control;
830         int pcs;
831
832         if (num_trb > priv_ep->free_trbs) {
833                 priv_ep->flags |= EP_RING_FULL;
834                 return -ENOBUFS;
835         }
836
837         priv_req = to_cdns3_request(request);
838         address = priv_ep->endpoint.desc->bEndpointAddress;
839
840         priv_ep->flags |= EP_PENDING_REQUEST;
841
842         /* must allocate buffer aligned to 8 */
843         if (priv_req->flags & REQUEST_UNALIGNED)
844                 trb_dma = priv_req->aligned_buf->dma;
845         else
846                 trb_dma = request->dma;
847
848         trb = priv_ep->trb_pool + priv_ep->enqueue;
849         priv_req->start_trb = priv_ep->enqueue;
850         priv_req->trb = trb;
851
852         cdns3_select_ep(priv_ep->cdns3_dev, address);
853
854         /* prepare ring */
855         if ((priv_ep->enqueue + num_trb)  >= (priv_ep->num_trbs - 1)) {
856                 struct cdns3_trb *link_trb;
857                 int doorbell, dma_index;
858                 u32 ch_bit = 0;
859
860                 doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
861                 dma_index = cdns3_get_dma_pos(priv_dev, priv_ep);
862
863                 /* Driver can't update LINK TRB if it is currently processed. */
864                 if (doorbell && dma_index == priv_ep->num_trbs - 1) {
865                         priv_ep->flags |= EP_DEFERRED_DRDY;
866                         return -ENOBUFS;
867                 }
868
869                 /* updating C bit in Link TRB before starting DMA */
870                 link_trb = priv_ep->trb_pool + (priv_ep->num_trbs - 1);
871                 /*
872                  * For a TR size equal to 2, enabling TRB_CHAIN for epXin
873                  * causes DMA to get stuck at the LINK TRB.
874                  * On the other hand, removing TRB_CHAIN for longer TRs for
875                  * epXout causes DMA to get stuck after handling the LINK TRB.
876                  * To eliminate this strange behavior the driver sets the
877                  * TRB_CHAIN bit only for TR size > 2.
878                  */
879                 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC ||
880                     TRBS_PER_SEGMENT > 2)
881                         ch_bit = TRB_CHAIN;
882
883                 link_trb->control = ((priv_ep->pcs) ? TRB_CYCLE : 0) |
884                                     TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit;
885         }
886
887         if (priv_dev->dev_ver <= DEV_VER_V2)
888                 togle_pcs = cdns3_wa1_update_guard(priv_ep, trb);
889
890         /* set incorrect Cycle Bit for first TRB */
891         control = priv_ep->pcs ? 0 : TRB_CYCLE;
892
893         do {
894                 u32 length;
895                 u16 td_size = 0;
896
897                 /* fill TRB */
898                 control |= TRB_TYPE(TRB_NORMAL);
899                 trb->buffer = TRB_BUFFER(trb_dma);
900
901                 length = request->length;
902
903                 if (likely(priv_dev->dev_ver >= DEV_VER_V2))
904                         td_size = DIV_ROUND_UP(length,
905                                                priv_ep->endpoint.maxpacket);
906
907                 trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) |
908                                         TRB_LEN(length);
909                 if (priv_dev->gadget.speed == USB_SPEED_SUPER)
910                         trb->length |= TRB_TDL_SS_SIZE(td_size);
911                 else
912                         control |= TRB_TDL_HS_SIZE(td_size);
913
914                 pcs = priv_ep->pcs ? TRB_CYCLE : 0;
915
916                 /*
917                  * first TRB should be prepared as the last one to avoid
918                  * processing the transfer too early
919                  */
920                 if (sg_iter != 0)
921                         control |= pcs;
922
923                 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir) {
924                         control |= TRB_IOC | TRB_ISP;
925                 } else {
926                         /* for last element in TD or in SG list */
927                         if (sg_iter == (num_trb - 1) && sg_iter != 0)
928                                 control |= pcs | TRB_IOC | TRB_ISP;
929                 }
930
931                 if (sg_iter)
932                         trb->control = control;
933                 else
934                         priv_req->trb->control = control;
935
936                 control = 0;
937                 ++sg_iter;
938                 priv_req->end_trb = priv_ep->enqueue;
939                 cdns3_ep_inc_enq(priv_ep);
940                 trb = priv_ep->trb_pool + priv_ep->enqueue;
941         } while (sg_iter < num_trb);
942
943         trb = priv_req->trb;
944
945         priv_req->flags |= REQUEST_PENDING;
946
947         if (sg_iter == 1)
948                 trb->control |= TRB_IOC | TRB_ISP;
949
950         /*
951          * Memory barrier - cycle bit must be set before other fields in the TRB.
952          */
953         dmb();
954
955         /* give the TD to the consumer */
956         if (togle_pcs)
957                 trb->control =  trb->control ^ 1;
958
959         if (priv_dev->dev_ver <= DEV_VER_V2)
960                 cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep);
961
962         trace_cdns3_prepare_trb(priv_ep, priv_req->trb);
963
964         /*
965          * Memory barrier - Cycle Bit must be set before trb->length and
966          * trb->buffer fields.
967          */
968         dmb();
969
970         /*
971          * For DMULT mode we can set address to transfer ring only once after
972          * enabling endpoint.
973          */
974         if (priv_ep->flags & EP_UPDATE_EP_TRBADDR) {
975                 /*
976                  * While SW is not ready to handle the OUT transfer, the ISO
977                  * OUT endpoint should be disabled (EP_CFG.ENABLE = 0).
978                  * EP_CFG_ENABLE must be set before updating ep_traddr.
979                  */
980                 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir &&
981                     !(priv_ep->flags & EP_QUIRK_ISO_OUT_EN)) {
982                         priv_ep->flags |= EP_QUIRK_ISO_OUT_EN;
983                         cdns3_set_register_bit(&priv_dev->regs->ep_cfg,
984                                                EP_CFG_ENABLE);
985                 }
986
987                 writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma +
988                                         priv_req->start_trb * TRB_SIZE),
989                                         &priv_dev->regs->ep_traddr);
990
991                 priv_ep->flags &= ~EP_UPDATE_EP_TRBADDR;
992         }
993
994         if (!priv_ep->wa1_set && !(priv_ep->flags & EP_STALLED)) {
995                 trace_cdns3_ring(priv_ep);
996                 /* clearing TRBERR and EP_STS_DESCMIS before setting DRDY */
997                 writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
998                 writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
999                 trace_cdns3_doorbell_epx(priv_ep->name,
1000                                          readl(&priv_dev->regs->ep_traddr));
1001         }
1002
1003         /* WORKAROUND for transition to L0 */
1004         __cdns3_gadget_wakeup(priv_dev);
1005
1006         return 0;
1007 }
1008
1009 void cdns3_set_hw_configuration(struct cdns3_device *priv_dev)
1010 {
1011         struct cdns3_endpoint *priv_ep;
1012         struct usb_ep *ep;
1013         int val;
1014
1015         if (priv_dev->hw_configured_flag)
1016                 return;
1017
1018         writel(USB_CONF_CFGSET, &priv_dev->regs->usb_conf);
1019         writel(EP_CMD_ERDY | EP_CMD_REQ_CMPL, &priv_dev->regs->ep_cmd);
1020
1021         cdns3_set_register_bit(&priv_dev->regs->usb_conf,
1022                                USB_CONF_U1EN | USB_CONF_U2EN);
1023
1024         /* wait until configuration set */
1025         readl_poll_timeout_atomic(&priv_dev->regs->usb_sts, val,
1026                                   val & USB_STS_CFGSTS_MASK, 100);
1027
1028         priv_dev->hw_configured_flag = 1;
1029
1030         list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
1031                 priv_ep = ep_to_cdns3_ep(ep);
1032                 if (priv_ep->flags & EP_ENABLED)
1033                         cdns3_start_all_request(priv_dev, priv_ep);
1034         }
1035 }
1036
1037 /**
1038  * cdns3_request_handled - check whether request has been handled by DMA
1039  *
1040  * @priv_ep: extended endpoint object.
1041  * @priv_req: request object for checking
1042  *
1043  * Endpoint must be selected before invoking this function.
1044  *
1045  * Returns false if request has not been handled by DMA, else returns true.
1046  *
1047  * SR - start ring
1048  * ER - end ring
1049  * DQ = priv_ep->dequeue - dequeue position
1050  * EQ = priv_ep->enqueue - enqueue position
1051  * ST = priv_req->start_trb - index of first TRB in transfer ring
1052  * ET = priv_req->end_trb - index of last TRB in transfer ring
1053  * CI = current_index - index of processed TRB by DMA.
1054  *
1055  * As a first step, the function checks whether the cycle bit for
1056  * priv_req->start_trb is correct.
1057  *
1058  * Some rules:
1059  * 1. priv_ep->dequeue never exceeds current_index.
1060  * 2. priv_ep->enqueue never exceeds priv_ep->dequeue.
1061  * 3. exception: priv_ep->enqueue == priv_ep->dequeue
1062  *    and priv_ep->free_trbs is zero.
1063  *    This case indicates that the TR is full.
1064  *
1065  * Then we can split recognition into two parts:
1066  * Case 1 - priv_ep->dequeue < current_index
1067  *      SR ... EQ ... DQ ... CI ... ER
1068  *      SR ... DQ ... CI ... EQ ... ER
1069  *
1070  *      Request has been handled by DMA if ST and ET are between DQ and CI.
1071  *
1072  * Case 2 - priv_ep->dequeue > current_index
1073  * This situation takes place when CI passes through the LINK TRB at the
1074  * end of the transfer ring.
1075  *      SR ... CI ... EQ ... DQ ... ER
1076  *
1077  *      Request has been handled by DMA if ET is less than CI or
1078  *      ET is greater than or equal to DQ.
1079  */
1080 static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep,
1081                                   struct cdns3_request *priv_req)
1082 {
1083         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1084         struct cdns3_trb *trb = priv_req->trb;
1085         int current_index = 0;
1086         int handled = 0;
1087         int doorbell;
1088
1089         current_index = cdns3_get_dma_pos(priv_dev, priv_ep);
1090         doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY);
1091
1092         trb = &priv_ep->trb_pool[priv_req->start_trb];
1093
1094         if ((trb->control  & TRB_CYCLE) != priv_ep->ccs)
1095                 goto finish;
1096
1097         if (doorbell == 1 && current_index == priv_ep->dequeue)
1098                 goto finish;
1099
1100         /* The corner case for TRBS_PER_SEGMENT equal to 2. */
1101         if (TRBS_PER_SEGMENT == 2 && priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
1102                 handled = 1;
1103                 goto finish;
1104         }
1105
1106         if (priv_ep->enqueue == priv_ep->dequeue &&
1107             priv_ep->free_trbs == 0) {
1108                 handled = 1;
1109         } else if (priv_ep->dequeue < current_index) {
1110                 if ((current_index == (priv_ep->num_trbs - 1)) &&
1111                     !priv_ep->dequeue)
1112                         goto finish;
1113
1114                 if (priv_req->end_trb >= priv_ep->dequeue &&
1115                     priv_req->end_trb < current_index)
1116                         handled = 1;
1117         } else if (priv_ep->dequeue  > current_index) {
1118                 if (priv_req->end_trb  < current_index ||
1119                     priv_req->end_trb >= priv_ep->dequeue)
1120                         handled = 1;
1121         }
1122
1123 finish:
1124         trace_cdns3_request_handled(priv_req, current_index, handled);
1125
1126         return handled;
1127 }
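
/*
 * Worked example of the two cases documented above, with hypothetical
 * indices on an 8-TRB ring: Case 1, DQ = 2 and CI = 5: a request with
 * ET = 4 satisfies DQ <= ET < CI and is reported as handled. Case 2, CI
 * has wrapped past the link TRB so CI = 1 and DQ = 6: a request with
 * ET = 7 (ET >= DQ) is handled, as is one with ET = 0 (ET < CI).
 */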
1128
1129 static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
1130                                      struct cdns3_endpoint *priv_ep)
1131 {
1132         struct cdns3_request *priv_req;
1133         struct usb_request *request;
1134         struct cdns3_trb *trb;
1135
1136         while (!list_empty(&priv_ep->pending_req_list)) {
1137                 request = cdns3_next_request(&priv_ep->pending_req_list);
1138                 priv_req = to_cdns3_request(request);
1139
1140                 /* Re-select endpoint. It could be changed by another CPU
1141                  * while handling usb_gadget_giveback_request.
1142                  */
1143 #ifndef __UBOOT__
1144                 cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
1145 #else
1146                 cdns3_select_ep(priv_dev,
1147                                 priv_ep->endpoint.desc->bEndpointAddress);
1148 #endif
1149
1150                 if (!cdns3_request_handled(priv_ep, priv_req))
1151                         goto prepare_next_td;
1152
1153                 trb = priv_ep->trb_pool + priv_ep->dequeue;
1154                 trace_cdns3_complete_trb(priv_ep, trb);
1155
1156                 if (trb != priv_req->trb)
1157                         dev_warn(priv_dev->dev,
1158                                  "request_trb=0x%p, queue_trb=0x%p\n",
1159                                  priv_req->trb, trb);
1160
1161                 request->actual = TRB_LEN(le32_to_cpu(trb->length));
1162                 cdns3_move_deq_to_next_trb(priv_req);
1163                 cdns3_gadget_giveback(priv_ep, priv_req, 0);
1164
1165                 if (priv_ep->type != USB_ENDPOINT_XFER_ISOC &&
1166                     TRBS_PER_SEGMENT == 2)
1167                         break;
1168         }
1169         priv_ep->flags &= ~EP_PENDING_REQUEST;
1170
1171 prepare_next_td:
1172         if (!(priv_ep->flags & EP_STALLED) &&
1173             !(priv_ep->flags & EP_STALL_PENDING))
1174                 cdns3_start_all_request(priv_dev, priv_ep);
1175 }
1176
1177 void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm)
1178 {
1179         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1180
1181         cdns3_wa1_restore_cycle_bit(priv_ep);
1182
1183         if (rearm) {
1184                 trace_cdns3_ring(priv_ep);
1185
1186                 /* Cycle Bit must be updated before arming DMA. */
1187                 dmb();
1188                 writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
1189
1190                 __cdns3_gadget_wakeup(priv_dev);
1191
1192                 trace_cdns3_doorbell_epx(priv_ep->name,
1193                                          readl(&priv_dev->regs->ep_traddr));
1194         }
1195 }
1196
1197 /**
1198  * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint
1199  * @priv_ep: endpoint object
1200  *
1201  * Returns 0
1202  */
1203 static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
1204 {
1205         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1206         u32 ep_sts_reg;
1207
1208 #ifndef __UBOOT__
1209         cdns3_select_ep(priv_dev, priv_ep->endpoint.address);
1210 #else
1211         cdns3_select_ep(priv_dev, priv_ep->endpoint.desc->bEndpointAddress);
1212 #endif
1213
1214         trace_cdns3_epx_irq(priv_dev, priv_ep);
1215
1216         ep_sts_reg = readl(&priv_dev->regs->ep_sts);
1217         writel(ep_sts_reg, &priv_dev->regs->ep_sts);
1218
1219         if (ep_sts_reg & EP_STS_TRBERR) {
1220                 if (priv_ep->flags & EP_STALL_PENDING &&
1221                     !(ep_sts_reg & EP_STS_DESCMIS &&
1222                     priv_dev->dev_ver < DEV_VER_V2)) {
1223                         cdns3_ep_stall_flush(priv_ep);
1224                 }
1225
1226                 /*
1227                  * For isochronous transfers the driver completes the
1228                  * request on IOC or on TRBERR. IOC appears only when the
1229                  * device receives an OUT data packet. If the host disables
1230                  * the stream or loses some packets, then the only way to
1231                  * finish all queued transfers is to do it on the TRBERR event.
1232                  */
1233                 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC &&
1234                     !priv_ep->wa1_set) {
1235                         if (!priv_ep->dir) {
1236                                 u32 ep_cfg = readl(&priv_dev->regs->ep_cfg);
1237
1238                                 ep_cfg &= ~EP_CFG_ENABLE;
1239                                 writel(ep_cfg, &priv_dev->regs->ep_cfg);
1240                                 priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
1241                         }
1242                         cdns3_transfer_completed(priv_dev, priv_ep);
1243                 } else if (!(priv_ep->flags & EP_STALLED) &&
1244                           !(priv_ep->flags & EP_STALL_PENDING)) {
1245                         if (priv_ep->flags & EP_DEFERRED_DRDY) {
1246                                 priv_ep->flags &= ~EP_DEFERRED_DRDY;
1247                                 cdns3_start_all_request(priv_dev, priv_ep);
1248                         } else {
1249                                 cdns3_rearm_transfer(priv_ep,
1250                                                      priv_ep->wa1_set);
1251                         }
1252                 }
1253         }
1254
1255         if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) {
1256                 if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) {
1257                         if (ep_sts_reg & EP_STS_ISP)
1258                                 priv_ep->flags |= EP_QUIRK_END_TRANSFER;
1259                         else
1260                                 priv_ep->flags &= ~EP_QUIRK_END_TRANSFER;
1261                 }
1262
1263                 cdns3_transfer_completed(priv_dev, priv_ep);
1264         }
1265
1266         /*
1267          * WA2: this condition should only be met when
1268          * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or
1269          * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN.
1270          * In other cases this interrupt will be disabled.
1271          */
1272         if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 &&
1273             !(priv_ep->flags & EP_STALLED))
1274                 cdns3_wa2_descmissing_packet(priv_ep);
1275
1276         return 0;
1277 }
1278
1279 static void cdns3_disconnect_gadget(struct cdns3_device *priv_dev)
1280 {
1281         if (priv_dev->gadget_driver && priv_dev->gadget_driver->disconnect) {
1282                 spin_unlock(&priv_dev->lock);
1283                 priv_dev->gadget_driver->disconnect(&priv_dev->gadget);
1284                 spin_lock(&priv_dev->lock);
1285         }
1286 }
1287
1288 /**
1289  * cdns3_check_usb_interrupt_proceed - Processes interrupt related to device
1290  * @priv_dev: extended gadget object
1291  * @usb_ists: bitmap representation of device's reported interrupts
1292  * (usb_ists register value)
1293  */
1294 static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev,
1295                                               u32 usb_ists)
1296 {
1297         int speed = 0;
1298
1299         trace_cdns3_usb_irq(priv_dev, usb_ists);
1300         if (usb_ists & USB_ISTS_L1ENTI) {
1301                 /*
1302                  * WORKAROUND: CDNS3 controller has issue with hardware resuming
1303                  * from L1. To fix it, if any DMA transfer is pending, the
1304                  * driver must start driving the resume signal immediately.
1305                  */
1306                 if (readl(&priv_dev->regs->drbl))
1307                         __cdns3_gadget_wakeup(priv_dev);
1308         }
1309
1310         /* Connection detected */
1311         if (usb_ists & (USB_ISTS_CON2I | USB_ISTS_CONI)) {
1312                 speed = cdns3_get_speed(priv_dev);
1313                 priv_dev->gadget.speed = speed;
1314                 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_POWERED);
1315                 cdns3_ep0_config(priv_dev);
1316         }
1317
1318         /* Disconnection detected */
1319         if (usb_ists & (USB_ISTS_DIS2I | USB_ISTS_DISI)) {
1320                 cdns3_disconnect_gadget(priv_dev);
1321                 priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
1322                 usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
1323                 cdns3_hw_reset_eps_config(priv_dev);
1324         }
1325
1326         if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) {
1327                 if (priv_dev->gadget_driver &&
1328                     priv_dev->gadget_driver->suspend) {
1329                         spin_unlock(&priv_dev->lock);
1330                         priv_dev->gadget_driver->suspend(&priv_dev->gadget);
1331                         spin_lock(&priv_dev->lock);
1332                 }
1333         }
1334
1335         if (usb_ists & (USB_ISTS_L2EXTI | USB_ISTS_U3EXTI)) {
1336                 if (priv_dev->gadget_driver &&
1337                     priv_dev->gadget_driver->resume) {
1338                         spin_unlock(&priv_dev->lock);
1339                         priv_dev->gadget_driver->resume(&priv_dev->gadget);
1340                         spin_lock(&priv_dev->lock);
1341                 }
1342         }
1343
1344         /* reset*/
1345         if (usb_ists & (USB_ISTS_UWRESI | USB_ISTS_UHRESI | USB_ISTS_U2RESI)) {
1346                 if (priv_dev->gadget_driver) {
1347                         spin_unlock(&priv_dev->lock);
1348                         usb_gadget_udc_reset(&priv_dev->gadget,
1349                                              priv_dev->gadget_driver);
1350                         spin_lock(&priv_dev->lock);
1351
1352                         /*read again to check the actual speed*/
1353                         speed = cdns3_get_speed(priv_dev);
1354                         priv_dev->gadget.speed = speed;
1355                         cdns3_hw_reset_eps_config(priv_dev);
1356                         cdns3_ep0_config(priv_dev);
1357                 }
1358         }
1359 }
1360
1361 /**
1362  * cdns3_device_irq_handler - interrupt handler for device part of controller
1363  *
1364  * @irq: irq number for cdns3 core device
1365  * @data: structure of cdns3
1366  *
1367  * Returns IRQ_HANDLED or IRQ_NONE
1368  */
1369 static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
1370 {
1371         struct cdns3_device *priv_dev;
1372         struct cdns3 *cdns = data;
1373         irqreturn_t ret = IRQ_NONE;
1374         u32 reg;
1375
1376         priv_dev = cdns->gadget_dev;
1377
1378         /* check USB device interrupt */
1379         reg = readl(&priv_dev->regs->usb_ists);
1380         if (reg) {
1381                 /* After masking interrupts the new interrupts won't be
1382                  * reported in usb_ists/ep_ists. In order to not lose some
1383                  * of them the driver disables only the detected interrupts.
1384                  * They will be enabled ASAP after clearing the source of the
1385                  * interrupt. This unusual behavior only applies to the
1386                  * usb_ists register.
1387                  */
1388                 reg = ~reg & readl(&priv_dev->regs->usb_ien);
1389                 /* mask deferred interrupt. */
1390                 writel(reg, &priv_dev->regs->usb_ien);
1391                 ret = IRQ_WAKE_THREAD;
1392         }
1393
1394         /* check endpoint interrupt */
1395         reg = readl(&priv_dev->regs->ep_ists);
1396         if (reg) {
1397                 writel(0, &priv_dev->regs->ep_ien);
1398                 ret = IRQ_WAKE_THREAD;
1399         }
1400
1401         return ret;
1402 }
1403
1404 /**
1405  * cdns3_device_thread_irq_handler - interrupt handler for device part
1406  * of controller
1407  *
1408  * @irq: irq number for cdns3 core device
1409  * @data: structure of cdns3
1410  *
1411  * Returns IRQ_HANDLED or IRQ_NONE
1412  */
1413 static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data)
1414 {
1415         struct cdns3_device *priv_dev;
1416         struct cdns3 *cdns = data;
1417         irqreturn_t ret = IRQ_NONE;
1418         unsigned long flags;
1419         int bit;
1420         u32 reg;
1421
1422         priv_dev = cdns->gadget_dev;
1423         spin_lock_irqsave(&priv_dev->lock, flags);
1424
1425         reg = readl(&priv_dev->regs->usb_ists);
1426         if (reg) {
1427                 writel(reg, &priv_dev->regs->usb_ists);
1428                 writel(USB_IEN_INIT, &priv_dev->regs->usb_ien);
1429                 cdns3_check_usb_interrupt_proceed(priv_dev, reg);
1430                 ret = IRQ_HANDLED;
1431         }
1432
1433         reg = readl(&priv_dev->regs->ep_ists);
1434
1435         /* handle default endpoint OUT */
1436         if (reg & EP_ISTS_EP_OUT0) {
1437                 cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_OUT);
1438                 ret = IRQ_HANDLED;
1439         }
1440
1441         /* handle default endpoint IN */
1442         if (reg & EP_ISTS_EP_IN0) {
1443                 cdns3_check_ep0_interrupt_proceed(priv_dev, USB_DIR_IN);
1444                 ret = IRQ_HANDLED;
1445         }
1446
1447         /* check if the interrupt is from a non-default endpoint; if not, exit */
1448         reg &= ~(EP_ISTS_EP_OUT0 | EP_ISTS_EP_IN0);
1449         if (!reg)
1450                 goto irqend;
1451
1452         for_each_set_bit(bit, (unsigned long *)&reg,
1453                          sizeof(u32) * BITS_PER_BYTE) {
1454                 cdns3_check_ep_interrupt_proceed(priv_dev->eps[bit]);
1455                 ret = IRQ_HANDLED;
1456         }
1457
1458 irqend:
1459         writel(~0, &priv_dev->regs->ep_ien);
1460         spin_unlock_irqrestore(&priv_dev->lock, flags);
1461
1462         return ret;
1463 }
1464
1465 /**
1466  * cdns3_ep_onchip_buffer_reserve - Try to reserve onchip buf for EP
1467  *
1468  * The real reservation will occur during write to EP_CFG register,
1469  * this function is used to check if the 'size' reservation is allowed.
1470  *
1471  * @priv_dev: extended gadget object
1472  * @size: the size (in KB) the endpoint would like to allocate
1473  * @is_in: endpoint direction
1474  *
1475  * Return 0 if the required size can be met, or a negative value on failure
1476  */
1477 static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev,
1478                                           int size, int is_in)
1479 {
1480         int remained;
1481
1482         /* 2 KB are reserved for EP0 */
1483         remained = priv_dev->onchip_buffers - priv_dev->onchip_used_size - 2;
1484
1485         if (is_in) {
1486                 if (remained < size)
1487                         return -EPERM;
1488
1489                 priv_dev->onchip_used_size += size;
1490         } else {
1491                 int required;
1492
1493                 /*
1494                  * All OUT EPs share the same chunk of on-chip memory, so
1495                  * the driver checks whether enough buffers are already assigned.
1496                  */
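                /*
                 * Example (hypothetical numbers): if OUT endpoints already
                 * have 4 KB assigned and this endpoint asks for 6 KB, only
                 * the 2 KB difference is charged against the remaining pool.
                 */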
1497                 if (priv_dev->out_mem_is_allocated >= size)
1498                         return 0;
1499
1500                 required = size - priv_dev->out_mem_is_allocated;
1501
1502                 if (required > remained)
1503                         return -EPERM;
1504
1505                 priv_dev->out_mem_is_allocated += required;
1506                 priv_dev->onchip_used_size += required;
1507         }
1508
1509         return 0;
1510 }
1511
1512 void cdns3_configure_dmult(struct cdns3_device *priv_dev,
1513                            struct cdns3_endpoint *priv_ep)
1514 {
1515         struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
1516
1517         /* For dev_ver > DEV_VER_V2 DMULT is configured per endpoint */
1518         if (priv_dev->dev_ver <= DEV_VER_V2)
1519                 writel(USB_CONF_DMULT, &regs->usb_conf);
1520
1521         if (priv_dev->dev_ver == DEV_VER_V2)
1522                 writel(USB_CONF2_EN_TDL_TRB, &regs->usb_conf2);
1523
1524         if (priv_dev->dev_ver >= DEV_VER_V3 && priv_ep) {
1525                 u32 mask;
1526
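                /*
                 * In these per-endpoint registers the low 16 bits select OUT
                 * endpoints and the high 16 bits select IN endpoints, hence
                 * the +16 shift for the IN direction.
                 */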
1527                 if (priv_ep->dir)
1528                         mask = BIT(priv_ep->num + 16);
1529                 else
1530                         mask = BIT(priv_ep->num);
1531
1532                 if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
1533                         cdns3_set_register_bit(&regs->tdl_from_trb, mask);
1534                         cdns3_set_register_bit(&regs->tdl_beh, mask);
1535                         cdns3_set_register_bit(&regs->tdl_beh2, mask);
1536                         cdns3_set_register_bit(&regs->dma_adv_td, mask);
1537                 }
1538
1539                 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && !priv_ep->dir)
1540                         cdns3_set_register_bit(&regs->tdl_from_trb, mask);
1541
1542                 cdns3_set_register_bit(&regs->dtrans, mask);
1543         }
1544 }
1545
1546 /**
1547  * cdns3_ep_config - Configure hardware endpoint
1548  * @priv_ep: extended endpoint object
1549  */
1550 void cdns3_ep_config(struct cdns3_endpoint *priv_ep)
1551 {
1552         bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC);
1553         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1554         u32 bEndpointAddress = priv_ep->num | priv_ep->dir;
1555         u32 max_packet_size = 0;
1556         u8 maxburst = 0;
1557         u32 ep_cfg = 0;
1558         u8 buffering;
1559         u8 mult = 0;
1560         int ret;
1561
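        /*
         * Note: the MULT/BUFFERING/MAXBURST fields appear to be programmed as
         * "value minus one", which is why the values below subtract 1 and
         * cdns3_ep_onchip_buffer_reserve() is passed buffering + 1.
         */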
1562         buffering = CDNS3_EP_BUF_SIZE - 1;
1563
1564         cdns3_configure_dmult(priv_dev, priv_ep);
1565
1566         switch (priv_ep->type) {
1567         case USB_ENDPOINT_XFER_INT:
1568                 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
1569
1570                 if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
1571                     priv_dev->dev_ver > DEV_VER_V2)
1572                         ep_cfg |= EP_CFG_TDL_CHK;
1573                 break;
1574         case USB_ENDPOINT_XFER_BULK:
1575                 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
1576
1577                 if ((priv_dev->dev_ver == DEV_VER_V2  && !priv_ep->dir) ||
1578                     priv_dev->dev_ver > DEV_VER_V2)
1579                         ep_cfg |= EP_CFG_TDL_CHK;
1580                 break;
1581         default:
1582                 ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC);
1583                 mult = CDNS3_EP_ISO_HS_MULT - 1;
1584                 buffering = mult + 1;
1585         }
1586
1587         switch (priv_dev->gadget.speed) {
1588         case USB_SPEED_FULL:
1589                 max_packet_size = is_iso_ep ? 1023 : 64;
1590                 break;
1591         case USB_SPEED_HIGH:
1592                 max_packet_size = is_iso_ep ? 1024 : 512;
1593                 break;
1594         case USB_SPEED_SUPER:
1595                 /* Forcing mult to 0 for SuperSpeed is a limitation the driver assumes. */
1596                 mult = 0;
1597                 max_packet_size = 1024;
1598                 if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
1599                         maxburst = CDNS3_EP_ISO_SS_BURST - 1;
1600                         buffering = (mult + 1) *
1601                                     (maxburst + 1);
1602
1603                         if (priv_ep->interval > 1)
1604                                 buffering++;
1605                 } else {
1606                         maxburst = CDNS3_EP_BUF_SIZE - 1;
1607                 }
1608                 break;
1609         default:
1610                 /* all other speeds are not supported */
1611                 return;
1612         }
1613
1614         if (max_packet_size == 1024)
1615                 priv_ep->trb_burst_size = 128;
1616         else if (max_packet_size >= 512)
1617                 priv_ep->trb_burst_size = 64;
1618         else
1619                 priv_ep->trb_burst_size = 16;
1620
1621         ret = cdns3_ep_onchip_buffer_reserve(priv_dev, buffering + 1,
1622                                              !!priv_ep->dir);
1623         if (ret) {
1624                 dev_err(priv_dev->dev, "onchip mem is full, ep is invalid\n");
1625                 return;
1626         }
1627
1628         ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) |
1629                   EP_CFG_MULT(mult) |
1630                   EP_CFG_BUFFERING(buffering) |
1631                   EP_CFG_MAXBURST(maxburst);
1632
1633         cdns3_select_ep(priv_dev, bEndpointAddress);
1634         writel(ep_cfg, &priv_dev->regs->ep_cfg);
1635
1636         dev_dbg(priv_dev->dev, "Configure %s with value %08x\n",
1637                 priv_ep->name, ep_cfg);
1638 }
1639
1640 /* Find correct direction for HW endpoint according to description */
1641 static int cdns3_ep_dir_is_correct(struct usb_endpoint_descriptor *desc,
1642                                    struct cdns3_endpoint *priv_ep)
1643 {
1644         return (priv_ep->endpoint.caps.dir_in && usb_endpoint_dir_in(desc)) ||
1645                (priv_ep->endpoint.caps.dir_out && usb_endpoint_dir_out(desc));
1646 }
1647
1648 static struct
1649 cdns3_endpoint *cdns3_find_available_ep(struct cdns3_device *priv_dev,
1650                                         struct usb_endpoint_descriptor *desc)
1651 {
1652         struct usb_ep *ep;
1653         struct cdns3_endpoint *priv_ep;
1654
1655         list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
1656                 unsigned long num;
1657                 /* ep names follow the pattern epXin or epXout */
1658                 char c[2] = {ep->name[2], '\0'};
1659
1660                 num = simple_strtoul(c, NULL, 10);
1661
1662                 priv_ep = ep_to_cdns3_ep(ep);
1663                 if (cdns3_ep_dir_is_correct(desc, priv_ep)) {
1664                         if (!(priv_ep->flags & EP_CLAIMED)) {
1665                                 priv_ep->num  = num;
1666                                 return priv_ep;
1667                         }
1668                 }
1669         }
1670
1671         return ERR_PTR(-ENOENT);
1672 }
1673
1674 /*
1675  * The Cadence IP has one limitation: all endpoints must be configured
1676  * (type & MaxPacketSize) before the configuration is set through the
1677  * hardware register, which means the endpoint configuration cannot be
1678  * changed after set_configuration.
1679  *
1680  * This function sets the EP_CLAIMED flag, which is added when the gadget
1681  * driver uses usb_ep_autoconfig to configure a specific endpoint.
1682  * When the UDC driver receives a set_configuration request,
1683  * it goes through all claimed endpoints and configures them
1684  * accordingly.
1685  *
1686  * In usb_ep_ops.enable/disable, endpoints are only enabled and disabled
1687  * through the ep_cfg register, which can be changed after
1688  * set_configuration, together with some software bookkeeping.
1689  */
1690 static struct
1691 usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget,
1692                               struct usb_endpoint_descriptor *desc,
1693                               struct usb_ss_ep_comp_descriptor *comp_desc)
1694 {
1695         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
1696         struct cdns3_endpoint *priv_ep;
1697         unsigned long flags;
1698
1699         priv_ep = cdns3_find_available_ep(priv_dev, desc);
1700         if (IS_ERR(priv_ep)) {
1701                 dev_err(priv_dev->dev, "no available ep\n");
1702                 return NULL;
1703         }
1704
1705         dev_dbg(priv_dev->dev, "match endpoint: %s\n", priv_ep->name);
1706
1707         spin_lock_irqsave(&priv_dev->lock, flags);
1708         priv_ep->endpoint.desc = desc;
1709         priv_ep->dir  = usb_endpoint_dir_in(desc) ? USB_DIR_IN : USB_DIR_OUT;
1710         priv_ep->type = usb_endpoint_type(desc);
1711         priv_ep->flags |= EP_CLAIMED;
1712         priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
1713
1714         spin_unlock_irqrestore(&priv_dev->lock, flags);
1715         return &priv_ep->endpoint;
1716 }
1717
1718 /**
1719  * cdns3_gadget_ep_alloc_request - Allocates request
1720  * @ep: endpoint object associated with request
1721  * @gfp_flags: gfp flags
1722  *
1723  * Returns allocated request address, NULL on allocation error
1724  */
1725 struct usb_request *cdns3_gadget_ep_alloc_request(struct usb_ep *ep,
1726                                                   gfp_t gfp_flags)
1727 {
1728         struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
1729         struct cdns3_request *priv_req;
1730
1731         priv_req = kzalloc(sizeof(*priv_req), gfp_flags);
1732         if (!priv_req)
1733                 return NULL;
1734
1735         priv_req->priv_ep = priv_ep;
1736
1737         trace_cdns3_alloc_request(priv_req);
1738         return &priv_req->request;
1739 }
1740
1741 /**
1742  * cdns3_gadget_ep_free_request - Free memory occupied by request
1743  * @ep: endpoint object associated with request
1744  * @request: request to free memory
1745  */
1746 void cdns3_gadget_ep_free_request(struct usb_ep *ep,
1747                                   struct usb_request *request)
1748 {
1749         struct cdns3_request *priv_req = to_cdns3_request(request);
1750
1751         if (priv_req->aligned_buf)
1752                 priv_req->aligned_buf->in_use = 0;
1753
1754         trace_cdns3_free_request(priv_req);
1755         kfree(priv_req);
1756 }
1757
1758 /**
1759  * cdns3_gadget_ep_enable - Enable endpoint
1760  * @ep: endpoint object
1761  * @desc: endpoint descriptor
1762  *
1763  * Returns 0 on success, error code elsewhere
1764  */
1765 static int cdns3_gadget_ep_enable(struct usb_ep *ep,
1766                                   const struct usb_endpoint_descriptor *desc)
1767 {
1768         struct cdns3_endpoint *priv_ep;
1769         struct cdns3_device *priv_dev;
1770         u32 reg = EP_STS_EN_TRBERREN;
1771         u32 bEndpointAddress;
1772         unsigned long flags;
1773         int enable = 1;
1774         int ret;
1775         int val;
1776
1777         priv_ep = ep_to_cdns3_ep(ep);
1778         priv_dev = priv_ep->cdns3_dev;
1779
1780         if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
1781                 dev_dbg(priv_dev->dev, "usbss: invalid parameters\n");
1782                 return -EINVAL;
1783         }
1784
1785         if (!desc->wMaxPacketSize) {
1786                 dev_err(priv_dev->dev, "usbss: missing wMaxPacketSize\n");
1787                 return -EINVAL;
1788         }
1789
1790         if (WARN_ON(priv_ep->flags & EP_ENABLED))
1791                 return 0;
1792
1793         spin_lock_irqsave(&priv_dev->lock, flags);
1794
1795         priv_ep->endpoint.desc = desc;
1796         priv_ep->type = usb_endpoint_type(desc);
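        /*
         * For high- and SuperSpeed periodic endpoints bInterval is an
         * exponent: the period is 2^(bInterval - 1) (micro)frames, hence the
         * BIT() conversion below.
         */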
1797         priv_ep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
1798
1799         if (priv_ep->interval > ISO_MAX_INTERVAL &&
1800             priv_ep->type == USB_ENDPOINT_XFER_ISOC) {
1801                 dev_err(priv_dev->dev, "Driver is limited to a maximum ISO interval of %d\n",
1802                         ISO_MAX_INTERVAL);
1803
1804                 ret =  -EINVAL;
1805                 goto exit;
1806         }
1807
1808         ret = cdns3_allocate_trb_pool(priv_ep);
1809
1810         if (ret)
1811                 goto exit;
1812
1813         bEndpointAddress = priv_ep->num | priv_ep->dir;
1814         cdns3_select_ep(priv_dev, bEndpointAddress);
1815
1816         trace_cdns3_gadget_ep_enable(priv_ep);
1817
1818         writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
1819
1820         ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
1821                                         !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
1822                                         1000);
1823
1824         if (unlikely(ret)) {
1825                 cdns3_free_trb_pool(priv_ep);
1826                 ret =  -EINVAL;
1827                 goto exit;
1828         }
1829
1830         /* enable interrupt for selected endpoint */
1831         cdns3_set_register_bit(&priv_dev->regs->ep_ien,
1832                                BIT(cdns3_ep_addr_to_index(bEndpointAddress)));
1833
1834         if (priv_dev->dev_ver < DEV_VER_V2)
1835                 cdns3_wa2_enable_detection(priv_dev, priv_ep, reg);
1836
1837         writel(reg, &priv_dev->regs->ep_sts_en);
1838
1839         /*
1840          * For some versions of the controller, during ISO OUT traffic the DMA
1841          * may read the transfer ring of an EP that never got a doorbell. The
1842          * issue was only seen in simulation, but the driver protects against
1843          * it anyway: an ISO OUT endpoint is enabled only right before the
1844          * doorbell (DRBL) is set, instead of here. This special treatment of
1845          * ISO OUT endpoints is recommended by the controller specification.
1846          */
1847         if (priv_ep->type == USB_ENDPOINT_XFER_ISOC  && !priv_ep->dir)
1848                 enable = 0;
1849
1850         if (enable)
1851                 cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_ENABLE);
1852
1853         ep->desc = desc;
1854         priv_ep->flags &= ~(EP_PENDING_REQUEST | EP_STALLED | EP_STALL_PENDING |
1855                             EP_QUIRK_ISO_OUT_EN | EP_QUIRK_EXTRA_BUF_EN);
1856         priv_ep->flags |= EP_ENABLED | EP_UPDATE_EP_TRBADDR;
1857         priv_ep->wa1_set = 0;
1858         priv_ep->enqueue = 0;
1859         priv_ep->dequeue = 0;
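        /*
         * pcs/ccs track the TRB ring cycle bit; both start from the
         * controller's current cycle state reported via EP_STS_CCS.
         */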
1860         reg = readl(&priv_dev->regs->ep_sts);
1861         priv_ep->pcs = !!EP_STS_CCS(reg);
1862         priv_ep->ccs = !!EP_STS_CCS(reg);
1863         /* one TRB is reserved for the link TRB used in DMULT mode */
1864         priv_ep->free_trbs = priv_ep->num_trbs - 1;
1865 exit:
1866         spin_unlock_irqrestore(&priv_dev->lock, flags);
1867
1868         return ret;
1869 }
1870
1871 /**
1872  * cdns3_gadget_ep_disable - Disable endpoint
1873  * @ep: endpoint object
1874  *
1875  * Returns 0 on success, error code elsewhere
1876  */
1877 static int cdns3_gadget_ep_disable(struct usb_ep *ep)
1878 {
1879         struct cdns3_endpoint *priv_ep;
1880         struct cdns3_request *priv_req;
1881         struct cdns3_device *priv_dev;
1882         struct usb_request *request;
1883         unsigned long flags;
1884         int ret = 0;
1885         u32 ep_cfg;
1886         int val;
1887
1888         if (!ep) {
1889                 pr_err("usbss: invalid parameters\n");
1890                 return -EINVAL;
1891         }
1892
1893         priv_ep = ep_to_cdns3_ep(ep);
1894         priv_dev = priv_ep->cdns3_dev;
1895
1896         if (WARN_ON(!(priv_ep->flags & EP_ENABLED)))
1897                 return 0;
1898
1899         spin_lock_irqsave(&priv_dev->lock, flags);
1900
1901         trace_cdns3_gadget_ep_disable(priv_ep);
1902
1903         cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
1904
1905         ep_cfg = readl(&priv_dev->regs->ep_cfg);
1906         ep_cfg &= ~EP_CFG_ENABLE;
1907         writel(ep_cfg, &priv_dev->regs->ep_cfg);
1908
1909         /*
1910          * The driver needs some time before resetting the endpoint: it
1911          * waits for the DBUSY bit to clear or for the timeout to expire.
1912          * 10 us is enough time for the controller to stop the transfer.
1913          */
1914         readl_poll_timeout_atomic(&priv_dev->regs->ep_sts, val,
1915                                   !(val & EP_STS_DBUSY), 10);
1916         writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
1917
1918         ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
1919                                         !(val & (EP_CMD_CSTALL | EP_CMD_EPRST)),
1920                                         1000);
1921         if (unlikely(ret))
1922                 dev_err(priv_dev->dev, "Timeout: %s resetting failed.\n",
1923                         priv_ep->name);
1924
1925         while (!list_empty(&priv_ep->pending_req_list)) {
1926                 request = cdns3_next_request(&priv_ep->pending_req_list);
1927
1928                 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
1929                                       -ESHUTDOWN);
1930         }
1931
1932         while (!list_empty(&priv_ep->wa2_descmiss_req_list)) {
1933                 priv_req = cdns3_next_priv_request(&priv_ep->wa2_descmiss_req_list);
1934
1935                 kfree(priv_req->request.buf);
1936                 cdns3_gadget_ep_free_request(&priv_ep->endpoint,
1937                                              &priv_req->request);
1938                 list_del_init(&priv_req->list);
1939                 --priv_ep->wa2_counter;
1940         }
1941
1942         while (!list_empty(&priv_ep->deferred_req_list)) {
1943                 request = cdns3_next_request(&priv_ep->deferred_req_list);
1944
1945                 cdns3_gadget_giveback(priv_ep, to_cdns3_request(request),
1946                                       -ESHUTDOWN);
1947         }
1948
1949         priv_ep->descmis_req = NULL;
1950
1951         ep->desc = NULL;
1952         priv_ep->flags &= ~EP_ENABLED;
1953
1954         spin_unlock_irqrestore(&priv_dev->lock, flags);
1955
1956         return ret;
1957 }
1958
1959 /**
1960  * __cdns3_gadget_ep_queue - Transfer data on endpoint
1961  * @ep: endpoint object
1962  * @request: request object
1963  * @gfp_flags: gfp flags
1964  *
1965  * Returns 0 on success, error code elsewhere
1966  */
1967 static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
1968                                    struct usb_request *request,
1969                                    gfp_t gfp_flags)
1970 {
1971         struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
1972         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
1973         struct cdns3_request *priv_req;
1974         int ret = 0;
1975
1976         request->actual = 0;
1977         request->status = -EINPROGRESS;
1978         priv_req = to_cdns3_request(request);
1979         trace_cdns3_ep_queue(priv_req);
1980
1981         if (priv_dev->dev_ver < DEV_VER_V2) {
1982                 ret = cdns3_wa2_gadget_ep_queue(priv_dev, priv_ep,
1983                                                 priv_req);
1984
1985                 if (ret == EINPROGRESS)
1986                         return 0;
1987         }
1988
1989         ret = cdns3_prepare_aligned_request_buf(priv_req);
1990         if (ret < 0)
1991                 return ret;
1992
1993         ret = usb_gadget_map_request(&priv_dev->gadget, request,
1994                                      usb_endpoint_dir_in(ep->desc));
1995         if (ret)
1996                 return ret;
1997
1998         list_add_tail(&request->list, &priv_ep->deferred_req_list);
1999
2000         /*
2001          * If the hardware endpoint configuration has not been set yet,
2002          * just leave the request on the deferred list; the transfer will
2003          * be started in cdns3_set_hw_configuration.
2004          */
2005         if (priv_dev->hw_configured_flag && !(priv_ep->flags & EP_STALLED) &&
2006             !(priv_ep->flags & EP_STALL_PENDING))
2007                 cdns3_start_all_request(priv_dev, priv_ep);
2008
2009         return 0;
2010 }
2011
2012 static int cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
2013                                  gfp_t gfp_flags)
2014 {
2015         struct usb_request *zlp_request;
2016         struct cdns3_endpoint *priv_ep;
2017         struct cdns3_device *priv_dev;
2018         unsigned long flags;
2019         int ret;
2020
2021         if (!request || !ep)
2022                 return -EINVAL;
2023
2024         priv_ep = ep_to_cdns3_ep(ep);
2025         priv_dev = priv_ep->cdns3_dev;
2026
2027         spin_lock_irqsave(&priv_dev->lock, flags);
2028
2029         ret = __cdns3_gadget_ep_queue(ep, request, gfp_flags);
2030
2031         if (ret == 0 && request->zero && request->length &&
2032             (request->length % ep->maxpacket == 0)) {
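                /*
                 * The transfer ends exactly on a packet boundary and the
                 * request asked for a zero-length packet, so queue an extra
                 * ZLP request to terminate the transfer.
                 */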
2033                 struct cdns3_request *priv_req;
2034
2035                 zlp_request = cdns3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
2036                 zlp_request->buf = priv_dev->zlp_buf;
2037                 zlp_request->length = 0;
2038
2039                 priv_req = to_cdns3_request(zlp_request);
2040                 priv_req->flags |= REQUEST_ZLP;
2041
2042                 dev_dbg(priv_dev->dev, "Queuing ZLP for endpoint: %s\n",
2043                         priv_ep->name);
2044                 ret = __cdns3_gadget_ep_queue(ep, zlp_request, gfp_flags);
2045         }
2046
2047         spin_unlock_irqrestore(&priv_dev->lock, flags);
2048         return ret;
2049 }
2050
2051 /**
2052  * cdns3_gadget_ep_dequeue - Remove request from transfer queue
2053  * @ep: endpoint object associated with request
2054  * @request: request object
2055  *
2056  * Returns 0 on success, error code elsewhere
2057  */
2058 int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
2059                             struct usb_request *request)
2060 {
2061         struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2062         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2063         struct usb_request *req, *req_temp;
2064         struct cdns3_request *priv_req;
2065         struct cdns3_trb *link_trb;
2066         unsigned long flags;
2067         int ret = 0;
2068
2069         if (!ep || !request || !ep->desc)
2070                 return -EINVAL;
2071
2072         spin_lock_irqsave(&priv_dev->lock, flags);
2073
2074         priv_req = to_cdns3_request(request);
2075
2076         trace_cdns3_ep_dequeue(priv_req);
2077
2078         cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2079
2080         list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
2081                                  list) {
2082                 if (request == req)
2083                         goto found;
2084         }
2085
2086         list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
2087                                  list) {
2088                 if (request == req)
2089                         goto found;
2090         }
2091
2092         goto not_found;
2093
2094 found:
2095
2096         if (priv_ep->wa1_trb == priv_req->trb)
2097                 cdns3_wa1_restore_cycle_bit(priv_ep);
2098
2099         link_trb = priv_req->trb;
2100         cdns3_move_deq_to_next_trb(priv_req);
2101         cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
2102
2103         /* Update the ring: link the freed TRB to the next deferred request, if any */
2104         request = cdns3_next_request(&priv_ep->deferred_req_list);
2105         if (request) {
2106                 priv_req = to_cdns3_request(request);
2107
2108                 link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
2109                                               (priv_req->start_trb * TRB_SIZE));
2110                 link_trb->control = (link_trb->control & TRB_CYCLE) |
2111                                     TRB_TYPE(TRB_LINK) | TRB_CHAIN | TRB_TOGGLE;
2112         } else {
2113                 priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
2114         }
2115
2116 not_found:
2117         spin_unlock_irqrestore(&priv_dev->lock, flags);
2118         return ret;
2119 }
2120
2121 /**
2122  * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint
2123  * Should be called after acquiring the spin_lock and selecting the ep
2124  * @priv_ep: endpoint object to set stall on
2125  */
2126 void __cdns3_gadget_ep_set_halt(struct cdns3_endpoint *priv_ep)
2127 {
2128         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2129
2130         trace_cdns3_halt(priv_ep, 1, 0);
2131
2132         if (!(priv_ep->flags & EP_STALLED)) {
2133                 u32 ep_sts_reg = readl(&priv_dev->regs->ep_sts);
2134
2135                 if (!(ep_sts_reg & EP_STS_DBUSY))
2136                         cdns3_ep_stall_flush(priv_ep);
2137                 else
2138                         priv_ep->flags |= EP_STALL_PENDING;
2139         }
2140 }
2141
2142 /**
2143  * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint
2144  * Should be called after acquiring the spin_lock and selecting the ep
2145  * @priv_ep: endpoint object to clear stall on
2146  */
2147 int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
2148 {
2149         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2150         struct usb_request *request;
2151         int ret = 0;
2152         int val;
2153
2154         trace_cdns3_halt(priv_ep, 0, 0);
2155
2156         writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2157
2158         /* wait for EPRST cleared */
2159         ret = readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2160                                         !(val & EP_CMD_EPRST), 100);
2161         if (ret)
2162                 return -EINVAL;
2163
2164         priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);
2165
2166         request = cdns3_next_request(&priv_ep->pending_req_list);
2167
2168         if (request)
2169                 cdns3_rearm_transfer(priv_ep, 1);
2170
2171         cdns3_start_all_request(priv_dev, priv_ep);
2172         return ret;
2173 }
2174
2175 /**
2176  * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint
2177  * @ep: endpoint object to set/clear stall on
2178  * @value: 1 for set stall, 0 for clear stall
2179  *
2180  * Returns 0 on success, error code elsewhere
2181  */
2182 int cdns3_gadget_ep_set_halt(struct usb_ep *ep, int value)
2183 {
2184         struct cdns3_endpoint *priv_ep = ep_to_cdns3_ep(ep);
2185         struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
2186         unsigned long flags;
2187         int ret = 0;
2188
2189         if (!(priv_ep->flags & EP_ENABLED))
2190                 return -EPERM;
2191
2192         spin_lock_irqsave(&priv_dev->lock, flags);
2193
2194         cdns3_select_ep(priv_dev, ep->desc->bEndpointAddress);
2195
2196         if (!value) {
2197                 priv_ep->flags &= ~EP_WEDGE;
2198                 ret = __cdns3_gadget_ep_clear_halt(priv_ep);
2199         } else {
2200                 __cdns3_gadget_ep_set_halt(priv_ep);
2201         }
2202
2203         spin_unlock_irqrestore(&priv_dev->lock, flags);
2204
2205         return ret;
2206 }
2207
2208 extern const struct usb_ep_ops cdns3_gadget_ep0_ops;
2209
2210 static const struct usb_ep_ops cdns3_gadget_ep_ops = {
2211         .enable = cdns3_gadget_ep_enable,
2212         .disable = cdns3_gadget_ep_disable,
2213         .alloc_request = cdns3_gadget_ep_alloc_request,
2214         .free_request = cdns3_gadget_ep_free_request,
2215         .queue = cdns3_gadget_ep_queue,
2216         .dequeue = cdns3_gadget_ep_dequeue,
2217         .set_halt = cdns3_gadget_ep_set_halt,
2218         .set_wedge = cdns3_gadget_ep_set_wedge,
2219 };
2220
2221 /**
2222  * cdns3_gadget_get_frame - Returns the current ITP frame number
2223  * @gadget: gadget object
2224  *
2225  * Returns the current ITP frame number
2226  */
2227 static int cdns3_gadget_get_frame(struct usb_gadget *gadget)
2228 {
2229         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2230
2231         return readl(&priv_dev->regs->usb_itpn);
2232 }
2233
2234 int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev)
2235 {
2236         enum usb_device_speed speed;
2237
2238         speed = cdns3_get_speed(priv_dev);
2239
2240         if (speed >= USB_SPEED_SUPER)
2241                 return 0;
2242
2243         /* Start driving resume signaling to indicate remote wakeup. */
2244         writel(USB_CONF_LGO_L0, &priv_dev->regs->usb_conf);
2245
2246         return 0;
2247 }
2248
2249 static int cdns3_gadget_wakeup(struct usb_gadget *gadget)
2250 {
2251         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2252         unsigned long flags;
2253         int ret = 0;
2254
2255         spin_lock_irqsave(&priv_dev->lock, flags);
2256         ret = __cdns3_gadget_wakeup(priv_dev);
2257         spin_unlock_irqrestore(&priv_dev->lock, flags);
2258         return ret;
2259 }
2260
2261 static int cdns3_gadget_set_selfpowered(struct usb_gadget *gadget,
2262                                         int is_selfpowered)
2263 {
2264         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2265         unsigned long flags;
2266
2267         spin_lock_irqsave(&priv_dev->lock, flags);
2268         priv_dev->is_selfpowered = !!is_selfpowered;
2269         spin_unlock_irqrestore(&priv_dev->lock, flags);
2270         return 0;
2271 }
2272
2273 static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on)
2274 {
2275         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2276
2277         if (is_on)
2278                 writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf);
2279         else
2280                 writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
2281
2282         return 0;
2283 }
2284
2285 static void cdns3_gadget_config(struct cdns3_device *priv_dev)
2286 {
2287         struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
2288         u32 reg;
2289
2290         cdns3_ep0_config(priv_dev);
2291
2292         /* enable interrupts for endpoint 0 (in and out) */
2293         writel(EP_IEN_EP_OUT0 | EP_IEN_EP_IN0, &regs->ep_ien);
2294
2295         /*
2296          * The driver needs to modify the minimal LFPS U1 exit time for the
2297          * DEV_VER_TI_V1 revision of the controller.
2298          */
2299         if (priv_dev->dev_ver == DEV_VER_TI_V1) {
2300                 reg = readl(&regs->dbg_link1);
2301
2302                 reg &= ~DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_MASK;
2303                 reg |= DBG_LINK1_LFPS_MIN_GEN_U1_EXIT(0x55) |
2304                        DBG_LINK1_LFPS_MIN_GEN_U1_EXIT_SET;
2305                 writel(reg, &regs->dbg_link1);
2306         }
2307
2308         /*
2309          * By default, some platforms have protected memory access enabled.
2310          * This causes problems with the cache, so the driver restores
2311          * non-secure access to memory.
2312          */
2313         reg = readl(&regs->dma_axi_ctrl);
2314         reg |= DMA_AXI_CTRL_MARPROT(DMA_AXI_CTRL_NON_SECURE) |
2315                DMA_AXI_CTRL_MAWPROT(DMA_AXI_CTRL_NON_SECURE);
2316         writel(reg, &regs->dma_axi_ctrl);
2317
2318         /* enable generic interrupts */
2319         writel(USB_IEN_INIT, &regs->usb_ien);
2320         writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
2321
2322         cdns3_configure_dmult(priv_dev, NULL);
2323
2324         cdns3_gadget_pullup(&priv_dev->gadget, 1);
2325 }
2326
2327 /**
2328  * cdns3_gadget_udc_start - Gadget start
2329  * @gadget: gadget object
2330  * @driver: driver which operates on this gadget
2331  *
2332  * Returns 0 on success, error code elsewhere
2333  */
2334 static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
2335                                   struct usb_gadget_driver *driver)
2336 {
2337         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2338         unsigned long flags;
2339
2340         spin_lock_irqsave(&priv_dev->lock, flags);
2341         priv_dev->gadget_driver = driver;
2342         cdns3_gadget_config(priv_dev);
2343         spin_unlock_irqrestore(&priv_dev->lock, flags);
2344         return 0;
2345 }
2346
2347 /**
2348  * cdns3_gadget_udc_stop - Stops gadget
2349  * @gadget: gadget object
2350  *
2351  * Returns 0
2352  */
2353 static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
2354 {
2355         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2356         struct cdns3_endpoint *priv_ep;
2357         u32 bEndpointAddress;
2358         struct usb_ep *ep;
2359         int ret = 0;
2360         int val;
2361
2362         priv_dev->gadget_driver = NULL;
2363
2364         priv_dev->onchip_used_size = 0;
2365         priv_dev->out_mem_is_allocated = 0;
2366         priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
2367
2368         list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) {
2369                 priv_ep = ep_to_cdns3_ep(ep);
2370                 bEndpointAddress = priv_ep->num | priv_ep->dir;
2371                 cdns3_select_ep(priv_dev, bEndpointAddress);
2372                 writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
2373                 readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
2374                                           !(val & EP_CMD_EPRST), 100);
2375         }
2376
2377         /* disable interrupt for device */
2378         writel(0, &priv_dev->regs->usb_ien);
2379         writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf);
2380
2381         return ret;
2382 }
2383
2384 static void cdns3_gadget_udc_set_speed(struct usb_gadget *gadget,
2385                                        enum usb_device_speed speed)
2386 {
2387         struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
2388
2389         switch (speed) {
2390         case USB_SPEED_FULL:
2391                 writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
2392                 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2393                 break;
2394         case USB_SPEED_HIGH:
2395                 writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
2396                 break;
2397         case USB_SPEED_SUPER:
2398                 break;
2399         default:
2400                 dev_err(priv_dev->dev, "invalid speed parameter %d\n",
2401                         speed);
2402         }
2403
2404         priv_dev->gadget.speed = speed;
2405 }
2406
2407 static const struct usb_gadget_ops cdns3_gadget_ops = {
2408         .get_frame = cdns3_gadget_get_frame,
2409         .wakeup = cdns3_gadget_wakeup,
2410         .set_selfpowered = cdns3_gadget_set_selfpowered,
2411         .pullup = cdns3_gadget_pullup,
2412         .udc_start = cdns3_gadget_udc_start,
2413         .udc_stop = cdns3_gadget_udc_stop,
2414         .match_ep = cdns3_gadget_match_ep,
2415         .udc_set_speed = cdns3_gadget_udc_set_speed,
2416 };
2417
2418 static void cdns3_free_all_eps(struct cdns3_device *priv_dev)
2419 {
2420         int i;
2421
2422         /* ep0 IN shares the ep0 OUT object; clear the alias so it is not freed twice */
2423         priv_dev->eps[16] = NULL;
2424
2425         for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++)
2426                 if (priv_dev->eps[i]) {
2427                         cdns3_free_trb_pool(priv_dev->eps[i]);
2428                         devm_kfree(priv_dev->dev, priv_dev->eps[i]);
2429                 }
2430 }
2431
2432 /**
2433  * cdns3_init_eps - Initialize the software endpoints of the gadget
2434  * @priv_dev: extended gadget object
2435  *
2436  * Returns 0 on success, error code elsewhere
2437  */
2438 static int cdns3_init_eps(struct cdns3_device *priv_dev)
2439 {
2440         u32 ep_enabled_reg, iso_ep_reg;
2441         struct cdns3_endpoint *priv_ep;
2442         int ep_dir, ep_number;
2443         u32 ep_mask;
2444         int ret = 0;
2445         int i;
2446
2447         /* Read it from USB_CAP3 to USB_CAP5 */
2448         ep_enabled_reg = readl(&priv_dev->regs->usb_cap3);
2449         iso_ep_reg = readl(&priv_dev->regs->usb_cap4);
2450
2451         dev_dbg(priv_dev->dev, "Initializing non-zero endpoints\n");
2452
2453         for (i = 0; i < CDNS3_ENDPOINTS_MAX_COUNT; i++) {
2454                 ep_dir = i >> 4;        /* i div 16 */
2455                 ep_number = i & 0xF;    /* i % 16 */
2456                 ep_mask = BIT(i);
2457
2458                 if (!(ep_enabled_reg & ep_mask))
2459                         continue;
2460
2461                 if (ep_dir && !ep_number) {
2462                         priv_dev->eps[i] = priv_dev->eps[0];
2463                         continue;
2464                 }
2465
2466                 priv_ep = devm_kzalloc(priv_dev->dev, sizeof(*priv_ep),
2467                                        GFP_KERNEL);
2468                 if (!priv_ep) {
2469                         ret = -ENOMEM;
2470                         goto err;
2471                 }
2472
2473                 /* set parent of endpoint object */
2474                 priv_ep->cdns3_dev = priv_dev;
2475                 priv_dev->eps[i] = priv_ep;
2476                 priv_ep->num = ep_number;
2477                 priv_ep->dir = ep_dir ? USB_DIR_IN : USB_DIR_OUT;
2478
2479                 if (!ep_number) {
2480                         ret = cdns3_init_ep0(priv_dev, priv_ep);
2481                         if (ret) {
2482                                 dev_err(priv_dev->dev, "Failed to init ep0\n");
2483                                 goto err;
2484                         }
2485                 } else {
2486                         snprintf(priv_ep->name, sizeof(priv_ep->name), "ep%d%s",
2487                                  ep_number, ep_dir ? "in" : "out");
2488                         priv_ep->endpoint.name = priv_ep->name;
2489
2490                         usb_ep_set_maxpacket_limit(&priv_ep->endpoint,
2491                                                    CDNS3_EP_MAX_PACKET_LIMIT);
2492                         priv_ep->endpoint.max_streams = CDNS3_EP_MAX_STREAMS;
2493                         priv_ep->endpoint.ops = &cdns3_gadget_ep_ops;
2494                         if (ep_dir)
2495                                 priv_ep->endpoint.caps.dir_in = 1;
2496                         else
2497                                 priv_ep->endpoint.caps.dir_out = 1;
2498
2499                         if (iso_ep_reg & ep_mask)
2500                                 priv_ep->endpoint.caps.type_iso = 1;
2501
2502                         priv_ep->endpoint.caps.type_bulk = 1;
2503                         priv_ep->endpoint.caps.type_int = 1;
2504
2505                         list_add_tail(&priv_ep->endpoint.ep_list,
2506                                       &priv_dev->gadget.ep_list);
2507                 }
2508
2509                 priv_ep->flags = 0;
2510
2511                 dev_info(priv_dev->dev, "Initialized %s support: %s %s\n",
2512                          priv_ep->name,
2513                          priv_ep->endpoint.caps.type_bulk ? "BULK, INT" : "",
2514                          priv_ep->endpoint.caps.type_iso ? "ISO" : "");
2515
2516                 INIT_LIST_HEAD(&priv_ep->pending_req_list);
2517                 INIT_LIST_HEAD(&priv_ep->deferred_req_list);
2518                 INIT_LIST_HEAD(&priv_ep->wa2_descmiss_req_list);
2519         }
2520
2521         return 0;
2522 err:
2523         cdns3_free_all_eps(priv_dev);
2524         return -ENOMEM;
2525 }
2526
2527 void cdns3_gadget_exit(struct cdns3 *cdns)
2528 {
2529         struct cdns3_device *priv_dev;
2530
2531         priv_dev = cdns->gadget_dev;
2532
2533         usb_del_gadget_udc(&priv_dev->gadget);
2534
2535         cdns3_free_all_eps(priv_dev);
2536
2537         while (!list_empty(&priv_dev->aligned_buf_list)) {
2538                 struct cdns3_aligned_buf *buf;
2539
2540                 buf = cdns3_next_align_buf(&priv_dev->aligned_buf_list);
2541                 dma_free_coherent(buf->buf);
2542
2543                 list_del(&buf->list);
2544                 kfree(buf);
2545         }
2546
2547         dma_free_coherent(priv_dev->setup_buf);
2548
2549         kfree(priv_dev->zlp_buf);
2550         kfree(priv_dev);
2551         cdns->gadget_dev = NULL;
2552         cdns3_drd_switch_gadget(cdns, 0);
2553 }
2554
2555 static int cdns3_gadget_start(struct cdns3 *cdns)
2556 {
2557         struct cdns3_device *priv_dev;
2558         u32 max_speed;
2559         int ret;
2560
2561         priv_dev = kzalloc(sizeof(*priv_dev), GFP_KERNEL);
2562         if (!priv_dev)
2563                 return -ENOMEM;
2564
2565         cdns->gadget_dev = priv_dev;
2566         priv_dev->sysdev = cdns->dev;
2567         priv_dev->dev = cdns->dev;
2568         priv_dev->regs = cdns->dev_regs;
2569
2570         dev_read_u32(priv_dev->dev, "cdns,on-chip-buff-size",
2571                      &priv_dev->onchip_buffers);
2572
2573         if (priv_dev->onchip_buffers <= 0) {
2574                 u32 reg = readl(&priv_dev->regs->usb_cap2);
2575
2576                 priv_dev->onchip_buffers = USB_CAP2_ACTUAL_MEM_SIZE(reg);
2577         }
2578
2579         if (!priv_dev->onchip_buffers)
2580                 priv_dev->onchip_buffers = 256;
2581
2582         max_speed = usb_get_maximum_speed(dev_ofnode(cdns->dev));
2583
2584         /* Check the maximum_speed parameter */
2585         switch (max_speed) {
2586         case USB_SPEED_FULL:
2587                 /* fall through */
2588         case USB_SPEED_HIGH:
2589                 /* fall through */
2590         case USB_SPEED_SUPER:
2591                 break;
2592         default:
2593                 dev_err(cdns->dev, "invalid maximum_speed parameter %d\n",
2594                         max_speed);
2595                 /* fall through */
2596         case USB_SPEED_UNKNOWN:
2597                 /* default to superspeed */
2598                 max_speed = USB_SPEED_SUPER;
2599                 break;
2600         }
2601
2602         /* fill gadget fields */
2603         priv_dev->gadget.max_speed = max_speed;
2604         priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
2605         priv_dev->gadget.ops = &cdns3_gadget_ops;
2606         priv_dev->gadget.name = "cdns3-gadget";
2607 #ifndef __UBOOT__
2608         priv_dev->gadget.name = "usb-ss-gadget";
2609         priv_dev->gadget.sg_supported = 1;
2610         priv_dev->gadget.quirk_avoids_skb_reserve = 1;
2611 #endif
2612
2613         spin_lock_init(&priv_dev->lock);
2614         INIT_WORK(&priv_dev->pending_status_wq,
2615                   cdns3_pending_setup_status_handler);
2616
2617         /* initialize endpoint container */
2618         INIT_LIST_HEAD(&priv_dev->gadget.ep_list);
2619         INIT_LIST_HEAD(&priv_dev->aligned_buf_list);
2620
2621         ret = cdns3_init_eps(priv_dev);
2622         if (ret) {
2623                 dev_err(priv_dev->dev, "Failed to create endpoints\n");
2624                 goto err1;
2625         }
2626
2627         /* allocate memory for setup packet buffer */
2628         priv_dev->setup_buf =
2629                 dma_alloc_coherent(8, (unsigned long *)&priv_dev->setup_dma);
2630         if (!priv_dev->setup_buf) {
2631                 ret = -ENOMEM;
2632                 goto err2;
2633         }
2634
2635         priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6);
2636
2637         dev_dbg(priv_dev->dev, "Device Controller version: %08x\n",
2638                 readl(&priv_dev->regs->usb_cap6));
2639         dev_dbg(priv_dev->dev, "USB Capabilities: %08x\n",
2640                 readl(&priv_dev->regs->usb_cap1));
2641         dev_dbg(priv_dev->dev, "On-chip memory configuration: %08x\n",
2642                 readl(&priv_dev->regs->usb_cap2));
2643
2644         priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver);
2645
2646         priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL);
2647         if (!priv_dev->zlp_buf) {
2648                 ret = -ENOMEM;
2649                 goto err3;
2650         }
2651
2652         /* add USB gadget device */
2653         ret = usb_add_gadget_udc((struct device *)priv_dev->dev,
2654                                  &priv_dev->gadget);
2655         if (ret < 0) {
2656                 dev_err(priv_dev->dev,
2657                         "Failed to register USB device controller\n");
2658                 goto err4;
2659         }
2660
2661         return 0;
2662 err4:
2663         kfree(priv_dev->zlp_buf);
2664 err3:
2665         dma_free_coherent(priv_dev->setup_buf);
2666 err2:
2667         cdns3_free_all_eps(priv_dev);
2668 err1:
2669         cdns->gadget_dev = NULL;
2670         return ret;
2671 }
2672
2673 static int __cdns3_gadget_init(struct cdns3 *cdns)
2674 {
2675         int ret = 0;
2676
2677         cdns3_drd_switch_gadget(cdns, 1);
2678
2679         ret = cdns3_gadget_start(cdns);
2680         if (ret)
2681                 return ret;
2682
2683         return 0;
2684 }
2685
2686 static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
2687 {
2688         struct cdns3_device *priv_dev = cdns->gadget_dev;
2689
2690         cdns3_disconnect_gadget(priv_dev);
2691
2692         priv_dev->gadget.speed = USB_SPEED_UNKNOWN;
2693         usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED);
2694         cdns3_hw_reset_eps_config(priv_dev);
2695
2696         /* disable interrupt for device */
2697         writel(0, &priv_dev->regs->usb_ien);
2698
2699         cdns3_gadget_pullup(&priv_dev->gadget, 0);
2700
2701         return 0;
2702 }
2703
2704 static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated)
2705 {
2706         struct cdns3_device *priv_dev = cdns->gadget_dev;
2707
2708         if (!priv_dev->gadget_driver)
2709                 return 0;
2710
2711         cdns3_gadget_config(priv_dev);
2712
2713         return 0;
2714 }
2715
2716 /**
2717  * cdns3_gadget_init - initialize device structure
2718  *
2719  * @cdns: cdns3 instance
2720  *
2721  * This function initializes the gadget.
2722  */
2723 int cdns3_gadget_init(struct cdns3 *cdns)
2724 {
2725         struct cdns3_role_driver *rdrv;
2726
2727         rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
2728         if (!rdrv)
2729                 return -ENOMEM;
2730
2731         rdrv->start     = __cdns3_gadget_init;
2732         rdrv->stop      = cdns3_gadget_exit;
2733         rdrv->suspend   = cdns3_gadget_suspend;
2734         rdrv->resume    = cdns3_gadget_resume;
2735         rdrv->state     = CDNS3_ROLE_STATE_INACTIVE;
2736         rdrv->name      = "gadget";
2737         cdns->roles[USB_ROLE_DEVICE] = rdrv;
2738
2739         return 0;
2740 }
2741
2742 /**
2743  * cdns3_gadget_uboot_handle_interrupt - handle cdns3 gadget interrupt
2744  * @cdns: pointer to struct cdns3
2745  *
2746  * Handles the ep0 and gadget interrupts
2747  */
2748 static void cdns3_gadget_uboot_handle_interrupt(struct cdns3 *cdns)
2749 {
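        /*
         * U-Boot has no interrupt context here: this helper is called from a
         * polling loop, so the top-half handler and the "threaded" handler
         * simply run back to back.
         */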
2750         int ret = cdns3_device_irq_handler(0, cdns);
2751
2752         if (ret == IRQ_WAKE_THREAD)
2753                 cdns3_device_thread_irq_handler(0, cdns);
2754 }
2755
2756 int dm_usb_gadget_handle_interrupts(struct udevice *dev)
2757 {
2758         struct cdns3 *cdns = dev_get_priv(dev);
2759
2760         cdns3_gadget_uboot_handle_interrupt(cdns);
2761
2762         return 0;
2763 }