Merge tag 'v5.15.57' into rpi-5.15.y
[platform/kernel/linux-rpi.git] / drivers / usb / host / dwc_otg / dwc_otg_pcd.c
1 /* ==========================================================================
2  * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $
3  * $Revision: #101 $
4  * $Date: 2012/08/10 $
5  * $Change: 2047372 $
6  *
7  * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8  * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9  * otherwise expressly agreed to in writing between Synopsys and you.
10  *
11  * The Software IS NOT an item of Licensed Software or Licensed Product under
12  * any End User Software License Agreement or Agreement for Licensed Product
13  * with Synopsys or any supplement thereto. You are permitted to use and
14  * redistribute this Software in source and binary forms, with or without
15  * modification, provided that redistributions of source code must retain this
16  * notice. You may not view, use, disclose, copy or distribute this file or
17  * any information contained herein except pursuant to this license grant from
18  * Synopsys. If you do not agree with this notice, including the disclaimer
19  * below, then you are not authorized to use the Software.
20  *
21  * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  * ========================================================================== */
33 #ifndef DWC_HOST_ONLY
34
35 /** @file
36  * This file implements PCD Core. All code in this file is portable and doesn't
37  * use any OS specific functions.
38  * PCD Core provides Interface, defined in <code><dwc_otg_pcd_if.h></code>
39  * header file, which can be used to implement OS specific PCD interface.
40  *
41  * An important function of the PCD is managing interrupts generated
42  * by the DWC_otg controller. The implementation of the DWC_otg device
43  * mode interrupt service routines is in dwc_otg_pcd_intr.c.
44  *
45  * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
46  * @todo Does it work when the request size is greater than DEPTSIZ
47  * transfer size
48  *
49  */
50
51 #include "dwc_otg_pcd.h"
52
53 #ifdef DWC_UTE_CFI
54 #include "dwc_otg_cfi.h"
55
56 extern int init_cfi(cfiobject_t * cfiobj);
57 #endif
58
59 /**
60  * Choose endpoint from ep arrays using usb_ep structure.
61  */
62 static dwc_otg_pcd_ep_t *get_ep_from_handle(dwc_otg_pcd_t * pcd, void *handle)
63 {
64         int i;
65         if (pcd->ep0.priv == handle) {
66                 return &pcd->ep0;
67         }
68         for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
69                 if (pcd->in_ep[i].priv == handle)
70                         return &pcd->in_ep[i];
71                 if (pcd->out_ep[i].priv == handle)
72                         return &pcd->out_ep[i];
73         }
74
75         return NULL;
76 }
77
78 /**
79  * This function completes a request.  It call's the request call back.
80  */
81 void dwc_otg_request_done(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req,
82                           int32_t status)
83 {
84         unsigned stopped = ep->stopped;
85
86         DWC_DEBUGPL(DBG_PCDV, "%s(ep %p req %p)\n", __func__, ep, req);
87         DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
88
89         /* don't modify queue heads during completion callback */
90         ep->stopped = 1;
91         /* spin_unlock/spin_lock now done in fops->complete() */
92         ep->pcd->fops->complete(ep->pcd, ep->priv, req->priv, status,
93                                 req->actual);
94
95         if (ep->pcd->request_pending > 0) {
96                 --ep->pcd->request_pending;
97         }
98
99         ep->stopped = stopped;
100         DWC_FREE(req);
101 }
102
103 /**
104  * This function terminates all the requsts in the EP request queue.
105  */
106 void dwc_otg_request_nuke(dwc_otg_pcd_ep_t * ep)
107 {
108         dwc_otg_pcd_request_t *req;
109
110         ep->stopped = 1;
111
112         /* called with irqs blocked?? */
113         while (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
114                 req = DWC_CIRCLEQ_FIRST(&ep->queue);
115                 dwc_otg_request_done(ep, req, -DWC_E_SHUTDOWN);
116         }
117 }
118
/**
 * Register the function-driver (gadget) operations table with the PCD.
 * The PCD invokes these ops for completion, suspend, resume, etc.
 */
void dwc_otg_pcd_start(dwc_otg_pcd_t * pcd,
		       const struct dwc_otg_pcd_function_ops *fops)
{
	pcd->fops = fops;
}
124
125 /**
126  * PCD Callback function for initializing the PCD when switching to
127  * device mode.
128  *
129  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
130  */
131 static int32_t dwc_otg_pcd_start_cb(void *p)
132 {
133         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
134         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
135
136         /*
137          * Initialized the Core for Device mode.
138          */
139         if (dwc_otg_is_device_mode(core_if)) {
140                 dwc_otg_core_dev_init(core_if);
141                 /* Set core_if's lock pointer to the pcd->lock */
142                 core_if->lock = pcd->lock;
143         }
144         return 1;
145 }
146
147 /** CFI-specific buffer allocation function for EP */
148 #ifdef DWC_UTE_CFI
149 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
150                               size_t buflen, int flags)
151 {
152         dwc_otg_pcd_ep_t *ep;
153         ep = get_ep_from_handle(pcd, pep);
154         if (!ep) {
155                 DWC_WARN("bad ep\n");
156                 return -DWC_E_INVALID;
157         }
158
159         return pcd->cfi->ops.ep_alloc_buf(pcd->cfi, pcd, ep, addr, buflen,
160                                           flags);
161 }
162 #else
163 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
164                               size_t buflen, int flags);
165 #endif
166
167 /**
168  * PCD Callback function for notifying the PCD when resuming from
169  * suspend.
170  *
171  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
172  */
173 static int32_t dwc_otg_pcd_resume_cb(void *p)
174 {
175         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
176
177         if (pcd->fops->resume) {
178                 pcd->fops->resume(pcd);
179         }
180
181         /* Stop the SRP timeout timer. */
182         if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS)
183             || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
184                 if (GET_CORE_IF(pcd)->srp_timer_started) {
185                         GET_CORE_IF(pcd)->srp_timer_started = 0;
186                         DWC_TIMER_CANCEL(GET_CORE_IF(pcd)->srp_timer);
187                 }
188         }
189         return 1;
190 }
191
192 /**
193  * PCD Callback function for notifying the PCD device is suspended.
194  *
195  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
196  */
197 static int32_t dwc_otg_pcd_suspend_cb(void *p)
198 {
199         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
200
201         if (pcd->fops->suspend) {
202                 DWC_SPINUNLOCK(pcd->lock);
203                 pcd->fops->suspend(pcd);
204                 DWC_SPINLOCK(pcd->lock);
205         }
206
207         return 1;
208 }
209
210 /**
211  * PCD Callback function for stopping the PCD when switching to Host
212  * mode.
213  *
214  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
215  */
216 static int32_t dwc_otg_pcd_stop_cb(void *p)
217 {
218         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
219         extern void dwc_otg_pcd_stop(dwc_otg_pcd_t * _pcd);
220
221         dwc_otg_pcd_stop(pcd);
222         return 1;
223 }
224
225 /**
226  * PCD Callback structure for handling mode switching.
227  */
228 static dwc_otg_cil_callbacks_t pcd_callbacks = {
229         .start = dwc_otg_pcd_start_cb,
230         .stop = dwc_otg_pcd_stop_cb,
231         .suspend = dwc_otg_pcd_suspend_cb,
232         .resume_wakeup = dwc_otg_pcd_resume_cb,
233         .p = 0,                 /* Set at registration */
234 };
235
236 /**
237  * This function allocates a DMA Descriptor chain for the Endpoint
238  * buffer to be used for a transfer to/from the specified endpoint.
239  */
240 dwc_otg_dev_dma_desc_t *dwc_otg_ep_alloc_desc_chain(struct device *dev,
241                                                     dwc_dma_t * dma_desc_addr,
242                                                     uint32_t count)
243 {
244         return DWC_DMA_ALLOC_ATOMIC(dev, count * sizeof(dwc_otg_dev_dma_desc_t),
245                                                         dma_desc_addr);
246 }
247
248 /**
249  * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc.
250  */
251 void dwc_otg_ep_free_desc_chain(struct device *dev,
252                                 dwc_otg_dev_dma_desc_t * desc_addr,
253                                 uint32_t dma_desc_addr, uint32_t count)
254 {
255         DWC_DMA_FREE(dev, count * sizeof(dwc_otg_dev_dma_desc_t), desc_addr,
256                      dma_desc_addr);
257 }
258
259 #ifdef DWC_EN_ISOC
260
261 /**
262  * This function initializes a descriptor chain for Isochronous transfer
263  *
264  * @param core_if Programming view of DWC_otg controller.
265  * @param dwc_ep The EP to start the transfer on.
266  *
267  */
268 void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t * core_if,
269                                         dwc_ep_t * dwc_ep)
270 {
271
272         dsts_data_t dsts = {.d32 = 0 };
273         depctl_data_t depctl = {.d32 = 0 };
274         volatile uint32_t *addr;
275         int i, j;
276         uint32_t len;
277
278         if (dwc_ep->is_in)
279                 dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval;
280         else
281                 dwc_ep->desc_cnt =
282                     dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
283                     dwc_ep->bInterval;
284
285         /** Allocate descriptors for double buffering */
286         dwc_ep->iso_desc_addr =
287             dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,
288                                         dwc_ep->desc_cnt * 2);
289         if (dwc_ep->desc_addr) {
290                 DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
291                 return;
292         }
293
294         dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
295
296         /** ISO OUT EP */
297         if (dwc_ep->is_in == 0) {
298                 dev_dma_desc_sts_t sts = {.d32 = 0 };
299                 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
300                 dma_addr_t dma_ad;
301                 uint32_t data_per_desc;
302                 dwc_otg_dev_out_ep_regs_t *out_regs =
303                     core_if->dev_if->out_ep_regs[dwc_ep->num];
304                 int offset;
305
306                 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
307                 dma_ad = (dma_addr_t) DWC_READ_REG32(&(out_regs->doepdma));
308
309                 /** Buffer 0 descriptors setup */
310                 dma_ad = dwc_ep->dma_addr0;
311
312                 sts.b_iso_out.bs = BS_HOST_READY;
313                 sts.b_iso_out.rxsts = 0;
314                 sts.b_iso_out.l = 0;
315                 sts.b_iso_out.sp = 0;
316                 sts.b_iso_out.ioc = 0;
317                 sts.b_iso_out.pid = 0;
318                 sts.b_iso_out.framenum = 0;
319
320                 offset = 0;
321                 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
322                      i += dwc_ep->pkt_per_frm) {
323
324                         for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
325                                 uint32_t len = (j + 1) * dwc_ep->maxpacket;
326                                 if (len > dwc_ep->data_per_frame)
327                                         data_per_desc =
328                                             dwc_ep->data_per_frame -
329                                             j * dwc_ep->maxpacket;
330                                 else
331                                         data_per_desc = dwc_ep->maxpacket;
332                                 len = data_per_desc % 4;
333                                 if (len)
334                                         data_per_desc += 4 - len;
335
336                                 sts.b_iso_out.rxbytes = data_per_desc;
337                                 dma_desc->buf = dma_ad;
338                                 dma_desc->status.d32 = sts.d32;
339
340                                 offset += data_per_desc;
341                                 dma_desc++;
342                                 dma_ad += data_per_desc;
343                         }
344                 }
345
346                 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
347                         uint32_t len = (j + 1) * dwc_ep->maxpacket;
348                         if (len > dwc_ep->data_per_frame)
349                                 data_per_desc =
350                                     dwc_ep->data_per_frame -
351                                     j * dwc_ep->maxpacket;
352                         else
353                                 data_per_desc = dwc_ep->maxpacket;
354                         len = data_per_desc % 4;
355                         if (len)
356                                 data_per_desc += 4 - len;
357                         sts.b_iso_out.rxbytes = data_per_desc;
358                         dma_desc->buf = dma_ad;
359                         dma_desc->status.d32 = sts.d32;
360
361                         offset += data_per_desc;
362                         dma_desc++;
363                         dma_ad += data_per_desc;
364                 }
365
366                 sts.b_iso_out.ioc = 1;
367                 len = (j + 1) * dwc_ep->maxpacket;
368                 if (len > dwc_ep->data_per_frame)
369                         data_per_desc =
370                             dwc_ep->data_per_frame - j * dwc_ep->maxpacket;
371                 else
372                         data_per_desc = dwc_ep->maxpacket;
373                 len = data_per_desc % 4;
374                 if (len)
375                         data_per_desc += 4 - len;
376                 sts.b_iso_out.rxbytes = data_per_desc;
377
378                 dma_desc->buf = dma_ad;
379                 dma_desc->status.d32 = sts.d32;
380                 dma_desc++;
381
382                 /** Buffer 1 descriptors setup */
383                 sts.b_iso_out.ioc = 0;
384                 dma_ad = dwc_ep->dma_addr1;
385
386                 offset = 0;
387                 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
388                      i += dwc_ep->pkt_per_frm) {
389                         for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
390                                 uint32_t len = (j + 1) * dwc_ep->maxpacket;
391                                 if (len > dwc_ep->data_per_frame)
392                                         data_per_desc =
393                                             dwc_ep->data_per_frame -
394                                             j * dwc_ep->maxpacket;
395                                 else
396                                         data_per_desc = dwc_ep->maxpacket;
397                                 len = data_per_desc % 4;
398                                 if (len)
399                                         data_per_desc += 4 - len;
400
401                                 data_per_desc =
402                                     sts.b_iso_out.rxbytes = data_per_desc;
403                                 dma_desc->buf = dma_ad;
404                                 dma_desc->status.d32 = sts.d32;
405
406                                 offset += data_per_desc;
407                                 dma_desc++;
408                                 dma_ad += data_per_desc;
409                         }
410                 }
411                 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
412                         data_per_desc =
413                             ((j + 1) * dwc_ep->maxpacket >
414                              dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
415                             j * dwc_ep->maxpacket : dwc_ep->maxpacket;
416                         data_per_desc +=
417                             (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
418                         sts.b_iso_out.rxbytes = data_per_desc;
419                         dma_desc->buf = dma_ad;
420                         dma_desc->status.d32 = sts.d32;
421
422                         offset += data_per_desc;
423                         dma_desc++;
424                         dma_ad += data_per_desc;
425                 }
426
427                 sts.b_iso_out.ioc = 1;
428                 sts.b_iso_out.l = 1;
429                 data_per_desc =
430                     ((j + 1) * dwc_ep->maxpacket >
431                      dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
432                     j * dwc_ep->maxpacket : dwc_ep->maxpacket;
433                 data_per_desc +=
434                     (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
435                 sts.b_iso_out.rxbytes = data_per_desc;
436
437                 dma_desc->buf = dma_ad;
438                 dma_desc->status.d32 = sts.d32;
439
440                 dwc_ep->next_frame = 0;
441
442                 /** Write dma_ad into DOEPDMA register */
443                 DWC_WRITE_REG32(&(out_regs->doepdma),
444                                 (uint32_t) dwc_ep->iso_dma_desc_addr);
445
446         }
447         /** ISO IN EP */
448         else {
449                 dev_dma_desc_sts_t sts = {.d32 = 0 };
450                 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
451                 dma_addr_t dma_ad;
452                 dwc_otg_dev_in_ep_regs_t *in_regs =
453                     core_if->dev_if->in_ep_regs[dwc_ep->num];
454                 unsigned int frmnumber;
455                 fifosize_data_t txfifosize, rxfifosize;
456
457                 txfifosize.d32 =
458                     DWC_READ_REG32(&core_if->dev_if->in_ep_regs[dwc_ep->num]->
459                                    dtxfsts);
460                 rxfifosize.d32 =
461                     DWC_READ_REG32(&core_if->core_global_regs->grxfsiz);
462
463                 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
464
465                 dma_ad = dwc_ep->dma_addr0;
466
467                 dsts.d32 =
468                     DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
469
470                 sts.b_iso_in.bs = BS_HOST_READY;
471                 sts.b_iso_in.txsts = 0;
472                 sts.b_iso_in.sp =
473                     (dwc_ep->data_per_frame % dwc_ep->maxpacket) ? 1 : 0;
474                 sts.b_iso_in.ioc = 0;
475                 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
476
477                 frmnumber = dwc_ep->next_frame;
478
479                 sts.b_iso_in.framenum = frmnumber;
480                 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
481                 sts.b_iso_in.l = 0;
482
483                 /** Buffer 0 descriptors setup */
484                 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
485                         dma_desc->buf = dma_ad;
486                         dma_desc->status.d32 = sts.d32;
487                         dma_desc++;
488
489                         dma_ad += dwc_ep->data_per_frame;
490                         sts.b_iso_in.framenum += dwc_ep->bInterval;
491                 }
492
493                 sts.b_iso_in.ioc = 1;
494                 dma_desc->buf = dma_ad;
495                 dma_desc->status.d32 = sts.d32;
496                 ++dma_desc;
497
498                 /** Buffer 1 descriptors setup */
499                 sts.b_iso_in.ioc = 0;
500                 dma_ad = dwc_ep->dma_addr1;
501
502                 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
503                      i += dwc_ep->pkt_per_frm) {
504                         dma_desc->buf = dma_ad;
505                         dma_desc->status.d32 = sts.d32;
506                         dma_desc++;
507
508                         dma_ad += dwc_ep->data_per_frame;
509                         sts.b_iso_in.framenum += dwc_ep->bInterval;
510
511                         sts.b_iso_in.ioc = 0;
512                 }
513                 sts.b_iso_in.ioc = 1;
514                 sts.b_iso_in.l = 1;
515
516                 dma_desc->buf = dma_ad;
517                 dma_desc->status.d32 = sts.d32;
518
519                 dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval;
520
521                 /** Write dma_ad into diepdma register */
522                 DWC_WRITE_REG32(&(in_regs->diepdma),
523                                 (uint32_t) dwc_ep->iso_dma_desc_addr);
524         }
525         /** Enable endpoint, clear nak  */
526         depctl.d32 = 0;
527         depctl.b.epena = 1;
528         depctl.b.usbactep = 1;
529         depctl.b.cnak = 1;
530
531         DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
532         depctl.d32 = DWC_READ_REG32(addr);
533 }
534
535 /**
536  * This function initializes a descriptor chain for Isochronous transfer
537  *
538  * @param core_if Programming view of DWC_otg controller.
539  * @param ep The EP to start the transfer on.
540  *
541  */
542 void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t * core_if,
543                                        dwc_ep_t * ep)
544 {
545         depctl_data_t depctl = {.d32 = 0 };
546         volatile uint32_t *addr;
547
548         if (ep->is_in) {
549                 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
550         } else {
551                 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
552         }
553
554         if (core_if->dma_enable == 0 || core_if->dma_desc_enable != 0) {
555                 return;
556         } else {
557                 deptsiz_data_t deptsiz = {.d32 = 0 };
558
559                 ep->xfer_len =
560                     ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval;
561                 ep->pkt_cnt =
562                     (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
563                 ep->xfer_count = 0;
564                 ep->xfer_buff =
565                     (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
566                 ep->dma_addr =
567                     (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
568
569                 if (ep->is_in) {
570                         /* Program the transfer size and packet count
571                          *      as follows: xfersize = N * maxpacket +
572                          *      short_packet pktcnt = N + (short_packet
573                          *      exist ? 1 : 0)
574                          */
575                         deptsiz.b.mc = ep->pkt_per_frm;
576                         deptsiz.b.xfersize = ep->xfer_len;
577                         deptsiz.b.pktcnt =
578                             (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
579                         DWC_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
580                                         dieptsiz, deptsiz.d32);
581
582                         /* Write the DMA register */
583                         DWC_WRITE_REG32(&
584                                         (core_if->dev_if->in_ep_regs[ep->num]->
585                                          diepdma), (uint32_t) ep->dma_addr);
586
587                 } else {
588                         deptsiz.b.pktcnt =
589                             (ep->xfer_len + (ep->maxpacket - 1)) /
590                             ep->maxpacket;
591                         deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
592
593                         DWC_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
594                                         doeptsiz, deptsiz.d32);
595
596                         /* Write the DMA register */
597                         DWC_WRITE_REG32(&
598                                         (core_if->dev_if->out_ep_regs[ep->num]->
599                                          doepdma), (uint32_t) ep->dma_addr);
600
601                 }
602                 /** Enable endpoint, clear nak  */
603                 depctl.d32 = 0;
604                 depctl.b.epena = 1;
605                 depctl.b.cnak = 1;
606
607                 DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
608         }
609 }
610
611 /**
612  * This function does the setup for a data transfer for an EP and
613  * starts the transfer. For an IN transfer, the packets will be
614  * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
615  * the packets are unloaded from the Rx FIFO in the ISR.
616  *
617  * @param core_if Programming view of DWC_otg controller.
618  * @param ep The EP to start the transfer on.
619  */
620
621 static void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t * core_if,
622                                           dwc_ep_t * ep)
623 {
624         if (core_if->dma_enable) {
625                 if (core_if->dma_desc_enable) {
626                         if (ep->is_in) {
627                                 ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
628                         } else {
629                                 ep->desc_cnt = ep->pkt_cnt;
630                         }
631                         dwc_otg_iso_ep_start_ddma_transfer(core_if, ep);
632                 } else {
633                         if (core_if->pti_enh_enable) {
634                                 dwc_otg_iso_ep_start_buf_transfer(core_if, ep);
635                         } else {
636                                 ep->cur_pkt_addr =
637                                     (ep->proc_buf_num) ? ep->xfer_buff1 : ep->
638                                     xfer_buff0;
639                                 ep->cur_pkt_dma_addr =
640                                     (ep->proc_buf_num) ? ep->dma_addr1 : ep->
641                                     dma_addr0;
642                                 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
643                         }
644                 }
645         } else {
646                 ep->cur_pkt_addr =
647                     (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
648                 ep->cur_pkt_dma_addr =
649                     (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
650                 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
651         }
652 }
653
654 /**
655  * This function stops transfer for an EP and
656  * resets the ep's variables.
657  *
658  * @param core_if Programming view of DWC_otg controller.
659  * @param ep The EP to start the transfer on.
660  */
661
662 void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t * core_if, dwc_ep_t * ep)
663 {
664         depctl_data_t depctl = {.d32 = 0 };
665         volatile uint32_t *addr;
666
667         if (ep->is_in == 1) {
668                 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
669         } else {
670                 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
671         }
672
673         /* disable the ep */
674         depctl.d32 = DWC_READ_REG32(addr);
675
676         depctl.b.epdis = 1;
677         depctl.b.snak = 1;
678
679         DWC_WRITE_REG32(addr, depctl.d32);
680
681         if (core_if->dma_desc_enable &&
682             ep->iso_desc_addr && ep->iso_dma_desc_addr) {
683                 dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,
684                                            ep->iso_dma_desc_addr,
685                                            ep->desc_cnt * 2);
686         }
687
688         /* reset varibales */
689         ep->dma_addr0 = 0;
690         ep->dma_addr1 = 0;
691         ep->xfer_buff0 = 0;
692         ep->xfer_buff1 = 0;
693         ep->data_per_frame = 0;
694         ep->data_pattern_frame = 0;
695         ep->sync_frame = 0;
696         ep->buf_proc_intrvl = 0;
697         ep->bInterval = 0;
698         ep->proc_buf_num = 0;
699         ep->pkt_per_frm = 0;
700         ep->pkt_per_frm = 0;
701         ep->desc_cnt = 0;
702         ep->iso_desc_addr = 0;
703         ep->iso_dma_desc_addr = 0;
704 }
705
/**
 * Start streaming isochronous transfers on an endpoint.
 *
 * Configures the EP's double-buffered ISO state (buffers, DMA
 * addresses, per-frame payload, packet/descriptor counts, start frame)
 * and kicks off the first transfer.
 *
 * @param pcd the PCD instance
 * @param ep_handle gadget-layer handle identifying the endpoint
 * @param buf0 CPU address of streaming buffer 0
 * @param buf1 CPU address of streaming buffer 1
 * @param dma0 DMA address of streaming buffer 0
 * @param dma1 DMA address of streaming buffer 1
 * @param sync_frame ISO sync frame
 * @param dp_frame data pattern frame (pattern data not implemented yet)
 * @param data_per_frame payload bytes per (micro)frame
 * @param start_frame frame number to start on, or -1 to pick the next
 *	  suitable frame from the current SOF number
 * @param buf_proc_intrvl buffer processing interval
 * @param req_handle gadget-layer handle for this ISO request
 * @param atomic_alloc nonzero to allocate packet info without sleeping
 * @return 0 on success, negative DWC_E_* error code otherwise
 */
int dwc_otg_pcd_iso_ep_start(dwc_otg_pcd_t * pcd, void *ep_handle,
			     uint8_t * buf0, uint8_t * buf1, dwc_dma_t dma0,
			     dwc_dma_t dma1, int sync_frame, int dp_frame,
			     int data_per_frame, int start_frame,
			     int buf_proc_intrvl, void *req_handle,
			     int atomic_alloc)
{
	dwc_otg_pcd_ep_t *ep;
	dwc_irqflags_t flags = 0;
	dwc_ep_t *dwc_ep;
	int32_t frm_data;
	dsts_data_t dsts;
	dwc_otg_core_if_t *core_if;

	ep = get_ep_from_handle(pcd, ep_handle);

	/* ISO streaming is not valid on EP0. */
	if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
		DWC_WARN("bad ep\n");
		return -DWC_E_INVALID;
	}

	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
	core_if = GET_CORE_IF(pcd);
	dwc_ep = &ep->dwc_ep;

	/* NOTE(review): only warns when a request is already in flight,
	 * then overwrites its state below - confirm whether this should
	 * bail out with an error instead. */
	if (ep->iso_req_handle) {
		DWC_WARN("ISO request in progress\n");
	}

	dwc_ep->dma_addr0 = dma0;
	dwc_ep->dma_addr1 = dma1;

	dwc_ep->xfer_buff0 = buf0;
	dwc_ep->xfer_buff1 = buf1;

	dwc_ep->data_per_frame = data_per_frame;

	/** @todo - pattern data support is to be implemented in the future */
	dwc_ep->data_pattern_frame = dp_frame;
	dwc_ep->sync_frame = sync_frame;

	dwc_ep->buf_proc_intrvl = buf_proc_intrvl;

	/* bInterval is encoded as 2^(descriptor bInterval - 1) frames. */
	dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1);

	dwc_ep->proc_buf_num = 0;

	/* Packets per frame = ceil(data_per_frame / maxpacket). */
	dwc_ep->pkt_per_frm = 0;
	frm_data = ep->dwc_ep.data_per_frame;
	while (frm_data > 0) {
		dwc_ep->pkt_per_frm++;
		frm_data -= ep->dwc_ep.maxpacket;
	}

	dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);

	if (start_frame == -1) {
		/* Start on the next frame, aligned to the EP interval. */
		dwc_ep->next_frame = dsts.b.soffn + 1;
		if (dwc_ep->bInterval != 1) {
			dwc_ep->next_frame =
			    dwc_ep->next_frame + (dwc_ep->bInterval - 1 -
						  dwc_ep->next_frame %
						  dwc_ep->bInterval);
		}
	} else {
		dwc_ep->next_frame = start_frame;
	}

	/* Total packet count over the buffer processing interval. */
	if (!core_if->pti_enh_enable) {
		dwc_ep->pkt_cnt =
		    dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
		    dwc_ep->bInterval;
	} else {
		dwc_ep->pkt_cnt =
		    (dwc_ep->data_per_frame *
		     (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval)
		     - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket;
	}

	if (core_if->dma_desc_enable) {
		dwc_ep->desc_cnt =
		    dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
		    dwc_ep->bInterval;
	}

	/* Per-packet status array, one entry per packet in the interval. */
	if (atomic_alloc) {
		dwc_ep->pkt_info =
		    DWC_ALLOC_ATOMIC(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
	} else {
		dwc_ep->pkt_info =
		    DWC_ALLOC(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
	}
	if (!dwc_ep->pkt_info) {
		DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
		return -DWC_E_NO_MEMORY;
	}
	if (core_if->pti_enh_enable) {
		dwc_memset(dwc_ep->pkt_info, 0,
			   sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
	}

	dwc_ep->cur_pkt = 0;
	ep->iso_req_handle = req_handle;

	/* Lock is dropped before programming the hardware. */
	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
	dwc_otg_iso_ep_start_transfer(core_if, dwc_ep);
	return 0;
}
814
815 int dwc_otg_pcd_iso_ep_stop(dwc_otg_pcd_t * pcd, void *ep_handle,
816                             void *req_handle)
817 {
818         dwc_irqflags_t flags = 0;
819         dwc_otg_pcd_ep_t *ep;
820         dwc_ep_t *dwc_ep;
821
822         ep = get_ep_from_handle(pcd, ep_handle);
823         if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
824                 DWC_WARN("bad ep\n");
825                 return -DWC_E_INVALID;
826         }
827         dwc_ep = &ep->dwc_ep;
828
829         dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep);
830
831         DWC_FREE(dwc_ep->pkt_info);
832         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
833         if (ep->iso_req_handle != req_handle) {
834                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
835                 return -DWC_E_INVALID;
836         }
837
838         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
839
840         ep->iso_req_handle = 0;
841         return 0;
842 }
843
844 /**
845  * This function is used for perodical data exchnage between PCD and gadget drivers.
846  * for Isochronous EPs
847  *
848  *      - Every time a sync period completes this function is called to
849  *        perform data exchange between PCD and gadget
850  */
851 void dwc_otg_iso_buffer_done(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * ep,
852                              void *req_handle)
853 {
854         int i;
855         dwc_ep_t *dwc_ep;
856
857         dwc_ep = &ep->dwc_ep;
858
859         DWC_SPINUNLOCK(ep->pcd->lock);
860         pcd->fops->isoc_complete(pcd, ep->priv, ep->iso_req_handle,
861                                  dwc_ep->proc_buf_num ^ 0x1);
862         DWC_SPINLOCK(ep->pcd->lock);
863
864         for (i = 0; i < dwc_ep->pkt_cnt; ++i) {
865                 dwc_ep->pkt_info[i].status = 0;
866                 dwc_ep->pkt_info[i].offset = 0;
867                 dwc_ep->pkt_info[i].length = 0;
868         }
869 }
870
871 int dwc_otg_pcd_get_iso_packet_count(dwc_otg_pcd_t * pcd, void *ep_handle,
872                                      void *iso_req_handle)
873 {
874         dwc_otg_pcd_ep_t *ep;
875         dwc_ep_t *dwc_ep;
876
877         ep = get_ep_from_handle(pcd, ep_handle);
878         if (!ep->desc || ep->dwc_ep.num == 0) {
879                 DWC_WARN("bad ep\n");
880                 return -DWC_E_INVALID;
881         }
882         dwc_ep = &ep->dwc_ep;
883
884         return dwc_ep->pkt_cnt;
885 }
886
887 void dwc_otg_pcd_get_iso_packet_params(dwc_otg_pcd_t * pcd, void *ep_handle,
888                                        void *iso_req_handle, int packet,
889                                        int *status, int *actual, int *offset)
890 {
891         dwc_otg_pcd_ep_t *ep;
892         dwc_ep_t *dwc_ep;
893
894         ep = get_ep_from_handle(pcd, ep_handle);
895         if (!ep)
896                 DWC_WARN("bad ep\n");
897
898         dwc_ep = &ep->dwc_ep;
899
900         *status = dwc_ep->pkt_info[packet].status;
901         *actual = dwc_ep->pkt_info[packet].length;
902         *offset = dwc_ep->pkt_info[packet].offset;
903 }
904
905 #endif /* DWC_EN_ISOC */
906
907 static void dwc_otg_pcd_init_ep(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * pcd_ep,
908                                 uint32_t is_in, uint32_t ep_num)
909 {
910         /* Init EP structure */
911         pcd_ep->desc = 0;
912         pcd_ep->pcd = pcd;
913         pcd_ep->stopped = 1;
914         pcd_ep->queue_sof = 0;
915
916         /* Init DWC ep structure */
917         pcd_ep->dwc_ep.is_in = is_in;
918         pcd_ep->dwc_ep.num = ep_num;
919         pcd_ep->dwc_ep.active = 0;
920         pcd_ep->dwc_ep.tx_fifo_num = 0;
921         /* Control until ep is actvated */
922         pcd_ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
923         pcd_ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
924         pcd_ep->dwc_ep.dma_addr = 0;
925         pcd_ep->dwc_ep.start_xfer_buff = 0;
926         pcd_ep->dwc_ep.xfer_buff = 0;
927         pcd_ep->dwc_ep.xfer_len = 0;
928         pcd_ep->dwc_ep.xfer_count = 0;
929         pcd_ep->dwc_ep.sent_zlp = 0;
930         pcd_ep->dwc_ep.total_len = 0;
931         pcd_ep->dwc_ep.desc_addr = 0;
932         pcd_ep->dwc_ep.dma_desc_addr = 0;
933         DWC_CIRCLEQ_INIT(&pcd_ep->queue);
934 }
935
936 /**
937  * Initialize ep's
938  */
939 static void dwc_otg_pcd_reinit(dwc_otg_pcd_t * pcd)
940 {
941         int i;
942         uint32_t hwcfg1;
943         dwc_otg_pcd_ep_t *ep;
944         int in_ep_cntr, out_ep_cntr;
945         uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
946         uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
947
948         /**
949          * Initialize the EP0 structure.
950          */
951         ep = &pcd->ep0;
952         dwc_otg_pcd_init_ep(pcd, ep, 0, 0);
953
954         in_ep_cntr = 0;
955         hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
956         for (i = 1; in_ep_cntr < num_in_eps; i++) {
957                 if ((hwcfg1 & 0x1) == 0) {
958                         dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
959                         in_ep_cntr++;
960                         /**
961                          * @todo NGS: Add direction to EP, based on contents
962                          * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
963                          * sprintf(";r
964                          */
965                         dwc_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);
966
967                         DWC_CIRCLEQ_INIT(&ep->queue);
968                 }
969                 hwcfg1 >>= 2;
970         }
971
972         out_ep_cntr = 0;
973         hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
974         for (i = 1; out_ep_cntr < num_out_eps; i++) {
975                 if ((hwcfg1 & 0x1) == 0) {
976                         dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
977                         out_ep_cntr++;
978                         /**
979                          * @todo NGS: Add direction to EP, based on contents
980                          * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
981                          * sprintf(";r
982                          */
983                         dwc_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
984                         DWC_CIRCLEQ_INIT(&ep->queue);
985                 }
986                 hwcfg1 >>= 2;
987         }
988
989         pcd->ep0state = EP0_DISCONNECT;
990         pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
991         pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
992 }
993
994 /**
995  * This function is called when the SRP timer expires. The SRP should
996  * complete within 6 seconds.
997  */
static void srp_timeout(void *ptr)
{
	gotgctl_data_t gotgctl;
	dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *) ptr;
	volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;

	gotgctl.d32 = DWC_READ_REG32(addr);

	/* The timer has fired, so it is no longer running. */
	core_if->srp_timer_started = 0;

	if (core_if->adp_enable) {
		if (gotgctl.b.bsesvld == 0) {
			gpwrdn_data_t gpwrdn = {.d32 = 0 };
			DWC_PRINTF("SRP Timeout BSESSVLD = 0\n");
			/* Power off the core */
			if (core_if->power_down == 2) {
				gpwrdn.b.pwrdnswtch = 1;
				DWC_MODIFY_REG32(&core_if->
						 core_global_regs->gpwrdn,
						 gpwrdn.d32, 0);
			}

			/* Re-arm the power management unit and go back to
			 * ADP probing. */
			gpwrdn.d32 = 0;
			gpwrdn.b.pmuintsel = 1;
			gpwrdn.b.pmuactv = 1;
			DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
					 gpwrdn.d32);
			dwc_otg_adp_probe_start(core_if);
		} else {
			/* B-session is valid: bring the core up as a
			 * B-peripheral and start the PCD. */
			DWC_PRINTF("SRP Timeout BSESSVLD = 1\n");
			core_if->op_state = B_PERIPHERAL;
			dwc_otg_core_init(core_if);
			dwc_otg_enable_global_interrupts(core_if);
			cil_pcd_start(core_if);
		}
	}

	if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
	    (core_if->core_params->i2c_enable)) {
		DWC_PRINTF("SRP Timeout\n");

		if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
			/* SRP succeeded before the timeout: notify the PCD
			 * (resume/wakeup callback) if one is registered. */
			if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
				core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
			}

			/* Clear Session Request */
			gotgctl.d32 = 0;
			gotgctl.b.sesreq = 1;
			DWC_MODIFY_REG32(&core_if->core_global_regs->gotgctl,
					 gotgctl.d32, 0);

			core_if->srp_success = 0;
		} else {
			__DWC_ERROR("Device not connected/responding\n");
			gotgctl.b.sesreq = 0;
			DWC_WRITE_REG32(addr, gotgctl.d32);
		}
	} else if (gotgctl.b.sesreq) {
		/* SRP still pending with no response: give up and clear
		 * the session request bit. */
		DWC_PRINTF("SRP Timeout\n");

		__DWC_ERROR("Device not connected/responding\n");
		gotgctl.b.sesreq = 0;
		DWC_WRITE_REG32(addr, gotgctl.d32);
	} else {
		DWC_PRINTF(" SRP GOTGCTL=%0x\n", gotgctl.d32);
	}
}
1066
1067 /**
1068  * Tasklet
1069  *
1070  */
1071 extern void start_next_request(dwc_otg_pcd_ep_t * ep);
1072
1073 static void start_xfer_tasklet_func(void *data)
1074 {
1075         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1076         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1077
1078         int i;
1079         depctl_data_t diepctl;
1080
1081         DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
1082
1083         diepctl.d32 = DWC_READ_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl);
1084
1085         if (pcd->ep0.queue_sof) {
1086                 pcd->ep0.queue_sof = 0;
1087                 start_next_request(&pcd->ep0);
1088                 // break;
1089         }
1090
1091         for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
1092                 depctl_data_t diepctl;
1093                 diepctl.d32 =
1094                     DWC_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
1095
1096                 if (pcd->in_ep[i].queue_sof) {
1097                         pcd->in_ep[i].queue_sof = 0;
1098                         start_next_request(&pcd->in_ep[i]);
1099                         // break;
1100                 }
1101         }
1102
1103         return;
1104 }
1105
1106 /**
1107  * This function initialized the PCD portion of the driver.
1108  *
1109  */
dwc_otg_pcd_t *dwc_otg_pcd_init(dwc_otg_device_t *otg_dev)
{
	struct device *dev = &otg_dev->os_dep.platformdev->dev;
	dwc_otg_core_if_t *core_if = otg_dev->core_if;
	dwc_otg_pcd_t *pcd = NULL;
	dwc_otg_dev_if_t *dev_if;
	int i;

	/*
	 * Allocate PCD structure
	 */
	pcd = DWC_ALLOC(sizeof(dwc_otg_pcd_t));

	if (pcd == NULL) {
		return NULL;
	}

#if (defined(DWC_LINUX) && defined(CONFIG_DEBUG_SPINLOCK))
	DWC_SPINLOCK_ALLOC_LINUX_DEBUG(pcd->lock);
#else
	pcd->lock = DWC_SPINLOCK_ALLOC();
#endif
	DWC_DEBUGPL(DBG_HCDV, "Init of PCD %p given core_if %p\n",
		    pcd, core_if);//GRAYG
	if (!pcd->lock) {
		DWC_ERROR("Could not allocate lock for pcd");
		DWC_FREE(pcd);
		return NULL;
	}
	/* Share the PCD's lock with the CIL layer via core_if->lock. */
	core_if->lock = pcd->lock;
	pcd->core_if = core_if;

	dev_if = core_if->dev_if;
	dev_if->isoc_ep = NULL;

	if (core_if->hwcfg4.b.ded_fifo_en) {
		DWC_PRINTF("Dedicated Tx FIFOs mode\n");
	} else {
		DWC_PRINTF("Shared Tx FIFO mode\n");
	}

	/*
	 * Initialize the Core for Device mode here if there is no ADP support.
	 * Otherwise it will be done later in the dwc_otg_adp_start routine.
	 */
	if (dwc_otg_is_device_mode(core_if) /*&& !core_if->adp_enable*/) {
		dwc_otg_core_dev_init(core_if);
	}

	/*
	 * Register the PCD Callbacks.
	 */
	dwc_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);

	/*
	 * Initialize the DMA buffer for SETUP packets
	 * (room for 5 back-to-back SETUP packets).
	 */
	if (GET_CORE_IF(pcd)->dma_enable) {
		pcd->setup_pkt =
		    DWC_DMA_ALLOC(dev, sizeof(*pcd->setup_pkt) * 5,
				  &pcd->setup_pkt_dma_handle);
		if (pcd->setup_pkt == NULL) {
			DWC_FREE(pcd);
			return NULL;
		}

		/* Buffer for the 2-byte status stage of control transfers. */
		pcd->status_buf =
		    DWC_DMA_ALLOC(dev, sizeof(uint16_t),
				  &pcd->status_buf_dma_handle);
		if (pcd->status_buf == NULL) {
			DWC_DMA_FREE(dev, sizeof(*pcd->setup_pkt) * 5,
				     pcd->setup_pkt, pcd->setup_pkt_dma_handle);
			DWC_FREE(pcd);
			return NULL;
		}

		if (GET_CORE_IF(pcd)->dma_desc_enable) {
			/* One-descriptor chains for the two SETUP buffers
			 * and the EP0 IN/OUT phases. */
			dev_if->setup_desc_addr[0] =
			    dwc_otg_ep_alloc_desc_chain(dev,
				&dev_if->dma_setup_desc_addr[0], 1);
			dev_if->setup_desc_addr[1] =
			    dwc_otg_ep_alloc_desc_chain(dev,
				&dev_if->dma_setup_desc_addr[1], 1);
			dev_if->in_desc_addr =
			    dwc_otg_ep_alloc_desc_chain(dev,
				&dev_if->dma_in_desc_addr, 1);
			dev_if->out_desc_addr =
			    dwc_otg_ep_alloc_desc_chain(dev,
				&dev_if->dma_out_desc_addr, 1);
			pcd->data_terminated = 0;

			if (dev_if->setup_desc_addr[0] == 0
			    || dev_if->setup_desc_addr[1] == 0
			    || dev_if->in_desc_addr == 0
			    || dev_if->out_desc_addr == 0) {

				/* Unwind in reverse allocation order. */
				if (dev_if->out_desc_addr)
					dwc_otg_ep_free_desc_chain(dev,
					     dev_if->out_desc_addr,
					     dev_if->dma_out_desc_addr, 1);
				if (dev_if->in_desc_addr)
					dwc_otg_ep_free_desc_chain(dev,
					     dev_if->in_desc_addr,
					     dev_if->dma_in_desc_addr, 1);
				if (dev_if->setup_desc_addr[1])
					dwc_otg_ep_free_desc_chain(dev,
					     dev_if->setup_desc_addr[1],
					     dev_if->dma_setup_desc_addr[1], 1);
				if (dev_if->setup_desc_addr[0])
					dwc_otg_ep_free_desc_chain(dev,
					     dev_if->setup_desc_addr[0],
					     dev_if->dma_setup_desc_addr[0], 1);

				DWC_DMA_FREE(dev, sizeof(*pcd->setup_pkt) * 5,
					     pcd->setup_pkt,
					     pcd->setup_pkt_dma_handle);
				DWC_DMA_FREE(dev, sizeof(*pcd->status_buf),
					     pcd->status_buf,
					     pcd->status_buf_dma_handle);

				DWC_FREE(pcd);

				return NULL;
			}
		}
	} else {
		/* Slave (non-DMA) mode: plain heap buffers suffice. */
		pcd->setup_pkt = DWC_ALLOC(sizeof(*pcd->setup_pkt) * 5);
		if (pcd->setup_pkt == NULL) {
			DWC_FREE(pcd);
			return NULL;
		}

		pcd->status_buf = DWC_ALLOC(sizeof(uint16_t));
		if (pcd->status_buf == NULL) {
			DWC_FREE(pcd->setup_pkt);
			DWC_FREE(pcd);
			return NULL;
		}
	}

	/* Build the per-endpoint structures from HWCFG1. */
	dwc_otg_pcd_reinit(pcd);

	/* Allocate the cfi object for the PCD */
#ifdef DWC_UTE_CFI
	pcd->cfi = DWC_ALLOC(sizeof(cfiobject_t));
	if (NULL == pcd->cfi)
		goto fail;
	if (init_cfi(pcd->cfi)) {
		CFI_INFO("%s: Failed to init the CFI object\n", __func__);
		goto fail;
	}
#endif

	/* Initialize tasklets */
	pcd->start_xfer_tasklet = DWC_TASK_ALLOC("xfer_tasklet",
						 start_xfer_tasklet_func, pcd);
	pcd->test_mode_tasklet = DWC_TASK_ALLOC("test_mode_tasklet",
						do_test_mode, pcd);

	/* Initialize SRP timer */
	core_if->srp_timer = DWC_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);

	if (core_if->core_params->dev_out_nak) {
		/**
		* Initialize xfer timeout timer. Implemented for
		* 2.93a feature "Device DDMA OUT NAK Enhancement"
		*/
		for(i = 0; i < MAX_EPS_CHANNELS; i++) {
			pcd->core_if->ep_xfer_timer[i] =
				DWC_TIMER_ALLOC("ep timer", ep_xfer_timeout,
				&pcd->core_if->ep_xfer_info[i]);
		}
	}

	return pcd;
#ifdef DWC_UTE_CFI
fail:
#endif
	/* NOTE(review): when dma_enable is set these buffers came from
	 * DWC_DMA_ALLOC, so DWC_FREE here mismatches the allocator --
	 * verify (this path is only reachable under DWC_UTE_CFI). */
	if (pcd->setup_pkt)
		DWC_FREE(pcd->setup_pkt);
	if (pcd->status_buf)
		DWC_FREE(pcd->status_buf);
#ifdef DWC_UTE_CFI
	if (pcd->cfi)
		DWC_FREE(pcd->cfi);
#endif
	if (pcd)
		DWC_FREE(pcd);
	return NULL;

}
1302
1303 /**
1304  * Remove PCD specific data
1305  */
void dwc_otg_pcd_remove(dwc_otg_pcd_t * pcd)
{
	dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
	struct device *dev = dwc_otg_pcd_to_dev(pcd);
	int i;

	/* Stop any pending OUT-NAK transfer-timeout timers first. */
	if (pcd->core_if->core_params->dev_out_nak) {
		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
			DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[i]);
			pcd->core_if->ep_xfer_info[i].state = 0;
		}
	}

	/* Release the SETUP/status buffers with the same allocator that
	 * created them in dwc_otg_pcd_init (DMA-coherent vs. plain heap). */
	if (GET_CORE_IF(pcd)->dma_enable) {
		DWC_DMA_FREE(dev, sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
			     pcd->setup_pkt_dma_handle);
		DWC_DMA_FREE(dev, sizeof(uint16_t), pcd->status_buf,
			     pcd->status_buf_dma_handle);
		if (GET_CORE_IF(pcd)->dma_desc_enable) {
			dwc_otg_ep_free_desc_chain(dev,
						   dev_if->setup_desc_addr[0],
						   dev_if->dma_setup_desc_addr
						   [0], 1);
			dwc_otg_ep_free_desc_chain(dev,
						   dev_if->setup_desc_addr[1],
						   dev_if->dma_setup_desc_addr
						   [1], 1);
			dwc_otg_ep_free_desc_chain(dev,
						   dev_if->in_desc_addr,
						   dev_if->dma_in_desc_addr, 1);
			dwc_otg_ep_free_desc_chain(dev,
						   dev_if->out_desc_addr,
						   dev_if->dma_out_desc_addr,
						   1);
		}
	} else {
		DWC_FREE(pcd->setup_pkt);
		DWC_FREE(pcd->status_buf);
	}
	DWC_SPINLOCK_FREE(pcd->lock);
	/* The lock was shared with the CIL layer; clear its pointer so it
	 * cannot be used after the free above. */
	pcd->core_if->lock = NULL;

	DWC_TASK_FREE(pcd->start_xfer_tasklet);
	DWC_TASK_FREE(pcd->test_mode_tasklet);
	if (pcd->core_if->core_params->dev_out_nak) {
		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
			if (pcd->core_if->ep_xfer_timer[i]) {
					DWC_TIMER_FREE(pcd->core_if->ep_xfer_timer[i]);
			}
		}
	}

/* Release the CFI object's dynamic memory */
#ifdef DWC_UTE_CFI
	if (pcd->cfi->ops.release) {
		pcd->cfi->ops.release(pcd->cfi);
	}
#endif

	DWC_FREE(pcd);
}
1368
1369 /**
1370  * Returns whether registered pcd is dual speed or not
1371  */
1372 uint32_t dwc_otg_pcd_is_dualspeed(dwc_otg_pcd_t * pcd)
1373 {
1374         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1375
1376         if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ||
1377             ((core_if->hwcfg2.b.hs_phy_type == 2) &&
1378              (core_if->hwcfg2.b.fs_phy_type == 1) &&
1379              (core_if->core_params->ulpi_fs_ls))) {
1380                 return 0;
1381         }
1382
1383         return 1;
1384 }
1385
1386 /**
1387  * Returns whether registered pcd is OTG capable or not
1388  */
1389 uint32_t dwc_otg_pcd_is_otg(dwc_otg_pcd_t * pcd)
1390 {
1391         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1392         gusbcfg_data_t usbcfg = {.d32 = 0 };
1393
1394         usbcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
1395         if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap) {
1396                 return 0;
1397         }
1398
1399         return 1;
1400 }
1401
1402 /**
1403  * This function assigns periodic Tx FIFO to an periodic EP
1404  * in shared Tx FIFO mode
1405  */
1406 static uint32_t assign_tx_fifo(dwc_otg_core_if_t * core_if)
1407 {
1408         uint32_t TxMsk = 1;
1409         int i;
1410
1411         for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
1412                 if ((TxMsk & core_if->tx_msk) == 0) {
1413                         core_if->tx_msk |= TxMsk;
1414                         return i + 1;
1415                 }
1416                 TxMsk <<= 1;
1417         }
1418         return 0;
1419 }
1420
1421 /**
1422  * This function assigns periodic Tx FIFO to an periodic EP
1423  * in shared Tx FIFO mode
1424  */
1425 static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t * core_if)
1426 {
1427         uint32_t PerTxMsk = 1;
1428         int i;
1429         for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
1430                 if ((PerTxMsk & core_if->p_tx_msk) == 0) {
1431                         core_if->p_tx_msk |= PerTxMsk;
1432                         return i + 1;
1433                 }
1434                 PerTxMsk <<= 1;
1435         }
1436         return 0;
1437 }
1438
1439 /**
1440  * This function releases periodic Tx FIFO
1441  * in shared Tx FIFO mode
1442  */
1443 static void release_perio_tx_fifo(dwc_otg_core_if_t * core_if,
1444                                   uint32_t fifo_num)
1445 {
1446         core_if->p_tx_msk =
1447             (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
1448 }
1449
1450 /**
1451  * This function releases periodic Tx FIFO
1452  * in shared Tx FIFO mode
1453  */
1454 static void release_tx_fifo(dwc_otg_core_if_t * core_if, uint32_t fifo_num)
1455 {
1456         core_if->tx_msk =
1457             (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
1458 }
1459
1460 /**
1461  * This function is being called from gadget
1462  * to enable PCD endpoint.
1463  */
int dwc_otg_pcd_ep_enable(dwc_otg_pcd_t * pcd,
			  const uint8_t * ep_desc, void *usb_ep)
{
	int num, dir;
	dwc_otg_pcd_ep_t *ep = NULL;
	const usb_endpoint_descriptor_t *desc;
	dwc_irqflags_t flags;
	fifosize_data_t dptxfsiz = {.d32 = 0 };
	gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
	gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
	int retval = 0;
	int i, epcount;
	struct device *dev = dwc_otg_pcd_to_dev(pcd);

	desc = (const usb_endpoint_descriptor_t *)ep_desc;

	/* A NULL descriptor is rejected, but the gadget's handle is still
	 * recorded on EP0. */
	if (!desc) {
		pcd->ep0.priv = usb_ep;
		ep = &pcd->ep0;
		retval = -DWC_E_INVALID;
		goto out;
	}

	num = UE_GET_ADDR(desc->bEndpointAddress);
	dir = UE_GET_DIR(desc->bEndpointAddress);

	if (!UGETW(desc->wMaxPacketSize)) {
		DWC_WARN("bad maxpacketsize\n");
		retval = -DWC_E_INVALID;
		goto out;
	}

	/* Find the PCD endpoint matching the descriptor's number in the
	 * appropriate direction array. */
	if (dir == UE_DIR_IN) {
		epcount = pcd->core_if->dev_if->num_in_eps;
		for (i = 0; i < epcount; i++) {
			if (num == pcd->in_ep[i].dwc_ep.num) {
				ep = &pcd->in_ep[i];
				break;
			}
		}
	} else {
		epcount = pcd->core_if->dev_if->num_out_eps;
		for (i = 0; i < epcount; i++) {
			if (num == pcd->out_ep[i].dwc_ep.num) {
				ep = &pcd->out_ep[i];
				break;
			}
		}
	}

	if (!ep) {
		DWC_WARN("bad address\n");
		retval = -DWC_E_INVALID;
		goto out;
	}

	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);

	ep->desc = desc;
	ep->priv = usb_ep;

	/*
	 * Activate the EP
	 */
	ep->stopped = 0;

	ep->dwc_ep.is_in = (dir == UE_DIR_IN);
	ep->dwc_ep.maxpacket = UGETW(desc->wMaxPacketSize);

	ep->dwc_ep.type = desc->bmAttributes & UE_XFERTYPE;

	if (ep->dwc_ep.is_in) {
		if (!GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
			/* Shared FIFO mode: only ISOC EPs get a periodic
			 * Tx FIFO of their own. */
			ep->dwc_ep.tx_fifo_num = 0;

			if (ep->dwc_ep.type == UE_ISOCHRONOUS) {
				/*
				 * if ISOC EP then assign a Periodic Tx FIFO.
				 */
				ep->dwc_ep.tx_fifo_num =
				    assign_perio_tx_fifo(GET_CORE_IF(pcd));
			}
		} else {
			/*
			 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
			 */
			ep->dwc_ep.tx_fifo_num =
			    assign_tx_fifo(GET_CORE_IF(pcd));
		}

		/* Calculating EP info controller base address: base of the
		 * previous region plus the assigned FIFO's depth (both taken
		 * from the upper halves of GDFIFOCFG / DTXFSIZ). */
		if (ep->dwc_ep.tx_fifo_num
		    && GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
			gdfifocfg.d32 =
			    DWC_READ_REG32(&GET_CORE_IF(pcd)->
					   core_global_regs->gdfifocfg);
			gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
			dptxfsiz.d32 =
			    (DWC_READ_REG32
			     (&GET_CORE_IF(pcd)->core_global_regs->
			      dtxfsiz[ep->dwc_ep.tx_fifo_num - 1]) >> 16);
			gdfifocfg.b.epinfobase =
			    gdfifocfgbase.d32 + dptxfsiz.d32;
			/* Only older cores (<= 2.94a) need this write. */
			if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
				DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
						core_global_regs->gdfifocfg,
						gdfifocfg.d32);
			}
		}
	}
	/* Set initial data PID. */
	if (ep->dwc_ep.type == UE_BULK) {
		ep->dwc_ep.data_pid_start = 0;
	}

	/* Alloc DMA Descriptors */
	if (GET_CORE_IF(pcd)->dma_desc_enable) {
#ifndef DWC_UTE_PER_IO
		if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
#endif
			ep->dwc_ep.desc_addr =
			    dwc_otg_ep_alloc_desc_chain(dev,
						&ep->dwc_ep.dma_desc_addr,
						MAX_DMA_DESC_CNT);
			if (!ep->dwc_ep.desc_addr) {
				DWC_WARN("%s, can't allocate DMA descriptor\n",
					 __func__);
				retval = -DWC_E_SHUTDOWN;
				DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
				goto out;
			}
#ifndef DWC_UTE_PER_IO
		}
#endif
	}

	DWC_DEBUGPL(DBG_PCD, "Activate %s: type=%d, mps=%d desc=%p\n",
		    (ep->dwc_ep.is_in ? "IN" : "OUT"),
		    ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
#ifdef DWC_UTE_PER_IO
	ep->dwc_ep.xiso_bInterval = 1 << (ep->desc->bInterval - 1);
#endif
	if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
		/* bInterval is exponential (2^(bInterval-1) frames). */
		ep->dwc_ep.bInterval = 1 << (ep->desc->bInterval - 1);
		ep->dwc_ep.frame_num = 0xFFFFFFFF;
	}

	dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);

#ifdef DWC_UTE_CFI
	if (pcd->cfi->ops.ep_enable) {
		pcd->cfi->ops.ep_enable(pcd->cfi, pcd, ep);
	}
#endif

	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);

out:
	return retval;
}
1624
/**
 * Called from the gadget layer to disable a PCD endpoint.
 *
 * Nukes (completes with -SHUTDOWN) every queued request, deactivates the
 * endpoint in the core, releases any Tx FIFO it held, shrinks the EP-info
 * region of GDFIFOCFG accordingly, and frees the endpoint's DMA
 * descriptor chain (non-ISOC, descriptor-DMA mode only).
 *
 * @param pcd       Programming view of the device controller.
 * @param ep_handle Opaque endpoint handle supplied by the gadget layer.
 * @return 0 on success, -DWC_E_INVALID if the handle is bad or the
 *         endpoint is not currently enabled (ep->desc == NULL).
 */
int dwc_otg_pcd_ep_disable(dwc_otg_pcd_t * pcd, void *ep_handle)
{
	dwc_otg_pcd_ep_t *ep;
	dwc_irqflags_t flags;
	dwc_otg_dev_dma_desc_t *desc_addr;
	dwc_dma_t dma_desc_addr;
	gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
	gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
	fifosize_data_t dptxfsiz = {.d32 = 0 };
	struct device *dev = dwc_otg_pcd_to_dev(pcd);

	ep = get_ep_from_handle(pcd, ep_handle);

	/* An endpoint with no descriptor was never enabled (or is EP0). */
	if (!ep || !ep->desc) {
		DWC_DEBUGPL(DBG_PCD, "bad ep address\n");
		return -DWC_E_INVALID;
	}

	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);

	/* Complete all outstanding requests with -SHUTDOWN before the
	 * endpoint registers are touched. */
	dwc_otg_request_nuke(ep);

	dwc_otg_ep_deactivate(GET_CORE_IF(pcd), &ep->dwc_ep);
	if (pcd->core_if->core_params->dev_out_nak) {
		/* Stop the OUT-NAK workaround timer and clear its state
		 * for this endpoint number. */
		DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[ep->dwc_ep.num]);
		pcd->core_if->ep_xfer_info[ep->dwc_ep.num].state = 0;
	}
	/* Mark the endpoint disabled/idle from the PCD's point of view. */
	ep->desc = NULL;
	ep->stopped = 1;

	/* Snapshot GDFIFOCFG; the EP-info base address lives in the
	 * upper 16 bits. */
	gdfifocfg.d32 =
	    DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg);
	gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;

	if (ep->dwc_ep.is_in) {
		if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
			/* Flush the Tx FIFO */
			dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd),
					      ep->dwc_ep.tx_fifo_num);
		}
		/* Return the FIFO number to the free pool (only one of
		 * these calls matches the allocation mode in use). */
		release_perio_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
		release_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
		if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
			/* Decreasing EPinfo Base Addr */
			/* Subtract the released FIFO's depth (DTXFSIZ
			 * upper 16 bits) from the EP-info base. */
			dptxfsiz.d32 =
			    (DWC_READ_REG32
			     (&GET_CORE_IF(pcd)->
				core_global_regs->dtxfsiz[ep->dwc_ep.tx_fifo_num-1]) >> 16);
			gdfifocfg.b.epinfobase = gdfifocfgbase.d32 - dptxfsiz.d32;
			/* Cores newer than 2.94a manage this in hardware;
			 * only older revisions need the write-back. */
			if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
				DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg,
					gdfifocfg.d32);
			}
		}
	}

	/* Free DMA Descriptors */
	if (GET_CORE_IF(pcd)->dma_desc_enable) {
		if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
			/* Copy the addresses out so the chain can be freed
			 * after the lock is dropped. */
			desc_addr = ep->dwc_ep.desc_addr;
			dma_desc_addr = ep->dwc_ep.dma_desc_addr;

			/* Cannot call dma_free_coherent() with IRQs disabled */
			DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
			dwc_otg_ep_free_desc_chain(dev, desc_addr, dma_desc_addr,
						   MAX_DMA_DESC_CNT);

			goto out_unlocked;
		}
	}
	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);

out_unlocked:
	DWC_DEBUGPL(DBG_PCD, "%d %s disabled\n", ep->dwc_ep.num,
		    ep->dwc_ep.is_in ? "IN" : "OUT");
	return 0;

}
1707
1708 /******************************************************************************/
1709 #ifdef DWC_UTE_PER_IO
1710
1711 /**
1712  * Free the request and its extended parts
1713  *
1714  */
1715 void dwc_pcd_xiso_ereq_free(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req)
1716 {
1717         DWC_FREE(req->ext_req.per_io_frame_descs);
1718         DWC_FREE(req);
1719 }
1720
/**
 * Start the next extended-Isoc request in the endpoint's queue.
 *
 * If a transfer is already active on the endpoint this is a no-op.
 * Otherwise the head request's per-frame descriptors are translated into
 * the endpoint's DMA descriptor chain and the endpoint is enabled.
 *
 * @return 0 always (a busy endpoint is not an error).
 */
int dwc_otg_pcd_xiso_start_next_request(dwc_otg_pcd_t * pcd,
					dwc_otg_pcd_ep_t * ep)
{
	int i;
	dwc_otg_pcd_request_t *req = NULL;
	dwc_ep_t *dwcep = NULL;
	struct dwc_iso_xreq_port *ereq = NULL;
	struct dwc_iso_pkt_desc_port *ddesc_iso;
	uint16_t nat;
	depctl_data_t diepctl;

	dwcep = &ep->dwc_ep;

	/* Only one chain may be in flight per endpoint; bail out early if
	 * hardware is still working on a previous request. */
	if (dwcep->xiso_active_xfers > 0) {
#if 0	//Disable this to decrease s/w overhead that is crucial for Isoc transfers
		DWC_WARN("There are currently active transfers for EP%d \
				(active=%d; queued=%d)", dwcep->num, dwcep->xiso_active_xfers,
				dwcep->xiso_queued_xfers);
#endif
		return 0;
	}

	/* "Additional transactions per microframe" field, bits [12:11] of
	 * wMaxPacketSize (high-bandwidth Isoc). Used below for the PID. */
	nat = UGETW(ep->desc->wMaxPacketSize);
	nat = (nat >> 11) & 0x03;

	if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
		req = DWC_CIRCLEQ_FIRST(&ep->queue);
		ereq = &req->ext_req;
		ep->stopped = 0;

		/* Get the frame number */
		dwcep->xiso_frame_num =
		    dwc_otg_get_frame_number(GET_CORE_IF(pcd));
		DWC_DEBUG("FRM_NUM=%d", dwcep->xiso_frame_num);

		ddesc_iso = ereq->per_io_frame_descs;

		if (dwcep->is_in) {
			/* Setup DMA Descriptor chain for IN Isoc request */
			for (i = 0; i < ereq->pio_pkt_count; i++) {
				//if ((i % (nat + 1)) == 0)
				/* Advance the (14-bit) target frame by
				 * bInterval for every descriptor after the
				 * first; the first uses the current frame. */
				if ( i > 0 )
					dwcep->xiso_frame_num =
					    (dwcep->xiso_bInterval +
										dwcep->xiso_frame_num) & 0x3FFF;
				dwcep->desc_addr[i].buf =
				    req->dma + ddesc_iso[i].offset;
				dwcep->desc_addr[i].status.b_iso_in.txbytes =
				    ddesc_iso[i].length;
				dwcep->desc_addr[i].status.b_iso_in.framenum =
				    dwcep->xiso_frame_num;
				dwcep->desc_addr[i].status.b_iso_in.bs =
				    BS_HOST_READY;
				dwcep->desc_addr[i].status.b_iso_in.txsts = 0;
				/* Short packet flag for a non-multiple-of-mps
				 * payload. */
				dwcep->desc_addr[i].status.b_iso_in.sp =
				    (ddesc_iso[i].length %
				     dwcep->maxpacket) ? 1 : 0;
				dwcep->desc_addr[i].status.b_iso_in.ioc = 0;
				/* PID encodes packets-per-microframe (nat+1). */
				dwcep->desc_addr[i].status.b_iso_in.pid = nat + 1;
				dwcep->desc_addr[i].status.b_iso_in.l = 0;

				/* Process the last descriptor */
				if (i == ereq->pio_pkt_count - 1) {
					/* Interrupt-on-completion + list-end
					 * only on the final descriptor. */
					dwcep->desc_addr[i].status.b_iso_in.ioc = 1;
					dwcep->desc_addr[i].status.b_iso_in.l = 1;
				}
			}

			/* Setup and start the transfer for this endpoint */
			/* Program the chain address first, then enable the
			 * endpoint and clear NAK in one DIEPCTL modify. */
			dwcep->xiso_active_xfers++;
			DWC_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
					in_ep_regs[dwcep->num]->diepdma,
					dwcep->dma_desc_addr);
			diepctl.d32 = 0;
			diepctl.b.epena = 1;
			diepctl.b.cnak = 1;
			DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
					 in_ep_regs[dwcep->num]->diepctl, 0,
					 diepctl.d32);
		} else {
			/* Setup DMA Descriptor chain for OUT Isoc request */
			for (i = 0; i < ereq->pio_pkt_count; i++) {
				//if ((i % (nat + 1)) == 0)
				/* Unlike the IN path, the frame number is
				 * advanced for every descriptor including
				 * the first. */
				dwcep->xiso_frame_num = (dwcep->xiso_bInterval +
										dwcep->xiso_frame_num) & 0x3FFF;
				dwcep->desc_addr[i].buf =
				    req->dma + ddesc_iso[i].offset;
				dwcep->desc_addr[i].status.b_iso_out.rxbytes =
				    ddesc_iso[i].length;
				dwcep->desc_addr[i].status.b_iso_out.framenum =
				    dwcep->xiso_frame_num;
				dwcep->desc_addr[i].status.b_iso_out.bs =
				    BS_HOST_READY;
				dwcep->desc_addr[i].status.b_iso_out.rxsts = 0;
				dwcep->desc_addr[i].status.b_iso_out.sp =
				    (ddesc_iso[i].length %
				     dwcep->maxpacket) ? 1 : 0;
				dwcep->desc_addr[i].status.b_iso_out.ioc = 0;
				dwcep->desc_addr[i].status.b_iso_out.pid = nat + 1;
				dwcep->desc_addr[i].status.b_iso_out.l = 0;

				/* Process the last descriptor */
				if (i == ereq->pio_pkt_count - 1) {
					dwcep->desc_addr[i].status.b_iso_out.ioc = 1;
					dwcep->desc_addr[i].status.b_iso_out.l = 1;
				}
			}

			/* Setup and start the transfer for this endpoint */
			/* NOTE: the diepctl variable is reused here to build
			 * the DOEPCTL value; the layout of the enable/cnak
			 * bits is the same for both directions. */
			dwcep->xiso_active_xfers++;
			DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
					dev_if->out_ep_regs[dwcep->num]->
					doepdma, dwcep->dma_desc_addr);
			diepctl.d32 = 0;
			diepctl.b.epena = 1;
			diepctl.b.cnak = 1;
			DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
					 dev_if->out_ep_regs[dwcep->num]->
					 doepctl, 0, diepctl.d32);
		}

	} else {
		/* Nothing queued: park the endpoint. */
		ep->stopped = 1;
	}

	return 0;
}
1852
/**
 * Complete the extended-Isoc request at the head of the endpoint queue.
 *
 * Removes the request from the queue, copies per-packet results (actual
 * length and status) back from the DMA descriptors into the portable
 * request, invokes the gadget completion callback, frees the request and
 * starts the next queued transfer.
 *
 * Must be entered with ep->pcd->lock held; the lock is dropped around the
 * completion callback and re-acquired afterwards.
 */
void complete_xiso_ep(dwc_otg_pcd_ep_t * ep)
{
	dwc_otg_pcd_request_t *req = NULL;
	struct dwc_iso_xreq_port *ereq = NULL;
	struct dwc_iso_pkt_desc_port *ddesc_iso = NULL;
	dwc_ep_t *dwcep = NULL;
	int i;

	//DWC_DEBUG();
	dwcep = &ep->dwc_ep;

	/* Get the first pending request from the queue */
	if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
		req = DWC_CIRCLEQ_FIRST(&ep->queue);
		if (!req) {
			DWC_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
			return;
		}
		/* One chain finished: update the bookkeeping counters. */
		dwcep->xiso_active_xfers--;
		dwcep->xiso_queued_xfers--;
		/* Remove this request from the queue */
		DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
	} else {
		DWC_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
		return;
	}

	ep->stopped = 1;
	ereq = &req->ext_req;
	ddesc_iso = ereq->per_io_frame_descs;

	/* A negative count indicates a completion/start imbalance —
	 * warn but continue. */
	if (dwcep->xiso_active_xfers < 0) {
		DWC_WARN("EP#%d (xiso_active_xfers=%d)", dwcep->num,
			 dwcep->xiso_active_xfers);
	}

	/* Fill the Isoc descs of portable extended req from dma descriptors */
	for (i = 0; i < ereq->pio_pkt_count; i++) {
		if (dwcep->is_in) {	/* IN endpoints */
			/* txbytes counts down as data is sent, so the
			 * transferred amount is length - remaining. */
			ddesc_iso[i].actual_length = ddesc_iso[i].length -
			    dwcep->desc_addr[i].status.b_iso_in.txbytes;
			ddesc_iso[i].status =
			    dwcep->desc_addr[i].status.b_iso_in.txsts;
		} else {	/* OUT endpoints */
			ddesc_iso[i].actual_length = ddesc_iso[i].length -
			    dwcep->desc_addr[i].status.b_iso_out.rxbytes;
			ddesc_iso[i].status =
			    dwcep->desc_addr[i].status.b_iso_out.rxsts;
		}
	}

	/* Drop the lock across the callback: the gadget's completion
	 * handler may re-enter the PCD (e.g. queue another request). */
	DWC_SPINUNLOCK(ep->pcd->lock);

	/* Call the completion function in the non-portable logic */
	ep->pcd->fops->xisoc_complete(ep->pcd, ep->priv, req->priv, 0,
				      &req->ext_req);

	DWC_SPINLOCK(ep->pcd->lock);

	/* Free the request - specific freeing needed for extended request object */
	dwc_pcd_xiso_ereq_free(ep, req);

	/* Start the next request */
	dwc_otg_pcd_xiso_start_next_request(ep->pcd, ep);

	return;
}
1923
1924 /**
1925  * Create and initialize the Isoc pkt descriptors of the extended request.
1926  *
1927  */
1928 static int dwc_otg_pcd_xiso_create_pkt_descs(dwc_otg_pcd_request_t * req,
1929                                              void *ereq_nonport,
1930                                              int atomic_alloc)
1931 {
1932         struct dwc_iso_xreq_port *ereq = NULL;
1933         struct dwc_iso_xreq_port *req_mapped = NULL;
1934         struct dwc_iso_pkt_desc_port *ipds = NULL;      /* To be created in this function */
1935         uint32_t pkt_count;
1936         int i;
1937
1938         ereq = &req->ext_req;
1939         req_mapped = (struct dwc_iso_xreq_port *)ereq_nonport;
1940         pkt_count = req_mapped->pio_pkt_count;
1941
1942         /* Create the isoc descs */
1943         if (atomic_alloc) {
1944                 ipds = DWC_ALLOC_ATOMIC(sizeof(*ipds) * pkt_count);
1945         } else {
1946                 ipds = DWC_ALLOC(sizeof(*ipds) * pkt_count);
1947         }
1948
1949         if (!ipds) {
1950                 DWC_ERROR("Failed to allocate isoc descriptors");
1951                 return -DWC_E_NO_MEMORY;
1952         }
1953
1954         /* Initialize the extended request fields */
1955         ereq->per_io_frame_descs = ipds;
1956         ereq->error_count = 0;
1957         ereq->pio_alloc_pkt_count = pkt_count;
1958         ereq->pio_pkt_count = pkt_count;
1959         ereq->tr_sub_flags = req_mapped->tr_sub_flags;
1960
1961         /* Init the Isoc descriptors */
1962         for (i = 0; i < pkt_count; i++) {
1963                 ipds[i].length = req_mapped->per_io_frame_descs[i].length;
1964                 ipds[i].offset = req_mapped->per_io_frame_descs[i].offset;
1965                 ipds[i].status = req_mapped->per_io_frame_descs[i].status;      /* 0 */
1966                 ipds[i].actual_length =
1967                     req_mapped->per_io_frame_descs[i].actual_length;
1968         }
1969
1970         return 0;
1971 }
1972
1973 static void prn_ext_request(struct dwc_iso_xreq_port *ereq)
1974 {
1975         struct dwc_iso_pkt_desc_port *xfd = NULL;
1976         int i;
1977
1978         DWC_DEBUG("per_io_frame_descs=%p", ereq->per_io_frame_descs);
1979         DWC_DEBUG("tr_sub_flags=%d", ereq->tr_sub_flags);
1980         DWC_DEBUG("error_count=%d", ereq->error_count);
1981         DWC_DEBUG("pio_alloc_pkt_count=%d", ereq->pio_alloc_pkt_count);
1982         DWC_DEBUG("pio_pkt_count=%d", ereq->pio_pkt_count);
1983         DWC_DEBUG("res=%d", ereq->res);
1984
1985         for (i = 0; i < ereq->pio_pkt_count; i++) {
1986                 xfd = &ereq->per_io_frame_descs[0];
1987                 DWC_DEBUG("FD #%d", i);
1988
1989                 DWC_DEBUG("xfd->actual_length=%d", xfd->actual_length);
1990                 DWC_DEBUG("xfd->length=%d", xfd->length);
1991                 DWC_DEBUG("xfd->offset=%d", xfd->offset);
1992                 DWC_DEBUG("xfd->status=%d", xfd->status);
1993         }
1994 }
1995
1996 /**
1997  *
1998  */
1999 int dwc_otg_pcd_xiso_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
2000                               uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
2001                               int zero, void *req_handle, int atomic_alloc,
2002                               void *ereq_nonport)
2003 {
2004         dwc_otg_pcd_request_t *req = NULL;
2005         dwc_otg_pcd_ep_t *ep;
2006         dwc_irqflags_t flags;
2007         int res;
2008
2009         ep = get_ep_from_handle(pcd, ep_handle);
2010         if (!ep) {
2011                 DWC_WARN("bad ep\n");
2012                 return -DWC_E_INVALID;
2013         }
2014
2015         /* We support this extension only for DDMA mode */
2016         if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC)
2017                 if (!GET_CORE_IF(pcd)->dma_desc_enable)
2018                         return -DWC_E_INVALID;
2019
2020         /* Create a dwc_otg_pcd_request_t object */
2021         if (atomic_alloc) {
2022                 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2023         } else {
2024                 req = DWC_ALLOC(sizeof(*req));
2025         }
2026
2027         if (!req) {
2028                 return -DWC_E_NO_MEMORY;
2029         }
2030
2031         /* Create the Isoc descs for this request which shall be the exact match
2032          * of the structure sent to us from the non-portable logic */
2033         res =
2034             dwc_otg_pcd_xiso_create_pkt_descs(req, ereq_nonport, atomic_alloc);
2035         if (res) {
2036                 DWC_WARN("Failed to init the Isoc descriptors");
2037                 DWC_FREE(req);
2038                 return res;
2039         }
2040
2041         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2042
2043         DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2044         req->buf = buf;
2045         req->dma = dma_buf;
2046         req->length = buflen;
2047         req->sent_zlp = zero;
2048         req->priv = req_handle;
2049
2050         //DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2051         ep->dwc_ep.dma_addr = dma_buf;
2052         ep->dwc_ep.start_xfer_buff = buf;
2053         ep->dwc_ep.xfer_buff = buf;
2054         ep->dwc_ep.xfer_len = 0;
2055         ep->dwc_ep.xfer_count = 0;
2056         ep->dwc_ep.sent_zlp = 0;
2057         ep->dwc_ep.total_len = buflen;
2058
2059         /* Add this request to the tail */
2060         DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2061         ep->dwc_ep.xiso_queued_xfers++;
2062
2063 //DWC_DEBUG("CP_0");
2064 //DWC_DEBUG("req->ext_req.tr_sub_flags=%d", req->ext_req.tr_sub_flags);
2065 //prn_ext_request((struct dwc_iso_xreq_port *) ereq_nonport);
2066 //prn_ext_request(&req->ext_req);
2067
2068         //DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2069
2070         /* If the req->status == ASAP  then check if there is any active transfer
2071          * for this endpoint. If no active transfers, then get the first entry
2072          * from the queue and start that transfer
2073          */
2074         if (req->ext_req.tr_sub_flags == DWC_EREQ_TF_ASAP) {
2075                 res = dwc_otg_pcd_xiso_start_next_request(pcd, ep);
2076                 if (res) {
2077                         DWC_WARN("Failed to start the next Isoc transfer");
2078                         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2079                         DWC_FREE(req);
2080                         return res;
2081                 }
2082         }
2083
2084         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2085         return 0;
2086 }
2087
2088 #endif
2089 /* END ifdef DWC_UTE_PER_IO ***************************************************/
2090 int dwc_otg_pcd_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
2091                          uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
2092                          int zero, void *req_handle, int atomic_alloc)
2093 {
2094         struct device *dev = dwc_otg_pcd_to_dev(pcd);
2095         dwc_irqflags_t flags;
2096         dwc_otg_pcd_request_t *req;
2097         dwc_otg_pcd_ep_t *ep;
2098         uint32_t max_transfer;
2099
2100         ep = get_ep_from_handle(pcd, ep_handle);
2101         if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2102                 DWC_WARN("bad ep\n");
2103                 return -DWC_E_INVALID;
2104         }
2105
2106         if (atomic_alloc) {
2107                 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2108         } else {
2109                 req = DWC_ALLOC(sizeof(*req));
2110         }
2111
2112         if (!req) {
2113                 return -DWC_E_NO_MEMORY;
2114         }
2115         DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2116         if (!GET_CORE_IF(pcd)->core_params->opt) {
2117                 if (ep->dwc_ep.num != 0) {
2118                         DWC_ERROR("queue req %p, len %d buf %p\n",
2119                                   req_handle, buflen, buf);
2120                 }
2121         }
2122
2123         req->buf = buf;
2124         req->dma = dma_buf;
2125         req->length = buflen;
2126         req->sent_zlp = zero;
2127         req->priv = req_handle;
2128         req->dw_align_buf = NULL;
2129         if ((dma_buf & 0x3) && GET_CORE_IF(pcd)->dma_enable
2130                         && !GET_CORE_IF(pcd)->dma_desc_enable)
2131                 req->dw_align_buf = DWC_DMA_ALLOC(dev, buflen,
2132                                  &req->dw_align_buf_dma);
2133         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2134
2135         /*
2136          * After adding request to the queue for IN ISOC wait for In Token Received
2137          * when TX FIFO is empty interrupt and for OUT ISOC wait for OUT Token
2138          * Received when EP is disabled interrupt to obtain starting microframe
2139          * (odd/even) start transfer
2140          */
2141         if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
2142                 if (req != 0) {
2143                         depctl_data_t depctl = {.d32 =
2144                                     DWC_READ_REG32(&pcd->core_if->dev_if->
2145                                                    in_ep_regs[ep->dwc_ep.num]->
2146                                                    diepctl) };
2147                         ++pcd->request_pending;
2148
2149                         DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2150                         if (ep->dwc_ep.is_in) {
2151                                 depctl.b.cnak = 1;
2152                                 DWC_WRITE_REG32(&pcd->core_if->dev_if->
2153                                                 in_ep_regs[ep->dwc_ep.num]->
2154                                                 diepctl, depctl.d32);
2155                         }
2156
2157                         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2158                 }
2159                 return 0;
2160         }
2161
2162         /*
2163          * For EP0 IN without premature status, zlp is required?
2164          */
2165         if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
2166                 DWC_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->dwc_ep.num);
2167                 //_req->zero = 1;
2168         }
2169
2170         /* Start the transfer */
2171         if (DWC_CIRCLEQ_EMPTY(&ep->queue) && !ep->stopped) {
2172                 /* EP0 Transfer? */
2173                 if (ep->dwc_ep.num == 0) {
2174                         switch (pcd->ep0state) {
2175                         case EP0_IN_DATA_PHASE:
2176                                 DWC_DEBUGPL(DBG_PCD,
2177                                             "%s ep0: EP0_IN_DATA_PHASE\n",
2178                                             __func__);
2179                                 break;
2180
2181                         case EP0_OUT_DATA_PHASE:
2182                                 DWC_DEBUGPL(DBG_PCD,
2183                                             "%s ep0: EP0_OUT_DATA_PHASE\n",
2184                                             __func__);
2185                                 if (pcd->request_config) {
2186                                         /* Complete STATUS PHASE */
2187                                         ep->dwc_ep.is_in = 1;
2188                                         pcd->ep0state = EP0_IN_STATUS_PHASE;
2189                                 }
2190                                 break;
2191
2192                         case EP0_IN_STATUS_PHASE:
2193                                 DWC_DEBUGPL(DBG_PCD,
2194                                             "%s ep0: EP0_IN_STATUS_PHASE\n",
2195                                             __func__);
2196                                 break;
2197
2198                         default:
2199                                 DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
2200                                             pcd->ep0state);
2201                                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2202                                 return -DWC_E_SHUTDOWN;
2203                         }
2204
2205                         ep->dwc_ep.dma_addr = dma_buf;
2206                         ep->dwc_ep.start_xfer_buff = buf;
2207                         ep->dwc_ep.xfer_buff = buf;
2208                         ep->dwc_ep.xfer_len = buflen;
2209                         ep->dwc_ep.xfer_count = 0;
2210                         ep->dwc_ep.sent_zlp = 0;
2211                         ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
2212
2213                         if (zero) {
2214                                 if ((ep->dwc_ep.xfer_len %
2215                                      ep->dwc_ep.maxpacket == 0)
2216                                     && (ep->dwc_ep.xfer_len != 0)) {
2217                                         ep->dwc_ep.sent_zlp = 1;
2218                                 }
2219
2220                         }
2221
2222                         dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
2223                                                    &ep->dwc_ep);
2224                 }               // non-ep0 endpoints
2225                 else {
2226 #ifdef DWC_UTE_CFI
2227                         if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2228                                 /* store the request length */
2229                                 ep->dwc_ep.cfi_req_len = buflen;
2230                                 pcd->cfi->ops.build_descriptors(pcd->cfi, pcd,
2231                                                                 ep, req);
2232                         } else {
2233 #endif
2234                                 max_transfer =
2235                                     GET_CORE_IF(ep->pcd)->core_params->
2236                                     max_transfer_size;
2237
2238                                 /* Setup and start the Transfer */
2239                                 if (req->dw_align_buf){
2240                                         if (ep->dwc_ep.is_in)
2241                                                 dwc_memcpy(req->dw_align_buf,
2242                                                            buf, buflen);
2243                                         ep->dwc_ep.dma_addr =
2244                                             req->dw_align_buf_dma;
2245                                         ep->dwc_ep.start_xfer_buff =
2246                                             req->dw_align_buf;
2247                                         ep->dwc_ep.xfer_buff =
2248                                             req->dw_align_buf;
2249                                 } else {
2250                                         ep->dwc_ep.dma_addr = dma_buf;
2251                                         ep->dwc_ep.start_xfer_buff = buf;
2252                                         ep->dwc_ep.xfer_buff = buf;
2253                                 }
2254                                 ep->dwc_ep.xfer_len = 0;
2255                                 ep->dwc_ep.xfer_count = 0;
2256                                 ep->dwc_ep.sent_zlp = 0;
2257                                 ep->dwc_ep.total_len = buflen;
2258
2259                                 ep->dwc_ep.maxxfer = max_transfer;
2260                                 if (GET_CORE_IF(pcd)->dma_desc_enable) {
2261                                         uint32_t out_max_xfer =
2262                                             DDMA_MAX_TRANSFER_SIZE -
2263                                             (DDMA_MAX_TRANSFER_SIZE % 4);
2264                                         if (ep->dwc_ep.is_in) {
2265                                                 if (ep->dwc_ep.maxxfer >
2266                                                     DDMA_MAX_TRANSFER_SIZE) {
2267                                                         ep->dwc_ep.maxxfer =
2268                                                             DDMA_MAX_TRANSFER_SIZE;
2269                                                 }
2270                                         } else {
2271                                                 if (ep->dwc_ep.maxxfer >
2272                                                     out_max_xfer) {
2273                                                         ep->dwc_ep.maxxfer =
2274                                                             out_max_xfer;
2275                                                 }
2276                                         }
2277                                 }
2278                                 if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
2279                                         ep->dwc_ep.maxxfer -=
2280                                             (ep->dwc_ep.maxxfer %
2281                                              ep->dwc_ep.maxpacket);
2282                                 }
2283
2284                                 if (zero) {
2285                                         if ((ep->dwc_ep.total_len %
2286                                              ep->dwc_ep.maxpacket == 0)
2287                                             && (ep->dwc_ep.total_len != 0)) {
2288                                                 ep->dwc_ep.sent_zlp = 1;
2289                                         }
2290                                 }
2291 #ifdef DWC_UTE_CFI
2292                         }
2293 #endif
2294                         dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
2295                                                   &ep->dwc_ep);
2296                 }
2297         }
2298
2299         if (req != 0) {
2300                 ++pcd->request_pending;
2301                 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2302                 if (ep->dwc_ep.is_in && ep->stopped
2303                     && !(GET_CORE_IF(pcd)->dma_enable)) {
2304                         /** @todo NGS Create a function for this. */
2305                         diepmsk_data_t diepmsk = {.d32 = 0 };
2306                         diepmsk.b.intktxfemp = 1;
2307                         if (GET_CORE_IF(pcd)->multiproc_int_enable) {
2308                                 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
2309                                                  dev_if->dev_global_regs->diepeachintmsk
2310                                                  [ep->dwc_ep.num], 0,
2311                                                  diepmsk.d32);
2312                         } else {
2313                                 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
2314                                                  dev_if->dev_global_regs->
2315                                                  diepmsk, 0, diepmsk.d32);
2316                         }
2317
2318                 }
2319         }
2320         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2321
2322         return 0;
2323 }
2324
2325 int dwc_otg_pcd_ep_dequeue(dwc_otg_pcd_t * pcd, void *ep_handle,
2326                            void *req_handle)
2327 {
2328         dwc_irqflags_t flags;
2329         dwc_otg_pcd_request_t *req;
2330         dwc_otg_pcd_ep_t *ep;
2331
2332         ep = get_ep_from_handle(pcd, ep_handle);
2333         if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2334                 DWC_WARN("bad argument\n");
2335                 return -DWC_E_INVALID;
2336         }
2337
2338         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2339
2340         /* make sure it's actually queued on this endpoint */
2341         DWC_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
2342                 if (req->priv == (void *)req_handle) {
2343                         break;
2344                 }
2345         }
2346
2347         if (req->priv != (void *)req_handle) {
2348                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2349                 return -DWC_E_INVALID;
2350         }
2351
2352         if (!DWC_CIRCLEQ_EMPTY_ENTRY(req, queue_entry)) {
2353                 dwc_otg_request_done(ep, req, -DWC_E_RESTART);
2354         } else {
2355                 req = NULL;
2356         }
2357
2358         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2359
2360         return req ? 0 : -DWC_E_SHUTDOWN;
2361
2362 }
2363
2364 /**
2365  * dwc_otg_pcd_ep_wedge - sets the halt feature and ignores clear requests
2366  *
2367  * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT)
2368  * requests. If the gadget driver clears the halt status, it will
2369  * automatically unwedge the endpoint.
2370  *
2371  * Returns zero on success, else negative DWC error code.
2372  */
2373 int dwc_otg_pcd_ep_wedge(dwc_otg_pcd_t * pcd, void *ep_handle)
2374 {
2375         dwc_otg_pcd_ep_t *ep;
2376         dwc_irqflags_t flags;
2377         int retval = 0;
2378
2379         ep = get_ep_from_handle(pcd, ep_handle);
2380
2381         if ((!ep->desc && ep != &pcd->ep0) ||
2382             (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
2383                 DWC_WARN("%s, bad ep\n", __func__);
2384                 return -DWC_E_INVALID;
2385         }
2386
2387         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2388         if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2389                 DWC_WARN("%d %s XFer In process\n", ep->dwc_ep.num,
2390                          ep->dwc_ep.is_in ? "IN" : "OUT");
2391                 retval = -DWC_E_AGAIN;
2392         } else {
2393                 /* This code needs to be reviewed */
2394                 if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
2395                         dtxfsts_data_t txstatus;
2396                         fifosize_data_t txfifosize;
2397
2398                         txfifosize.d32 =
2399                             DWC_READ_REG32(&GET_CORE_IF(pcd)->
2400                                            core_global_regs->dtxfsiz[ep->dwc_ep.
2401                                                                      tx_fifo_num]);
2402                         txstatus.d32 =
2403                             DWC_READ_REG32(&GET_CORE_IF(pcd)->
2404                                            dev_if->in_ep_regs[ep->dwc_ep.num]->
2405                                            dtxfsts);
2406
2407                         if (txstatus.b.txfspcavail < txfifosize.b.depth) {
2408                                 DWC_WARN("%s() Data In Tx Fifo\n", __func__);
2409                                 retval = -DWC_E_AGAIN;
2410                         } else {
2411                                 if (ep->dwc_ep.num == 0) {
2412                                         pcd->ep0state = EP0_STALL;
2413                                 }
2414
2415                                 ep->stopped = 1;
2416                                 dwc_otg_ep_set_stall(GET_CORE_IF(pcd),
2417                                                      &ep->dwc_ep);
2418                         }
2419                 } else {
2420                         if (ep->dwc_ep.num == 0) {
2421                                 pcd->ep0state = EP0_STALL;
2422                         }
2423
2424                         ep->stopped = 1;
2425                         dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2426                 }
2427         }
2428
2429         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2430
2431         return retval;
2432 }
2433
2434 int dwc_otg_pcd_ep_halt(dwc_otg_pcd_t * pcd, void *ep_handle, int value)
2435 {
2436         dwc_otg_pcd_ep_t *ep;
2437         dwc_irqflags_t flags;
2438         int retval = 0;
2439
2440         ep = get_ep_from_handle(pcd, ep_handle);
2441
2442         if (!ep || (!ep->desc && ep != &pcd->ep0) ||
2443             (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
2444                 DWC_WARN("%s, bad ep\n", __func__);
2445                 return -DWC_E_INVALID;
2446         }
2447
2448         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2449         if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2450                 DWC_WARN("%d %s XFer In process\n", ep->dwc_ep.num,
2451                          ep->dwc_ep.is_in ? "IN" : "OUT");
2452                 retval = -DWC_E_AGAIN;
2453         } else if (value == 0) {
2454                 dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2455         } else if (value == 1) {
2456                 if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
2457                         dtxfsts_data_t txstatus;
2458                         fifosize_data_t txfifosize;
2459
2460                         txfifosize.d32 =
2461                             DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
2462                                            dtxfsiz[ep->dwc_ep.tx_fifo_num]);
2463                         txstatus.d32 =
2464                             DWC_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
2465                                            in_ep_regs[ep->dwc_ep.num]->dtxfsts);
2466
2467                         if (txstatus.b.txfspcavail < txfifosize.b.depth) {
2468                                 DWC_WARN("%s() Data In Tx Fifo\n", __func__);
2469                                 retval = -DWC_E_AGAIN;
2470                         } else {
2471                                 if (ep->dwc_ep.num == 0) {
2472                                         pcd->ep0state = EP0_STALL;
2473                                 }
2474
2475                                 ep->stopped = 1;
2476                                 dwc_otg_ep_set_stall(GET_CORE_IF(pcd),
2477                                                      &ep->dwc_ep);
2478                         }
2479                 } else {
2480                         if (ep->dwc_ep.num == 0) {
2481                                 pcd->ep0state = EP0_STALL;
2482                         }
2483
2484                         ep->stopped = 1;
2485                         dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2486                 }
2487         } else if (value == 2) {
2488                 ep->dwc_ep.stall_clear_flag = 0;
2489         } else if (value == 3) {
2490                 ep->dwc_ep.stall_clear_flag = 1;
2491         }
2492
2493         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2494
2495         return retval;
2496 }
2497
2498 /**
2499  * This function initiates remote wakeup of the host from suspend state.
2500  */
2501 void dwc_otg_pcd_rem_wkup_from_suspend(dwc_otg_pcd_t * pcd, int set)
2502 {
2503         dctl_data_t dctl = { 0 };
2504         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2505         dsts_data_t dsts;
2506
2507         dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
2508         if (!dsts.b.suspsts) {
2509                 DWC_WARN("Remote wakeup while is not in suspend state\n");
2510         }
2511         /* Check if DEVICE_REMOTE_WAKEUP feature enabled */
2512         if (pcd->remote_wakeup_enable) {
2513                 if (set) {
2514
2515                         if (core_if->adp_enable) {
2516                                 gpwrdn_data_t gpwrdn;
2517
2518                                 dwc_otg_adp_probe_stop(core_if);
2519
2520                                 /* Mask SRP detected interrupt from Power Down Logic */
2521                                 gpwrdn.d32 = 0;
2522                                 gpwrdn.b.srp_det_msk = 1;
2523                                 DWC_MODIFY_REG32(&core_if->
2524                                                  core_global_regs->gpwrdn,
2525                                                  gpwrdn.d32, 0);
2526
2527                                 /* Disable Power Down Logic */
2528                                 gpwrdn.d32 = 0;
2529                                 gpwrdn.b.pmuactv = 1;
2530                                 DWC_MODIFY_REG32(&core_if->
2531                                                  core_global_regs->gpwrdn,
2532                                                  gpwrdn.d32, 0);
2533
2534                                 /*
2535                                  * Initialize the Core for Device mode.
2536                                  */
2537                                 core_if->op_state = B_PERIPHERAL;
2538                                 dwc_otg_core_init(core_if);
2539                                 dwc_otg_enable_global_interrupts(core_if);
2540                                 cil_pcd_start(core_if);
2541
2542                                 dwc_otg_initiate_srp(core_if);
2543                         }
2544
2545                         dctl.b.rmtwkupsig = 1;
2546                         DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
2547                                          dctl, 0, dctl.d32);
2548                         DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
2549
2550                         dwc_mdelay(2);
2551                         DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
2552                                          dctl, dctl.d32, 0);
2553                         DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
2554                 }
2555         } else {
2556                 DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
2557         }
2558 }
2559
2560 #ifdef CONFIG_USB_DWC_OTG_LPM
2561 /**
2562  * This function initiates remote wakeup of the host from L1 sleep state.
2563  */
void dwc_otg_pcd_rem_wkup_from_sleep(dwc_otg_pcd_t * pcd, int set)
{
	glpmcfg_data_t lpmcfg;
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);

	/* Snapshot the LPM config/status register for the precondition checks. */
	lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);

	/* Check if we are in L1 state */
	if (!lpmcfg.b.prt_sleep_sts) {
		DWC_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
		return;
	}

	/* Check if host allows remote wakeup */
	if (!lpmcfg.b.rem_wkup_en) {
		DWC_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
		return;
	}

	/* Check if Resume OK */
	if (!lpmcfg.b.sleep_state_resumeok) {
		DWC_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
		return;
	}

	/* Re-read glpmcfg so the modify below is based on the current
	 * hardware state, then disable UTMI sleep and clear bit 4 of the
	 * HIRD threshold field before initiating resume. */
	lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
	lpmcfg.b.en_utmi_sleep = 0;
	lpmcfg.b.hird_thres &= (~(1 << 4));
	DWC_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);

	if (set) {
		dctl_data_t dctl = {.d32 = 0 };
		dctl.b.rmtwkupsig = 1;
		/* Set RmtWkUpSig bit to start remote wakup signaling.
		 * Hardware will automatically clear this bit.
		 */
		DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
				 0, dctl.d32);
		DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
	}

}
2606 #endif
2607
2608 /**
2609  * Performs remote wakeup.
2610  */
2611 void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t * pcd, int set)
2612 {
2613         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2614         dwc_irqflags_t flags;
2615         if (dwc_otg_is_device_mode(core_if)) {
2616                 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2617 #ifdef CONFIG_USB_DWC_OTG_LPM
2618                 if (core_if->lx_state == DWC_OTG_L1) {
2619                         dwc_otg_pcd_rem_wkup_from_sleep(pcd, set);
2620                 } else {
2621 #endif
2622                         dwc_otg_pcd_rem_wkup_from_suspend(pcd, set);
2623 #ifdef CONFIG_USB_DWC_OTG_LPM
2624                 }
2625 #endif
2626                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2627         }
2628         return;
2629 }
2630
2631 void dwc_otg_pcd_disconnect_us(dwc_otg_pcd_t * pcd, int no_of_usecs)
2632 {
2633         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2634         dctl_data_t dctl = { 0 };
2635
2636         if (dwc_otg_is_device_mode(core_if)) {
2637                 dctl.b.sftdiscon = 1;
2638                 DWC_PRINTF("Soft disconnect for %d useconds\n",no_of_usecs);
2639                 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
2640                 dwc_udelay(no_of_usecs);
2641                 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32,0);
2642
2643         } else{
2644                 DWC_PRINTF("NOT SUPPORTED IN HOST MODE\n");
2645         }
2646         return;
2647
2648 }
2649
2650 int dwc_otg_pcd_wakeup(dwc_otg_pcd_t * pcd)
2651 {
2652         dsts_data_t dsts;
2653         gotgctl_data_t gotgctl;
2654
2655         /*
2656          * This function starts the Protocol if no session is in progress. If
2657          * a session is already in progress, but the device is suspended,
2658          * remote wakeup signaling is started.
2659          */
2660
2661         /* Check if valid session */
2662         gotgctl.d32 =
2663             DWC_READ_REG32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
2664         if (gotgctl.b.bsesvld) {
2665                 /* Check if suspend state */
2666                 dsts.d32 =
2667                     DWC_READ_REG32(&
2668                                    (GET_CORE_IF(pcd)->dev_if->
2669                                     dev_global_regs->dsts));
2670                 if (dsts.b.suspsts) {
2671                         dwc_otg_pcd_remote_wakeup(pcd, 1);
2672                 }
2673         } else {
2674                 dwc_otg_pcd_initiate_srp(pcd);
2675         }
2676
2677         return 0;
2678
2679 }
2680
2681 /**
2682  * Start the SRP timer to detect when the SRP does not complete within
2683  * 6 seconds.
2684  *
2685  * @param pcd the pcd structure.
2686  */
2687 void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t * pcd)
2688 {
2689         dwc_irqflags_t flags;
2690         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2691         dwc_otg_initiate_srp(GET_CORE_IF(pcd));
2692         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2693 }
2694
/**
 * Returns the current USB frame number as reported by the core
 * (thin wrapper around dwc_otg_get_frame_number()).
 */
int dwc_otg_pcd_get_frame_number(dwc_otg_pcd_t * pcd)
{
	return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
}
2699
/**
 * Returns non-zero when Link Power Management support is enabled in the
 * core parameters.
 */
int dwc_otg_pcd_is_lpm_enabled(dwc_otg_pcd_t * pcd)
{
	return GET_CORE_IF(pcd)->core_params->lpm_enable;
}
2704
/**
 * Accessor: returns the cached b_hnp_enable flag (set by the host via
 * SET_FEATURE, presumably — confirm against the setup-handling code).
 */
uint32_t get_b_hnp_enable(dwc_otg_pcd_t * pcd)
{
	return pcd->b_hnp_enable;
}
2709
/**
 * Accessor: returns the cached a_hnp_support flag.
 */
uint32_t get_a_hnp_support(dwc_otg_pcd_t * pcd)
{
	return pcd->a_hnp_support;
}
2714
/**
 * Accessor: returns the cached a_alt_hnp_support flag.
 */
uint32_t get_a_alt_hnp_support(dwc_otg_pcd_t * pcd)
{
	return pcd->a_alt_hnp_support;
}
2719
/**
 * Accessor: returns non-zero when the host has enabled the
 * DEVICE_REMOTE_WAKEUP feature on this device.
 */
int dwc_otg_pcd_get_rmwkup_enable(dwc_otg_pcd_t * pcd)
{
	return pcd->remote_wakeup_enable;
}
2724
2725 #endif /* DWC_HOST_ONLY */