drivers/usb/cdns3/cdnsp-ring.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cadence CDNSP DRD Driver.
4  *
5  * Copyright (C) 2020 Cadence.
6  *
7  * Author: Pawel Laszczak <pawell@cadence.com>
8  *
9  * Code based on Linux XHCI driver.
10  * Origin: Copyright (C) 2008 Intel Corp
11  */
12
13 /*
14  * Ring initialization rules:
15  * 1. Each segment is initialized to zero, except for link TRBs.
16  * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
17  *    Consumer Cycle State (CCS), depending on ring function.
18  * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
19  *
20  * Ring behavior rules:
21  * 1. A ring is empty if enqueue == dequeue. This means there will always be at
22  *    least one free TRB in the ring. This is useful if you want to turn that
23  *    into a link TRB and expand the ring.
24  * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
25  *    link TRB, then load the pointer with the address in the link TRB. If the
26  *    link TRB had its toggle bit set, you may need to update the ring cycle
27  *    state (see cycle bit rules). You may have to do this multiple times
28  *    until you reach a non-link TRB.
29  * 3. A ring is full if enqueue++ (for the definition of increment above)
30  *    equals the dequeue pointer.
31  *
32  * Cycle bit rules:
33  * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
34  *    in a link TRB, it must toggle the ring cycle state.
35  * 2. When a producer increments an enqueue pointer and encounters a toggle bit
36  *    in a link TRB, it must toggle the ring cycle state.
37  *
38  * Producer rules:
39  * 1. Check if ring is full before you enqueue.
40  * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
41  *    Update enqueue pointer between each write (which may update the ring
42  *    cycle state).
43  * 3. Notify the consumer. If SW is the producer, it rings the doorbell for
44  *    command and endpoint rings. The controller is the producer for the event
45  *    ring and generates an interrupt according to interrupt moderation rules.
46  *
47  * Consumer rules:
48  * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
49  *    the TRB is owned by the consumer.
50  * 2. Update dequeue pointer (which may update the ring cycle state) and
51  *    continue processing TRBs until you reach a TRB which is not owned by you.
52  * 3. Notify the producer. SW is the consumer for the event ring, and it
53  *    updates event ring dequeue pointer. Controller is the consumer for the
54  *    command and endpoint rings; it generates events on the event ring
55  *    for these.
56  */
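/*
 * Worked example of the consumer-side cycle bit rule above (illustrative
 * only, not driver code): the consumer owns the TRB at its dequeue pointer
 * only while the TRB's cycle bit matches the ring cycle state, i.e.
 *
 *	owned = (le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) ==
 *		ring->cycle_state;
 *
 * cdnsp_handle_event() below performs exactly this check before it
 * consumes an event TRB.
 */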
57
58 #include <linux/scatterlist.h>
59 #include <linux/dma-mapping.h>
60 #include <linux/delay.h>
61 #include <linux/slab.h>
62 #include <linux/irq.h>
63
64 #include "cdnsp-trace.h"
65 #include "cdnsp-gadget.h"
66
67 /*
68  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
69  * address of the TRB.
70  */
71 dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
72                                  union cdnsp_trb *trb)
73 {
74         unsigned long segment_offset = trb - seg->trbs;
75
76         if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)
77                 return 0;
78
79         return seg->dma + (segment_offset * sizeof(*trb));
80 }
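/*
 * Usage sketch (illustrative): the DMA address of the current event ring
 * dequeue pointer can be obtained with
 *
 *	cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
 *			      pdev->event_ring->dequeue);
 *
 * as done in the error path of cdnsp_handle_tx_event() below.
 */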
81
82 static bool cdnsp_trb_is_noop(union cdnsp_trb *trb)
83 {
84         return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
85 }
86
87 static bool cdnsp_trb_is_link(union cdnsp_trb *trb)
88 {
89         return TRB_TYPE_LINK_LE32(trb->link.control);
90 }
91
92 bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)
93 {
94         return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
95 }
96
97 bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
98                             struct cdnsp_segment *seg,
99                             union cdnsp_trb *trb)
100 {
101         return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
102 }
103
104 static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb)
105 {
106         return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
107 }
108
109 static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
110 {
111         if (cdnsp_trb_is_link(trb)) {
112                 /* Unchain chained link TRBs. */
113                 trb->link.control &= cpu_to_le32(~TRB_CHAIN);
114         } else {
115                 trb->generic.field[0] = 0;
116                 trb->generic.field[1] = 0;
117                 trb->generic.field[2] = 0;
118                 /* Preserve only the cycle bit of this TRB. */
119                 trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
120                 trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
121         }
122 }
123
124 /*
125  * Updates trb to point to the next TRB in the ring, and updates seg if the next
126  * TRB is in a new segment. This does not skip over link TRBs, and it does not
127  * affect the ring dequeue or enqueue pointers.
128  */
129 static void cdnsp_next_trb(struct cdnsp_device *pdev,
130                            struct cdnsp_ring *ring,
131                            struct cdnsp_segment **seg,
132                            union cdnsp_trb **trb)
133 {
134         if (cdnsp_trb_is_link(*trb)) {
135                 *seg = (*seg)->next;
136                 *trb = ((*seg)->trbs);
137         } else {
138                 (*trb)++;
139         }
140 }
141
142 /*
143  * See Cycle bit rules. SW is the consumer for the event ring only.
144  * Don't make a ring full of link TRBs. That would be dumb and this would loop.
145  */
146 void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
147 {
148         /* The event ring doesn't have link TRBs; check for the last TRB. */
149         if (ring->type == TYPE_EVENT) {
150                 if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
151                         ring->dequeue++;
152                         goto out;
153                 }
154
155                 if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
156                         ring->cycle_state ^= 1;
157
158                 ring->deq_seg = ring->deq_seg->next;
159                 ring->dequeue = ring->deq_seg->trbs;
160                 goto out;
161         }
162
163         /* All other rings have link trbs. */
164         if (!cdnsp_trb_is_link(ring->dequeue)) {
165                 ring->dequeue++;
166                 ring->num_trbs_free++;
167         }
168         while (cdnsp_trb_is_link(ring->dequeue)) {
169                 ring->deq_seg = ring->deq_seg->next;
170                 ring->dequeue = ring->deq_seg->trbs;
171         }
172 out:
173         trace_cdnsp_inc_deq(ring);
174 }
175
176 /*
177  * See Cycle bit rules. SW is the consumer for the event ring only.
178  * Don't make a ring full of link TRBs. That would be dumb and this would loop.
179  *
180  * If we've just enqueued a TRB that is in the middle of a TD (meaning the
181  * chain bit is set), then set the chain bit in all the following link TRBs.
182  * If we've enqueued the last TRB in a TD, make sure the following link TRBs
183  * have their chain bit cleared (so that each Link TRB is a separate TD).
184  *
185  * @more_trbs_coming:   Will you enqueue more TRBs before ringing the doorbell.
186  */
187 static void cdnsp_inc_enq(struct cdnsp_device *pdev,
188                           struct cdnsp_ring *ring,
189                           bool more_trbs_coming)
190 {
191         union cdnsp_trb *next;
192         u32 chain;
193
194         chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
195
196         /* If this is not an event ring, there is one less usable TRB. */
197         if (!cdnsp_trb_is_link(ring->enqueue))
198                 ring->num_trbs_free--;
199         next = ++(ring->enqueue);
200
201         /* Update the enqueue pointer further if that was a link TRB. */
202         while (cdnsp_trb_is_link(next)) {
203                 /*
204                  * If the caller doesn't plan on enqueuing more TDs before
205                  * ringing the doorbell, then we don't want to give the link TRB
206                  * to the hardware just yet. We'll give the link TRB back in
207                  * cdnsp_prepare_ring() just before we enqueue the TD at the
208                  * top of the ring.
209                  */
210                 if (!chain && !more_trbs_coming)
211                         break;
212
213                 next->link.control &= cpu_to_le32(~TRB_CHAIN);
214                 next->link.control |= cpu_to_le32(chain);
215
216                 /* Give this link TRB to the hardware */
217                 wmb();
218                 next->link.control ^= cpu_to_le32(TRB_CYCLE);
219
220                 /* Toggle the cycle bit after the last ring segment. */
221                 if (cdnsp_link_trb_toggles_cycle(next))
222                         ring->cycle_state ^= 1;
223
224                 ring->enq_seg = ring->enq_seg->next;
225                 ring->enqueue = ring->enq_seg->trbs;
226                 next = ring->enqueue;
227         }
228
229         trace_cdnsp_inc_enq(ring);
230 }
231
232 /*
233  * Check to see if there's room to enqueue num_trbs on the ring and make sure
234  * enqueue pointer will not advance into dequeue segment.
235  */
236 static bool cdnsp_room_on_ring(struct cdnsp_device *pdev,
237                                struct cdnsp_ring *ring,
238                                unsigned int num_trbs)
239 {
240         int num_trbs_in_deq_seg;
241
242         if (ring->num_trbs_free < num_trbs)
243                 return false;
244
245         if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
246                 num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
247
248                 if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
249                         return false;
250         }
251
252         return true;
253 }
254
255 /*
256  * Workaround for L1: controller has issue with resuming from L1 after
257  * setting doorbell for endpoint during L1 state. This function forces
258  * resume signal in such case.
259  */
260 static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
261 {
262         if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable)
263                 cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0);
264 }
265
266 /* Ring the doorbell after placing a command on the ring. */
267 void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
268 {
269         writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
270 }
271
272 /*
273  * Ring the doorbell after placing a transfer on the ring.
274  * Returns true if doorbell was set, otherwise false.
275  */
276 static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
277                                    struct cdnsp_ep *pep,
278                                    unsigned int stream_id)
279 {
280         __le32 __iomem *reg_addr = &pdev->dba->ep_db;
281         unsigned int ep_state = pep->ep_state;
282         unsigned int db_value;
283
284         /*
285          * Don't ring the doorbell for this endpoint if it is halted or
286          * disabled.
287          */
288         if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED))
289                 return false;
290
291         /* For stream capable endpoints the driver can ring the doorbell only twice. */
292         if (pep->ep_state & EP_HAS_STREAMS) {
293                 if (pep->stream_info.drbls_count >= 2)
294                         return false;
295
296                 pep->stream_info.drbls_count++;
297         }
298
299         pep->ep_state &= ~EP_STOPPED;
300
301         if (pep->idx == 0 && pdev->ep0_stage == CDNSP_DATA_STAGE &&
302             !pdev->ep0_expect_in)
303                 db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id);
304         else
305                 db_value = DB_VALUE(pep->idx, stream_id);
306
307         trace_cdnsp_tr_drbl(pep, stream_id);
308
309         writel(db_value, reg_addr);
310
311         cdnsp_force_l0_go(pdev);
312
313         /* Doorbell was set. */
314         return true;
315 }
316
317 /*
318  * Get the right ring for the given pep and stream_id.
319  * If the endpoint supports streams, boundary check the USB request's stream ID.
320  * If the endpoint doesn't support streams, return the singular endpoint ring.
321  */
322 static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev,
323                                                   struct cdnsp_ep *pep,
324                                                   unsigned int stream_id)
325 {
326         if (!(pep->ep_state & EP_HAS_STREAMS))
327                 return pep->ring;
328
329         if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) {
330                 dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n",
331                         pep->name, stream_id);
332                 return NULL;
333         }
334
335         return pep->stream_info.stream_rings[stream_id];
336 }
337
338 static struct cdnsp_ring *
339         cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev,
340                                        struct cdnsp_request *preq)
341 {
342         return cdnsp_get_transfer_ring(pdev, preq->pep,
343                                        preq->request.stream_id);
344 }
345
346 /* Ring the doorbell for any rings with pending requests. */
347 void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
348                                           struct cdnsp_ep *pep)
349 {
350         struct cdnsp_stream_info *stream_info;
351         unsigned int stream_id;
352         int ret;
353
354         if (pep->ep_state & EP_DIS_IN_RROGRESS)
355                 return;
356
357         /* A ring has pending requests if its TD list is not empty. */
358         if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) {
359                 if (pep->ring && !list_empty(&pep->ring->td_list))
360                         cdnsp_ring_ep_doorbell(pdev, pep, 0);
361                 return;
362         }
363
364         stream_info = &pep->stream_info;
365
366         for (stream_id = 1; stream_id < stream_info->num_streams; stream_id++) {
367                 struct cdnsp_td *td, *td_temp;
368                 struct cdnsp_ring *ep_ring;
369
370                 if (stream_info->drbls_count >= 2)
371                         return;
372
373                 ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
374                 if (!ep_ring)
375                         continue;
376
377                 if (!ep_ring->stream_active || ep_ring->stream_rejected)
378                         continue;
379
380                 list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
381                                          td_list) {
382                         if (td->drbl)
383                                 continue;
384
385                         ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
386                         if (ret)
387                                 td->drbl = 1;
388                 }
389         }
390 }
391
392 /*
393  * Get the hw dequeue pointer the controller stopped on, either directly from
394  * the endpoint context or, if streams are in use, from the stream context.
395  * The low four bits of the returned hw_dequeue carry the cycle state and,
396  * possibly, the stream context type.
397  */
398 static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
399                             unsigned int ep_index,
400                             unsigned int stream_id)
401 {
402         struct cdnsp_stream_ctx *st_ctx;
403         struct cdnsp_ep *pep;
404
405         pep = &pdev->eps[ep_index];
406
407         if (pep->ep_state & EP_HAS_STREAMS) {
408                 st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
409                 return le64_to_cpu(st_ctx->stream_ring);
410         }
411
412         return le64_to_cpu(pep->out_ctx->deq);
413 }
414
415 /*
416  * Move the controller endpoint ring dequeue pointer past cur_td.
417  * Record the new state of the controller endpoint ring dequeue segment,
418  * dequeue pointer, and new consumer cycle state in state.
419  * Update internal representation of the ring's dequeue pointer.
420  *
421  * We do this in three jumps:
422  *  - First we update our new ring state to be the same as when the
423  *    controller stopped.
424  *  - Then we traverse the ring to find the segment that contains
425  *    the last TRB in the TD. We toggle the controller's new cycle state
426  *    when we pass any link TRBs with the toggle cycle bit set.
427  *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
428  *    if we've moved it past a link TRB with the toggle cycle bit set.
429  */
430 static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev,
431                                          struct cdnsp_ep *pep,
432                                          unsigned int stream_id,
433                                          struct cdnsp_td *cur_td,
434                                          struct cdnsp_dequeue_state *state)
435 {
436         bool td_last_trb_found = false;
437         struct cdnsp_segment *new_seg;
438         struct cdnsp_ring *ep_ring;
439         union cdnsp_trb *new_deq;
440         bool cycle_found = false;
441         u64 hw_dequeue;
442
443         ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
444         if (!ep_ring)
445                 return;
446
447         /*
448          * Dig out the cycle state saved by the controller during the
449          * stop endpoint command.
450          */
451         hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id);
452         new_seg = ep_ring->deq_seg;
453         new_deq = ep_ring->dequeue;
454         state->new_cycle_state = hw_dequeue & 0x1;
455         state->stream_id = stream_id;
456
457         /*
458  * We want to find the pointer, segment and cycle state of the new TRB
459  * (the one after the current TD's last_trb). We know the cycle state at
460          * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
461          * found.
462          */
463         do {
464                 if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq)
465                     == (dma_addr_t)(hw_dequeue & ~0xf)) {
466                         cycle_found = true;
467
468                         if (td_last_trb_found)
469                                 break;
470                 }
471
472                 if (new_deq == cur_td->last_trb)
473                         td_last_trb_found = true;
474
475                 if (cycle_found && cdnsp_trb_is_link(new_deq) &&
476                     cdnsp_link_trb_toggles_cycle(new_deq))
477                         state->new_cycle_state ^= 0x1;
478
479                 cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq);
480
481                 /* Search wrapped around, bail out. */
482                 if (new_deq == pep->ring->dequeue) {
483                         dev_err(pdev->dev,
484                                 "Error: Failed finding new dequeue state\n");
485                         state->new_deq_seg = NULL;
486                         state->new_deq_ptr = NULL;
487                         return;
488                 }
489
490         } while (!cycle_found || !td_last_trb_found);
491
492         state->new_deq_seg = new_seg;
493         state->new_deq_ptr = new_deq;
494
495         trace_cdnsp_new_deq_state(state);
496 }
497
498 /*
499  * flip_cycle means flip the cycle bit of all but the first and last TRB.
500  * (The last TRB actually points to the ring enqueue pointer, which is not part
501  * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
502  */
503 static void cdnsp_td_to_noop(struct cdnsp_device *pdev,
504                              struct cdnsp_ring *ep_ring,
505                              struct cdnsp_td *td,
506                              bool flip_cycle)
507 {
508         struct cdnsp_segment *seg = td->start_seg;
509         union cdnsp_trb *trb = td->first_trb;
510
511         while (1) {
512                 cdnsp_trb_to_noop(trb, TRB_TR_NOOP);
513
514                 /* flip cycle if asked to */
515                 if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
516                         trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
517
518                 if (trb == td->last_trb)
519                         break;
520
521                 cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
522         }
523 }
524
525 /*
526  * This TD is defined by the TRBs starting at start_trb in start_seg and ending
527  * at end_trb, which may be in another segment. If the suspect DMA address is a
528  * TRB in this TD, this function returns that TRB's segment. Otherwise it
529  * returns NULL.
530  */
531 static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev,
532                                              struct cdnsp_segment *start_seg,
533                                              union cdnsp_trb *start_trb,
534                                              union cdnsp_trb *end_trb,
535                                              dma_addr_t suspect_dma)
536 {
537         struct cdnsp_segment *cur_seg;
538         union cdnsp_trb *temp_trb;
539         dma_addr_t end_seg_dma;
540         dma_addr_t end_trb_dma;
541         dma_addr_t start_dma;
542
543         start_dma = cdnsp_trb_virt_to_dma(start_seg, start_trb);
544         cur_seg = start_seg;
545
546         do {
547                 if (start_dma == 0)
548                         return NULL;
549
550                 temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1];
551                 /* We may get an event for a Link TRB in the middle of a TD */
552                 end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb);
553                 /* If the end TRB isn't in this segment, this is set to 0 */
554                 end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb);
555
556                 trace_cdnsp_looking_trb_in_td(suspect_dma, start_dma,
557                                               end_trb_dma, cur_seg->dma,
558                                               end_seg_dma);
559
560                 if (end_trb_dma > 0) {
561                         /*
562                          * The end TRB is in this segment, so suspect should
563                          * be here
564                          */
565                         if (start_dma <= end_trb_dma) {
566                                 if (suspect_dma >= start_dma &&
567                                     suspect_dma <= end_trb_dma) {
568                                         return cur_seg;
569                                 }
570                         } else {
571                                 /*
572                                  * Case for one segment with a
573                                  * TD wrapped around to the top
574                                  */
575                                 if ((suspect_dma >= start_dma &&
576                                      suspect_dma <= end_seg_dma) ||
577                                     (suspect_dma >= cur_seg->dma &&
578                                      suspect_dma <= end_trb_dma)) {
579                                         return cur_seg;
580                                 }
581                         }
582
583                         return NULL;
584                 }
585
586                 /* Might still be somewhere in this segment */
587                 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
588                         return cur_seg;
589
590                 cur_seg = cur_seg->next;
591                 start_dma = cdnsp_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
592         } while (cur_seg != start_seg);
593
594         return NULL;
595 }
596
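/*
 * Unmap the bounce buffer attached to this TD, if any. For IN transfers,
 * the received data is also copied from the bounce buffer back into the
 * request's scatterlist.
 */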
597 static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev,
598                                          struct cdnsp_ring *ring,
599                                          struct cdnsp_td *td)
600 {
601         struct cdnsp_segment *seg = td->bounce_seg;
602         struct cdnsp_request *preq;
603         size_t len;
604
605         if (!seg)
606                 return;
607
608         preq = td->preq;
609
610         trace_cdnsp_bounce_unmap(td->preq, seg->bounce_len, seg->bounce_offs,
611                                  seg->bounce_dma, 0);
612
613         if (!preq->direction) {
614                 dma_unmap_single(pdev->dev, seg->bounce_dma,
615                                  ring->bounce_buf_len, DMA_TO_DEVICE);
616                 return;
617         }
618
619         dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len,
620                          DMA_FROM_DEVICE);
621
622         /* For IN transfers we need to copy the data from bounce to sg. */
623         len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs,
624                                    seg->bounce_buf, seg->bounce_len,
625                                    seg->bounce_offs);
626         if (len != seg->bounce_len)
627                 dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n",
628                          len, seg->bounce_len);
629
630         seg->bounce_len = 0;
631         seg->bounce_offs = 0;
632 }
633
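/*
 * Queue a Set TR Dequeue Pointer command for the new dequeue state, wait
 * for its completion, then advance the driver's view of the ring dequeue
 * pointer (segment, pointer, num_trbs_free) to the new position.
 */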
634 static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev,
635                              struct cdnsp_ep *pep,
636                              struct cdnsp_dequeue_state *deq_state)
637 {
638         struct cdnsp_ring *ep_ring;
639         int ret;
640
641         if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) {
642                 cdnsp_ring_doorbell_for_active_rings(pdev, pep);
643                 return 0;
644         }
645
646         cdnsp_queue_new_dequeue_state(pdev, pep, deq_state);
647         cdnsp_ring_cmd_db(pdev);
648         ret = cdnsp_wait_for_cmd_compl(pdev);
649
650         trace_cdnsp_handle_cmd_set_deq(cdnsp_get_slot_ctx(&pdev->out_ctx));
651         trace_cdnsp_handle_cmd_set_deq_ep(pep->out_ctx);
652
653         /*
654          * Update the ring's dequeue segment and dequeue pointer
655          * to reflect the new position.
656          */
657         ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);
658
659         if (cdnsp_trb_is_link(ep_ring->dequeue)) {
660                 ep_ring->deq_seg = ep_ring->deq_seg->next;
661                 ep_ring->dequeue = ep_ring->deq_seg->trbs;
662         }
663
664         while (ep_ring->dequeue != deq_state->new_deq_ptr) {
665                 ep_ring->num_trbs_free++;
666                 ep_ring->dequeue++;
667
668                 if (cdnsp_trb_is_link(ep_ring->dequeue)) {
669                         if (ep_ring->dequeue == deq_state->new_deq_ptr)
670                                 break;
671
672                         ep_ring->deq_seg = ep_ring->deq_seg->next;
673                         ep_ring->dequeue = ep_ring->deq_seg->trbs;
674                 }
675         }
676
677         /*
678          * There was probably a timeout while handling the Set Dequeue Pointer
679          * command. It's a critical error and the controller will be stopped.
680          */
681         if (ret)
682                 return -ESHUTDOWN;
683
684         /* Restart any rings with pending requests */
685         cdnsp_ring_doorbell_for_active_rings(pdev, pep);
686
687         return 0;
688 }
689
690 int cdnsp_remove_request(struct cdnsp_device *pdev,
691                          struct cdnsp_request *preq,
692                          struct cdnsp_ep *pep)
693 {
694         struct cdnsp_dequeue_state deq_state;
695         struct cdnsp_td *cur_td = NULL;
696         struct cdnsp_ring *ep_ring;
697         struct cdnsp_segment *seg;
698         int status = -ECONNRESET;
699         int ret = 0;
700         u64 hw_deq;
701
702         memset(&deq_state, 0, sizeof(deq_state));
703
704         trace_cdnsp_remove_request(pep->out_ctx);
705         trace_cdnsp_remove_request_td(preq);
706
707         cur_td = &preq->td;
708         ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
709
710         /*
711          * If we stopped on the TD we need to cancel, then we have to
712          * move the controller endpoint ring dequeue pointer past
713          * this TD.
714          */
715         hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id);
716         hw_deq &= ~0xf;
717
718         seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
719                               cur_td->last_trb, hw_deq);
720
721         if (seg && (pep->ep_state & EP_ENABLED))
722                 cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
723                                              cur_td, &deq_state);
724         else
725                 cdnsp_td_to_noop(pdev, ep_ring, cur_td, false);
726
727         /*
728          * The event handler won't see a completion for this TD anymore,
729          * so remove it from the endpoint ring's TD list.
730          */
731         list_del_init(&cur_td->td_list);
732         ep_ring->num_tds--;
733         pep->stream_info.td_count--;
734
735         /*
736          * During disconnection all endpoints will be disabled, so we don't
737          * have to worry about updating the dequeue pointer.
738          */
739         if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING)
740                 status = -ESHUTDOWN;
741         else
742                 ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
743
744         cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td);
745         cdnsp_gadget_giveback(pep, cur_td->preq, status);
746
747         return ret;
748 }
749
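/*
 * Select the active port based on the port ID reported in a port status
 * event. Switching ports requires disabling the device slot on the old
 * port and re-enabling it on the new one.
 */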
750 static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
751 {
752         struct cdnsp_port *port = pdev->active_port;
753         u8 old_port = 0;
754
755         if (port && port->port_num == port_id)
756                 return 0;
757
758         if (port)
759                 old_port = port->port_num;
760
761         if (port_id == pdev->usb2_port.port_num) {
762                 port = &pdev->usb2_port;
763         } else if (port_id == pdev->usb3_port.port_num) {
764                 port = &pdev->usb3_port;
765         } else {
766                 dev_err(pdev->dev, "Port event with invalid port ID %d\n",
767                         port_id);
768                 return -EINVAL;
769         }
770
771         if (port_id != old_port) {
772                 cdnsp_disable_slot(pdev);
773                 pdev->active_port = port;
774                 cdnsp_enable_slot(pdev);
775         }
776
777         if (port_id == pdev->usb2_port.port_num)
778                 cdnsp_set_usb2_hardware_lpm(pdev, NULL, 1);
779         else
780                 writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1),
781                        &pdev->usb3_port.regs->portpmsc);
782
783         return 0;
784 }
785
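/*
 * Handle a Port Status Change event: acknowledge the reported change bits
 * and process link state, connect and reset changes.
 */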
786 static void cdnsp_handle_port_status(struct cdnsp_device *pdev,
787                                      union cdnsp_trb *event)
788 {
789         struct cdnsp_port_regs __iomem *port_regs;
790         u32 portsc, cmd_regs;
791         bool port2 = false;
792         u32 link_state;
793         u32 port_id;
794
795         /* Port status change events always have a successful completion code */
796         if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
797                 dev_err(pdev->dev, "ERR: incorrect PSC event\n");
798
799         port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
800
801         if (cdnsp_update_port_id(pdev, port_id))
802                 goto cleanup;
803
804         port_regs = pdev->active_port->regs;
805
806         if (port_id == pdev->usb2_port.port_num)
807                 port2 = true;
808
809 new_event:
810         portsc = readl(&port_regs->portsc);
811         writel(cdnsp_port_state_to_neutral(portsc) |
812                (portsc & PORT_CHANGE_BITS), &port_regs->portsc);
813
814         trace_cdnsp_handle_port_status(pdev->active_port->port_num, portsc);
815
816         pdev->gadget.speed = cdnsp_port_speed(portsc);
817         link_state = portsc & PORT_PLS_MASK;
818
819         /* Port Link State change detected. */
820         if ((portsc & PORT_PLC)) {
821                 if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
822                     link_state == XDEV_RESUME) {
823                         cmd_regs = readl(&pdev->op_regs->command);
824                         if (!(cmd_regs & CMD_R_S))
825                                 goto cleanup;
826
827                         if (DEV_SUPERSPEED_ANY(portsc)) {
828                                 cdnsp_set_link_state(pdev, &port_regs->portsc,
829                                                      XDEV_U0);
830
831                                 cdnsp_resume_gadget(pdev);
832                         }
833                 }
834
835                 if ((pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
836                     link_state == XDEV_U0) {
837                         pdev->cdnsp_state &= ~CDNSP_WAKEUP_PENDING;
838
839                         cdnsp_force_header_wakeup(pdev, 1);
840                         cdnsp_ring_cmd_db(pdev);
841                         cdnsp_wait_for_cmd_compl(pdev);
842                 }
843
844                 if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 &&
845                     !DEV_SUPERSPEED_ANY(portsc))
846                         cdnsp_resume_gadget(pdev);
847
848                 if (link_state == XDEV_U3 && pdev->link_state != XDEV_U3)
849                         cdnsp_suspend_gadget(pdev);
850
851                 pdev->link_state = link_state;
852         }
853
854         if (portsc & PORT_CSC) {
855                 /* Detach device. */
856                 if (pdev->gadget.connected && !(portsc & PORT_CONNECT))
857                         cdnsp_disconnect_gadget(pdev);
858
859                 /* Attach device. */
860                 if (portsc & PORT_CONNECT) {
861                         if (!port2)
862                                 cdnsp_irq_reset(pdev);
863
864                         usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED);
865                 }
866         }
867
868         /* Port reset. */
869         if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) {
870                 cdnsp_irq_reset(pdev);
871                 pdev->u1_allowed = 0;
872                 pdev->u2_allowed = 0;
873                 pdev->may_wakeup = 0;
874         }
875
876         if (portsc & PORT_OCC)
877                 dev_err(pdev->dev, "Port Over Current detected\n");
878
879         if (portsc & PORT_CEC)
880                 dev_err(pdev->dev, "Port Configure Error detected\n");
881
882         if (readl(&port_regs->portsc) & PORT_CHANGE_BITS)
883                 goto new_event;
884
885 cleanup:
886         cdnsp_inc_deq(pdev, pdev->event_ring);
887 }
888
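/*
 * Unmap the TD's bounce buffer (if any), remove the TD from the endpoint
 * ring's TD list and give the request back with the final status.
 */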
889 static void cdnsp_td_cleanup(struct cdnsp_device *pdev,
890                              struct cdnsp_td *td,
891                              struct cdnsp_ring *ep_ring,
892                              int *status)
893 {
894         struct cdnsp_request *preq = td->preq;
895
896         /* if a bounce buffer was used to align this td then unmap it */
897         cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);
898
899         /*
900          * If the controller said we transferred more data than the buffer
901          * length, Play it safe and say we didn't transfer anything.
902          */
903         if (preq->request.actual > preq->request.length) {
904                 preq->request.actual = 0;
905                 *status = 0;
906         }
907
908         list_del_init(&td->td_list);
909         ep_ring->num_tds--;
910         preq->pep->stream_info.td_count--;
911
912         cdnsp_gadget_giveback(preq->pep, preq, *status);
913 }
914
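/*
 * Finish off a completed TD: advance the ring dequeue pointer past it and
 * clean it up, unless the TD was merely stopped (a stopped TD may be
 * restarted, so it must stay on the ring).
 */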
915 static void cdnsp_finish_td(struct cdnsp_device *pdev,
916                             struct cdnsp_td *td,
917                             struct cdnsp_transfer_event *event,
918                             struct cdnsp_ep *ep,
919                             int *status)
920 {
921         struct cdnsp_ring *ep_ring;
922         u32 trb_comp_code;
923
924         ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
925         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
926
927         if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
928             trb_comp_code == COMP_STOPPED ||
929             trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
930                 /*
931                  * The Endpoint Stop Command completion will take care of any
932                  * stopped TDs. A stopped TD may be restarted, so don't update
933                  * the ring dequeue pointer or take this TD off any lists yet.
934                  */
935                 return;
936         }
937
938         /* Update ring dequeue pointer */
939         while (ep_ring->dequeue != td->last_trb)
940                 cdnsp_inc_deq(pdev, ep_ring);
941
942         cdnsp_inc_deq(pdev, ep_ring);
943
944         cdnsp_td_cleanup(pdev, td, ep_ring, status);
945 }
946
947 /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
948 static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
949                                  struct cdnsp_ring *ring,
950                                  union cdnsp_trb *stop_trb)
951 {
952         struct cdnsp_segment *seg = ring->deq_seg;
953         union cdnsp_trb *trb = ring->dequeue;
954         u32 sum;
955
956         for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) {
957                 if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb))
958                         sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
959         }
960         return sum;
961 }
962
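/*
 * Hand the first TRB of a prepared TD over to the hardware by writing its
 * cycle bit last, then ring the endpoint doorbell (unless a stream capable
 * endpoint is still waiting for its first prime).
 */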
963 static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev,
964                                     struct cdnsp_ep *pep,
965                                     unsigned int stream_id,
966                                     int start_cycle,
967                                     struct cdnsp_generic_trb *start_trb)
968 {
969         /*
970          * Pass all the TRBs to the hardware at once and make sure this write
971          * isn't reordered.
972          */
973         wmb();
974
975         if (start_cycle)
976                 start_trb->field[3] |= cpu_to_le32(start_cycle);
977         else
978                 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
979
980         if ((pep->ep_state & EP_HAS_STREAMS) &&
981             !pep->stream_info.first_prime_det) {
982                 trace_cdnsp_wait_for_prime(pep, stream_id);
983                 return 0;
984         }
985
986         return cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
987 }
988
989 /*
990  * Process control TDs, update USB request status and actual_length.
991  */
992 static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
993                                   struct cdnsp_td *td,
994                                   union cdnsp_trb *event_trb,
995                                   struct cdnsp_transfer_event *event,
996                                   struct cdnsp_ep *pep,
997                                   int *status)
998 {
999         struct cdnsp_ring *ep_ring;
1000         u32 remaining;
1001         u32 trb_type;
1002
1003         trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3]));
1004         ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1005         remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1006
1007         /*
1008          * If on the data stage, update the actual_length of the USB
1009          * request and flag it as set, so it won't be overwritten in the event
1010          * for the last TRB.
1011          */
1012         if (trb_type == TRB_DATA) {
1013                 td->request_length_set = true;
1014                 td->preq->request.actual = td->preq->request.length - remaining;
1015         }
1016
1017         /* At the status stage. */
1018         if (!td->request_length_set)
1019                 td->preq->request.actual = td->preq->request.length;
1020
1021         if (pdev->ep0_stage == CDNSP_DATA_STAGE && pep->number == 0 &&
1022             pdev->three_stage_setup) {
1023                 td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
1024                                 td_list);
1025                 pdev->ep0_stage = CDNSP_STATUS_STAGE;
1026
1027                 cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state,
1028                                          &td->last_trb->generic);
1029                 return;
1030         }
1031
1032         *status = 0;
1033
1034         cdnsp_finish_td(pdev, td, event, pep, status);
1035 }
1036
1037 /*
1038  * Process isochronous TDs, update USB request status and actual_length.
1039  */
1040 static void cdnsp_process_isoc_td(struct cdnsp_device *pdev,
1041                                   struct cdnsp_td *td,
1042                                   union cdnsp_trb *ep_trb,
1043                                   struct cdnsp_transfer_event *event,
1044                                   struct cdnsp_ep *pep,
1045                                   int status)
1046 {
1047         struct cdnsp_request *preq = td->preq;
1048         u32 remaining, requested, ep_trb_len;
1049         bool sum_trbs_for_length = false;
1050         struct cdnsp_ring *ep_ring;
1051         u32 trb_comp_code;
1052         u32 td_length;
1053
1054         ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1055         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1056         remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1057         ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
1058
1059         requested = preq->request.length;
1060
1061         /* handle completion code */
1062         switch (trb_comp_code) {
1063         case COMP_SUCCESS:
1064                 preq->request.status = 0;
1065                 break;
1066         case COMP_SHORT_PACKET:
1067                 preq->request.status = 0;
1068                 sum_trbs_for_length = true;
1069                 break;
1070         case COMP_ISOCH_BUFFER_OVERRUN:
1071         case COMP_BABBLE_DETECTED_ERROR:
1072                 preq->request.status = -EOVERFLOW;
1073                 break;
1074         case COMP_STOPPED:
1075                 sum_trbs_for_length = true;
1076                 break;
1077         case COMP_STOPPED_SHORT_PACKET:
1078                 /* Field normally containing residue now contains transferred length. */
1079                 preq->request.status  = 0;
1080                 requested = remaining;
1081                 break;
1082         case COMP_STOPPED_LENGTH_INVALID:
1083                 requested = 0;
1084                 remaining = 0;
1085                 break;
1086         default:
1087                 sum_trbs_for_length = true;
1088                 preq->request.status = -1;
1089                 break;
1090         }
1091
1092         if (sum_trbs_for_length) {
1093                 td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb);
1094                 td_length += ep_trb_len - remaining;
1095         } else {
1096                 td_length = requested;
1097         }
1098
1099         td->preq->request.actual += td_length;
1100
1101         cdnsp_finish_td(pdev, td, event, pep, &status);
1102 }
1103
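/*
 * Complete a missed isoc TD with -EXDEV and no data, and advance the ring
 * dequeue pointer past it.
 */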
1104 static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev,
1105                                struct cdnsp_td *td,
1106                                struct cdnsp_transfer_event *event,
1107                                struct cdnsp_ep *pep,
1108                                int status)
1109 {
1110         struct cdnsp_ring *ep_ring;
1111
1112         ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1113         td->preq->request.status = -EXDEV;
1114         td->preq->request.actual = 0;
1115
1116         /* Update ring dequeue pointer */
1117         while (ep_ring->dequeue != td->last_trb)
1118                 cdnsp_inc_deq(pdev, ep_ring);
1119
1120         cdnsp_inc_deq(pdev, ep_ring);
1121
1122         cdnsp_td_cleanup(pdev, td, ep_ring, &status);
1123 }
1124
1125 /*
1126  * Process bulk and interrupt TDs, update USB request status and actual_length.
1127  */
1128 static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev,
1129                                        struct cdnsp_td *td,
1130                                        union cdnsp_trb *ep_trb,
1131                                        struct cdnsp_transfer_event *event,
1132                                        struct cdnsp_ep *ep,
1133                                        int *status)
1134 {
1135         u32 remaining, requested, ep_trb_len;
1136         struct cdnsp_ring *ep_ring;
1137         u32 trb_comp_code;
1138
1139         ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1140         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1141         remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1142         ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
1143         requested = td->preq->request.length;
1144
1145         switch (trb_comp_code) {
1146         case COMP_SUCCESS:
1147         case COMP_SHORT_PACKET:
1148                 *status = 0;
1149                 break;
1150         case COMP_STOPPED_SHORT_PACKET:
1151                 td->preq->request.actual = remaining;
1152                 goto finish_td;
1153         case COMP_STOPPED_LENGTH_INVALID:
1154                 /* Stopped on ep trb with invalid length, exclude it. */
1155                 ep_trb_len = 0;
1156                 remaining = 0;
1157                 break;
1158         }
1159
1160         if (ep_trb == td->last_trb)
1161                 ep_trb_len = requested - remaining;
1162         else
1163                 ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) +
1164                                                    ep_trb_len - remaining;
1165         td->preq->request.actual = ep_trb_len;
1166
1167 finish_td:
1168         ep->stream_info.drbls_count--;
1169
1170         cdnsp_finish_td(pdev, td, event, ep, status);
1171 }
1172
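/*
 * Handle a stream transfer event. A Prime ACK from the host activates all
 * stream rings of the endpoint; a rejected stream is marked inactive and
 * its TDs lose their doorbell mark so the doorbell can be rung again later.
 */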
1173 static void cdnsp_handle_tx_nrdy(struct cdnsp_device *pdev,
1174                                  struct cdnsp_transfer_event *event)
1175 {
1176         struct cdnsp_generic_trb *generic;
1177         struct cdnsp_ring *ep_ring;
1178         struct cdnsp_ep *pep;
1179         int cur_stream;
1180         int ep_index;
1181         int host_sid;
1182         int dev_sid;
1183
1184         generic = (struct cdnsp_generic_trb *)event;
1185         ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1186         dev_sid = TRB_TO_DEV_STREAM(le32_to_cpu(generic->field[0]));
1187         host_sid = TRB_TO_HOST_STREAM(le32_to_cpu(generic->field[2]));
1188
1189         pep = &pdev->eps[ep_index];
1190
1191         if (!(pep->ep_state & EP_HAS_STREAMS))
1192                 return;
1193
1194         if (host_sid == STREAM_PRIME_ACK) {
1195                 pep->stream_info.first_prime_det = 1;
1196                 for (cur_stream = 1; cur_stream < pep->stream_info.num_streams;
1197                     cur_stream++) {
1198                         ep_ring = pep->stream_info.stream_rings[cur_stream];
1199                         ep_ring->stream_active = 1;
1200                         ep_ring->stream_rejected = 0;
1201                 }
1202         }
1203
1204         if (host_sid == STREAM_REJECTED) {
1205                 struct cdnsp_td *td, *td_temp;
1206
1207                 pep->stream_info.drbls_count--;
1208                 ep_ring = pep->stream_info.stream_rings[dev_sid];
1209                 ep_ring->stream_active = 0;
1210                 ep_ring->stream_rejected = 1;
1211
1212                 list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
1213                                          td_list) {
1214                         td->drbl = 0;
1215                 }
1216         }
1217
1218         cdnsp_ring_doorbell_for_active_rings(pdev, pep);
1219 }
1220
1221 /*
1222  * If this function returns an error condition, it means it got a Transfer
1223  * event with a corrupted TRB DMA address or the endpoint is disabled.
1224  */
1225 static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
1226                                  struct cdnsp_transfer_event *event)
1227 {
1228         const struct usb_endpoint_descriptor *desc;
1229         bool handling_skipped_tds = false;
1230         struct cdnsp_segment *ep_seg;
1231         struct cdnsp_ring *ep_ring;
1232         int status = -EINPROGRESS;
1233         union cdnsp_trb *ep_trb;
1234         dma_addr_t ep_trb_dma;
1235         struct cdnsp_ep *pep;
1236         struct cdnsp_td *td;
1237         u32 trb_comp_code;
1238         int invalidate;
1239         int ep_index;
1240
1241         invalidate = le32_to_cpu(event->flags) & TRB_EVENT_INVALIDATE;
1242         ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1243         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1244         ep_trb_dma = le64_to_cpu(event->buffer);
1245
1246         pep = &pdev->eps[ep_index];
1247         ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1248
1249         /*
1250          * If the device is disconnected, all requests will be dequeued
1251          * by the upper layers as part of the disconnect sequence.
1252          * Don't handle such events, to avoid racing.
1253          */
1254         if (invalidate || !pdev->gadget.connected)
1255                 goto cleanup;
1256
1257         if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) {
1258                 trace_cdnsp_ep_disabled(pep->out_ctx);
1259                 goto err_out;
1260         }
1261
1262         /* Some transfer events don't always point to a TRB. */
1263         if (!ep_ring) {
1264                 switch (trb_comp_code) {
1265                 case COMP_INVALID_STREAM_TYPE_ERROR:
1266                 case COMP_INVALID_STREAM_ID_ERROR:
1267                 case COMP_RING_UNDERRUN:
1268                 case COMP_RING_OVERRUN:
1269                         goto cleanup;
1270                 default:
1271                         dev_err(pdev->dev, "ERROR: %s event for unknown ring\n",
1272                                 pep->name);
1273                         goto err_out;
1274                 }
1275         }
1276
1277         /* Look for some error cases that need special treatment. */
1278         switch (trb_comp_code) {
1279         case COMP_BABBLE_DETECTED_ERROR:
1280                 status = -EOVERFLOW;
1281                 break;
1282         case COMP_RING_UNDERRUN:
1283         case COMP_RING_OVERRUN:
1284                 /*
1285                  * When the Isoch ring is empty, the controller will generate
1286                  * a Ring Overrun Event for IN Isoch endpoint or Ring
1287                  * Underrun Event for OUT Isoch endpoint.
1288                  */
1289                 goto cleanup;
1290         case COMP_MISSED_SERVICE_ERROR:
1291                 /*
1292                  * When a Missed Service Error is encountered, one or more
1293                  * isoc TDs may have been missed by the controller.
1294                  * Set the skip flag of the ep_ring; complete the missed TDs
1295                  * as short transfers when processing the ep_ring next time.
1296                  */
1297                 pep->skip = true;
1298                 break;
1299         }
1300
1301         do {
1302                 /*
1303                  * This TRB should be in the TD at the head of this ring's TD
1304                  * list.
1305                  */
1306                 if (list_empty(&ep_ring->td_list)) {
1307                         /*
1308                          * Don't print warnings if it's due to a stopped
1309                          * endpoint generating an extra completion event, or
1310                          * an event for the last TRB of a short TD we already
1311                          * got a short event for.
1312                          * The short TD is already removed from the TD list.
1313                          */
1314                         if (!(trb_comp_code == COMP_STOPPED ||
1315                               trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
1316                               ep_ring->last_td_was_short))
1317                                 trace_cdnsp_trb_without_td(ep_ring,
1318                                         (struct cdnsp_generic_trb *)event);
1319
1320                         if (pep->skip) {
1321                                 pep->skip = false;
1322                                 trace_cdnsp_ep_list_empty_with_skip(pep, 0);
1323                         }
1324
1325                         goto cleanup;
1326                 }
1327
1328                 td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
1329                                 td_list);
1330
1331                 /* Is this a TRB in the currently executing TD? */
1332                 ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
1333                                          ep_ring->dequeue, td->last_trb,
1334                                          ep_trb_dma);
1335
1336                 desc = td->preq->pep->endpoint.desc;
1337
1338                 if (ep_seg) {
1339                         ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
1340                                                / sizeof(*ep_trb)];
1341
1342                         trace_cdnsp_handle_transfer(ep_ring,
1343                                         (struct cdnsp_generic_trb *)ep_trb);
1344
1345                         if (pep->skip && usb_endpoint_xfer_isoc(desc) &&
1346                             td->last_trb != ep_trb)
1347                                 return -EAGAIN;
1348                 }
1349
1350                 /*
1351                  * Skip the Force Stopped Event. The event_trb(ep_trb_dma)
1352                  * of FSE is not in the current TD pointed to by
1353                  * ep_ring->dequeue because the hardware dequeue pointer is
1354                  * still at the previous TRB of the current TD. The previous
1355                  * TRB may be a Link TRB or the last TRB of the previous TD.
1356                  * The command completion handler will take care of the rest.
1357                  */
1358                 if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
1359                                 trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
1360                         pep->skip = false;
1361                         goto cleanup;
1362                 }
1363
1364                 if (!ep_seg) {
1365                         if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) {
1366                                 /* Something is busted, give up! */
1367                                 dev_err(pdev->dev,
1368                                         "ERROR Transfer event TRB DMA ptr not "
1369                                         "part of current TD ep_index %d "
1370                                         "comp_code %u\n", ep_index,
1371                                         trb_comp_code);
1372                                 return -EINVAL;
1373                         }
1374
1375                         cdnsp_skip_isoc_td(pdev, td, event, pep, status);
1376                         goto cleanup;
1377                 }
1378
1379                 if (trb_comp_code == COMP_SHORT_PACKET)
1380                         ep_ring->last_td_was_short = true;
1381                 else
1382                         ep_ring->last_td_was_short = false;
1383
1384                 if (pep->skip) {
1385                         pep->skip = false;
1386                         cdnsp_skip_isoc_td(pdev, td, event, pep, status);
1387                         goto cleanup;
1388                 }
1389
1390                 if (cdnsp_trb_is_noop(ep_trb))
1391                         goto cleanup;
1392
1393                 if (usb_endpoint_xfer_control(desc))
1394                         cdnsp_process_ctrl_td(pdev, td, ep_trb, event, pep,
1395                                               &status);
1396                 else if (usb_endpoint_xfer_isoc(desc))
1397                         cdnsp_process_isoc_td(pdev, td, ep_trb, event, pep,
1398                                               status);
1399                 else
1400                         cdnsp_process_bulk_intr_td(pdev, td, ep_trb, event, pep,
1401                                                    &status);
1402 cleanup:
1403                 handling_skipped_tds = pep->skip;
1404
1405                 /*
1406                  * Do not update the event ring dequeue pointer while in a
1407                  * loop processing missed TDs.
1408                  */
1409                 if (!handling_skipped_tds)
1410                         cdnsp_inc_deq(pdev, pdev->event_ring);
1411
1412         /*
1413          * If ep->skip is set, there are missed TDs on the endpoint ring
1414          * that need to be taken care of. Process them as short transfers
1415          * until we reach the TD pointed to by the event.
1416          */
1418         } while (handling_skipped_tds);
1419         return 0;
1420
1421 err_out:
1422         dev_err(pdev->dev, "@%016llx %08x %08x %08x %08x\n",
1423                 (unsigned long long)
1424                 cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
1425                                       pdev->event_ring->dequeue),
1426                 lower_32_bits(le64_to_cpu(event->buffer)),
1427                 upper_32_bits(le64_to_cpu(event->buffer)),
1428                 le32_to_cpu(event->transfer_len),
1429                 le32_to_cpu(event->flags));
1430         return -EINVAL;
1431 }
1432
1433 /*
1434  * Handle one event on the event ring. Returns true if there may be more
1435  * events to process (the caller should call again), or false once no
1436  * software-owned event is left.
1437  */
1438 static bool cdnsp_handle_event(struct cdnsp_device *pdev)
1439 {
1440         unsigned int comp_code;
1441         union cdnsp_trb *event;
1442         bool update_ptrs = true;
1443         u32 cycle_bit;
1444         int ret = 0;
1445         u32 flags;
1446
1447         event = pdev->event_ring->dequeue;
1448         flags = le32_to_cpu(event->event_cmd.flags);
1449         cycle_bit = (flags & TRB_CYCLE);
1450
1451         /* Does the controller or driver own the TRB? */
1452         if (cycle_bit != pdev->event_ring->cycle_state)
1453                 return false;
1454
1455         trace_cdnsp_handle_event(pdev->event_ring, &event->generic);
1456
1457         /*
1458          * Barrier between reading the TRB_CYCLE (valid) flag above and any
1459          * reads of the event's flags/data below.
1460          */
1461         rmb();
1462
1463         switch (flags & TRB_TYPE_BITMASK) {
1464         case TRB_TYPE(TRB_COMPLETION):
1465                 /*
1466                  * Commands can't be handled in interrupt context, so just
1467                  * increment the command ring dequeue pointer here.
1468                  */
1469                 cdnsp_inc_deq(pdev, pdev->cmd_ring);
1470                 break;
1471         case TRB_TYPE(TRB_PORT_STATUS):
1472                 cdnsp_handle_port_status(pdev, event);
1473                 update_ptrs = false;
1474                 break;
1475         case TRB_TYPE(TRB_TRANSFER):
1476                 ret = cdnsp_handle_tx_event(pdev, &event->trans_event);
1477                 if (ret >= 0)
1478                         update_ptrs = false;
1479                 break;
1480         case TRB_TYPE(TRB_SETUP):
1481                 pdev->ep0_stage = CDNSP_SETUP_STAGE;
1482                 pdev->setup_id = TRB_SETUPID_TO_TYPE(flags);
1483                 pdev->setup_speed = TRB_SETUP_SPEEDID(flags);
1484                 pdev->setup = *((struct usb_ctrlrequest *)
1485                                 &event->trans_event.buffer);
1486
1487                 cdnsp_setup_analyze(pdev);
1488                 break;
1489         case TRB_TYPE(TRB_ENDPOINT_NRDY):
1490                 cdnsp_handle_tx_nrdy(pdev, &event->trans_event);
1491                 break;
1492         case TRB_TYPE(TRB_HC_EVENT): {
1493                 comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
1494
1495                 switch (comp_code) {
1496                 case COMP_EVENT_RING_FULL_ERROR:
1497                         dev_err(pdev->dev, "Event Ring Full\n");
1498                         break;
1499                 default:
1500                         dev_err(pdev->dev, "Controller error code 0x%02x\n",
1501                                 comp_code);
1502                 }
1503
1504                 break;
1505         }
1506         case TRB_TYPE(TRB_MFINDEX_WRAP):
1507         case TRB_TYPE(TRB_DRB_OVERFLOW):
1508                 break;
1509         default:
1510                 dev_warn(pdev->dev, "ERROR unknown event type %ld\n",
1511                          TRB_FIELD_TO_TYPE(flags));
1512         }
1513
1514         if (update_ptrs)
1515                 /* Update SW event ring dequeue pointer. */
1516                 cdnsp_inc_deq(pdev, pdev->event_ring);
1517
1518         /*
1519          * Caller will call us again to check if there are more items
1520          * on the event ring.
1521          */
1522         return true;
1523 }
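
/*
 * Illustrative sketch (hypothetical helper, not a driver code path): the
 * ownership test used in cdnsp_handle_event() above, isolated. An event
 * TRB is owned by software only while its cycle bit matches the
 * consumer's ring cycle state.
 */
static inline bool example_sw_owns_event_trb(u32 trb_flags, u32 cycle_state)
{
	/* TRB_CYCLE is bit 0 of the TRB control word, so both sides are 0/1. */
	return (trb_flags & TRB_CYCLE) == cycle_state;
}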
1524
1525 irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
1526 {
1527         struct cdnsp_device *pdev = (struct cdnsp_device *)data;
1528         union cdnsp_trb *event_ring_deq;
1529         unsigned long flags;
1530         int counter = 0;
1531
1532         spin_lock_irqsave(&pdev->lock, flags);
1533
1534         if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
1535                 /*
1536                  * While the driver is being removed or stopped, there may
1537                  * still be a deferred, not yet handled, interrupt. It
1538                  * should not be treated as an error; simply ignore it.
1539                  */
1540                 if (pdev->gadget_driver)
1541                         cdnsp_died(pdev);
1542
1543                 spin_unlock_irqrestore(&pdev->lock, flags);
1544                 return IRQ_HANDLED;
1545         }
1546
1547         event_ring_deq = pdev->event_ring->dequeue;
1548
1549         while (cdnsp_handle_event(pdev)) {
1550                 if (++counter >= TRBS_PER_EV_DEQ_UPDATE) {
1551                         cdnsp_update_erst_dequeue(pdev, event_ring_deq, 0);
1552                         event_ring_deq = pdev->event_ring->dequeue;
1553                         counter = 0;
1554                 }
1555         }
1556
1557         cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
1558
1559         spin_unlock_irqrestore(&pdev->lock, flags);
1560
1561         return IRQ_HANDLED;
1562 }
1563
1564 irqreturn_t cdnsp_irq_handler(int irq, void *priv)
1565 {
1566         struct cdnsp_device *pdev = (struct cdnsp_device *)priv;
1567         u32 irq_pending;
1568         u32 status;
1569
1570         status = readl(&pdev->op_regs->status);
1571
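        /* A status of all ones means the controller died or was removed. */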
1572         if (status == ~(u32)0) {
1573                 cdnsp_died(pdev);
1574                 return IRQ_HANDLED;
1575         }
1576
1577         if (!(status & STS_EINT))
1578                 return IRQ_NONE;
1579
1580         writel(status | STS_EINT, &pdev->op_regs->status);
1581         irq_pending = readl(&pdev->ir_set->irq_pending);
1582         irq_pending |= IMAN_IP;
1583         writel(irq_pending, &pdev->ir_set->irq_pending);
1584
1585         if (status & STS_FATAL) {
1586                 cdnsp_died(pdev);
1587                 return IRQ_HANDLED;
1588         }
1589
1590         return IRQ_WAKE_THREAD;
1591 }
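
/*
 * Illustrative sketch: how a hard/threaded handler pair like the two above
 * is typically registered with request_threaded_irq() from
 * <linux/interrupt.h>. The IRQF_SHARED flag and the "cdnsp" name are
 * assumptions for illustration, not taken from this driver's probe code.
 */
static int __maybe_unused example_register_cdnsp_irq(struct cdnsp_device *pdev,
						     int irq)
{
	return request_threaded_irq(irq, cdnsp_irq_handler,
				    cdnsp_thread_irq_handler, IRQF_SHARED,
				    "cdnsp", pdev);
}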
1592
1593 /*
1594  * Generic function for queuing a TRB on a ring.
1595  * The caller must have checked to make sure there's room on the ring.
1596  *
1597  * @more_trbs_coming:   Will you enqueue more TRBs before ringing the doorbell?
1598  */
1599 static void cdnsp_queue_trb(struct cdnsp_device *pdev, struct cdnsp_ring *ring,
1600                             bool more_trbs_coming, u32 field1, u32 field2,
1601                             u32 field3, u32 field4)
1602 {
1603         struct cdnsp_generic_trb *trb;
1604
1605         trb = &ring->enqueue->generic;
1606
1607         trb->field[0] = cpu_to_le32(field1);
1608         trb->field[1] = cpu_to_le32(field2);
1609         trb->field[2] = cpu_to_le32(field3);
1610         trb->field[3] = cpu_to_le32(field4);
1611
1612         trace_cdnsp_queue_trb(ring, trb);
1613         cdnsp_inc_enq(pdev, ring, more_trbs_coming);
1614 }
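
/*
 * Usage sketch (hypothetical helper, not a driver code path): queuing a
 * single software-owned no-op TRB with the current cycle state, assuming
 * the TRB_TR_NOOP type constant that cdnsp_td_to_noop() uses elsewhere
 * in this file.
 */
static inline void example_queue_noop_trb(struct cdnsp_device *pdev,
					  struct cdnsp_ring *ring)
{
	cdnsp_queue_trb(pdev, ring, false, 0, 0, TRB_INTR_TARGET(0),
			TRB_TYPE(TRB_TR_NOOP) | ring->cycle_state);
}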
1615
1616 /*
1617  * Does various checks on the endpoint ring, and makes it ready to
1618  * queue num_trbs.
1619  */
1620 static int cdnsp_prepare_ring(struct cdnsp_device *pdev,
1621                               struct cdnsp_ring *ep_ring,
1622                               u32 ep_state, unsigned int num_trbs,
1623                               gfp_t mem_flags)
1625 {
1626         unsigned int num_trbs_needed;
1627
1628         /* Make sure the endpoint has been added to the controller's schedule. */
1629         switch (ep_state) {
1630         case EP_STATE_STOPPED:
1631         case EP_STATE_RUNNING:
1632         case EP_STATE_HALTED:
1633                 break;
1634         default:
1635                 dev_err(pdev->dev, "ERROR: incorrect endpoint state\n");
1636                 return -EINVAL;
1637         }
1638
1639         while (1) {
1640                 if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs))
1641                         break;
1642
1643                 trace_cdnsp_no_room_on_ring("try ring expansion");
1644
1645                 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
1646                 if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed,
1647                                          mem_flags)) {
1648                         dev_err(pdev->dev, "Ring expansion failed\n");
1649                         return -ENOMEM;
1650                 }
1651         }
1652
1653         while (cdnsp_trb_is_link(ep_ring->enqueue)) {
1654                 ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
1655                 /* The cycle bit must be set as the last operation. */
1656                 wmb();
1657                 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
1658
1659                 /* Toggle the cycle bit after the last ring segment. */
1660                 if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
1661                         ep_ring->cycle_state ^= 1;
1662                 ep_ring->enq_seg = ep_ring->enq_seg->next;
1663                 ep_ring->enqueue = ep_ring->enq_seg->trbs;
1664         }
1665         return 0;
1666 }
1667
1668 static int cdnsp_prepare_transfer(struct cdnsp_device *pdev,
1669                                   struct cdnsp_request *preq,
1670                                   unsigned int num_trbs)
1671 {
1672         struct cdnsp_ring *ep_ring;
1673         int ret;
1674
1675         ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep,
1676                                           preq->request.stream_id);
1677         if (!ep_ring)
1678                 return -EINVAL;
1679
1680         ret = cdnsp_prepare_ring(pdev, ep_ring,
1681                                  GET_EP_CTX_STATE(preq->pep->out_ctx),
1682                                  num_trbs, GFP_ATOMIC);
1683         if (ret)
1684                 return ret;
1685
1686         INIT_LIST_HEAD(&preq->td.td_list);
1687         preq->td.preq = preq;
1688
1689         /* Add this TD to the tail of the endpoint ring's TD list. */
1690         list_add_tail(&preq->td.td_list, &ep_ring->td_list);
1691         ep_ring->num_tds++;
1692         preq->pep->stream_info.td_count++;
1693
1694         preq->td.start_seg = ep_ring->enq_seg;
1695         preq->td.first_trb = ep_ring->enqueue;
1696
1697         return 0;
1698 }
1699
1700 static unsigned int cdnsp_count_trbs(u64 addr, u64 len)
1701 {
1702         unsigned int num_trbs;
1703
1704         num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
1705                                 TRB_MAX_BUFF_SIZE);
1706         if (num_trbs == 0)
1707                 num_trbs++;
1708
1709         return num_trbs;
1710 }
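
/*
 * Worked example (hypothetical helper; assumes TRB_MAX_BUFF_SIZE is
 * 64 KiB, as the "64KB boundaries" comments in this file indicate): a
 * 64 KiB transfer that starts boundary-aligned fits in one TRB, while the
 * same length starting 0x100 bytes past a boundary spills into a second
 * TRB.
 */
static void __maybe_unused example_count_trbs_math(void)
{
	unsigned int aligned = cdnsp_count_trbs(0x10000, 0x10000);
	unsigned int unaligned = cdnsp_count_trbs(0x10100, 0x10000);

	WARN_ON(aligned != 1 || unaligned != 2);
}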
1711
1712 static unsigned int count_trbs_needed(struct cdnsp_request *preq)
1713 {
1714         return cdnsp_count_trbs(preq->request.dma, preq->request.length);
1715 }
1716
1717 static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
1718 {
1719         unsigned int i, len, full_len, num_trbs = 0;
1720         struct scatterlist *sg;
1721
1722         full_len = preq->request.length;
1723
1724         for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) {
1725                 len = sg_dma_len(sg);
1726                 num_trbs += cdnsp_count_trbs(sg_dma_address(sg), len);
1727                 len = min(len, full_len);
1728                 full_len -= len;
1729                 if (full_len == 0)
1730                         break;
1731         }
1732
1733         return num_trbs;
1734 }
1735
1736 static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
1737 {
1738         if (running_total != preq->request.length)
1739                 dev_err(preq->pep->pdev->dev,
1740                         "%s - Miscalculated tx length, queued %#x, asked for %#x (%d)\n",
1741                         preq->pep->name, running_total,
1742                         preq->request.length, preq->request.actual);
1744 }
1745
1746 /*
1747  * TD size is the number of max packet sized packets remaining in the TD
1748  * (*not* including this TRB).
1749  *
1750  * Total TD packet count = total_packet_count =
1751  *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
1752  *
1753  * Packets transferred up to and including this TRB = packets_transferred =
1754  *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
1755  *
1756  * TD size = total_packet_count - packets_transferred
1757  *
1758  * It must fit in bits 21:17, so it can't be bigger than 31.
1759  * This is taken care of in the TRB_TD_SIZE() macro.
1760  *
1761  * The last TRB in a TD must have the TD size set to zero.
1762  */
1763 static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
1764                               int transferred,
1765                               int trb_buff_len,
1766                               unsigned int td_total_len,
1767                               struct cdnsp_request *preq,
1768                               bool more_trbs_coming,
1769                               bool zlp)
1770 {
1771         u32 maxp, total_packet_count;
1772
1773         /* Before a ZLP the driver needs to set TD_SIZE = 1. */
1774         if (zlp)
1775                 return 1;
1776
1777         /* The last TRB, a zero-length TRB, or a TD that fits in one TRB. */
1778         if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
1779             trb_buff_len == td_total_len)
1780                 return 0;
1781
1782         maxp = usb_endpoint_maxp(preq->pep->endpoint.desc);
1783         total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
1784
1785         /* Queuing functions don't count the current TRB into transferred. */
1786         return (total_packet_count - ((transferred + trb_buff_len) / maxp));
1787 }
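
/*
 * Worked example (hypothetical helper, plain arithmetic): for a
 * 3000-byte TD on a 512-byte endpoint, total_packet_count =
 * DIV_ROUND_UP(3000, 512) = 6. After a first 1024-byte TRB
 * (transferred = 0, trb_buff_len = 1024), (0 + 1024) / 512 = 2 packets
 * are covered, so the TD size field of that TRB is 6 - 2 = 4.
 */
static void __maybe_unused example_td_remainder_math(void)
{
	unsigned int maxp = 512, td_total_len = 3000;
	unsigned int transferred = 0, trb_buff_len = 1024;
	unsigned int total_packet_count = DIV_ROUND_UP(td_total_len, maxp);

	WARN_ON(total_packet_count -
		(transferred + trb_buff_len) / maxp != 4);
}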
1788
1789 static int cdnsp_align_td(struct cdnsp_device *pdev,
1790                           struct cdnsp_request *preq, u32 enqd_len,
1791                           u32 *trb_buff_len, struct cdnsp_segment *seg)
1792 {
1793         struct device *dev = pdev->dev;
1794         unsigned int unalign;
1795         unsigned int max_pkt;
1796         u32 new_buff_len;
1797
1798         max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
1799         unalign = (enqd_len + *trb_buff_len) % max_pkt;
1800
1801         /* We got lucky, last normal TRB data on segment is packet aligned. */
1802         if (unalign == 0)
1803                 return 0;
1804
1805         /* Can the last normal TRB be aligned just by splitting it? */
1806         if (*trb_buff_len > unalign) {
1807                 *trb_buff_len -= unalign;
1808                 trace_cdnsp_bounce_align_td_split(preq, *trb_buff_len,
1809                                                   enqd_len, 0, unalign);
1810                 return 0;
1811         }
1812
1813         /*
1814          * We want enqd_len + trb_buff_len to sum up to a number that is
1815          * divisible by the endpoint's wMaxPacketSize. IOW:
1816          * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
1817          */
1818         new_buff_len = max_pkt - (enqd_len % max_pkt);
1819
1820         if (new_buff_len > (preq->request.length - enqd_len))
1821                 new_buff_len = (preq->request.length - enqd_len);
1822
1823         /* Create a bounce buffer of at most max_pkt bytes, pointed to by the last TRB. */
1824         if (preq->direction) {
1825                 sg_pcopy_to_buffer(preq->request.sg,
1826                                    preq->request.num_mapped_sgs,
1827                                    seg->bounce_buf, new_buff_len, enqd_len);
1828                 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
1829                                                  max_pkt, DMA_TO_DEVICE);
1830         } else {
1831                 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
1832                                                  max_pkt, DMA_FROM_DEVICE);
1833         }
1834
1835         if (dma_mapping_error(dev, seg->bounce_dma)) {
1836                 /* Try without aligning. */
1837                 dev_warn(pdev->dev,
1838                          "Failed mapping bounce buffer, not aligning\n");
1839                 return 0;
1840         }
1841
1842         *trb_buff_len = new_buff_len;
1843         seg->bounce_len = new_buff_len;
1844         seg->bounce_offs = enqd_len;
1845
1846         trace_cdnsp_bounce_map(preq, new_buff_len, enqd_len, seg->bounce_dma,
1847                                unalign);
1848
1849         /*
1850          * The bounce buffer was successfully aligned; seg->bounce_dma will
1851          * be used in the transfer TRB as the new transfer buffer address.
1852          */
1853         return 1;
1854 }
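
/*
 * Worked example (hypothetical helper) for the bounce-length math above:
 * with max_pkt = 1024 and enqd_len = 1536, the queued data ends 512 bytes
 * into a packet, so 1024 - (1536 % 1024) = 512 bytes go through the
 * bounce buffer to restore packet alignment.
 */
static void __maybe_unused example_bounce_len_math(void)
{
	u32 max_pkt = 1024, enqd_len = 1536;
	u32 new_buff_len = max_pkt - (enqd_len % max_pkt);

	WARN_ON(new_buff_len != 512);
	WARN_ON((enqd_len + new_buff_len) % max_pkt != 0);
}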
1855
1856 int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
1857 {
1858         unsigned int enqd_len, block_len, trb_buff_len, full_len;
1859         unsigned int start_cycle, num_sgs = 0;
1860         struct cdnsp_generic_trb *start_trb;
1861         u32 field, length_field, remainder;
1862         struct scatterlist *sg = NULL;
1863         bool more_trbs_coming = true;
1864         bool need_zero_pkt = false;
1865         bool zero_len_trb = false;
1866         struct cdnsp_ring *ring;
1867         bool first_trb = true;
1868         unsigned int num_trbs;
1869         struct cdnsp_ep *pep;
1870         u64 addr, send_addr;
1871         int sent_len, ret;
1872
1873         ring = cdnsp_request_to_transfer_ring(pdev, preq);
1874         if (!ring)
1875                 return -EINVAL;
1876
1877         full_len = preq->request.length;
1878
1879         if (preq->request.num_sgs) {
1880                 num_sgs = preq->request.num_sgs;
1881                 sg = preq->request.sg;
1882                 addr = (u64)sg_dma_address(sg);
1883                 block_len = sg_dma_len(sg);
1884                 num_trbs = count_sg_trbs_needed(preq);
1885         } else {
1886                 num_trbs = count_trbs_needed(preq);
1887                 addr = (u64)preq->request.dma;
1888                 block_len = full_len;
1889         }
1890
1891         pep = preq->pep;
1892
1893         /* Deal with request.zero: one more TRB is needed for the ZLP. */
1894         if (preq->request.zero && preq->request.length &&
1895             IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) {
1896                 need_zero_pkt = true;
1897                 num_trbs++;
1898         }
1899
1900         ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
1901         if (ret)
1902                 return ret;
1903
1904         /*
1905          * Don't give the first TRB to the hardware (by toggling the cycle bit)
1906          * until we've finished creating all the other TRBs. The ring's cycle
1907          * state may change as we enqueue the other TRBs, so save it too.
1908          */
1909         start_trb = &ring->enqueue->generic;
1910         start_cycle = ring->cycle_state;
1911         send_addr = addr;
1912
1913         /* Queue the TRBs, even if they are zero-length */
1914         for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len;
1915              enqd_len += trb_buff_len) {
1916                 field = TRB_TYPE(TRB_NORMAL);
1917
1918                 /* TRB buffer should not cross 64KB boundaries */
1919                 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
1920                 trb_buff_len = min(trb_buff_len, block_len);
1921                 if (enqd_len + trb_buff_len > full_len)
1922                         trb_buff_len = full_len - enqd_len;
1923
1924                 /* Don't change the cycle bit of the first TRB until later */
1925                 if (first_trb) {
1926                         first_trb = false;
1927                         if (start_cycle == 0)
1928                                 field |= TRB_CYCLE;
1929                 } else {
1930                         field |= ring->cycle_state;
1931                 }
1932
1933                 /*
1934                  * Chain all the TRBs together; clear the chain bit in the last
1935                  * TRB to indicate it's the last TRB in the chain.
1936                  */
1937                 if (enqd_len + trb_buff_len < full_len || need_zero_pkt) {
1938                         field |= TRB_CHAIN;
1939                         if (cdnsp_trb_is_link(ring->enqueue + 1)) {
1940                                 if (cdnsp_align_td(pdev, preq, enqd_len,
1941                                                    &trb_buff_len,
1942                                                    ring->enq_seg)) {
1943                                         send_addr = ring->enq_seg->bounce_dma;
1944                                         /* Assuming TD won't span 2 segs */
1945                                         preq->td.bounce_seg = ring->enq_seg;
1946                                 }
1947                         }
1948                 }
1949
1950                 if (enqd_len + trb_buff_len >= full_len) {
1951                         if (need_zero_pkt && !zero_len_trb) {
1952                                 zero_len_trb = true;
1953                         } else {
1954                                 zero_len_trb = false;
1955                                 field &= ~TRB_CHAIN;
1956                                 field |= TRB_IOC;
1957                                 more_trbs_coming = false;
1958                                 need_zero_pkt = false;
1959                                 preq->td.last_trb = ring->enqueue;
1960                         }
1961                 }
1962
1963                 /* Only set interrupt on short packet for OUT endpoints. */
1964                 if (!preq->direction)
1965                         field |= TRB_ISP;
1966
1967                 /* Set the TRB length, TD size, and interrupter fields. */
1968                 remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len,
1969                                                full_len, preq,
1970                                                more_trbs_coming,
1971                                                zero_len_trb);
1972
1973                 length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
1974                         TRB_INTR_TARGET(0);
1975
1976                 cdnsp_queue_trb(pdev, ring, more_trbs_coming,
1977                                 lower_32_bits(send_addr),
1978                                 upper_32_bits(send_addr),
1979                                 length_field,
1980                                 field);
1981
1982                 addr += trb_buff_len;
1983                 sent_len = trb_buff_len;
1984                 while (sg && sent_len >= block_len) {
1985                         /* New sg entry */
1986                         --num_sgs;
1987                         sent_len -= block_len;
1988                         if (num_sgs != 0) {
1989                                 sg = sg_next(sg);
1990                                 block_len = sg_dma_len(sg);
1991                                 addr = (u64)sg_dma_address(sg);
1992                                 addr += sent_len;
1993                         }
1994                 }
1995                 block_len -= sent_len;
1996                 send_addr = addr;
1997         }
1998
1999         cdnsp_check_trb_math(preq, enqd_len);
2000         ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id,
2001                                        start_cycle, start_trb);
2002
2003         if (ret)
2004                 preq->td.drbl = 1;
2005
2006         return 0;
2007 }
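
/*
 * Illustration (hypothetical helper, plain integers) of the sg-advance
 * arithmetic at the bottom of the loop above: a TRB (e.g. a bounce-buffer
 * TRB) covering 700 bytes while the current sg entry holds 512 consumes
 * that entry, and the next entry's DMA address is advanced by the
 * remaining 188 bytes.
 */
static void __maybe_unused example_sg_advance_math(void)
{
	int sent_len = 700, block_len = 512;

	if (sent_len >= block_len)
		sent_len -= block_len;	/* bytes already taken from next sg */

	WARN_ON(sent_len != 188);
}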
2008
2009 int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
2010 {
2011         u32 field, length_field, zlp = 0;
2012         struct cdnsp_ep *pep = preq->pep;
2013         struct cdnsp_ring *ep_ring;
2014         int num_trbs;
2015         u32 maxp;
2016         int ret;
2017
2018         ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
2019         if (!ep_ring)
2020                 return -EINVAL;
2021
2022         /* 1 TRB for data, 1 for status */
2023         num_trbs = (pdev->three_stage_setup) ? 2 : 1;
2024
2025         maxp = usb_endpoint_maxp(pep->endpoint.desc);
2026
2027         if (preq->request.zero && preq->request.length &&
2028             (preq->request.length % maxp == 0)) {
2029                 num_trbs++;
2030                 zlp = 1;
2031         }
2032
2033         ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
2034         if (ret)
2035                 return ret;
2036
2037         /* If there's data, queue data TRBs */
2038         if (preq->request.length > 0) {
2039                 field = TRB_TYPE(TRB_DATA);
2040
2041                 if (zlp)
2042                         field |= TRB_CHAIN;
2043                 else
2044                         field |= TRB_IOC | (pdev->ep0_expect_in ? 0 : TRB_ISP);
2045
2046                 if (pdev->ep0_expect_in)
2047                         field |= TRB_DIR_IN;
2048
2049                 length_field = TRB_LEN(preq->request.length) |
2050                                TRB_TD_SIZE(zlp) | TRB_INTR_TARGET(0);
2051
2052                 cdnsp_queue_trb(pdev, ep_ring, true,
2053                                 lower_32_bits(preq->request.dma),
2054                                 upper_32_bits(preq->request.dma), length_field,
2055                                 field | ep_ring->cycle_state |
2056                                 TRB_SETUPID(pdev->setup_id) |
2057                                 pdev->setup_speed);
2058
2059                 if (zlp) {
2060                         field = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
2061
2062                         if (!pdev->ep0_expect_in)
2063                                 field |= TRB_ISP;
2064
2065                         cdnsp_queue_trb(pdev, ep_ring, true,
2066                                         lower_32_bits(preq->request.dma),
2067                                         upper_32_bits(preq->request.dma), 0,
2068                                         field | ep_ring->cycle_state |
2069                                         TRB_SETUPID(pdev->setup_id) |
2070                                         pdev->setup_speed);
2071                 }
2072
2073                 pdev->ep0_stage = CDNSP_DATA_STAGE;
2074         }
2075
2076         /* Save the last TRB in the TD; the status TRB is queued there next. */
2077         preq->td.last_trb = ep_ring->enqueue;
2078
2079         /* Queue status TRB. */
2080         if (preq->request.length == 0)
2081                 field = ep_ring->cycle_state;
2082         else
2083                 field = (ep_ring->cycle_state ^ 1);
2084
2085         if (preq->request.length > 0 && pdev->ep0_expect_in)
2086                 field |= TRB_DIR_IN;
2087
2088         if (pep->ep_state & EP0_HALTED_STATUS) {
2089                 pep->ep_state &= ~EP0_HALTED_STATUS;
2090                 field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL);
2091         } else {
2092                 field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK);
2093         }
2094
2095         cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0),
2096                         field | TRB_IOC | TRB_SETUPID(pdev->setup_id) |
2097                         TRB_TYPE(TRB_STATUS) | pdev->setup_speed);
2098
2099         cdnsp_ring_ep_doorbell(pdev, pep, preq->request.stream_id);
2100
2101         return 0;
2102 }
2103
2104 int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
2105 {
2106         u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx);
2107         int ret = 0;
2108
2109         if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED ||
2110             ep_state == EP_STATE_HALTED) {
2111                 trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx);
2112                 goto ep_stopped;
2113         }
2114
2115         cdnsp_queue_stop_endpoint(pdev, pep->idx);
2116         cdnsp_ring_cmd_db(pdev);
2117         ret = cdnsp_wait_for_cmd_compl(pdev);
2118
2119         trace_cdnsp_handle_cmd_stop_ep(pep->out_ctx);
2120
2121 ep_stopped:
2122         pep->ep_state |= EP_STOPPED;
2123         return ret;
2124 }
2125
2126 int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
2127 {
2128         int ret;
2129
2130         cdnsp_queue_flush_endpoint(pdev, pep->idx);
2131         cdnsp_ring_cmd_db(pdev);
2132         ret = cdnsp_wait_for_cmd_compl(pdev);
2133
2134         trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx);
2135
2136         return ret;
2137 }
2138
2139 /*
2140  * The transfer burst count field of the isochronous TRB defines the number of
2141  * bursts that are required to move all packets in this TD. Only SuperSpeed
2142  * devices can burst, moving up to (bMaxBurst + 1) packets per burst within a
2143  * service interval. This field is zero based: a value of zero means one
2144  * burst. Basically, for everything but SuperSpeed devices, this field will
2145  * be zero.
2146  */
2147 static unsigned int cdnsp_get_burst_count(struct cdnsp_device *pdev,
2148                                           struct cdnsp_request *preq,
2149                                           unsigned int total_packet_count)
2150 {
2151         unsigned int max_burst;
2152
2153         if (pdev->gadget.speed < USB_SPEED_SUPER)
2154                 return 0;
2155
2156         max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
2157         return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
2158 }
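
/*
 * Worked example (hypothetical helper): 10 packets on a SuperSpeed
 * endpoint with bMaxBurst = 3 (up to 4 packets per burst) need
 * DIV_ROUND_UP(10, 4) = 3 bursts, encoded zero-based as TBC = 2.
 */
static void __maybe_unused example_burst_count_math(void)
{
	unsigned int total_packet_count = 10, max_burst = 3;

	WARN_ON(DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1 != 2);
}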
2159
2160 /*
2161  * Returns the number of packets in the last "burst" of packets. This field is
2162  * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
2163  * the last burst packet count is equal to the total number of packets in the
2164  * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
2165  * must contain (bMaxBurst + 1) number of packets, but the last burst can
2166  * contain 1 to (bMaxBurst + 1) packets.
2167  */
2168 static unsigned int
2169         cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev,
2170                                           struct cdnsp_request *preq,
2171                                           unsigned int total_packet_count)
2172 {
2173         unsigned int max_burst;
2174         unsigned int residue;
2175
2176         if (pdev->gadget.speed >= USB_SPEED_SUPER) {
2177                 /* bMaxBurst is zero based: 0 means 1 packet per burst. */
2178                 max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
2179                 residue = total_packet_count % (max_burst + 1);
2180
2181                 /*
2182                  * If residue is zero, the last burst contains (max_burst + 1)
2183                  * number of packets, but the TLBPC field is zero-based.
2184                  */
2185                 if (residue == 0)
2186                         return max_burst;
2187
2188                 return residue - 1;
2189         }
2190         if (total_packet_count == 0)
2191                 return 0;
2192
2193         return total_packet_count - 1;
2194 }
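
/*
 * Worked example (hypothetical helper): with 10 packets and bMaxBurst = 3
 * the bursts hold 4 + 4 + 2 packets; residue = 10 % 4 = 2, so the
 * zero-based TLBPC is 2 - 1 = 1. With 8 packets the residue is 0, the
 * last burst is full, and TLBPC = max_burst = 3.
 */
static void __maybe_unused example_last_burst_math(void)
{
	unsigned int max_burst = 3;

	WARN_ON(10 % (max_burst + 1) - 1 != 1);
	WARN_ON(8 % (max_burst + 1) != 0);
}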
2195
2196 /* Queue an isoc transfer. */
2197 int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
2198                         struct cdnsp_request *preq)
2199 {
2200         unsigned int trb_buff_len, td_len, td_remain_len, block_len;
2201         unsigned int burst_count, last_burst_pkt;
2202         unsigned int total_pkt_count, max_pkt;
2203         struct cdnsp_generic_trb *start_trb;
2204         struct scatterlist *sg = NULL;
2205         bool more_trbs_coming = true;
2206         struct cdnsp_ring *ep_ring;
2207         unsigned int num_sgs = 0;
2208         int running_total = 0;
2209         u32 field, length_field;
2210         u64 addr, send_addr;
2211         int start_cycle;
2212         int trbs_per_td;
2213         int i, sent_len, ret;
2214
2215         ep_ring = preq->pep->ring;
2216
2217         td_len = preq->request.length;
2218
2219         if (preq->request.num_sgs) {
2220                 num_sgs = preq->request.num_sgs;
2221                 sg = preq->request.sg;
2222                 addr = (u64)sg_dma_address(sg);
2223                 block_len = sg_dma_len(sg);
2224                 trbs_per_td = count_sg_trbs_needed(preq);
2225         } else {
2226                 addr = (u64)preq->request.dma;
2227                 block_len = td_len;
2228                 trbs_per_td = count_trbs_needed(preq);
2229         }
2230
2231         ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
2232         if (ret)
2233                 return ret;
2234
2235         start_trb = &ep_ring->enqueue->generic;
2236         start_cycle = ep_ring->cycle_state;
2237         td_remain_len = td_len;
2238         send_addr = addr;
2239
2240         max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
2241         total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
2242
2243         /* A zero-length transfer still involves at least one packet. */
2244         if (total_pkt_count == 0)
2245                 total_pkt_count++;
2246
2247         burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count);
2248         last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq,
2249                                                            total_pkt_count);
2250
2251         /*
2252          * Set isoc-specific data for the first TRB in a TD.
2253          * Prevent the controller from fetching the TRBs by keeping the
2254          * cycle state inverted in the TD's first isoc TRB.
2255          */
2256         field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |
2257                 TRB_SIA | TRB_TBC(burst_count);
2258
2259         if (!start_cycle)
2260                 field |= TRB_CYCLE;
2261
2262         /* Fill the rest of the TRB fields, and remaining normal TRBs. */
2263         for (i = 0; i < trbs_per_td; i++) {
2264                 u32 remainder;
2265
2266                 /* Calculate TRB length. */
2267                 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
2268                 trb_buff_len = min(trb_buff_len, block_len);
2269                 if (trb_buff_len > td_remain_len)
2270                         trb_buff_len = td_remain_len;
2271
2272                 /* Set the TRB length, TD size, & interrupter fields. */
2273                 remainder = cdnsp_td_remainder(pdev, running_total,
2274                                                trb_buff_len, td_len, preq,
2275                                                more_trbs_coming, 0);
2276
2277                 length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
2278                         TRB_INTR_TARGET(0);
2279
2280                 /* Only the first TRB is isoc; overwrite the type otherwise. */
2281                 if (i) {
2282                         field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
2283                         length_field |= TRB_TD_SIZE(remainder);
2284                 } else {
2285                         length_field |= TRB_TD_SIZE_TBC(burst_count);
2286                 }
2287
2288                 /* Only set interrupt on short packet for OUT EPs. */
2289                 if (usb_endpoint_dir_out(preq->pep->endpoint.desc))
2290                         field |= TRB_ISP;
2291
2292                 /* Set the chain bit for all except the last TRB. */
2293                 if (i < trbs_per_td - 1) {
2294                         more_trbs_coming = true;
2295                         field |= TRB_CHAIN;
2296                 } else {
2297                         more_trbs_coming = false;
2298                         preq->td.last_trb = ep_ring->enqueue;
2299                         field |= TRB_IOC;
2300                 }
2301
2302                 cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
2303                                 lower_32_bits(send_addr), upper_32_bits(send_addr),
2304                                 length_field, field);
2305
2306                 running_total += trb_buff_len;
2307                 addr += trb_buff_len;
2308                 td_remain_len -= trb_buff_len;
2309
2310                 sent_len = trb_buff_len;
2311                 while (sg && sent_len >= block_len) {
2312                         /* New sg entry */
2313                         --num_sgs;
2314                         sent_len -= block_len;
2315                         if (num_sgs != 0) {
2316                                 sg = sg_next(sg);
2317                                 block_len = sg_dma_len(sg);
2318                                 addr = (u64)sg_dma_address(sg);
2319                                 addr += sent_len;
2320                         }
2321                 }
2322                 block_len -= sent_len;
2323                 send_addr = addr;
2324         }
2325
2326         /* Check TD length */
2327         if (running_total != td_len) {
2328                 dev_err(pdev->dev, "ISOC TD length mismatch\n");
2329                 ret = -EINVAL;
2330                 goto cleanup;
2331         }
2332
2333         cdnsp_giveback_first_trb(pdev, preq->pep, preq->request.stream_id,
2334                                  start_cycle, start_trb);
2335
2336         return 0;
2337
2338 cleanup:
2339         /* Clean up a partially enqueued isoc transfer. */
2340         list_del_init(&preq->td.td_list);
2341         ep_ring->num_tds--;
2342
2343         /*
2344          * Use the first TD as a temporary variable to turn the TDs we've
2345          * queued into No-ops with a software-owned cycle bit.
2346          * That way the hardware won't accidentally start executing bogus TDs
2347          * when we partially overwrite them.
2348          * td->first_trb and td->start_seg are already set.
2349          */
2350         preq->td.last_trb = ep_ring->enqueue;
2351         /* Every TRB except the first & last will have its cycle bit flipped. */
2352         cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
2353
2354         /* Reset the ring enqueue back to the first TRB and its cycle bit. */
2355         ep_ring->enqueue = preq->td.first_trb;
2356         ep_ring->enq_seg = preq->td.start_seg;
2357         ep_ring->cycle_state = start_cycle;
2358         return ret;
2359 }
2360
2361 /****           Command Ring Operations         ****/
2362 /*
2363  * Generic function for queuing a command TRB on the command ring.
2364  * The driver queues only one command on the ring at a time.
2365  */
2366 static void cdnsp_queue_command(struct cdnsp_device *pdev,
2367                                 u32 field1,
2368                                 u32 field2,
2369                                 u32 field3,
2370                                 u32 field4)
2371 {
2372         cdnsp_prepare_ring(pdev, pdev->cmd_ring, EP_STATE_RUNNING, 1,
2373                            GFP_ATOMIC);
2374
2375         pdev->cmd.command_trb = pdev->cmd_ring->enqueue;
2376
2377         cdnsp_queue_trb(pdev, pdev->cmd_ring, false, field1, field2,
2378                         field3, field4 | pdev->cmd_ring->cycle_state);
2379 }
2380
2381 /* Queue a slot enable or disable request on the command ring */
2382 void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type)
2383 {
2384         cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(trb_type) |
2385                             SLOT_ID_FOR_TRB(pdev->slot_id));
2386 }
2387
2388 /* Queue an address device command TRB */
2389 void cdnsp_queue_address_device(struct cdnsp_device *pdev,
2390                                 dma_addr_t in_ctx_ptr,
2391                                 enum cdnsp_setup_dev setup)
2392 {
2393         cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
2394                             upper_32_bits(in_ctx_ptr), 0,
2395                             TRB_TYPE(TRB_ADDR_DEV) |
2396                             SLOT_ID_FOR_TRB(pdev->slot_id) |
2397                             (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0));
2398 }
2399
2400 /* Queue a reset device command TRB */
2401 void cdnsp_queue_reset_device(struct cdnsp_device *pdev)
2402 {
2403         cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) |
2404                             SLOT_ID_FOR_TRB(pdev->slot_id));
2405 }
2406
2407 /* Queue a configure endpoint command TRB */
2408 void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
2409                                     dma_addr_t in_ctx_ptr)
2410 {
2411         cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
2412                             upper_32_bits(in_ctx_ptr), 0,
2413                             TRB_TYPE(TRB_CONFIG_EP) |
2414                             SLOT_ID_FOR_TRB(pdev->slot_id));
2415 }
2416
2417 /*
2418  * Queue a "Stop Endpoint Command" to stop activity on an endpoint that is
2419  * about to be stopped or suspended.
2420  */
2421 void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
2422 {
2423         cdnsp_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) |
2424                             EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_STOP_RING));
2425 }
2426
2427 /* Set Transfer Ring Dequeue Pointer command. */
2428 void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
2429                                    struct cdnsp_ep *pep,
2430                                    struct cdnsp_dequeue_state *deq_state)
2431 {
2432         u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
2433         u32 trb_slot_id = SLOT_ID_FOR_TRB(pdev->slot_id);
2434         u32 type = TRB_TYPE(TRB_SET_DEQ);
2435         u32 trb_sct = 0;
2436         dma_addr_t addr;
2437
2438         addr = cdnsp_trb_virt_to_dma(deq_state->new_deq_seg,
2439                                      deq_state->new_deq_ptr);
2440
2441         if (deq_state->stream_id)
2442                 trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
2443
2444         cdnsp_queue_command(pdev, lower_32_bits(addr) | trb_sct |
2445                             deq_state->new_cycle_state, upper_32_bits(addr),
2446                             trb_stream_id, trb_slot_id |
2447                             EP_ID_FOR_TRB(pep->idx) | type);
2448 }
2449
2450 void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index)
2451 {
2452         return cdnsp_queue_command(pdev, 0, 0, 0,
2453                                    SLOT_ID_FOR_TRB(pdev->slot_id) |
2454                                    EP_ID_FOR_TRB(ep_index) |
2455                                    TRB_TYPE(TRB_RESET_EP));
2456 }
2457
2458 /*
2459  * Queue a halt endpoint request on the command ring.
2460  */
2461 void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
2462 {
2463         cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) |
2464                             SLOT_ID_FOR_TRB(pdev->slot_id) |
2465                             EP_ID_FOR_TRB(ep_index));
2466 }
2467
2468 /*
2469  * Queue a flush endpoint request on the command ring.
2470  */
2471 void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
2472                                 unsigned int ep_index)
2473 {
2474         cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) |
2475                             SLOT_ID_FOR_TRB(pdev->slot_id) |
2476                             EP_ID_FOR_TRB(ep_index));
2477 }
2478
2479 void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
2480 {
2481         u32 lo, mid;
2482
2483         lo = TRB_FH_TO_PACKET_TYPE(TRB_FH_TR_PACKET) |
2484              TRB_FH_TO_DEVICE_ADDRESS(pdev->device_address);
2485         mid = TRB_FH_TR_PACKET_DEV_NOT |
2486               TRB_FH_TO_NOT_TYPE(TRB_FH_TR_PACKET_FUNCTION_WAKE) |
2487               TRB_FH_TO_INTERFACE(intf_num);
2488
2489         cdnsp_queue_command(pdev, lo, mid, 0,
2490                             TRB_TYPE(TRB_FORCE_HEADER) | SET_PORT_ID(2));
2491 }