[platform/kernel/linux-starfive.git] drivers/usb/cdns3/cdnsp-ring.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cadence CDNSP DRD Driver.
4  *
5  * Copyright (C) 2020 Cadence.
6  *
7  * Author: Pawel Laszczak <pawell@cadence.com>
8  *
9  * Code based on Linux XHCI driver.
10  * Origin: Copyright (C) 2008 Intel Corp
11  */
12
13 /*
14  * Ring initialization rules:
15  * 1. Each segment is initialized to zero, except for link TRBs.
16  * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
17  *    Consumer Cycle State (CCS), depending on ring function.
18  * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
19  *
20  * Ring behavior rules:
21  * 1. A ring is empty if enqueue == dequeue. This means there will always be at
22  *    least one free TRB in the ring. This is useful if you want to turn that
23  *    into a link TRB and expand the ring.
24  * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
25  *    link TRB, then load the pointer with the address in the link TRB. If the
26  *    link TRB had its toggle bit set, you may need to update the ring cycle
27  *    state (see cycle bit rules). You may have to do this multiple times
28  *    until you reach a non-link TRB.
29  * 3. A ring is full if enqueue++ (for the definition of increment above)
30  *    equals the dequeue pointer.
31  *
32  * Cycle bit rules:
33  * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
34  *    in a link TRB, it must toggle the ring cycle state.
35  * 2. When a producer increments an enqueue pointer and encounters a toggle bit
36  *    in a link TRB, it must toggle the ring cycle state.
37  *
38  * Producer rules:
39  * 1. Check if ring is full before you enqueue.
40  * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
41  *    Update enqueue pointer between each write (which may update the ring
42  *    cycle state).
43  * 3. Notify consumer. If SW is the producer, it rings the doorbell for command
44  *    and endpoint rings. If the controller is the producer for the event ring,
45  *    it generates an interrupt according to interrupt moderation rules.
46  *
47  * Consumer rules:
48  * 1. Check if the TRB belongs to you: if the cycle bit == your ring cycle state,
49  *    the TRB is owned by the consumer (see the sketch after this comment).
50  * 2. Update dequeue pointer (which may update the ring cycle state) and
51  *    continue processing TRBs until you reach a TRB which is not owned by you.
52  * 3. Notify the producer. SW is the consumer for the event ring, and it
53  *    updates event ring dequeue pointer. Controller is the consumer for the
54  *    command and endpoint rings; it generates events on the event ring
55  *    for these.
56  */
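
/*
 * A minimal sketch of consumer rule 1 above, for illustration only (not part
 * of this driver; process_trb() and advance_dequeue() are hypothetical
 * helpers):
 *
 *	while ((le32_to_cpu(ring->dequeue->generic.field[3]) & TRB_CYCLE) ==
 *	       ring->cycle_state) {
 *		process_trb(ring->dequeue);
 *		advance_dequeue(ring);
 *	}
 *
 * A TRB is owned by the consumer while the cycle bits match;
 * advance_dequeue() may toggle ring->cycle_state when it passes a link TRB
 * with the toggle bit set.
 */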
57
58 #include <linux/scatterlist.h>
59 #include <linux/dma-mapping.h>
60 #include <linux/delay.h>
61 #include <linux/slab.h>
62 #include <linux/irq.h>
63
64 #include "cdnsp-trace.h"
65 #include "cdnsp-gadget.h"
66
67 /*
68  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
69  * address of the TRB.
70  */
71 dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
72                                  union cdnsp_trb *trb)
73 {
74         unsigned long segment_offset = trb - seg->trbs;
75
76         if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)
77                 return 0;
78
79         return seg->dma + (segment_offset * sizeof(*trb));
80 }
81
82 static bool cdnsp_trb_is_noop(union cdnsp_trb *trb)
83 {
84         return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
85 }
86
87 static bool cdnsp_trb_is_link(union cdnsp_trb *trb)
88 {
89         return TRB_TYPE_LINK_LE32(trb->link.control);
90 }
91
92 bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)
93 {
94         return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
95 }
96
97 bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
98                             struct cdnsp_segment *seg,
99                             union cdnsp_trb *trb)
100 {
101         return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
102 }
103
104 static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb)
105 {
106         return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
107 }
108
109 static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
110 {
111         if (cdnsp_trb_is_link(trb)) {
112                 /* Unchain chained link TRBs. */
113                 trb->link.control &= cpu_to_le32(~TRB_CHAIN);
114         } else {
115                 trb->generic.field[0] = 0;
116                 trb->generic.field[1] = 0;
117                 trb->generic.field[2] = 0;
118                 /* Preserve only the cycle bit of this TRB. */
119                 trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
120                 trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
121         }
122 }
123
124 /*
125  * Updates trb to point to the next TRB in the ring, and updates seg if the next
126  * TRB is in a new segment. This does not skip over link TRBs, and it does not
127  * affect the ring dequeue or enqueue pointers.
128  */
129 static void cdnsp_next_trb(struct cdnsp_device *pdev,
130                            struct cdnsp_ring *ring,
131                            struct cdnsp_segment **seg,
132                            union cdnsp_trb **trb)
133 {
134         if (cdnsp_trb_is_link(*trb)) {
135                 *seg = (*seg)->next;
136                 *trb = ((*seg)->trbs);
137         } else {
138                 (*trb)++;
139         }
140 }
141
142 /*
143  * See Cycle bit rules. SW is the consumer for the event ring only.
144  * Don't make a ring full of link TRBs. That would be dumb and this would loop.
145  */
146 void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
147 {
148         /* event ring doesn't have link trbs, check for last trb. */
149         if (ring->type == TYPE_EVENT) {
150                 if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
151                         ring->dequeue++;
152                         goto out;
153                 }
154
155                 if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
156                         ring->cycle_state ^= 1;
157
158                 ring->deq_seg = ring->deq_seg->next;
159                 ring->dequeue = ring->deq_seg->trbs;
160                 goto out;
161         }
162
163         /* All other rings have link trbs. */
164         if (!cdnsp_trb_is_link(ring->dequeue)) {
165                 ring->dequeue++;
166                 ring->num_trbs_free++;
167         }
168         while (cdnsp_trb_is_link(ring->dequeue)) {
169                 ring->deq_seg = ring->deq_seg->next;
170                 ring->dequeue = ring->deq_seg->trbs;
171         }
172 out:
173         trace_cdnsp_inc_deq(ring);
174 }
175
176 /*
177  * See Cycle bit rules. SW is the consumer for the event ring only.
178  * Don't make a ring full of link TRBs. That would be dumb and this would loop.
179  *
180  * If we've just enqueued a TRB that is in the middle of a TD (meaning the
181  * chain bit is set), then set the chain bit in all the following link TRBs.
182  * If we've enqueued the last TRB in a TD, make sure the following link TRBs
183  * have their chain bit cleared (so that each Link TRB is a separate TD).
184  *
185  * @more_trbs_coming: Will more TRBs be enqueued before ringing the doorbell?
186  */
187 static void cdnsp_inc_enq(struct cdnsp_device *pdev,
188                           struct cdnsp_ring *ring,
189                           bool more_trbs_coming)
190 {
191         union cdnsp_trb *next;
192         u32 chain;
193
194         chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
195
196         /* If this is not an event ring, there is one less usable TRB. */
197         if (!cdnsp_trb_is_link(ring->enqueue))
198                 ring->num_trbs_free--;
199         next = ++(ring->enqueue);
200
201         /* Update the enqueue pointer further if that was a link TRB. */
202         while (cdnsp_trb_is_link(next)) {
203                 /*
204                  * If the caller doesn't plan on enqueuing more TDs before
205                  * ringing the doorbell, then we don't want to give the link TRB
206                  * to the hardware just yet. We'll give the link TRB back in
207                  * cdnsp_prepare_ring() just before we enqueue the TD at the
208                  * top of the ring.
209                  */
210                 if (!chain && !more_trbs_coming)
211                         break;
212
213                 next->link.control &= cpu_to_le32(~TRB_CHAIN);
214                 next->link.control |= cpu_to_le32(chain);
215
216                 /* Give this link TRB to the hardware */
217                 wmb();
218                 next->link.control ^= cpu_to_le32(TRB_CYCLE);
219
220                 /* Toggle the cycle bit after the last ring segment. */
221                 if (cdnsp_link_trb_toggles_cycle(next))
222                         ring->cycle_state ^= 1;
223
224                 ring->enq_seg = ring->enq_seg->next;
225                 ring->enqueue = ring->enq_seg->trbs;
226                 next = ring->enqueue;
227         }
228
229         trace_cdnsp_inc_enq(ring);
230 }
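
/*
 * For illustration only (not taken from this driver): a TD built from three
 * TRBs is queued with TRB_CHAIN set on all but the last TRB, so that a link
 * TRB crossed in the middle of the TD inherits the chain bit (per the rules
 * above), while a link TRB after the complete TD does not:
 *
 *	TRB0: TRB_CHAIN set
 *	TRB1: TRB_CHAIN set
 *	TRB2: TRB_CHAIN clear (last TRB of the TD)
 */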
231
232 /*
233  * Check to see if there's room to enqueue num_trbs on the ring and make sure
234  * enqueue pointer will not advance into dequeue segment.
235  */
236 static bool cdnsp_room_on_ring(struct cdnsp_device *pdev,
237                                struct cdnsp_ring *ring,
238                                unsigned int num_trbs)
239 {
240         int num_trbs_in_deq_seg;
241
242         if (ring->num_trbs_free < num_trbs)
243                 return false;
244
245         if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
246                 num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
247
248                 if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
249                         return false;
250         }
251
252         return true;
253 }
254
255 /*
256  * Workaround for L1: controller has issue with resuming from L1 after
257  * setting doorbell for endpoint during L1 state. This function forces
258  * resume signal in such case.
259  */
260 static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
261 {
262         if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable)
263                 cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0);
264 }
265
266 /* Ring the doorbell after placing a command on the ring. */
267 void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
268 {
269         writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
270 }
271
272 /*
273  * Ring the doorbell after placing a transfer on the ring.
274  * Returns true if doorbell was set, otherwise false.
275  */
276 static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
277                                    struct cdnsp_ep *pep,
278                                    unsigned int stream_id)
279 {
280         __le32 __iomem *reg_addr = &pdev->dba->ep_db;
281         unsigned int ep_state = pep->ep_state;
282         unsigned int db_value;
283
284         /*
285          * Don't ring the doorbell for this endpoint if endpoint is halted or
286          * disabled.
287          */
288         if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED))
289                 return false;
290
291         /* For stream capable endpoints, the driver can ring the doorbell only twice. */
292         if (pep->ep_state & EP_HAS_STREAMS) {
293                 if (pep->stream_info.drbls_count >= 2)
294                         return false;
295
296                 pep->stream_info.drbls_count++;
297         }
298
299         pep->ep_state &= ~EP_STOPPED;
300
301         if (pep->idx == 0 && pdev->ep0_stage == CDNSP_DATA_STAGE &&
302             !pdev->ep0_expect_in)
303                 db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id);
304         else
305                 db_value = DB_VALUE(pep->idx, stream_id);
306
307         trace_cdnsp_tr_drbl(pep, stream_id);
308
309         writel(db_value, reg_addr);
310
311         cdnsp_force_l0_go(pdev);
312
313         /* Doorbell was set. */
314         return true;
315 }
316
317 /*
318  * Get the right ring for the given pep and stream_id.
319  * If the endpoint supports streams, boundary check the USB request's stream ID.
320  * If the endpoint doesn't support streams, return the singular endpoint ring.
321  */
322 static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev,
323                                                   struct cdnsp_ep *pep,
324                                                   unsigned int stream_id)
325 {
326         if (!(pep->ep_state & EP_HAS_STREAMS))
327                 return pep->ring;
328
329         if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) {
330                 dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n",
331                         pep->name, stream_id);
332                 return NULL;
333         }
334
335         return pep->stream_info.stream_rings[stream_id];
336 }
337
338 static struct cdnsp_ring *
339         cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev,
340                                        struct cdnsp_request *preq)
341 {
342         return cdnsp_get_transfer_ring(pdev, preq->pep,
343                                        preq->request.stream_id);
344 }
345
346 /* Ring the doorbell for any rings with pending requests. */
347 void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
348                                           struct cdnsp_ep *pep)
349 {
350         struct cdnsp_stream_info *stream_info;
351         unsigned int stream_id;
352         int ret;
353
354         if (pep->ep_state & EP_DIS_IN_RROGRESS)
355                 return;
356
357         /* A ring has a pending request if its TD list is not empty. */
358         if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) {
359                 if (pep->ring && !list_empty(&pep->ring->td_list))
360                         cdnsp_ring_ep_doorbell(pdev, pep, 0);
361                 return;
362         }
363
364         stream_info = &pep->stream_info;
365
366         for (stream_id = 1; stream_id < stream_info->num_streams; stream_id++) {
367                 struct cdnsp_td *td, *td_temp;
368                 struct cdnsp_ring *ep_ring;
369
370                 if (stream_info->drbls_count >= 2)
371                         return;
372
373                 ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
374                 if (!ep_ring)
375                         continue;
376
377                 if (!ep_ring->stream_active || ep_ring->stream_rejected)
378                         continue;
379
380                 list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
381                                          td_list) {
382                         if (td->drbl)
383                                 continue;
384
385                         ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
386                         if (ret)
387                                 td->drbl = 1;
388                 }
389         }
390 }
391
392 /*
393  * Get the hw dequeue pointer the controller stopped on, either directly from
394  * the endpoint context or, if streams are in use, from the stream context.
395  * The lowest four bits of the returned hw_dequeue carry the cycle state and,
396  * possibly, the stream context type.
397  */
398 static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
399                             unsigned int ep_index,
400                             unsigned int stream_id)
401 {
402         struct cdnsp_stream_ctx *st_ctx;
403         struct cdnsp_ep *pep;
404
405         pep = &pdev->eps[ep_index];
406
407         if (pep->ep_state & EP_HAS_STREAMS) {
408                 st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
409                 return le64_to_cpu(st_ctx->stream_ring);
410         }
411
412         return le64_to_cpu(pep->out_ctx->deq);
413 }
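
/*
 * Sketch of how the value returned above is consumed by the callers below
 * (cdnsp_find_new_dequeue_state() and cdnsp_remove_request()): bit 0 carries
 * the consumer cycle state and the stopped TRB's DMA address is recovered by
 * masking off the low four bits, e.g.:
 *
 *	hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, stream_id);
 *	cycle_state = hw_deq & 0x1;
 *	trb_dma = hw_deq & ~0xf;
 */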
414
415 /*
416  * Move the controller endpoint ring dequeue pointer past cur_td.
417  * Record the new state of the controller endpoint ring dequeue segment,
418  * dequeue pointer, and new consumer cycle state in state.
419  * Update internal representation of the ring's dequeue pointer.
420  *
421  * We do this in three jumps:
422  *  - First we update our new ring state to be the same as when the
423  *    controller stopped.
424  *  - Then we traverse the ring to find the segment that contains
425  *    the last TRB in the TD. We toggle the controller's new cycle state
426  *    when we pass any link TRBs with the toggle cycle bit set.
427  *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
428  *    if we've moved it past a link TRB with the toggle cycle bit set.
429  */
430 static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev,
431                                          struct cdnsp_ep *pep,
432                                          unsigned int stream_id,
433                                          struct cdnsp_td *cur_td,
434                                          struct cdnsp_dequeue_state *state)
435 {
436         bool td_last_trb_found = false;
437         struct cdnsp_segment *new_seg;
438         struct cdnsp_ring *ep_ring;
439         union cdnsp_trb *new_deq;
440         bool cycle_found = false;
441         u64 hw_dequeue;
442
443         ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
444         if (!ep_ring)
445                 return;
446
447         /*
448          * Dig out the cycle state saved by the controller during the
449          * stop endpoint command.
450          */
451         hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id);
452         new_seg = ep_ring->deq_seg;
453         new_deq = ep_ring->dequeue;
454         state->new_cycle_state = hw_dequeue & 0x1;
455         state->stream_id = stream_id;
456
457         /*
458          * We want to find the pointer, segment and cycle state of the new trb
459          * (the one after current TD's last_trb). We know the cycle state at
460          * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
461          * found.
462          */
463         do {
464                 if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq)
465                     == (dma_addr_t)(hw_dequeue & ~0xf)) {
466                         cycle_found = true;
467
468                         if (td_last_trb_found)
469                                 break;
470                 }
471
472                 if (new_deq == cur_td->last_trb)
473                         td_last_trb_found = true;
474
475                 if (cycle_found && cdnsp_trb_is_link(new_deq) &&
476                     cdnsp_link_trb_toggles_cycle(new_deq))
477                         state->new_cycle_state ^= 0x1;
478
479                 cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq);
480
481                 /* Search wrapped around, bail out. */
482                 if (new_deq == pep->ring->dequeue) {
483                         dev_err(pdev->dev,
484                                 "Error: Failed finding new dequeue state\n");
485                         state->new_deq_seg = NULL;
486                         state->new_deq_ptr = NULL;
487                         return;
488                 }
489
490         } while (!cycle_found || !td_last_trb_found);
491
492         state->new_deq_seg = new_seg;
493         state->new_deq_ptr = new_deq;
494
495         trace_cdnsp_new_deq_state(state);
496 }
497
498 /*
499  * flip_cycle means flip the cycle bit of all but the first and last TRB.
500  * (The last TRB actually points to the ring enqueue pointer, which is not part
501  * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
502  */
503 static void cdnsp_td_to_noop(struct cdnsp_device *pdev,
504                              struct cdnsp_ring *ep_ring,
505                              struct cdnsp_td *td,
506                              bool flip_cycle)
507 {
508         struct cdnsp_segment *seg = td->start_seg;
509         union cdnsp_trb *trb = td->first_trb;
510
511         while (1) {
512                 cdnsp_trb_to_noop(trb, TRB_TR_NOOP);
513
514                 /* flip cycle if asked to */
515                 if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
516                         trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
517
518                 if (trb == td->last_trb)
519                         break;
520
521                 cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
522         }
523 }
524
525 /*
526  * This TD is defined by the TRBs starting at start_trb in start_seg and ending
527  * at end_trb, which may be in another segment. If the suspect DMA address is a
528  * TRB in this TD, this function returns that TRB's segment. Otherwise it
529  * returns 0.
530  */
531 static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev,
532                                              struct cdnsp_segment *start_seg,
533                                              union cdnsp_trb *start_trb,
534                                              union cdnsp_trb *end_trb,
535                                              dma_addr_t suspect_dma)
536 {
537         struct cdnsp_segment *cur_seg;
538         union cdnsp_trb *temp_trb;
539         dma_addr_t end_seg_dma;
540         dma_addr_t end_trb_dma;
541         dma_addr_t start_dma;
542
543         start_dma = cdnsp_trb_virt_to_dma(start_seg, start_trb);
544         cur_seg = start_seg;
545
546         do {
547                 if (start_dma == 0)
548                         return NULL;
549
550                 temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1];
551                 /* We may get an event for a Link TRB in the middle of a TD */
552                 end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb);
553                 /* If the end TRB isn't in this segment, this is set to 0 */
554                 end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb);
555
556                 trace_cdnsp_looking_trb_in_td(suspect_dma, start_dma,
557                                               end_trb_dma, cur_seg->dma,
558                                               end_seg_dma);
559
560                 if (end_trb_dma > 0) {
561                         /*
562                          * The end TRB is in this segment, so suspect should
563                          * be here
564                          */
565                         if (start_dma <= end_trb_dma) {
566                                 if (suspect_dma >= start_dma &&
567                                     suspect_dma <= end_trb_dma) {
568                                         return cur_seg;
569                                 }
570                         } else {
571                                 /*
572                                  * Case for one segment with a
573                                  * TD wrapped around to the top
574                                  */
575                                 if ((suspect_dma >= start_dma &&
576                                      suspect_dma <= end_seg_dma) ||
577                                     (suspect_dma >= cur_seg->dma &&
578                                      suspect_dma <= end_trb_dma)) {
579                                         return cur_seg;
580                                 }
581                         }
582
583                         return NULL;
584                 }
585
586                 /* Might still be somewhere in this segment */
587                 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
588                         return cur_seg;
589
590                 cur_seg = cur_seg->next;
591                 start_dma = cdnsp_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
592         } while (cur_seg != start_seg);
593
594         return NULL;
595 }
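
/*
 * Example of the wrap-around case handled above: when a single-segment TD
 * starts near the end of the segment and wraps back to its beginning,
 * suspect_dma is a hit if it lies either in [start_dma, end_seg_dma] or in
 * [cur_seg->dma, end_trb_dma].
 */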
596
597 static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev,
598                                          struct cdnsp_ring *ring,
599                                          struct cdnsp_td *td)
600 {
601         struct cdnsp_segment *seg = td->bounce_seg;
602         struct cdnsp_request *preq;
603         size_t len;
604
605         if (!seg)
606                 return;
607
608         preq = td->preq;
609
610         trace_cdnsp_bounce_unmap(td->preq, seg->bounce_len, seg->bounce_offs,
611                                  seg->bounce_dma, 0);
612
613         if (!preq->direction) {
614                 dma_unmap_single(pdev->dev, seg->bounce_dma,
615                                  ring->bounce_buf_len,  DMA_TO_DEVICE);
616                 return;
617         }
618
619         dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len,
620                          DMA_FROM_DEVICE);
621
622         /* For IN transfers we need to copy the data from bounce to sg. */
623         len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs,
624                                    seg->bounce_buf, seg->bounce_len,
625                                    seg->bounce_offs);
626         if (len != seg->bounce_len)
627                 dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n",
628                          len, seg->bounce_len);
629
630         seg->bounce_len = 0;
631         seg->bounce_offs = 0;
632 }
633
634 static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev,
635                              struct cdnsp_ep *pep,
636                              struct cdnsp_dequeue_state *deq_state)
637 {
638         struct cdnsp_ring *ep_ring;
639         int ret;
640
641         if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) {
642                 cdnsp_ring_doorbell_for_active_rings(pdev, pep);
643                 return 0;
644         }
645
646         cdnsp_queue_new_dequeue_state(pdev, pep, deq_state);
647         cdnsp_ring_cmd_db(pdev);
648         ret = cdnsp_wait_for_cmd_compl(pdev);
649
650         trace_cdnsp_handle_cmd_set_deq(cdnsp_get_slot_ctx(&pdev->out_ctx));
651         trace_cdnsp_handle_cmd_set_deq_ep(pep->out_ctx);
652
653         /*
654          * Update the ring's dequeue segment and dequeue pointer
655          * to reflect the new position.
656          */
657         ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);
658
659         if (cdnsp_trb_is_link(ep_ring->dequeue)) {
660                 ep_ring->deq_seg = ep_ring->deq_seg->next;
661                 ep_ring->dequeue = ep_ring->deq_seg->trbs;
662         }
663
664         while (ep_ring->dequeue != deq_state->new_deq_ptr) {
665                 ep_ring->num_trbs_free++;
666                 ep_ring->dequeue++;
667
668                 if (cdnsp_trb_is_link(ep_ring->dequeue)) {
669                         if (ep_ring->dequeue == deq_state->new_deq_ptr)
670                                 break;
671
672                         ep_ring->deq_seg = ep_ring->deq_seg->next;
673                         ep_ring->dequeue = ep_ring->deq_seg->trbs;
674                 }
675         }
676
677         /*
678          * There was probably a timeout while handling the Set Dequeue Pointer
679          * command. It's a critical error and the controller will be stopped.
680          */
681         if (ret)
682                 return -ESHUTDOWN;
683
684         /* Restart any rings with pending requests */
685         cdnsp_ring_doorbell_for_active_rings(pdev, pep);
686
687         return 0;
688 }
689
690 int cdnsp_remove_request(struct cdnsp_device *pdev,
691                          struct cdnsp_request *preq,
692                          struct cdnsp_ep *pep)
693 {
694         struct cdnsp_dequeue_state deq_state;
695         struct cdnsp_td *cur_td = NULL;
696         struct cdnsp_ring *ep_ring;
697         struct cdnsp_segment *seg;
698         int status = -ECONNRESET;
699         int ret = 0;
700         u64 hw_deq;
701
702         memset(&deq_state, 0, sizeof(deq_state));
703
704         trace_cdnsp_remove_request(pep->out_ctx);
705         trace_cdnsp_remove_request_td(preq);
706
707         cur_td = &preq->td;
708         ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
709
710         /*
711          * If we stopped on the TD we need to cancel, then we have to
712          * move the controller endpoint ring dequeue pointer past
713          * this TD.
714          */
715         hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id);
716         hw_deq &= ~0xf;
717
718         seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
719                               cur_td->last_trb, hw_deq);
720
721         if (seg && (pep->ep_state & EP_ENABLED))
722                 cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
723                                              cur_td, &deq_state);
724         else
725                 cdnsp_td_to_noop(pdev, ep_ring, cur_td, false);
726
727         /*
728          * The event handler won't see a completion for this TD anymore,
729          * so remove it from the endpoint ring's TD list.
730          */
731         list_del_init(&cur_td->td_list);
732         ep_ring->num_tds--;
733         pep->stream_info.td_count--;
734
735         /*
736          * During disconnection all endpoints will be disabled, so we don't
737          * have to worry about updating the dequeue pointer.
738          */
739         if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) {
740                 status = -ESHUTDOWN;
741                 ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
742         }
743
744         cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td);
745         cdnsp_gadget_giveback(pep, cur_td->preq, status);
746
747         return ret;
748 }
749
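/*
 * Select the active port for a port event. Switching between the USB2 and
 * USB3 port requires the device slot to be disabled and re-enabled; LPM
 * (USB2) or the U1/U2 timeouts (USB3) are then (re)configured.
 */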
750 static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
751 {
752         struct cdnsp_port *port = pdev->active_port;
753         u8 old_port = 0;
754
755         if (port && port->port_num == port_id)
756                 return 0;
757
758         if (port)
759                 old_port = port->port_num;
760
761         if (port_id == pdev->usb2_port.port_num) {
762                 port = &pdev->usb2_port;
763         } else if (port_id == pdev->usb3_port.port_num) {
764                 port  = &pdev->usb3_port;
765         } else {
766                 dev_err(pdev->dev, "Port event with invalid port ID %d\n",
767                         port_id);
768                 return -EINVAL;
769         }
770
771         if (port_id != old_port) {
772                 cdnsp_disable_slot(pdev);
773                 pdev->active_port = port;
774                 cdnsp_enable_slot(pdev);
775         }
776
777         if (port_id == pdev->usb2_port.port_num)
778                 cdnsp_set_usb2_hardware_lpm(pdev, NULL, 1);
779         else
780                 writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1),
781                        &pdev->usb3_port.regs->portpmsc);
782
783         return 0;
784 }
785
786 static void cdnsp_handle_port_status(struct cdnsp_device *pdev,
787                                      union cdnsp_trb *event)
788 {
789         struct cdnsp_port_regs __iomem *port_regs;
790         u32 portsc, cmd_regs;
791         bool port2 = false;
792         u32 link_state;
793         u32 port_id;
794
795         /* Port status change events always have a successful completion code */
796         if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
797                 dev_err(pdev->dev, "ERR: incorrect PSC event\n");
798
799         port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
800
801         if (cdnsp_update_port_id(pdev, port_id))
802                 goto cleanup;
803
804         port_regs = pdev->active_port->regs;
805
806         if (port_id == pdev->usb2_port.port_num)
807                 port2 = true;
808
809 new_event:
810         portsc = readl(&port_regs->portsc);
811         writel(cdnsp_port_state_to_neutral(portsc) |
812                (portsc & PORT_CHANGE_BITS), &port_regs->portsc);
813
814         trace_cdnsp_handle_port_status(pdev->active_port->port_num, portsc);
815
816         pdev->gadget.speed = cdnsp_port_speed(portsc);
817         link_state = portsc & PORT_PLS_MASK;
818
819         /* Port Link State change detected. */
820         if ((portsc & PORT_PLC)) {
821                 if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING)  &&
822                     link_state == XDEV_RESUME) {
823                         cmd_regs = readl(&pdev->op_regs->command);
824                         if (!(cmd_regs & CMD_R_S))
825                                 goto cleanup;
826
827                         if (DEV_SUPERSPEED_ANY(portsc)) {
828                                 cdnsp_set_link_state(pdev, &port_regs->portsc,
829                                                      XDEV_U0);
830
831                                 cdnsp_resume_gadget(pdev);
832                         }
833                 }
834
835                 if ((pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
836                     link_state == XDEV_U0) {
837                         pdev->cdnsp_state &= ~CDNSP_WAKEUP_PENDING;
838
839                         cdnsp_force_header_wakeup(pdev, 1);
840                         cdnsp_ring_cmd_db(pdev);
841                         cdnsp_wait_for_cmd_compl(pdev);
842                 }
843
844                 if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 &&
845                     !DEV_SUPERSPEED_ANY(portsc))
846                         cdnsp_resume_gadget(pdev);
847
848                 if (link_state == XDEV_U3 &&  pdev->link_state != XDEV_U3)
849                         cdnsp_suspend_gadget(pdev);
850
851                 pdev->link_state = link_state;
852         }
853
854         if (portsc & PORT_CSC) {
855                 /* Detach device. */
856                 if (pdev->gadget.connected && !(portsc & PORT_CONNECT))
857                         cdnsp_disconnect_gadget(pdev);
858
859                 /* Attach device. */
860                 if (portsc & PORT_CONNECT) {
861                         if (!port2)
862                                 cdnsp_irq_reset(pdev);
863
864                         usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED);
865                 }
866         }
867
868         /* Port reset. */
869         if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) {
870                 cdnsp_irq_reset(pdev);
871                 pdev->u1_allowed = 0;
872                 pdev->u2_allowed = 0;
873                 pdev->may_wakeup = 0;
874         }
875
876         if (portsc & PORT_OCC) /* PORT_OCC assumed for over-current change, as in xHCI */
877                 dev_err(pdev->dev, "Port Over Current detected\n");
878
879         if (portsc & PORT_CEC)
880                 dev_err(pdev->dev, "Port Configure Error detected\n");
881
882         if (readl(&port_regs->portsc) & PORT_CHANGE_BITS)
883                 goto new_event;
884
885 cleanup:
886         cdnsp_inc_deq(pdev, pdev->event_ring);
887 }
888
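/*
 * Finish off a TD: unmap any bounce buffer, drop the TD from the ring's TD
 * list and give the request back to the gadget layer.
 */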
889 static void cdnsp_td_cleanup(struct cdnsp_device *pdev,
890                              struct cdnsp_td *td,
891                              struct cdnsp_ring *ep_ring,
892                              int *status)
893 {
894         struct cdnsp_request *preq = td->preq;
895
896         /* if a bounce buffer was used to align this td then unmap it */
897         cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);
898
899         /*
900          * If the controller said we transferred more data than the buffer
901          * length, play it safe and say we didn't transfer anything.
902          */
903         if (preq->request.actual > preq->request.length) {
904                 preq->request.actual = 0;
905                 *status = 0;
906         }
907
908         list_del_init(&td->td_list);
909         ep_ring->num_tds--;
910         preq->pep->stream_info.td_count--;
911
912         cdnsp_gadget_giveback(preq->pep, preq, *status);
913 }
914
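/*
 * Common completion path for a TD: unless the TD was stopped (and may be
 * restarted later), advance the ring dequeue pointer past it and clean it up.
 */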
915 static void cdnsp_finish_td(struct cdnsp_device *pdev,
916                             struct cdnsp_td *td,
917                             struct cdnsp_transfer_event *event,
918                             struct cdnsp_ep *ep,
919                             int *status)
920 {
921         struct cdnsp_ring *ep_ring;
922         u32 trb_comp_code;
923
924         ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
925         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
926
927         if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
928             trb_comp_code == COMP_STOPPED ||
929             trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
930                 /*
931                  * The Endpoint Stop Command completion will take care of any
932                  * stopped TDs. A stopped TD may be restarted, so don't update
933                  * the ring dequeue pointer or take this TD off any lists yet.
934                  */
935                 return;
936         }
937
938         /* Update ring dequeue pointer */
939         while (ep_ring->dequeue != td->last_trb)
940                 cdnsp_inc_deq(pdev, ep_ring);
941
942         cdnsp_inc_deq(pdev, ep_ring);
943
944         cdnsp_td_cleanup(pdev, td, ep_ring, status);
945 }
946
947 /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
948 static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
949                                  struct cdnsp_ring *ring,
950                                  union cdnsp_trb *stop_trb)
951 {
952         struct cdnsp_segment *seg = ring->deq_seg;
953         union cdnsp_trb *trb = ring->dequeue;
954         u32 sum;
955
956         for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) {
957                 if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb))
958                         sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
959         }
960         return sum;
961 }
962
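/*
 * Hand a fully queued TD over to the hardware by writing the correct cycle
 * bit into its first TRB after the barrier below, then ring the doorbell
 * unless the stream capable endpoint is still waiting for its first PRIME.
 */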
963 static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev,
964                                     struct cdnsp_ep *pep,
965                                     unsigned int stream_id,
966                                     int start_cycle,
967                                     struct cdnsp_generic_trb *start_trb)
968 {
969         /*
970          * Pass all the TRBs to the hardware at once and make sure this write
971          * isn't reordered.
972          */
973         wmb();
974
975         if (start_cycle)
976                 start_trb->field[3] |= cpu_to_le32(start_cycle);
977         else
978                 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
979
980         if ((pep->ep_state & EP_HAS_STREAMS) &&
981             !pep->stream_info.first_prime_det) {
982                 trace_cdnsp_wait_for_prime(pep, stream_id);
983                 return 0;
984         }
985
986         return cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
987 }
988
989 /*
990  * Process control tds, update USB request status and actual_length.
991  */
992 static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
993                                   struct cdnsp_td *td,
994                                   union cdnsp_trb *event_trb,
995                                   struct cdnsp_transfer_event *event,
996                                   struct cdnsp_ep *pep,
997                                   int *status)
998 {
999         struct cdnsp_ring *ep_ring;
1000         u32 remaining;
1001         u32 trb_type;
1002
1003         trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3]));
1004         ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1005         remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1006
1007         /*
1008          * if on data stage then update the actual_length of the USB
1009          * request and flag it as set, so it won't be overwritten in the event
1010          * for the last TRB.
1011          */
1012         if (trb_type == TRB_DATA) {
1013                 td->request_length_set = true;
1014                 td->preq->request.actual = td->preq->request.length - remaining;
1015         }
1016
1017         /* at status stage */
1018         if (!td->request_length_set)
1019                 td->preq->request.actual = td->preq->request.length;
1020
1021         if (pdev->ep0_stage == CDNSP_DATA_STAGE && pep->number == 0 &&
1022             pdev->three_stage_setup) {
1023                 td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
1024                                 td_list);
1025                 pdev->ep0_stage = CDNSP_STATUS_STAGE;
1026
1027                 cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state,
1028                                          &td->last_trb->generic);
1029                 return;
1030         }
1031
1032         *status = 0;
1033
1034         cdnsp_finish_td(pdev, td, event, pep, status);
1035 }
1036
1037 /*
1038  * Process isochronous tds, update usb request status and actual_length.
1039  */
1040 static void cdnsp_process_isoc_td(struct cdnsp_device *pdev,
1041                                   struct cdnsp_td *td,
1042                                   union cdnsp_trb *ep_trb,
1043                                   struct cdnsp_transfer_event *event,
1044                                   struct cdnsp_ep *pep,
1045                                   int status)
1046 {
1047         struct cdnsp_request *preq = td->preq;
1048         u32 remaining, requested, ep_trb_len;
1049         bool sum_trbs_for_length = false;
1050         struct cdnsp_ring *ep_ring;
1051         u32 trb_comp_code;
1052         u32 td_length;
1053
1054         ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1055         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1056         remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1057         ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
1058
1059         requested = preq->request.length;
1060
1061         /* handle completion code */
1062         switch (trb_comp_code) {
1063         case COMP_SUCCESS:
1064                 preq->request.status = 0;
1065                 break;
1066         case COMP_SHORT_PACKET:
1067                 preq->request.status = 0;
1068                 sum_trbs_for_length = true;
1069                 break;
1070         case COMP_ISOCH_BUFFER_OVERRUN:
1071         case COMP_BABBLE_DETECTED_ERROR:
1072                 preq->request.status = -EOVERFLOW;
1073                 break;
1074         case COMP_STOPPED:
1075                 sum_trbs_for_length = true;
1076                 break;
1077         case COMP_STOPPED_SHORT_PACKET:
1078                 /* field normally containing residue now contains transferred */
1079                 preq->request.status  = 0;
1080                 requested = remaining;
1081                 break;
1082         case COMP_STOPPED_LENGTH_INVALID:
1083                 requested = 0;
1084                 remaining = 0;
1085                 break;
1086         default:
1087                 sum_trbs_for_length = true;
1088                 preq->request.status = -1;
1089                 break;
1090         }
1091
1092         if (sum_trbs_for_length) {
1093                 td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb);
1094                 td_length += ep_trb_len - remaining;
1095         } else {
1096                 td_length = requested;
1097         }
1098
1099         td->preq->request.actual += td_length;
1100
1101         cdnsp_finish_td(pdev, td, event, pep, &status);
1102 }
1103
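/*
 * Complete a missed isochronous TD: give the request back with -EXDEV and
 * zero actual length, and move the ring dequeue pointer past the TD.
 */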
1104 static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev,
1105                                struct cdnsp_td *td,
1106                                struct cdnsp_transfer_event *event,
1107                                struct cdnsp_ep *pep,
1108                                int status)
1109 {
1110         struct cdnsp_ring *ep_ring;
1111
1112         ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1113         td->preq->request.status = -EXDEV;
1114         td->preq->request.actual = 0;
1115
1116         /* Update ring dequeue pointer */
1117         while (ep_ring->dequeue != td->last_trb)
1118                 cdnsp_inc_deq(pdev, ep_ring);
1119
1120         cdnsp_inc_deq(pdev, ep_ring);
1121
1122         cdnsp_td_cleanup(pdev, td, ep_ring, &status);
1123 }
1124
1125 /*
1126  * Process bulk and interrupt tds, update usb request status and actual_length.
1127  */
1128 static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev,
1129                                        struct cdnsp_td *td,
1130                                        union cdnsp_trb *ep_trb,
1131                                        struct cdnsp_transfer_event *event,
1132                                        struct cdnsp_ep *ep,
1133                                        int *status)
1134 {
1135         u32 remaining, requested, ep_trb_len;
1136         struct cdnsp_ring *ep_ring;
1137         u32 trb_comp_code;
1138
1139         ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1140         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1141         remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
1142         ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
1143         requested = td->preq->request.length;
1144
1145         switch (trb_comp_code) {
1146         case COMP_SUCCESS:
1147         case COMP_SHORT_PACKET:
1148                 *status = 0;
1149                 break;
1150         case COMP_STOPPED_SHORT_PACKET:
1151                 td->preq->request.actual = remaining;
1152                 goto finish_td;
1153         case COMP_STOPPED_LENGTH_INVALID:
1154                 /* Stopped on ep trb with invalid length, exclude it. */
1155                 ep_trb_len = 0;
1156                 remaining = 0;
1157                 break;
1158         }
1159
1160         if (ep_trb == td->last_trb)
1161                 ep_trb_len = requested - remaining;
1162         else
1163                 ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) +
1164                                                    ep_trb_len - remaining;
1165         td->preq->request.actual = ep_trb_len;
1166
1167 finish_td:
1168         ep->stream_info.drbls_count--;
1169
1170         cdnsp_finish_td(pdev, td, event, ep, status);
1171 }
1172
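/*
 * Handle an NRDY transfer event for a stream capable endpoint: a PRIME
 * acknowledge from the host activates all stream rings, while a rejected
 * stream is marked inactive and its TDs lose their doorbell flag so the
 * doorbell can be rung again for the active rings.
 */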
1173 static void cdnsp_handle_tx_nrdy(struct cdnsp_device *pdev,
1174                                  struct cdnsp_transfer_event *event)
1175 {
1176         struct cdnsp_generic_trb *generic;
1177         struct cdnsp_ring *ep_ring;
1178         struct cdnsp_ep *pep;
1179         int cur_stream;
1180         int ep_index;
1181         int host_sid;
1182         int dev_sid;
1183
1184         generic = (struct cdnsp_generic_trb *)event;
1185         ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1186         dev_sid = TRB_TO_DEV_STREAM(le32_to_cpu(generic->field[0]));
1187         host_sid = TRB_TO_HOST_STREAM(le32_to_cpu(generic->field[2]));
1188
1189         pep = &pdev->eps[ep_index];
1190
1191         if (!(pep->ep_state & EP_HAS_STREAMS))
1192                 return;
1193
1194         if (host_sid == STREAM_PRIME_ACK) {
1195                 pep->stream_info.first_prime_det = 1;
1196                 for (cur_stream = 1; cur_stream < pep->stream_info.num_streams;
1197                     cur_stream++) {
1198                         ep_ring = pep->stream_info.stream_rings[cur_stream];
1199                         ep_ring->stream_active = 1;
1200                         ep_ring->stream_rejected = 0;
1201                 }
1202         }
1203
1204         if (host_sid == STREAM_REJECTED) {
1205                 struct cdnsp_td *td, *td_temp;
1206
1207                 pep->stream_info.drbls_count--;
1208                 ep_ring = pep->stream_info.stream_rings[dev_sid];
1209                 ep_ring->stream_active = 0;
1210                 ep_ring->stream_rejected = 1;
1211
1212                 list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
1213                                          td_list) {
1214                         td->drbl = 0;
1215                 }
1216         }
1217
1218         cdnsp_ring_doorbell_for_active_rings(pdev, pep);
1219 }
1220
1221 /*
1222  * If this function returns an error condition, it means it got a Transfer
1223  * event with a corrupted TRB DMA address or the endpoint is disabled.
1224  */
1225 static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
1226                                  struct cdnsp_transfer_event *event)
1227 {
1228         const struct usb_endpoint_descriptor *desc;
1229         bool handling_skipped_tds = false;
1230         struct cdnsp_segment *ep_seg;
1231         struct cdnsp_ring *ep_ring;
1232         int status = -EINPROGRESS;
1233         union cdnsp_trb *ep_trb;
1234         dma_addr_t ep_trb_dma;
1235         struct cdnsp_ep *pep;
1236         struct cdnsp_td *td;
1237         u32 trb_comp_code;
1238         int invalidate;
1239         int ep_index;
1240
1241         invalidate = le32_to_cpu(event->flags) & TRB_EVENT_INVALIDATE;
1242         ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
1243         trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
1244         ep_trb_dma = le64_to_cpu(event->buffer);
1245
1246         pep = &pdev->eps[ep_index];
1247         ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
1248
1249         /*
1250          * If the device is disconnected then all requests will be dequeued
1251          * by the upper layers as part of the disconnect sequence.
1252          * We don't want to handle such events, to avoid racing.
1253          */
1254         if (invalidate || !pdev->gadget.connected)
1255                 goto cleanup;
1256
1257         if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) {
1258                 trace_cdnsp_ep_disabled(pep->out_ctx);
1259                 goto err_out;
1260         }
1261
1262         /* Some transfer events don't always point to a TRB. */
1263         if (!ep_ring) {
1264                 switch (trb_comp_code) {
1265                 case COMP_INVALID_STREAM_TYPE_ERROR:
1266                 case COMP_INVALID_STREAM_ID_ERROR:
1267                 case COMP_RING_UNDERRUN:
1268                 case COMP_RING_OVERRUN:
1269                         goto cleanup;
1270                 default:
1271                         dev_err(pdev->dev, "ERROR: %s event for unknown ring\n",
1272                                 pep->name);
1273                         goto err_out;
1274                 }
1275         }
1276
1277         /* Look for some error cases that need special treatment. */
1278         switch (trb_comp_code) {
1279         case COMP_BABBLE_DETECTED_ERROR:
1280                 status = -EOVERFLOW;
1281                 break;
1282         case COMP_RING_UNDERRUN:
1283         case COMP_RING_OVERRUN:
1284                 /*
1285                  * When the Isoch ring is empty, the controller will generate
1286                  * a Ring Overrun Event for IN Isoch endpoint or Ring
1287                  * Underrun Event for OUT Isoch endpoint.
1288                  */
1289                 goto cleanup;
1290         case COMP_MISSED_SERVICE_ERROR:
1291                 /*
1292                  * When a missed service error is encountered, one or more
1293                  * isoc TDs may have been missed by the controller.
1294                  * Set the endpoint's skip flag; complete the missed TDs as
1295                  * short transfers the next time the ring is processed.
1296                  */
1297                 pep->skip = true;
1298                 break;
1299         }
1300
1301         do {
1302                 /*
1303                  * This TRB should be in the TD at the head of this ring's TD
1304                  * list.
1305                  */
1306                 if (list_empty(&ep_ring->td_list)) {
1307                         /*
1308                          * Don't print warnings if it's due to a stopped
1309                          * endpoint generating an extra completion event, or
1310                          * an event for the last TRB of a short TD we already
1311                          * got a short event for.
1312                          * The short TD is already removed from the TD list.
1313                          */
1314                         if (!(trb_comp_code == COMP_STOPPED ||
1315                               trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
1316                               ep_ring->last_td_was_short))
1317                                 trace_cdnsp_trb_without_td(ep_ring,
1318                                         (struct cdnsp_generic_trb *)event);
1319
1320                         if (pep->skip) {
1321                                 pep->skip = false;
1322                                 trace_cdnsp_ep_list_empty_with_skip(pep, 0);
1323                         }
1324
1325                         goto cleanup;
1326                 }
1327
1328                 td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
1329                                 td_list);
1330
1331                 /* Is this a TRB in the currently executing TD? */
1332                 ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
1333                                          ep_ring->dequeue, td->last_trb,
1334                                          ep_trb_dma);
1335
1336                 /*
1337                  * Skip the Force Stopped Event. The event_trb(ep_trb_dma)
1338                  * of the FSE is not in the current TD pointed to by
1339                  * ep_ring->dequeue, because the hardware dequeue pointer is
1340                  * still at the previous TRB of the current TD. The previous
1341                  * TRB may be a Link TRB or the last TRB of the previous TD.
1342                  * The command completion handler will take care of the rest.
1343                  */
1344                 if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
1345                                 trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
1346                         pep->skip = false;
1347                         goto cleanup;
1348                 }
1349
1350                 desc = td->preq->pep->endpoint.desc;
1351                 if (!ep_seg) {
1352                         if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) {
1353                                 /* Something is busted, give up! */
1354                                 dev_err(pdev->dev,
1355                                         "ERROR Transfer event TRB DMA ptr not "
1356                                         "part of current TD ep_index %d "
1357                                         "comp_code %u\n", ep_index,
1358                                         trb_comp_code);
1359                                 return -EINVAL;
1360                         }
1361
1362                         cdnsp_skip_isoc_td(pdev, td, event, pep, status);
1363                         goto cleanup;
1364                 }
1365
1366                 if (trb_comp_code == COMP_SHORT_PACKET)
1367                         ep_ring->last_td_was_short = true;
1368                 else
1369                         ep_ring->last_td_was_short = false;
1370
1371                 if (pep->skip) {
1372                         pep->skip = false;
1373                         cdnsp_skip_isoc_td(pdev, td, event, pep, status);
1374                         goto cleanup;
1375                 }
1376
1377                 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
1378                                        / sizeof(*ep_trb)];
1379
1380                 trace_cdnsp_handle_transfer(ep_ring,
1381                                             (struct cdnsp_generic_trb *)ep_trb);
1382
1383                 if (cdnsp_trb_is_noop(ep_trb))
1384                         goto cleanup;
1385
1386                 if (usb_endpoint_xfer_control(desc))
1387                         cdnsp_process_ctrl_td(pdev, td, ep_trb, event, pep,
1388                                               &status);
1389                 else if (usb_endpoint_xfer_isoc(desc))
1390                         cdnsp_process_isoc_td(pdev, td, ep_trb, event, pep,
1391                                               status);
1392                 else
1393                         cdnsp_process_bulk_intr_td(pdev, td, ep_trb, event, pep,
1394                                                    &status);
1395 cleanup:
1396                 handling_skipped_tds = pep->skip;
1397
1398                 /*
1399                  * Do not update event ring dequeue pointer if we're in a loop
1400                  * processing missed TDs.
1401                  */
1402                 if (!handling_skipped_tds)
1403                         cdnsp_inc_deq(pdev, pdev->event_ring);
1404
1405         /*
1406          * If pep->skip is set, it means there are missed TDs on the
1407          * endpoint ring that need to be taken care of.
1408          * Process them as short transfers until we reach the TD pointed
1409          * to by the event.
1410          */
1411         } while (handling_skipped_tds);
1412         return 0;
1413
1414 err_out:
1415         dev_err(pdev->dev, "@%016llx %08x %08x %08x %08x\n",
1416                 (unsigned long long)
1417                 cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
1418                                       pdev->event_ring->dequeue),
1419                  lower_32_bits(le64_to_cpu(event->buffer)),
1420                  upper_32_bits(le64_to_cpu(event->buffer)),
1421                  le32_to_cpu(event->transfer_len),
1422                  le32_to_cpu(event->flags));
1423         return -EINVAL;
1424 }
1425
1426 /*
1427  * This function handles one event on the event ring.
1428  * Returns true if there may be more events to process (the caller should
1429  * call it again), and false otherwise.
1430  */
1431 static bool cdnsp_handle_event(struct cdnsp_device *pdev)
1432 {
1433         unsigned int comp_code;
1434         union cdnsp_trb *event;
1435         bool update_ptrs = true;
1436         u32 cycle_bit;
1437         int ret = 0;
1438         u32 flags;
1439
1440         event = pdev->event_ring->dequeue;
1441         flags = le32_to_cpu(event->event_cmd.flags);
1442         cycle_bit = (flags & TRB_CYCLE);
1443
1444         /* Does the controller or driver own the TRB? */
1445         if (cycle_bit != pdev->event_ring->cycle_state)
1446                 return false;
1447
1448         trace_cdnsp_handle_event(pdev->event_ring, &event->generic);
1449
1450         /*
1451          * Barrier between reading the TRB_CYCLE (valid) flag above and any
1452          * reads of the event's flags/data below.
1453          */
1454         rmb();
1455
1456         switch (flags & TRB_TYPE_BITMASK) {
1457         case TRB_TYPE(TRB_COMPLETION):
1458                 /*
1459                  * Command can't be handled in interrupt context so just
1460                  * increment command ring dequeue pointer.
1461                  */
1462                 cdnsp_inc_deq(pdev, pdev->cmd_ring);
1463                 break;
1464         case TRB_TYPE(TRB_PORT_STATUS):
1465                 cdnsp_handle_port_status(pdev, event);
1466                 update_ptrs = false;
1467                 break;
1468         case TRB_TYPE(TRB_TRANSFER):
1469                 ret = cdnsp_handle_tx_event(pdev, &event->trans_event);
1470                 if (ret >= 0)
1471                         update_ptrs = false;
1472                 break;
1473         case TRB_TYPE(TRB_SETUP):
1474                 pdev->ep0_stage = CDNSP_SETUP_STAGE;
1475                 pdev->setup_id = TRB_SETUPID_TO_TYPE(flags);
1476                 pdev->setup_speed = TRB_SETUP_SPEEDID(flags);
1477                 pdev->setup = *((struct usb_ctrlrequest *)
1478                                 &event->trans_event.buffer);
1479
1480                 cdnsp_setup_analyze(pdev);
1481                 break;
1482         case TRB_TYPE(TRB_ENDPOINT_NRDY):
1483                 cdnsp_handle_tx_nrdy(pdev, &event->trans_event);
1484                 break;
1485         case TRB_TYPE(TRB_HC_EVENT): {
1486                 comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
1487
1488                 switch (comp_code) {
1489                 case COMP_EVENT_RING_FULL_ERROR:
1490                         dev_err(pdev->dev, "Event Ring Full\n");
1491                         break;
1492                 default:
1493                         dev_err(pdev->dev, "Controller error code 0x%02x\n",
1494                                 comp_code);
1495                 }
1496
1497                 break;
1498         }
1499         case TRB_TYPE(TRB_MFINDEX_WRAP):
1500         case TRB_TYPE(TRB_DRB_OVERFLOW):
1501                 break;
1502         default:
1503                 dev_warn(pdev->dev, "ERROR unknown event type %ld\n",
1504                          TRB_FIELD_TO_TYPE(flags));
1505         }
1506
1507         if (update_ptrs)
1508                 /* Update SW event ring dequeue pointer. */
1509                 cdnsp_inc_deq(pdev, pdev->event_ring);
1510
1511         /*
1512          * Caller will call us again to check if there are more items
1513          * on the event ring.
1514          */
1515         return true;
1516 }
1517
1518 irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
1519 {
1520         struct cdnsp_device *pdev = (struct cdnsp_device *)data;
1521         union cdnsp_trb *event_ring_deq;
1522         unsigned long flags;
1523         int counter = 0;
1524
1525         spin_lock_irqsave(&pdev->lock, flags);
1526
1527         if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
1528                 /*
1529                  * While the driver is being removed or stopped, there may
1530                  * still be a deferred, not yet handled interrupt. It should
1531                  * not be treated as an error; simply ignore it.
1532                  */
1533                 if (pdev->gadget_driver)
1534                         cdnsp_died(pdev);
1535
1536                 spin_unlock_irqrestore(&pdev->lock, flags);
1537                 return IRQ_HANDLED;
1538         }
1539
1540         event_ring_deq = pdev->event_ring->dequeue;
1541
1542         while (cdnsp_handle_event(pdev)) {
1543                 if (++counter >= TRBS_PER_EV_DEQ_UPDATE) {
1544                         cdnsp_update_erst_dequeue(pdev, event_ring_deq, 0);
1545                         event_ring_deq = pdev->event_ring->dequeue;
1546                         counter = 0;
1547                 }
1548         }
1549
1550         cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
1551
1552         spin_unlock_irqrestore(&pdev->lock, flags);
1553
1554         return IRQ_HANDLED;
1555 }
1556
1557 irqreturn_t cdnsp_irq_handler(int irq, void *priv)
1558 {
1559         struct cdnsp_device *pdev = (struct cdnsp_device *)priv;
1560         u32 irq_pending;
1561         u32 status;
1562
1563         status = readl(&pdev->op_regs->status);
1564
1565         if (status == ~(u32)0) {
1566                 cdnsp_died(pdev);
1567                 return IRQ_HANDLED;
1568         }
1569
1570         if (!(status & STS_EINT))
1571                 return IRQ_NONE;
1572
1573         writel(status | STS_EINT, &pdev->op_regs->status);
1574         irq_pending = readl(&pdev->ir_set->irq_pending);
1575         irq_pending |= IMAN_IP;
1576         writel(irq_pending, &pdev->ir_set->irq_pending);
1577
1578         if (status & STS_FATAL) {
1579                 cdnsp_died(pdev);
1580                 return IRQ_HANDLED;
1581         }
1582
1583         return IRQ_WAKE_THREAD;
1584 }
1585
1586 /*
1587  * Generic function for queuing a TRB on a ring.
1588  * The caller must have checked to make sure there's room on the ring.
1589  *
1590  * @more_trbs_coming:   Will more TRBs be enqueued before ringing the doorbell?
1591  */
1592 static void cdnsp_queue_trb(struct cdnsp_device *pdev, struct cdnsp_ring *ring,
1593                             bool more_trbs_coming, u32 field1, u32 field2,
1594                             u32 field3, u32 field4)
1595 {
1596         struct cdnsp_generic_trb *trb;
1597
1598         trb = &ring->enqueue->generic;
1599
1600         trb->field[0] = cpu_to_le32(field1);
1601         trb->field[1] = cpu_to_le32(field2);
1602         trb->field[2] = cpu_to_le32(field3);
1603         trb->field[3] = cpu_to_le32(field4);
1604
1605         trace_cdnsp_queue_trb(ring, trb);
1606         cdnsp_inc_enq(pdev, ring, more_trbs_coming);
1607 }
1608
1609 /*
1610  * Does various checks on the endpoint ring, and makes it ready to
1611  * queue num_trbs.
1612  */
1613 static int cdnsp_prepare_ring(struct cdnsp_device *pdev,
1614                               struct cdnsp_ring *ep_ring,
1615                               u32 ep_state,
1616                               unsigned int num_trbs,
1617                               gfp_t mem_flags)
1618 {
1619         unsigned int num_trbs_needed;
1620
1621         /* Make sure the endpoint has been added to controller schedule. */
1622         switch (ep_state) {
1623         case EP_STATE_STOPPED:
1624         case EP_STATE_RUNNING:
1625         case EP_STATE_HALTED:
1626                 break;
1627         default:
1628                 dev_err(pdev->dev, "ERROR: incorrect endpoint state\n");
1629                 return -EINVAL;
1630         }
1631
1632         while (1) {
1633                 if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs))
1634                         break;
1635
1636                 trace_cdnsp_no_room_on_ring("try ring expansion");
1637
1638                 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
1639                 if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed,
1640                                          mem_flags)) {
1641                         dev_err(pdev->dev, "Ring expansion failed\n");
1642                         return -ENOMEM;
1643                 }
1644         }
1645
1646         while (cdnsp_trb_is_link(ep_ring->enqueue)) {
1647                 ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
1648                 /* The cycle bit must be set as the last operation. */
1649                 wmb();
1650                 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
1651
1652                 /* Toggle the cycle bit after the last ring segment. */
1653                 if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
1654                         ep_ring->cycle_state ^= 1;
1655                 ep_ring->enq_seg = ep_ring->enq_seg->next;
1656                 ep_ring->enqueue = ep_ring->enq_seg->trbs;
1657         }
1658         return 0;
1659 }
1660
1661 static int cdnsp_prepare_transfer(struct cdnsp_device *pdev,
1662                                   struct cdnsp_request *preq,
1663                                   unsigned int num_trbs)
1664 {
1665         struct cdnsp_ring *ep_ring;
1666         int ret;
1667
1668         ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep,
1669                                           preq->request.stream_id);
1670         if (!ep_ring)
1671                 return -EINVAL;
1672
1673         ret = cdnsp_prepare_ring(pdev, ep_ring,
1674                                  GET_EP_CTX_STATE(preq->pep->out_ctx),
1675                                  num_trbs, GFP_ATOMIC);
1676         if (ret)
1677                 return ret;
1678
1679         INIT_LIST_HEAD(&preq->td.td_list);
1680         preq->td.preq = preq;
1681
1682         /* Add this TD to the tail of the endpoint ring's TD list. */
1683         list_add_tail(&preq->td.td_list, &ep_ring->td_list);
1684         ep_ring->num_tds++;
1685         preq->pep->stream_info.td_count++;
1686
1687         preq->td.start_seg = ep_ring->enq_seg;
1688         preq->td.first_trb = ep_ring->enqueue;
1689
1690         return 0;
1691 }
1692
1693 static unsigned int cdnsp_count_trbs(u64 addr, u64 len)
1694 {
1695         unsigned int num_trbs;
1696
1697         num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
1698                                 TRB_MAX_BUFF_SIZE);
1699         if (num_trbs == 0)
1700                 num_trbs++;
1701
1702         return num_trbs;
1703 }
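
     /*
      * Illustrative example for cdnsp_count_trbs() above (values chosen
      * arbitrarily): with TRB_MAX_BUFF_SIZE of 64 KiB, a 0x2000 byte buffer
      * that starts at offset 0xF000 within a 64 KiB region needs
      * DIV_ROUND_UP(0x2000 + 0xF000, 0x10000) = 2 TRBs, because it crosses
      * one 64 KiB boundary. A zero-length request still gets one TRB.
      */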
1704
1705 static unsigned int count_trbs_needed(struct cdnsp_request *preq)
1706 {
1707         return cdnsp_count_trbs(preq->request.dma, preq->request.length);
1708 }
1709
1710 static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
1711 {
1712         unsigned int i, len, full_len, num_trbs = 0;
1713         struct scatterlist *sg;
1714
1715         full_len = preq->request.length;
1716
1717         for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) {
1718                 len = sg_dma_len(sg);
1719                 num_trbs += cdnsp_count_trbs(sg_dma_address(sg), len);
1720                 len = min(len, full_len);
1721                 full_len -= len;
1722                 if (full_len == 0)
1723                         break;
1724         }
1725
1726         return num_trbs;
1727 }
1728
1729 static unsigned int count_isoc_trbs_needed(struct cdnsp_request *preq)
1730 {
1731         return cdnsp_count_trbs(preq->request.dma, preq->request.length);
1732 }
1733
1734 static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
1735 {
1736         if (running_total != preq->request.length)
1737                 dev_err(preq->pep->pdev->dev,
1738                         "%s - Miscalculated tx length, "
1739                         "queued %#x, asked for %#x (%d)\n",
1740                         preq->pep->name, running_total,
1741                         preq->request.length, preq->request.actual);
1742 }
1743
1744 /*
1745  * TD size is the number of max packet sized packets remaining in the TD
1746  * (*not* including this TRB).
1747  *
1748  * Total TD packet count = total_packet_count =
1749  *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
1750  *
1751  * Packets transferred up to and including this TRB = packets_transferred =
1752  *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
1753  *
1754  * TD size = total_packet_count - packets_transferred
1755  *
1756  * It must fit in bits 21:17, so it can't be bigger than 31.
1757  * This is taken care of in the TRB_TD_SIZE() macro
1758  *
1759  * The last TRB in a TD must have the TD size set to zero.
1760  */
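     /*
      * Illustrative example (values chosen arbitrarily): for a 3000 byte TD
      * with wMaxPacketSize = 1024, total_packet_count = DIV_ROUND_UP(3000,
      * 1024) = 3. While queuing the first TRB (transferred = 0,
      * trb_buff_len = 1024) the TD size is 3 - (1024 / 1024) = 2, for the
      * second TRB it is 3 - (2048 / 1024) = 1, and the last TRB gets a TD
      * size of zero through the early return below.
      */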
1761 static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
1762                               int transferred,
1763                               int trb_buff_len,
1764                               unsigned int td_total_len,
1765                               struct cdnsp_request *preq,
1766                               bool more_trbs_coming)
1767 {
1768         u32 maxp, total_packet_count;
1769
1770         /* One TRB with a zero-length data packet. */
1771         if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
1772             trb_buff_len == td_total_len)
1773                 return 0;
1774
1775         maxp = usb_endpoint_maxp(preq->pep->endpoint.desc);
1776         total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
1777
1778         /* Queuing functions don't count the current TRB into transferred. */
1779         return (total_packet_count - ((transferred + trb_buff_len) / maxp));
1780 }
1781
1782 static int cdnsp_align_td(struct cdnsp_device *pdev,
1783                           struct cdnsp_request *preq, u32 enqd_len,
1784                           u32 *trb_buff_len, struct cdnsp_segment *seg)
1785 {
1786         struct device *dev = pdev->dev;
1787         unsigned int unalign;
1788         unsigned int max_pkt;
1789         u32 new_buff_len;
1790
1791         max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
1792         unalign = (enqd_len + *trb_buff_len) % max_pkt;
1793
1794         /* We got lucky, last normal TRB data on segment is packet aligned. */
1795         if (unalign == 0)
1796                 return 0;
1797
1798         /* Is the last normal TRB alignable by splitting it? */
1799         if (*trb_buff_len > unalign) {
1800                 *trb_buff_len -= unalign;
1801                 trace_cdnsp_bounce_align_td_split(preq, *trb_buff_len,
1802                                                   enqd_len, 0, unalign);
1803                 return 0;
1804         }
1805
1806         /*
1807          * We want enqd_len + trb_buff_len to sum up to a number that is
1808          * divisible by the endpoint's wMaxPacketSize. IOW:
1809          * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
1810          */
1811         new_buff_len = max_pkt - (enqd_len % max_pkt);
1812
1813         if (new_buff_len > (preq->request.length - enqd_len))
1814                 new_buff_len = (preq->request.length - enqd_len);
1815
1816         /* Create a max_pkt sized bounce buffer pointed to by the last TRB. */
1817         if (preq->direction) {
1818                 sg_pcopy_to_buffer(preq->request.sg,
1819                                    preq->request.num_mapped_sgs,
1820                                    seg->bounce_buf, new_buff_len, enqd_len);
1821                 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
1822                                                  max_pkt, DMA_TO_DEVICE);
1823         } else {
1824                 seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
1825                                                  max_pkt, DMA_FROM_DEVICE);
1826         }
1827
1828         if (dma_mapping_error(dev, seg->bounce_dma)) {
1829                 /* Try without aligning. */
1830                 dev_warn(pdev->dev,
1831                          "Failed mapping bounce buffer, not aligning\n");
1832                 return 0;
1833         }
1834
1835         *trb_buff_len = new_buff_len;
1836         seg->bounce_len = new_buff_len;
1837         seg->bounce_offs = enqd_len;
1838
1839         trace_cdnsp_bounce_map(preq, new_buff_len, enqd_len, seg->bounce_dma,
1840                                unalign);
1841
1842         /*
1843          * The bounce buffer was successfully aligned and seg->bounce_dma will
1844          * be used in the transfer TRB as the new transfer buffer address.
1845          */
1846         return 1;
1847 }
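
     /*
      * Illustrative example for cdnsp_align_td() above (values chosen
      * arbitrarily): with max_pkt = 512, enqd_len = 1000 and a candidate
      * *trb_buff_len of 200, unalign = (1000 + 200) % 512 = 176. Because
      * 200 > 176 the TRB is simply shortened to 200 - 176 = 24 bytes, so
      * 1000 + 24 = 1024 ends on a packet boundary. If the TRB were too
      * short to split, the transfer would instead go through the segment's
      * bounce buffer (seg->bounce_dma).
      */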
1848
1849 int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
1850 {
1851         unsigned int enqd_len, block_len, trb_buff_len, full_len;
1852         unsigned int start_cycle, num_sgs = 0;
1853         struct cdnsp_generic_trb *start_trb;
1854         u32 field, length_field, remainder;
1855         struct scatterlist *sg = NULL;
1856         bool more_trbs_coming = true;
1857         bool need_zero_pkt = false;
1858         bool zero_len_trb = false;
1859         struct cdnsp_ring *ring;
1860         bool first_trb = true;
1861         unsigned int num_trbs;
1862         struct cdnsp_ep *pep;
1863         u64 addr, send_addr;
1864         int sent_len, ret;
1865
1866         ring = cdnsp_request_to_transfer_ring(pdev, preq);
1867         if (!ring)
1868                 return -EINVAL;
1869
1870         full_len = preq->request.length;
1871
1872         if (preq->request.num_sgs) {
1873                 num_sgs = preq->request.num_sgs;
1874                 sg = preq->request.sg;
1875                 addr = (u64)sg_dma_address(sg);
1876                 block_len = sg_dma_len(sg);
1877                 num_trbs = count_sg_trbs_needed(preq);
1878         } else {
1879                 num_trbs = count_trbs_needed(preq);
1880                 addr = (u64)preq->request.dma;
1881                 block_len = full_len;
1882         }
1883
1884         pep = preq->pep;
1885
1886         /* Deal with request.zero - need one more td/trb. */
1887         if (preq->request.zero && preq->request.length &&
1888             IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) {
1889                 need_zero_pkt = true;
1890                 num_trbs++;
1891         }
1892
1893         ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
1894         if (ret)
1895                 return ret;
1896
1897         /*
1898          * Don't give the first TRB to the hardware (by toggling the cycle bit)
1899          * until we've finished creating all the other TRBs. The ring's cycle
1900          * state may change as we enqueue the other TRBs, so save it too.
1901          */
1902         start_trb = &ring->enqueue->generic;
1903         start_cycle = ring->cycle_state;
1904         send_addr = addr;
1905
1906         /* Queue the TRBs, even if they are zero-length */
1907         for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len;
1908              enqd_len += trb_buff_len) {
1909                 field = TRB_TYPE(TRB_NORMAL);
1910
1911                 /* TRB buffer should not cross 64KB boundaries */
1912                 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
1913                 trb_buff_len = min(trb_buff_len, block_len);
1914                 if (enqd_len + trb_buff_len > full_len)
1915                         trb_buff_len = full_len - enqd_len;
1916
1917                 /* Don't change the cycle bit of the first TRB until later */
1918                 if (first_trb) {
1919                         first_trb = false;
1920                         if (start_cycle == 0)
1921                                 field |= TRB_CYCLE;
1922                 } else {
1923                         field |= ring->cycle_state;
1924                 }
1925
1926                 /*
1927                  * Chain all the TRBs together; clear the chain bit in the last
1928                  * TRB to indicate it's the last TRB in the chain.
1929                  */
1930                 if (enqd_len + trb_buff_len < full_len || need_zero_pkt) {
1931                         field |= TRB_CHAIN;
1932                         if (cdnsp_trb_is_link(ring->enqueue + 1)) {
1933                                 if (cdnsp_align_td(pdev, preq, enqd_len,
1934                                                    &trb_buff_len,
1935                                                    ring->enq_seg)) {
1936                                         send_addr = ring->enq_seg->bounce_dma;
1937                                         /* Assuming TD won't span 2 segs */
1938                                         preq->td.bounce_seg = ring->enq_seg;
1939                                 }
1940                         }
1941                 }
1942
1943                 if (enqd_len + trb_buff_len >= full_len) {
1944                         if (need_zero_pkt)
1945                                 zero_len_trb = !zero_len_trb;
1946
1947                         field &= ~TRB_CHAIN;
1948                         field |= TRB_IOC;
1949                         more_trbs_coming = false;
1950                         preq->td.last_trb = ring->enqueue;
1951                 }
1952
1953                 /* Only set interrupt on short packet for OUT endpoints. */
1954                 if (!preq->direction)
1955                         field |= TRB_ISP;
1956
1957                 /* Set the TRB length, TD size, and interrupter fields. */
1958                 remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len,
1959                                                full_len, preq,
1960                                                more_trbs_coming);
1961
1962                 length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
1963                         TRB_INTR_TARGET(0);
1964
1965                 cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb,
1966                                 lower_32_bits(send_addr),
1967                                 upper_32_bits(send_addr),
1968                                 length_field,
1969                                 field);
1970
1971                 addr += trb_buff_len;
1972                 sent_len = trb_buff_len;
1973                 while (sg && sent_len >= block_len) {
1974                         /* New sg entry */
1975                         --num_sgs;
1976                         sent_len -= block_len;
1977                         if (num_sgs != 0) {
1978                                 sg = sg_next(sg);
1979                                 block_len = sg_dma_len(sg);
1980                                 addr = (u64)sg_dma_address(sg);
1981                                 addr += sent_len;
1982                         }
1983                 }
1984                 block_len -= sent_len;
1985                 send_addr = addr;
1986         }
1987
1988         cdnsp_check_trb_math(preq, enqd_len);
1989         ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id,
1990                                        start_cycle, start_trb);
1991
1992         if (ret)
1993                 preq->td.drbl = 1;
1994
1995         return 0;
1996 }
1997
1998 int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
1999 {
2000         u32 field, length_field, remainder;
2001         struct cdnsp_ep *pep = preq->pep;
2002         struct cdnsp_ring *ep_ring;
2003         int num_trbs;
2004         int ret;
2005
2006         ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
2007         if (!ep_ring)
2008                 return -EINVAL;
2009
2010         /* 1 TRB for data, 1 for status */
2011         num_trbs = (pdev->three_stage_setup) ? 2 : 1;
2012
2013         ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
2014         if (ret)
2015                 return ret;
2016
2017         /* If there's data, queue data TRBs */
2018         if (pdev->ep0_expect_in)
2019                 field = TRB_TYPE(TRB_DATA) | TRB_IOC;
2020         else
2021                 field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC;
2022
2023         if (preq->request.length > 0) {
2024                 remainder = cdnsp_td_remainder(pdev, 0, preq->request.length,
2025                                                preq->request.length, preq, 1);
2026
2027                 length_field = TRB_LEN(preq->request.length) |
2028                                 TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0);
2029
2030                 if (pdev->ep0_expect_in)
2031                         field |= TRB_DIR_IN;
2032
2033                 cdnsp_queue_trb(pdev, ep_ring, true,
2034                                 lower_32_bits(preq->request.dma),
2035                                 upper_32_bits(preq->request.dma), length_field,
2036                                 field | ep_ring->cycle_state |
2037                                 TRB_SETUPID(pdev->setup_id) |
2038                                 pdev->setup_speed);
2039
2040                 pdev->ep0_stage = CDNSP_DATA_STAGE;
2041         }
2042
2043         /* Save the pointer to the last TRB in the TD. */
2044         preq->td.last_trb = ep_ring->enqueue;
2045
2046         /* Queue status TRB. */
2047         if (preq->request.length == 0)
2048                 field = ep_ring->cycle_state;
2049         else
2050                 field = (ep_ring->cycle_state ^ 1);
2051
2052         if (preq->request.length > 0 && pdev->ep0_expect_in)
2053                 field |= TRB_DIR_IN;
2054
2055         if (pep->ep_state & EP0_HALTED_STATUS) {
2056                 pep->ep_state &= ~EP0_HALTED_STATUS;
2057                 field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL);
2058         } else {
2059                 field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK);
2060         }
2061
2062         cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0),
2063                         field | TRB_IOC | TRB_SETUPID(pdev->setup_id) |
2064                         TRB_TYPE(TRB_STATUS) | pdev->setup_speed);
2065
2066         cdnsp_ring_ep_doorbell(pdev, pep, preq->request.stream_id);
2067
2068         return 0;
2069 }
2070
2071 int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
2072 {
2073         u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx);
2074         int ret = 0;
2075
2076         if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED) {
2077                 trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx);
2078                 goto ep_stopped;
2079         }
2080
2081         cdnsp_queue_stop_endpoint(pdev, pep->idx);
2082         cdnsp_ring_cmd_db(pdev);
2083         ret = cdnsp_wait_for_cmd_compl(pdev);
2084
2085         trace_cdnsp_handle_cmd_stop_ep(pep->out_ctx);
2086
2087 ep_stopped:
2088         pep->ep_state |= EP_STOPPED;
2089         return ret;
2090 }
2091
2092 int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
2093 {
2094         int ret;
2095
2096         cdnsp_queue_flush_endpoint(pdev, pep->idx);
2097         cdnsp_ring_cmd_db(pdev);
2098         ret = cdnsp_wait_for_cmd_compl(pdev);
2099
2100         trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx);
2101
2102         return ret;
2103 }
2104
2105 /*
2106  * The transfer burst count field of the isochronous TRB defines the number of
2107  * bursts that are required to move all packets in this TD. Only SuperSpeed
2108  * devices can burst up to bMaxBurst number of packets per service interval.
2109  * This field is zero based, meaning a value of zero in the field means one
2110  * burst. Basically, for everything but SuperSpeed devices, this field will be
2111  * zero.
2112  */
2113 static unsigned int cdnsp_get_burst_count(struct cdnsp_device *pdev,
2114                                           struct cdnsp_request *preq,
2115                                           unsigned int total_packet_count)
2116 {
2117         unsigned int max_burst;
2118
2119         if (pdev->gadget.speed < USB_SPEED_SUPER)
2120                 return 0;
2121
2122         max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
2123         return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
2124 }
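
     /*
      * Illustrative example for cdnsp_get_burst_count() above (values chosen
      * arbitrarily): a SuperSpeed endpoint with bMaxBurst = 3 can move up to
      * 4 packets per burst, so a 10 packet TD needs DIV_ROUND_UP(10, 4) = 3
      * bursts and the zero-based TBC value returned is 2.
      */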
2125
2126 /*
2127  * Returns the number of packets in the last "burst" of packets. This field is
2128  * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
2129  * the last burst packet count is equal to the total number of packets in the
2130  * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
2131  * must contain (bMaxBurst + 1) number of packets, but the last burst can
2132  * contain 1 to (bMaxBurst + 1) packets.
2133  */
2134 static unsigned int
2135         cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev,
2136                                           struct cdnsp_request *preq,
2137                                           unsigned int total_packet_count)
2138 {
2139         unsigned int max_burst;
2140         unsigned int residue;
2141
2142         if (pdev->gadget.speed >= USB_SPEED_SUPER) {
2143                 /* bMaxBurst is zero based: 0 means 1 packet per burst. */
2144                 max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
2145                 residue = total_packet_count % (max_burst + 1);
2146
2147                 /*
2148                  * If residue is zero, the last burst contains (max_burst + 1)
2149                  * number of packets, but the TLBPC field is zero-based.
2150                  */
2151                 if (residue == 0)
2152                         return max_burst;
2153
2154                 return residue - 1;
2155         }
2156         if (total_packet_count == 0)
2157                 return 0;
2158
2159         return total_packet_count - 1;
2160 }
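
     /*
      * Illustrative example for cdnsp_get_last_burst_packet_count() above,
      * continuing the numbers used for the burst count: with bMaxBurst = 3
      * and a 10 packet TD, residue = 10 % 4 = 2, so the last burst carries
      * 2 packets and the zero-based TLBPC value is 1. Had the TD held 8
      * packets, residue would be 0 and the full-burst value bMaxBurst = 3
      * would be returned instead.
      */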
2161
2162 /* Queue function for an isoc transfer. */
2163 static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
2164                                struct cdnsp_request *preq)
2165 {
2166         int trb_buff_len, td_len, td_remain_len, ret;
2167         unsigned int burst_count, last_burst_pkt;
2168         unsigned int total_pkt_count, max_pkt;
2169         struct cdnsp_generic_trb *start_trb;
2170         bool more_trbs_coming = true;
2171         struct cdnsp_ring *ep_ring;
2172         int running_total = 0;
2173         u32 field, length_field;
2174         int start_cycle;
2175         int trbs_per_td;
2176         u64 addr;
2177         int i;
2178
2179         ep_ring = preq->pep->ring;
2180         start_trb = &ep_ring->enqueue->generic;
2181         start_cycle = ep_ring->cycle_state;
2182         td_len = preq->request.length;
2183         addr = (u64)preq->request.dma;
2184         td_remain_len = td_len;
2185
2186         max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
2187         total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
2188
2189         /* A zero-length transfer still involves at least one packet. */
2190         if (total_pkt_count == 0)
2191                 total_pkt_count++;
2192
2193         burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count);
2194         last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq,
2195                                                            total_pkt_count);
2196         trbs_per_td = count_isoc_trbs_needed(preq);
2197
2198         ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
2199         if (ret)
2200                 goto cleanup;
2201
2202         /*
2203          * Set isoc specific data for the first TRB in a TD.
2204          * Prevent HW from getting the TRBs by keeping the cycle state
2205          * inverted in the first TDs isoc TRB.
2206          * inverted in the first TD's isoc TRB.
2207         field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |
2208                 TRB_SIA | TRB_TBC(burst_count);
2209
2210         if (!start_cycle)
2211                 field |= TRB_CYCLE;
2212
2213         /* Fill the rest of the TRB fields, and remaining normal TRBs. */
2214         for (i = 0; i < trbs_per_td; i++) {
2215                 u32 remainder;
2216
2217                 /* Calculate TRB length. */
2218                 trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
2219                 if (trb_buff_len > td_remain_len)
2220                         trb_buff_len = td_remain_len;
2221
2222                 /* Set the TRB length, TD size, & interrupter fields. */
2223                 remainder = cdnsp_td_remainder(pdev, running_total,
2224                                                trb_buff_len, td_len, preq,
2225                                                more_trbs_coming);
2226
2227                 length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);
2228
2229                 /* Only the first TRB is isoc; use normal TRBs otherwise. */
2230                 if (i) {
2231                         field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
2232                         length_field |= TRB_TD_SIZE(remainder);
2233                 } else {
2234                         length_field |= TRB_TD_SIZE_TBC(burst_count);
2235                 }
2236
2237                 /* Only set interrupt on short packet for OUT EPs. */
2238                 if (usb_endpoint_dir_out(preq->pep->endpoint.desc))
2239                         field |= TRB_ISP;
2240
2241                 /* Set the chain bit for all except the last TRB. */
2242                 if (i < trbs_per_td - 1) {
2243                         more_trbs_coming = true;
2244                         field |= TRB_CHAIN;
2245                 } else {
2246                         more_trbs_coming = false;
2247                         preq->td.last_trb = ep_ring->enqueue;
2248                         field |= TRB_IOC;
2249                 }
2250
2251                 cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
2252                                 lower_32_bits(addr), upper_32_bits(addr),
2253                                 length_field, field);
2254
2255                 running_total += trb_buff_len;
2256                 addr += trb_buff_len;
2257                 td_remain_len -= trb_buff_len;
2258         }
2259
2260         /* Check TD length */
2261         if (running_total != td_len) {
2262                 dev_err(pdev->dev, "ISOC TD length mismatch\n");
2263                 ret = -EINVAL;
2264                 goto cleanup;
2265         }
2266
2267         cdnsp_giveback_first_trb(pdev, preq->pep, preq->request.stream_id,
2268                                  start_cycle, start_trb);
2269
2270         return 0;
2271
2272 cleanup:
2273         /* Clean up a partially enqueued isoc transfer. */
2274         list_del_init(&preq->td.td_list);
2275         ep_ring->num_tds--;
2276
2277         /*
2278          * Use the first TD as a temporary variable to turn the TDs we've
2279          * queued into No-ops with a software-owned cycle bit.
2280          * That way the hardware won't accidentally start executing bogus TDs
2281          * when we partially overwrite them.
2282          * td->first_trb and td->start_seg are already set.
2283          */
2284         preq->td.last_trb = ep_ring->enqueue;
2285         /* Every TRB except the first & last will have its cycle bit flipped. */
2286         cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
2287
2288         /* Reset the ring enqueue back to the first TRB and its cycle bit. */
2289         ep_ring->enqueue = preq->td.first_trb;
2290         ep_ring->enq_seg = preq->td.start_seg;
2291         ep_ring->cycle_state = start_cycle;
2292         return ret;
2293 }
2294
2295 int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
2296                                 struct cdnsp_request *preq)
2297 {
2298         struct cdnsp_ring *ep_ring;
2299         u32 ep_state;
2300         int num_trbs;
2301         int ret;
2302
2303         ep_ring = preq->pep->ring;
2304         ep_state = GET_EP_CTX_STATE(preq->pep->out_ctx);
2305         num_trbs = count_isoc_trbs_needed(preq);
2306
2307         /*
2308          * Check the ring to guarantee there is enough room for the whole
2309          * request. Do not insert any TD of the USB request into the ring if
2310          * the check fails.
2311          */
2312         ret = cdnsp_prepare_ring(pdev, ep_ring, ep_state, num_trbs, GFP_ATOMIC);
2313         if (ret)
2314                 return ret;
2315
2316         return cdnsp_queue_isoc_tx(pdev, preq);
2317 }
2318
2319 /****           Command Ring Operations         ****/
2320 /*
2321  * Generic function for queuing a command TRB on the command ring.
2322  * The driver queues only one command to the ring at a time.
2323  */
2324 static void cdnsp_queue_command(struct cdnsp_device *pdev,
2325                                 u32 field1,
2326                                 u32 field2,
2327                                 u32 field3,
2328                                 u32 field4)
2329 {
2330         cdnsp_prepare_ring(pdev, pdev->cmd_ring, EP_STATE_RUNNING, 1,
2331                            GFP_ATOMIC);
2332
2333         pdev->cmd.command_trb = pdev->cmd_ring->enqueue;
2334
2335         cdnsp_queue_trb(pdev, pdev->cmd_ring, false, field1, field2,
2336                         field3, field4 | pdev->cmd_ring->cycle_state);
2337 }
2338
2339 /* Queue a slot enable or disable request on the command ring */
2340 void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type)
2341 {
2342         cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(trb_type) |
2343                             SLOT_ID_FOR_TRB(pdev->slot_id));
2344 }
2345
2346 /* Queue an address device command TRB */
2347 void cdnsp_queue_address_device(struct cdnsp_device *pdev,
2348                                 dma_addr_t in_ctx_ptr,
2349                                 enum cdnsp_setup_dev setup)
2350 {
2351         cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
2352                             upper_32_bits(in_ctx_ptr), 0,
2353                             TRB_TYPE(TRB_ADDR_DEV) |
2354                             SLOT_ID_FOR_TRB(pdev->slot_id) |
2355                             (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0));
2356 }
2357
2358 /* Queue a reset device command TRB */
2359 void cdnsp_queue_reset_device(struct cdnsp_device *pdev)
2360 {
2361         cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) |
2362                             SLOT_ID_FOR_TRB(pdev->slot_id));
2363 }
2364
2365 /* Queue a configure endpoint command TRB */
2366 void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
2367                                     dma_addr_t in_ctx_ptr)
2368 {
2369         cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
2370                             upper_32_bits(in_ctx_ptr), 0,
2371                             TRB_TYPE(TRB_CONFIG_EP) |
2372                             SLOT_ID_FOR_TRB(pdev->slot_id));
2373 }
2374
2375 /*
2376  * A "Stop Endpoint Command" is issued to stop activity on an endpoint,
2377  * for example one that is about to be suspended.
2378  */
2379 void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
2380 {
2381         cdnsp_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) |
2382                             EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_STOP_RING));
2383 }
2384
2385 /* Set Transfer Ring Dequeue Pointer command. */
2386 void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
2387                                    struct cdnsp_ep *pep,
2388                                    struct cdnsp_dequeue_state *deq_state)
2389 {
2390         u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
2391         u32 trb_slot_id = SLOT_ID_FOR_TRB(pdev->slot_id);
2392         u32 type = TRB_TYPE(TRB_SET_DEQ);
2393         u32 trb_sct = 0;
2394         dma_addr_t addr;
2395
2396         addr = cdnsp_trb_virt_to_dma(deq_state->new_deq_seg,
2397                                      deq_state->new_deq_ptr);
2398
2399         if (deq_state->stream_id)
2400                 trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
2401
2402         cdnsp_queue_command(pdev, lower_32_bits(addr) | trb_sct |
2403                             deq_state->new_cycle_state, upper_32_bits(addr),
2404                             trb_stream_id, trb_slot_id |
2405                             EP_ID_FOR_TRB(pep->idx) | type);
2406 }
2407
2408 void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index)
2409 {
2410         return cdnsp_queue_command(pdev, 0, 0, 0,
2411                                    SLOT_ID_FOR_TRB(pdev->slot_id) |
2412                                    EP_ID_FOR_TRB(ep_index) |
2413                                    TRB_TYPE(TRB_RESET_EP));
2414 }
2415
2416 /*
2417  * Queue a halt endpoint request on the command ring.
2418  */
2419 void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
2420 {
2421         cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) |
2422                             SLOT_ID_FOR_TRB(pdev->slot_id) |
2423                             EP_ID_FOR_TRB(ep_index));
2424 }
2425
2426 /*
2427  * Queue a flush endpoint request on the command ring.
2428  */
2429 void  cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
2430                                  unsigned int ep_index)
2431 {
2432         cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) |
2433                             SLOT_ID_FOR_TRB(pdev->slot_id) |
2434                             EP_ID_FOR_TRB(ep_index));
2435 }
2436
2437 void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
2438 {
2439         u32 lo, mid;
2440
2441         lo = TRB_FH_TO_PACKET_TYPE(TRB_FH_TR_PACKET) |
2442              TRB_FH_TO_DEVICE_ADDRESS(pdev->device_address);
2443         mid = TRB_FH_TR_PACKET_DEV_NOT |
2444               TRB_FH_TO_NOT_TYPE(TRB_FH_TR_PACKET_FUNCTION_WAKE) |
2445               TRB_FH_TO_INTERFACE(intf_num);
2446
2447         cdnsp_queue_command(pdev, lo, mid, 0,
2448                             TRB_TYPE(TRB_FORCE_HEADER) | SET_PORT_ID(2));
2449 }