xhci: dbc: Don't use generic xhci context allocation for dbc
drivers/usb/host/xhci-dbgcap.c (platform/kernel/linux-rpi.git)
// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
        if (!ctx)
                return;
        dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
        kfree(ctx);
}

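/*
 * The DbC Info Context packs all four string-descriptor lengths into a
 * single 32-bit field, one byte per descriptor (string0 in the low byte,
 * then manufacturer, product and serial; this follows the Info Context
 * layout described in xHCI spec section 7.6.9). That is why string_length
 * is shifted left by 8 bits after each descriptor below.
 */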
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
        struct usb_string_descriptor    *s_desc;
        u32                             string_length;

        /* Serial string: */
        s_desc = (struct usb_string_descriptor *)strings->serial;
        utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength         = (strlen(DBC_STRING_SERIAL) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length           = s_desc->bLength;
        string_length           <<= 8;

        /* Product string: */
        s_desc = (struct usb_string_descriptor *)strings->product;
        utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength         = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length           += s_desc->bLength;
        string_length           <<= 8;

        /* Manufacturer string: */
        s_desc = (struct usb_string_descriptor *)strings->manufacturer;
        utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
                        strlen(DBC_STRING_MANUFACTURER),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength         = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length           += s_desc->bLength;
        string_length           <<= 8;

        /* String0: */
        strings->string0[0]     = 4;
        strings->string0[1]     = USB_DT_STRING;
        strings->string0[2]     = 0x09;
        strings->string0[3]     = 0x04;
        string_length           += 4;

        return string_length;
}

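/*
 * The DbC context is three consecutive 64-byte contexts (xHCI 7.6.9): the
 * info context, the bulk-out endpoint context and the bulk-in endpoint
 * context. dbc_bulkout_ctx()/dbc_bulkin_ctx() index into that block, and
 * the DCCP register write below points the controller at its base.
 */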
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
        struct dbc_info_context *info;
        struct xhci_ep_ctx      *ep_ctx;
        u32                     dev_info;
        dma_addr_t              deq, dma;
        unsigned int            max_burst;

        if (!dbc)
                return;

        /* Populate info Context: */
        info                    = (struct dbc_info_context *)dbc->ctx->bytes;
        dma                     = dbc->string_dma;
        info->string0           = cpu_to_le64(dma);
        info->manufacturer      = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
        info->product           = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
        info->serial            = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
        info->length            = cpu_to_le32(string_length);

        /* Populate bulk out endpoint context: */
        ep_ctx                  = dbc_bulkout_ctx(dbc);
        max_burst               = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
        deq                     = dbc_bulkout_enq(dbc);
        ep_ctx->ep_info         = 0;
        ep_ctx->ep_info2        = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
        ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_out->cycle_state);

        /* Populate bulk in endpoint context: */
        ep_ctx                  = dbc_bulkin_ctx(dbc);
        deq                     = dbc_bulkin_enq(dbc);
        ep_ctx->ep_info         = 0;
        ep_ctx->ep_info2        = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
        ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_in->cycle_state);

        /* Set DbC context and info registers: */
        lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

        /* writel() performs the CPU-to-LE conversion, so pass CPU-endian values: */
        dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
        writel(dev_info, &dbc->regs->devinfo1);

        dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
        writel(dev_info, &dbc->regs->devinfo2);
}

static void xhci_dbc_giveback(struct dbc_request *req, int status)
        __releases(&dbc->lock)
        __acquires(&dbc->lock)
{
        struct dbc_ep           *dep = req->dep;
        struct xhci_dbc         *dbc = dep->dbc;
        struct device           *dev = dbc->dev;

        list_del_init(&req->list_pending);
        req->trb_dma = 0;
        req->trb = NULL;

        if (req->status == -EINPROGRESS)
                req->status = status;

        trace_xhci_dbc_giveback_request(req);

        dma_unmap_single(dev,
                         req->dma,
                         req->length,
                         dbc_ep_dma_direction(dep));

        /* Give back the transfer request: */
        spin_unlock(&dbc->lock);
        req->complete(dbc, req);
        spin_lock(&dbc->lock);
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
        union xhci_trb  *trb = req->trb;

        trb->generic.field[0]   = 0;
        trb->generic.field[1]   = 0;
        trb->generic.field[2]   = 0;
        trb->generic.field[3]   &= cpu_to_le32(TRB_CYCLE);
        trb->generic.field[3]   |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

        xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
        struct dbc_request      *req, *tmp;

        list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
                xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
        struct dbc_request      *req;

        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
                return NULL;

        req->dep = dep;
        INIT_LIST_HEAD(&req->list_pending);
        INIT_LIST_HEAD(&req->list_pool);
        req->direction = dep->direction;

        trace_xhci_dbc_alloc_request(req);

        return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
        trace_xhci_dbc_free_request(req);

        kfree(req);
}

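/*
 * Copy one TRB into the ring and advance the enqueue pointer. When the
 * enqueue pointer lands on the link TRB at the end of the (single)
 * segment, toggle the link TRB's cycle bit, wrap back to the start of
 * the segment and flip the producer cycle state.
 */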
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
                   u32 field2, u32 field3, u32 field4)
{
        union xhci_trb          *trb, *next;

        trb = ring->enqueue;
        trb->generic.field[0]   = cpu_to_le32(field1);
        trb->generic.field[1]   = cpu_to_le32(field2);
        trb->generic.field[2]   = cpu_to_le32(field3);
        trb->generic.field[3]   = cpu_to_le32(field4);

        trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

        ring->num_trbs_free--;
        next = ++(ring->enqueue);
        if (TRB_TYPE_LINK_LE32(next->link.control)) {
                next->link.control ^= cpu_to_le32(TRB_CYCLE);
                ring->enqueue = ring->enq_seg->trbs;
                ring->cycle_state ^= 1;
        }
}

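/*
 * Queue a single normal TRB for a request. The TRB is first written with
 * its cycle bit in the state the controller treats as "not yet valid";
 * only after the write barrier is the cycle bit flipped to hand the TRB
 * over, and the doorbell rung. This ordering keeps the controller from
 * seeing a half-written TRB.
 */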
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
                                  struct dbc_request *req)
{
        u64                     addr;
        union xhci_trb          *trb;
        unsigned int            num_trbs;
        struct xhci_dbc         *dbc = dep->dbc;
        struct xhci_ring        *ring = dep->ring;
        u32                     length, control, cycle;

        num_trbs = count_trbs(req->dma, req->length);
        WARN_ON(num_trbs != 1);
        if (ring->num_trbs_free < num_trbs)
                return -EBUSY;

        addr    = req->dma;
        trb     = ring->enqueue;
        cycle   = ring->cycle_state;
        length  = TRB_LEN(req->length);
        control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

        if (cycle)
                control &= cpu_to_le32(~TRB_CYCLE);
        else
                control |= cpu_to_le32(TRB_CYCLE);

        req->trb = ring->enqueue;
        req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
        xhci_dbc_queue_trb(ring,
                           lower_32_bits(addr),
                           upper_32_bits(addr),
                           length, control);

        /*
         * Add a barrier between writes of trb fields and flipping
         * the cycle bit:
         */
        wmb();

        if (cycle)
                trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
        else
                trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

        writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

        return 0;
}

static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
        int                     ret;
        struct xhci_dbc         *dbc = dep->dbc;
        struct device           *dev = dbc->dev;

        if (!req->length || !req->buf)
                return -EINVAL;

        req->actual             = 0;
        req->status             = -EINPROGRESS;

        req->dma = dma_map_single(dev,
                                  req->buf,
                                  req->length,
                                  dbc_ep_dma_direction(dep));
        if (dma_mapping_error(dev, req->dma)) {
                dev_err(dbc->dev, "failed to map buffer\n");
                return -EFAULT;
        }

        ret = xhci_dbc_queue_bulk_tx(dep, req);
        if (ret) {
                dev_err(dbc->dev, "failed to queue trbs\n");
                dma_unmap_single(dev,
                                 req->dma,
                                 req->length,
                                 dbc_ep_dma_direction(dep));
                return -EFAULT;
        }

        list_add_tail(&req->list_pending, &dep->list_pending);

        return 0;
}

int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
                 gfp_t gfp_flags)
{
        unsigned long           flags;
        struct xhci_dbc         *dbc = dep->dbc;
        int                     ret = -ESHUTDOWN;

        spin_lock_irqsave(&dbc->lock, flags);
        if (dbc->state == DS_CONFIGURED)
                ret = dbc_ep_do_queue(dep, req);
        spin_unlock_irqrestore(&dbc->lock, flags);

        mod_delayed_work(system_wq, &dbc->event_work, 0);

        trace_xhci_dbc_queue_request(req);

        return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
        struct dbc_ep           *dep;

        dep                     = &dbc->eps[direction];
        dep->dbc                = dbc;
        dep->direction          = direction;
        dep->ring               = direction ? dbc->ring_in : dbc->ring_out;

        INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
        xhci_dbc_do_eps_init(dbc, BULK_OUT);
        xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
        memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

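/*
 * The DbC event ring is a single segment, so the event ring segment
 * table needs exactly one entry: the segment's base address plus its
 * size in TRBs.
 */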
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
                    struct xhci_erst *erst, gfp_t flags)
{
        erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
                                           &erst->erst_dma_addr, flags);
        if (!erst->entries)
                return -ENOMEM;

        erst->num_entries = 1;
        erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
        erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
        erst->entries[0].rsvd = 0;
        return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
        if (erst->entries)
                dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
                                  erst->entries, erst->erst_dma_addr);
        erst->entries = NULL;
}

static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
        struct xhci_container_ctx *ctx;

        ctx = kzalloc(sizeof(*ctx), flags);
        if (!ctx)
                return NULL;

        /* xhci 7.6.9, all three contexts; info, ep-out and ep-in. Each 64 bytes */
        ctx->size = 3 * DBC_CONTEXT_SIZE;
        ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
        if (!ctx->bytes) {
                kfree(ctx);
                return NULL;
        }
        return ctx;
}

static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
        int                     ret;
        dma_addr_t              deq;
        u32                     string_length;
        struct xhci_dbc         *dbc = xhci->dbc;
        struct device           *dev = xhci_to_hcd(xhci)->self.controller;

        /* Allocate various rings for events and transfers: */
        dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
        if (!dbc->ring_evt)
                goto evt_fail;

        dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
        if (!dbc->ring_in)
                goto in_fail;

        dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
        if (!dbc->ring_out)
                goto out_fail;

        /* Allocate and populate ERST: */
        ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
        if (ret)
                goto erst_fail;

        /* Allocate context data structure: */
        dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
        if (!dbc->ctx)
                goto ctx_fail;

        /* Allocate the string table: */
        dbc->string_size = sizeof(struct dbc_str_descs);
        dbc->string = dma_alloc_coherent(dev, dbc->string_size,
                                         &dbc->string_dma, flags);
        if (!dbc->string)
                goto string_fail;

        /* Setup ERST register: */
        writel(dbc->erst.erst_size, &dbc->regs->ersts);

        lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
        deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                   dbc->ring_evt->dequeue);
        lo_hi_writeq(deq, &dbc->regs->erdp);

        /* Setup strings and contexts: */
        string_length = xhci_dbc_populate_strings(dbc->string);
        xhci_dbc_init_contexts(dbc, string_length);

        xhci_dbc_eps_init(dbc);
        dbc->state = DS_INITIALIZED;

        return 0;

string_fail:
        dbc_free_ctx(dev, dbc->ctx);
        dbc->ctx = NULL;
ctx_fail:
        dbc_erst_free(dev, &dbc->erst);
erst_fail:
        xhci_ring_free(xhci, dbc->ring_out);
        dbc->ring_out = NULL;
out_fail:
        xhci_ring_free(xhci, dbc->ring_in);
        dbc->ring_in = NULL;
in_fail:
        xhci_ring_free(xhci, dbc->ring_evt);
        dbc->ring_evt = NULL;
evt_fail:
        return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;
        struct device           *dev = xhci_to_hcd(xhci)->self.controller;

        if (!dbc)
                return;

        xhci_dbc_eps_exit(dbc);

        if (dbc->string) {
                dma_free_coherent(dbc->dev, dbc->string_size,
                                  dbc->string, dbc->string_dma);
                dbc->string = NULL;
        }

        dbc_free_ctx(dbc->dev, dbc->ctx);
        dbc->ctx = NULL;

        dbc_erst_free(dev, &dbc->erst);
        xhci_ring_free(xhci, dbc->ring_out);
        xhci_ring_free(xhci, dbc->ring_in);
        xhci_ring_free(xhci, dbc->ring_evt);
        dbc->ring_in = NULL;
        dbc->ring_out = NULL;
        dbc->ring_evt = NULL;
}

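/*
 * Enable sequence: write 0 to the control register and handshake until
 * any previous enable bit has cleared, allocate DbC memory, then set
 * DBC_ENABLE (plus port enable) and handshake until the controller
 * reports the capability as enabled.
 */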
static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
        int                     ret;
        u32                     ctrl;
        struct xhci_dbc         *dbc = xhci->dbc;

        if (dbc->state != DS_DISABLED)
                return -EINVAL;

        writel(0, &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             0, 1000);
        if (ret)
                return ret;

        ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
        if (ret)
                return ret;

        ctrl = readl(&dbc->regs->control);
        writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
               &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             DBC_CTRL_DBC_ENABLE, 1000);
        if (ret)
                return ret;

        dbc->state = DS_ENABLED;

        return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
        if (dbc->state == DS_DISABLED)
                return -1;

        writel(0, &dbc->regs->control);
        dbc->state = DS_DISABLED;

        return 0;
}

static int xhci_dbc_start(struct xhci_hcd *xhci)
{
        int                     ret;
        unsigned long           flags;
        struct xhci_dbc         *dbc = xhci->dbc;

        WARN_ON(!dbc);

        pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

        spin_lock_irqsave(&dbc->lock, flags);
        ret = xhci_do_dbc_start(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);

        if (ret) {
                pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
                return ret;
        }

        return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
        int ret;
        unsigned long           flags;
        struct xhci_dbc         *dbc = xhci->dbc;
        struct dbc_port         *port = &dbc->port;

        WARN_ON(!dbc);

        cancel_delayed_work_sync(&dbc->event_work);

        if (port->registered)
                xhci_dbc_tty_unregister_device(dbc);

        spin_lock_irqsave(&dbc->lock, flags);
        ret = xhci_do_dbc_stop(dbc);
        spin_unlock_irqrestore(&dbc->lock, flags);

        if (!ret) {
                xhci_dbc_mem_cleanup(xhci);
                pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
        }
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
        u32                     portsc;

        portsc = readl(&dbc->regs->portsc);
        if (portsc & DBC_PORTSC_CONN_CHANGE)
                dev_info(dbc->dev, "DbC port connect change\n");

        if (portsc & DBC_PORTSC_RESET_CHANGE)
                dev_info(dbc->dev, "DbC port reset change\n");

        if (portsc & DBC_PORTSC_LINK_CHANGE)
                dev_info(dbc->dev, "DbC port link status change\n");

        if (portsc & DBC_PORTSC_CONFIG_CHANGE)
                dev_info(dbc->dev, "DbC config error change\n");

        /* The port reset change bit is cleared elsewhere: */
        writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

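/*
 * A transfer event carries the DMA address of the TRB it completes in
 * its first field; that address is matched against trb_dma to find the
 * pending request the event belongs to.
 */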
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
        struct dbc_ep           *dep;
        struct xhci_ring        *ring;
        int                     ep_id;
        int                     status;
        u32                     comp_code;
        size_t                  remain_length;
        struct dbc_request      *req = NULL, *r;

        comp_code       = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
        remain_length   = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
        ep_id           = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
        dep             = (ep_id == EPID_OUT) ?
                                get_out_ep(dbc) : get_in_ep(dbc);
        ring            = dep->ring;

        switch (comp_code) {
        case COMP_SUCCESS:
                remain_length = 0;
        /* FALLTHROUGH */
        case COMP_SHORT_PACKET:
                status = 0;
                break;
        case COMP_TRB_ERROR:
        case COMP_BABBLE_DETECTED_ERROR:
        case COMP_USB_TRANSACTION_ERROR:
        case COMP_STALL_ERROR:
                dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
                status = -comp_code;
                break;
        default:
                dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
                status = -comp_code;
                break;
        }

        /* Match the pending request: */
        list_for_each_entry(r, &dep->list_pending, list_pending) {
                if (r->trb_dma == event->trans_event.buffer) {
                        req = r;
                        break;
                }
        }

        if (!req) {
                dev_warn(dbc->dev, "no matched request\n");
                return;
        }

        trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

        ring->num_trbs_free++;
        req->actual = req->length - remain_length;
        xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
        /* If on the last TRB of the segment go back to the beginning */
        if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
                ring->cycle_state ^= 1;
                ring->dequeue = ring->deq_seg->trbs;
                return;
        }
        ring->dequeue++;
}

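/*
 * Poll the DbC state machine and drain the event ring. Roughly:
 * ENABLED -> CONNECTED once the port reports a connection,
 * CONNECTED -> CONFIGURED once the controller sets the run bit
 * (EVT_GSER tells the caller to register the tty device),
 * CONFIGURED -> ENABLED on cable unplug or port reset (EVT_DISC),
 * and CONFIGURED <-> STALLED as the endpoint halt bits come and go.
 */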
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
        dma_addr_t              deq;
        struct dbc_ep           *dep;
        union xhci_trb          *evt;
        u32                     ctrl, portsc;
        bool                    update_erdp = false;

        /* DbC state machine: */
        switch (dbc->state) {
        case DS_DISABLED:
        case DS_INITIALIZED:
                return EVT_ERR;
        case DS_ENABLED:
                portsc = readl(&dbc->regs->portsc);
                if (portsc & DBC_PORTSC_CONN_STATUS) {
                        dbc->state = DS_CONNECTED;
                        dev_info(dbc->dev, "DbC connected\n");
                }

                return EVT_DONE;
        case DS_CONNECTED:
                ctrl = readl(&dbc->regs->control);
                if (ctrl & DBC_CTRL_DBC_RUN) {
                        dbc->state = DS_CONFIGURED;
                        dev_info(dbc->dev, "DbC configured\n");
                        portsc = readl(&dbc->regs->portsc);
                        writel(portsc, &dbc->regs->portsc);
                        return EVT_GSER;
                }

                return EVT_DONE;
        case DS_CONFIGURED:
                /* Handle cable unplug event: */
                portsc = readl(&dbc->regs->portsc);
                if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
                    !(portsc & DBC_PORTSC_CONN_STATUS)) {
                        dev_info(dbc->dev, "DbC cable unplugged\n");
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle debug port reset event: */
                if (portsc & DBC_PORTSC_RESET_CHANGE) {
                        dev_info(dbc->dev, "DbC port reset\n");
                        writel(portsc, &dbc->regs->portsc);
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle endpoint stall event: */
                ctrl = readl(&dbc->regs->control);
                if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
                    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
                        dev_info(dbc->dev, "DbC Endpoint stall\n");
                        dbc->state = DS_STALLED;

                        if (ctrl & DBC_CTRL_HALT_IN_TR) {
                                dep = get_in_ep(dbc);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        if (ctrl & DBC_CTRL_HALT_OUT_TR) {
                                dep = get_out_ep(dbc);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        return EVT_DONE;
                }

                /* Clear DbC run change bit: */
                if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
                        writel(ctrl, &dbc->regs->control);
                        ctrl = readl(&dbc->regs->control);
                }

                break;
        case DS_STALLED:
                ctrl = readl(&dbc->regs->control);
                if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
                    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
                    (ctrl & DBC_CTRL_DBC_RUN)) {
                        dbc->state = DS_CONFIGURED;
                        break;
                }

                return EVT_DONE;
        default:
                dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
                break;
        }

        /* Handle the events in the event ring: */
        evt = dbc->ring_evt->dequeue;
        while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
                        dbc->ring_evt->cycle_state) {
                /*
                 * Add a barrier between reading the cycle flag and any
                 * reads of the event's flags/data below:
                 */
                rmb();

                trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

                switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
                case TRB_TYPE(TRB_PORT_STATUS):
                        dbc_handle_port_status(dbc, evt);
                        break;
                case TRB_TYPE(TRB_TRANSFER):
                        dbc_handle_xfer_event(dbc, evt);
                        break;
                default:
                        break;
                }

                inc_evt_deq(dbc->ring_evt);

                evt = dbc->ring_evt->dequeue;
                update_erdp = true;
        }

        /* Update event ring dequeue pointer: */
        if (update_erdp) {
                deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                           dbc->ring_evt->dequeue);
                lo_hi_writeq(deq, &dbc->regs->erdp);
        }

        return EVT_DONE;
}

static void xhci_dbc_handle_events(struct work_struct *work)
{
        int                     ret;
        enum evtreturn          evtr;
        struct xhci_dbc         *dbc;
        unsigned long           flags;

        dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);

        spin_lock_irqsave(&dbc->lock, flags);
        evtr = xhci_dbc_do_handle_events(dbc);
        spin_unlock_irqrestore(&dbc->lock, flags);

        switch (evtr) {
        case EVT_GSER:
                ret = xhci_dbc_tty_register_device(dbc);
                if (ret) {
                        dev_err(dbc->dev, "failed to alloc tty device\n");
                        break;
                }

                dev_info(dbc->dev, "DbC now attached to /dev/ttyDBC0\n");
                break;
        case EVT_DISC:
                xhci_dbc_tty_unregister_device(dbc);
                break;
        case EVT_DONE:
                break;
        default:
                dev_info(dbc->dev, "stop handling dbc events\n");
                return;
        }

        mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
        unsigned long           flags;

        spin_lock_irqsave(&xhci->lock, flags);
        kfree(xhci->dbc);
        xhci->dbc = NULL;
        spin_unlock_irqrestore(&xhci->lock, flags);
}

static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
        u32                     reg;
        struct xhci_dbc         *dbc;
        unsigned long           flags;
        void __iomem            *base;
        int                     dbc_cap_offs;

        base = &xhci->cap_regs->hc_capbase;
        dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
        if (!dbc_cap_offs)
                return -ENODEV;

        dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
        if (!dbc)
                return -ENOMEM;

        dbc->regs = base + dbc_cap_offs;

        /* Don't touch DbC from the xhci driver if it is already in use. */
        reg = readl(&dbc->regs->control);
        if (reg & DBC_CTRL_DBC_ENABLE) {
                kfree(dbc);
                return -EBUSY;
        }

        spin_lock_irqsave(&xhci->lock, flags);
        if (xhci->dbc) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                kfree(dbc);
                return -EBUSY;
        }
        xhci->dbc = dbc;
        spin_unlock_irqrestore(&xhci->lock, flags);

        dbc->xhci = xhci;
        dbc->dev = xhci_to_hcd(xhci)->self.sysdev;
        INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
        spin_lock_init(&dbc->lock);

        return 0;
}

static ssize_t dbc_show(struct device *dev,
                        struct device_attribute *attr,
                        char *buf)
{
        const char              *p;
        struct xhci_dbc         *dbc;
        struct xhci_hcd         *xhci;

        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;

        switch (dbc->state) {
        case DS_DISABLED:
                p = "disabled";
                break;
        case DS_INITIALIZED:
                p = "initialized";
                break;
        case DS_ENABLED:
                p = "enabled";
                break;
        case DS_CONNECTED:
                p = "connected";
                break;
        case DS_CONFIGURED:
                p = "configured";
                break;
        case DS_STALLED:
                p = "stalled";
                break;
        default:
                p = "unknown";
        }

        return sprintf(buf, "%s\n", p);
}

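/*
 * DbC is switched on and off through the "dbc" sysfs attribute created
 * on the host controller device below. As an illustration only (the
 * exact sysfs path depends on how the xHCI controller is attached; a
 * typical PCI controller is assumed here):
 *
 *      echo enable  > /sys/bus/pci/devices/0000:00:14.0/dbc
 *      echo disable > /sys/bus/pci/devices/0000:00:14.0/dbc
 */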
static ssize_t dbc_store(struct device *dev,
                         struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct xhci_hcd         *xhci;

        xhci = hcd_to_xhci(dev_get_drvdata(dev));

        if (!strncmp(buf, "enable", 6))
                xhci_dbc_start(xhci);
        else if (!strncmp(buf, "disable", 7))
                xhci_dbc_stop(xhci);
        else
                return -EINVAL;

        return count;
}

static DEVICE_ATTR_RW(dbc);

int xhci_dbc_init(struct xhci_hcd *xhci)
{
        int                     ret;
        struct device           *dev = xhci_to_hcd(xhci)->self.controller;

        ret = xhci_do_dbc_init(xhci);
        if (ret)
                goto init_err3;

        ret = xhci_dbc_tty_register_driver(xhci);
        if (ret)
                goto init_err2;

        ret = device_create_file(dev, &dev_attr_dbc);
        if (ret)
                goto init_err1;

        return 0;

init_err1:
        xhci_dbc_tty_unregister_driver();
init_err2:
        xhci_do_dbc_exit(xhci);
init_err3:
        return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
        struct device           *dev = xhci_to_hcd(xhci)->self.controller;

        if (!xhci->dbc)
                return;

        device_remove_file(dev, &dev_attr_dbc);
        xhci_dbc_tty_unregister_driver();
        xhci_dbc_stop(xhci);
        xhci_do_dbc_exit(xhci);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;

        if (!dbc)
                return 0;

        if (dbc->state == DS_CONFIGURED)
                dbc->resume_required = 1;

        xhci_dbc_stop(xhci);

        return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
        int                     ret = 0;
        struct xhci_dbc         *dbc = xhci->dbc;

        if (!dbc)
                return 0;

        if (dbc->resume_required) {
                dbc->resume_required = 0;
                xhci_dbc_start(xhci);
        }

        return ret;
}
#endif /* CONFIG_PM */