xhci: dbc: Get the device pointer from dbc structure in dbc_ep_do_queue()
[platform/kernel/linux-rpi.git] / drivers / usb / host / xhci-dbgcap.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * xhci-dbgcap.c - xHCI debug capability support
4  *
5  * Copyright (C) 2017 Intel Corporation
6  *
7  * Author: Lu Baolu <baolu.lu@linux.intel.com>
8  */
9 #include <linux/dma-mapping.h>
10 #include <linux/slab.h>
11 #include <linux/nls.h>
12
13 #include "xhci.h"
14 #include "xhci-trace.h"
15 #include "xhci-dbgcap.h"
16
17 static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
18 {
19         struct usb_string_descriptor    *s_desc;
20         u32                             string_length;
21
22         /* Serial string: */
23         s_desc = (struct usb_string_descriptor *)strings->serial;
24         utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
25                         UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
26                         DBC_MAX_STRING_LENGTH);
27
28         s_desc->bLength         = (strlen(DBC_STRING_SERIAL) + 1) * 2;
29         s_desc->bDescriptorType = USB_DT_STRING;
30         string_length           = s_desc->bLength;
31         string_length           <<= 8;
32
33         /* Product string: */
34         s_desc = (struct usb_string_descriptor *)strings->product;
35         utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
36                         UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
37                         DBC_MAX_STRING_LENGTH);
38
39         s_desc->bLength         = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
40         s_desc->bDescriptorType = USB_DT_STRING;
41         string_length           += s_desc->bLength;
42         string_length           <<= 8;
43
44         /* Manufacture string: */
45         s_desc = (struct usb_string_descriptor *)strings->manufacturer;
46         utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
47                         strlen(DBC_STRING_MANUFACTURER),
48                         UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
49                         DBC_MAX_STRING_LENGTH);
50
51         s_desc->bLength         = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
52         s_desc->bDescriptorType = USB_DT_STRING;
53         string_length           += s_desc->bLength;
54         string_length           <<= 8;
55
56         /* String0: */
57         strings->string0[0]     = 4;
58         strings->string0[1]     = USB_DT_STRING;
59         strings->string0[2]     = 0x09;
60         strings->string0[3]     = 0x04;
61         string_length           += 4;
62
63         return string_length;
64 }
65
66 static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
67 {
68         struct dbc_info_context *info;
69         struct xhci_ep_ctx      *ep_ctx;
70         u32                     dev_info;
71         dma_addr_t              deq, dma;
72         unsigned int            max_burst;
73
74         if (!dbc)
75                 return;
76
77         /* Populate info Context: */
78         info                    = (struct dbc_info_context *)dbc->ctx->bytes;
79         dma                     = dbc->string_dma;
80         info->string0           = cpu_to_le64(dma);
81         info->manufacturer      = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
82         info->product           = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
83         info->serial            = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
84         info->length            = cpu_to_le32(string_length);
85
86         /* Populate bulk out endpoint context: */
87         ep_ctx                  = dbc_bulkout_ctx(dbc);
88         max_burst               = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
89         deq                     = dbc_bulkout_enq(dbc);
90         ep_ctx->ep_info         = 0;
91         ep_ctx->ep_info2        = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
92         ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_out->cycle_state);
93
94         /* Populate bulk in endpoint context: */
95         ep_ctx                  = dbc_bulkin_ctx(dbc);
96         deq                     = dbc_bulkin_enq(dbc);
97         ep_ctx->ep_info         = 0;
98         ep_ctx->ep_info2        = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
99         ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_in->cycle_state);
100
101         /* Set DbC context and info registers: */
102         lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
103
104         dev_info = cpu_to_le32((DBC_VENDOR_ID << 16) | DBC_PROTOCOL);
105         writel(dev_info, &dbc->regs->devinfo1);
106
107         dev_info = cpu_to_le32((DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID);
108         writel(dev_info, &dbc->regs->devinfo2);
109 }
110
111 static void xhci_dbc_giveback(struct dbc_request *req, int status)
112         __releases(&dbc->lock)
113         __acquires(&dbc->lock)
114 {
115         struct dbc_ep           *dep = req->dep;
116         struct xhci_dbc         *dbc = dep->dbc;
117         struct xhci_hcd         *xhci = dbc->xhci;
118         struct device           *dev = xhci_to_hcd(dbc->xhci)->self.sysdev;
119
120         list_del_init(&req->list_pending);
121         req->trb_dma = 0;
122         req->trb = NULL;
123
124         if (req->status == -EINPROGRESS)
125                 req->status = status;
126
127         trace_xhci_dbc_giveback_request(req);
128
129         dma_unmap_single(dev,
130                          req->dma,
131                          req->length,
132                          dbc_ep_dma_direction(dep));
133
134         /* Give back the transfer request: */
135         spin_unlock(&dbc->lock);
136         req->complete(xhci, req);
137         spin_lock(&dbc->lock);
138 }
139
140 static void xhci_dbc_flush_single_request(struct dbc_request *req)
141 {
142         union xhci_trb  *trb = req->trb;
143
144         trb->generic.field[0]   = 0;
145         trb->generic.field[1]   = 0;
146         trb->generic.field[2]   = 0;
147         trb->generic.field[3]   &= cpu_to_le32(TRB_CYCLE);
148         trb->generic.field[3]   |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
149
150         xhci_dbc_giveback(req, -ESHUTDOWN);
151 }
152
153 static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
154 {
155         struct dbc_request      *req, *tmp;
156
157         list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
158                 xhci_dbc_flush_single_request(req);
159 }
160
161 static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
162 {
163         xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
164         xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
165 }
166
167 struct dbc_request *
168 dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
169 {
170         struct dbc_request      *req;
171
172         req = kzalloc(sizeof(*req), gfp_flags);
173         if (!req)
174                 return NULL;
175
176         req->dep = dep;
177         INIT_LIST_HEAD(&req->list_pending);
178         INIT_LIST_HEAD(&req->list_pool);
179         req->direction = dep->direction;
180
181         trace_xhci_dbc_alloc_request(req);
182
183         return req;
184 }
185
/* Release a request previously obtained from dbc_alloc_request(). */
void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);
	kfree(req);
}
193
/*
 * Write one TRB at the ring's enqueue position and advance the enqueue
 * pointer. The fields are given in CPU byte order; this function does the
 * cpu_to_le32() conversion. If the next TRB is the link TRB, its cycle bit
 * is toggled and the enqueue pointer wraps to the start of the segment
 * (the DbC rings are single-segment).
 */
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb		*trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0]	= cpu_to_le32(field1);
	trb->generic.field[1]	= cpu_to_le32(field2);
	trb->generic.field[2]	= cpu_to_le32(field3);
	trb->generic.field[3]	= cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		/* Hand the link TRB to the controller and wrap around. */
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}
216
217 static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
218                                   struct dbc_request *req)
219 {
220         u64                     addr;
221         union xhci_trb          *trb;
222         unsigned int            num_trbs;
223         struct xhci_dbc         *dbc = dep->dbc;
224         struct xhci_ring        *ring = dep->ring;
225         u32                     length, control, cycle;
226
227         num_trbs = count_trbs(req->dma, req->length);
228         WARN_ON(num_trbs != 1);
229         if (ring->num_trbs_free < num_trbs)
230                 return -EBUSY;
231
232         addr    = req->dma;
233         trb     = ring->enqueue;
234         cycle   = ring->cycle_state;
235         length  = TRB_LEN(req->length);
236         control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;
237
238         if (cycle)
239                 control &= cpu_to_le32(~TRB_CYCLE);
240         else
241                 control |= cpu_to_le32(TRB_CYCLE);
242
243         req->trb = ring->enqueue;
244         req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
245         xhci_dbc_queue_trb(ring,
246                            lower_32_bits(addr),
247                            upper_32_bits(addr),
248                            length, control);
249
250         /*
251          * Add a barrier between writes of trb fields and flipping
252          * the cycle bit:
253          */
254         wmb();
255
256         if (cycle)
257                 trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
258         else
259                 trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);
260
261         writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);
262
263         return 0;
264 }
265
266 static int
267 dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
268 {
269         int                     ret;
270         struct xhci_dbc         *dbc = dep->dbc;
271         struct device           *dev = dbc->dev;
272
273         if (!req->length || !req->buf)
274                 return -EINVAL;
275
276         req->actual             = 0;
277         req->status             = -EINPROGRESS;
278
279         req->dma = dma_map_single(dev,
280                                   req->buf,
281                                   req->length,
282                                   dbc_ep_dma_direction(dep));
283         if (dma_mapping_error(dev, req->dma)) {
284                 dev_err(dbc->dev, "failed to map buffer\n");
285                 return -EFAULT;
286         }
287
288         ret = xhci_dbc_queue_bulk_tx(dep, req);
289         if (ret) {
290                 dev_err(dbc->dev, "failed to queue trbs\n");
291                 dma_unmap_single(dev,
292                                  req->dma,
293                                  req->length,
294                                  dbc_ep_dma_direction(dep));
295                 return -EFAULT;
296         }
297
298         list_add_tail(&req->list_pending, &dep->list_pending);
299
300         return 0;
301 }
302
303 int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
304                  gfp_t gfp_flags)
305 {
306         unsigned long           flags;
307         struct xhci_dbc         *dbc = dep->dbc;
308         int                     ret = -ESHUTDOWN;
309
310         spin_lock_irqsave(&dbc->lock, flags);
311         if (dbc->state == DS_CONFIGURED)
312                 ret = dbc_ep_do_queue(dep, req);
313         spin_unlock_irqrestore(&dbc->lock, flags);
314
315         mod_delayed_work(system_wq, &dbc->event_work, 0);
316
317         trace_xhci_dbc_queue_request(req);
318
319         return ret;
320 }
321
322 static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
323 {
324         struct dbc_ep           *dep;
325         struct xhci_dbc         *dbc = xhci->dbc;
326
327         dep                     = &dbc->eps[direction];
328         dep->dbc                = dbc;
329         dep->direction          = direction;
330         dep->ring               = direction ? dbc->ring_in : dbc->ring_out;
331
332         INIT_LIST_HEAD(&dep->list_pending);
333 }
334
335 static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
336 {
337         xhci_dbc_do_eps_init(xhci, BULK_OUT);
338         xhci_dbc_do_eps_init(xhci, BULK_IN);
339 }
340
341 static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
342 {
343         struct xhci_dbc         *dbc = xhci->dbc;
344
345         memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
346 }
347
348 static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
349                     struct xhci_erst *erst, gfp_t flags)
350 {
351         erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
352                                            &erst->erst_dma_addr, flags);
353         if (!erst->entries)
354                 return -ENOMEM;
355
356         erst->num_entries = 1;
357         erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
358         erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
359         erst->entries[0].rsvd = 0;
360         return 0;
361 }
362
363 static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
364 {
365         if (erst->entries)
366                 dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
367                                   erst->entries, erst->erst_dma_addr);
368         erst->entries = NULL;
369 }
370
/*
 * Allocate all DbC DMA state (event/transfer rings, ERST, device context,
 * string table), program the ERST registers and the contexts, and move the
 * DbC to DS_INITIALIZED. On any failure, already-acquired resources are
 * released in reverse order via the goto chain and -ENOMEM is returned.
 */
static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	int			ret;
	dma_addr_t		deq;
	u32			string_length;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/*
	 * Setup ERST register.
	 * NOTE(review): dbc_erst_alloc() sets num_entries but never
	 * erst.erst_size; dbc is kzalloc'ed, so this appears to write 0 to
	 * ERSTSZ — confirm this is the intended DbC ERSTSZ programming.
	 */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(xhci);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	xhci_ring_free(xhci, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	xhci_ring_free(xhci, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}
443
444 static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
445 {
446         struct xhci_dbc         *dbc = xhci->dbc;
447         struct device           *dev = xhci_to_hcd(xhci)->self.controller;
448
449         if (!dbc)
450                 return;
451
452         xhci_dbc_eps_exit(xhci);
453
454         if (dbc->string) {
455                 dma_free_coherent(dbc->dev, dbc->string_size,
456                                   dbc->string, dbc->string_dma);
457                 dbc->string = NULL;
458         }
459
460         xhci_free_container_ctx(xhci, dbc->ctx);
461         dbc->ctx = NULL;
462
463         dbc_erst_free(dev, &dbc->erst);
464         xhci_ring_free(xhci, dbc->ring_out);
465         xhci_ring_free(xhci, dbc->ring_in);
466         xhci_ring_free(xhci, dbc->ring_evt);
467         dbc->ring_in = NULL;
468         dbc->ring_out = NULL;
469         dbc->ring_evt = NULL;
470 }
471
/*
 * Enable the debug capability. Called with dbc->lock held (hence the
 * GFP_ATOMIC allocation below). Transitions DS_DISABLED -> DS_ENABLED;
 * any other starting state returns -EINVAL.
 */
static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
	int			ret;
	u32			ctrl;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	/* Clear the control register and wait for DBC_ENABLE to read 0. */
	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
	if (ret)
		return ret;

	/* Set enable bits and wait for the controller to latch DBC_ENABLE. */
	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}
505
506 static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
507 {
508         struct xhci_dbc         *dbc = xhci->dbc;
509
510         if (dbc->state == DS_DISABLED)
511                 return -1;
512
513         writel(0, &dbc->regs->control);
514         dbc->state = DS_DISABLED;
515
516         return 0;
517 }
518
/*
 * Power up the controller, enable the DbC and start the event polling
 * worker. The pm_runtime reference taken here is dropped on failure (and
 * otherwise by xhci_dbc_stop()). Returns the mod_delayed_work() result
 * (0/1) on success — callers treat any non-negative value as success.
 */
static int xhci_dbc_start(struct xhci_hcd *xhci)
{
	int			ret;
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;

	WARN_ON(!dbc);

	pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		/* Balance the pm_runtime_get_sync() above. */
		pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}
540
/*
 * Stop the DbC: cancel the event worker, tear down the tty device if one
 * was registered, disable the hardware and free all memory. The pm_runtime
 * reference from xhci_dbc_start() is dropped only if the hardware was
 * actually stopped (xhci_do_dbc_stop() returned 0).
 */
static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
	int ret;
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	WARN_ON(!dbc);

	/* Must run outside dbc->lock; the worker takes that lock itself. */
	cancel_delayed_work_sync(&dbc->event_work);

	if (port->registered)
		xhci_dbc_tty_unregister_device(xhci);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (!ret) {
		xhci_dbc_mem_cleanup(xhci);
		pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
	}
}
564
565 static void
566 dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
567 {
568         u32                     portsc;
569
570         portsc = readl(&dbc->regs->portsc);
571         if (portsc & DBC_PORTSC_CONN_CHANGE)
572                 dev_info(dbc->dev, "DbC port connect change\n");
573
574         if (portsc & DBC_PORTSC_RESET_CHANGE)
575                 dev_info(dbc->dev, "DbC port reset change\n");
576
577         if (portsc & DBC_PORTSC_LINK_CHANGE)
578                 dev_info(dbc->dev, "DbC port link status change\n");
579
580         if (portsc & DBC_PORTSC_CONFIG_CHANGE)
581                 dev_info(dbc->dev, "DbC config error change\n");
582
583         /* Port reset change bit will be cleared in other place: */
584         writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
585 }
586
/*
 * Handle a transfer event TRB: decode completion code and residual length,
 * find the pending request whose TRB address matches the event's buffer
 * pointer, and give it back with the mapped status.
 */
static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
{
	struct dbc_ep		*dep;
	struct xhci_ring	*ring;
	int			ep_id;
	int			status;
	u32			comp_code;
	size_t			remain_length;
	struct dbc_request	*req = NULL, *r;
	struct xhci_dbc		*dbc = xhci->dbc;

	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep		= (ep_id == EPID_OUT) ?
				get_out_ep(xhci) : get_in_ep(xhci);
	ring		= dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		/* Full transfer: no residual. */
		remain_length = 0;
	/* FALLTHROUGH */
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		/* Propagate the xHCI completion code as a negative status. */
		status = -comp_code;
		break;
	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: event buffer holds the TRB's DMA addr. */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	/* The consumed TRB slot is reusable again. */
	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}
644
645 static void inc_evt_deq(struct xhci_ring *ring)
646 {
647         /* If on the last TRB of the segment go back to the beginning */
648         if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
649                 ring->cycle_state ^= 1;
650                 ring->dequeue = ring->deq_seg->trbs;
651                 return;
652         }
653         ring->dequeue++;
654 }
655
/*
 * DbC state machine plus event ring drain. Called with dbc->lock held by
 * the event worker. Return values tell the worker what to do next:
 *   EVT_ERR  - stop polling (disabled/uninitialized),
 *   EVT_GSER - just became configured; register the tty device,
 *   EVT_DISC - cable unplug or port reset; unregister the tty device,
 *   EVT_DONE - nothing special; keep polling.
 * Only the DS_CONFIGURED and DS_STALLED->DS_CONFIGURED paths fall through
 * to the event ring processing loop below the switch.
 */
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t		deq;
	struct dbc_ep		*dep;
	union xhci_trb		*evt;
	u32			ctrl, portsc;
	struct xhci_hcd		*xhci = dbc->xhci;
	bool			update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:

		return EVT_ERR;
	case DS_ENABLED:
		/* Wait for a host connection before going further. */
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		/* RUN set means enumeration finished; clear change bits. */
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			dev_info(dbc->dev, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(xhci);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(xhci);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		/* Leave the stalled state once both halt bits have cleared. */
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(xhci, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}
792
/*
 * Delayed-work handler that polls the DbC: runs the state machine, then
 * registers/unregisters the tty device as instructed. Reschedules itself
 * every tick except on EVT_ERR (the default case below), which stops the
 * polling loop entirely.
 */
static void xhci_dbc_handle_events(struct work_struct *work)
{
	int			ret;
	enum evtreturn		evtr;
	struct xhci_dbc		*dbc;
	unsigned long		flags;
	struct xhci_hcd		*xhci;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
	xhci = dbc->xhci;

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		ret = xhci_dbc_tty_register_device(xhci);
		if (ret) {
			dev_err(dbc->dev, "failed to alloc tty device\n");
			break;
		}

		dev_info(dbc->dev, "DbC now attached to /dev/ttyDBC0\n");
		break;
	case EVT_DISC:
		xhci_dbc_tty_unregister_device(xhci);
		break;
	case EVT_DONE:
		break;
	default:
		/* EVT_ERR: do not reschedule; polling stops here. */
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}
830
831 static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
832 {
833         unsigned long           flags;
834
835         spin_lock_irqsave(&xhci->lock, flags);
836         kfree(xhci->dbc);
837         xhci->dbc = NULL;
838         spin_unlock_irqrestore(&xhci->lock, flags);
839 }
840
/*
 * Locate the debug extended capability, allocate the dbc structure and
 * publish it in xhci->dbc under the xhci lock (racing initializers lose
 * with -EBUSY). Returns -ENODEV when the hardware has no DbC capability,
 * -EBUSY when the DbC is already enabled (e.g. in use by firmware/another
 * agent) or already claimed, -ENOMEM on allocation failure.
 */
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
	u32			reg;
	struct xhci_dbc		*dbc;
	unsigned long		flags;
	void __iomem		*base;
	int			dbc_cap_offs;

	base = &xhci->cap_regs->hc_capbase;
	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return -ENOMEM;

	dbc->regs = base + dbc_cap_offs;

	/* We will avoid using DbC in xhci driver if it's in use. */
	reg = readl(&dbc->regs->control);
	if (reg & DBC_CTRL_DBC_ENABLE) {
		kfree(dbc);
		return -EBUSY;
	}

	/* Publish atomically; another path may have installed a dbc first. */
	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->dbc) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(dbc);
		return -EBUSY;
	}
	xhci->dbc = dbc;
	spin_unlock_irqrestore(&xhci->lock, flags);

	dbc->xhci = xhci;
	dbc->dev = xhci_to_hcd(xhci)->self.sysdev;
	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	return 0;
}
883
884 static ssize_t dbc_show(struct device *dev,
885                         struct device_attribute *attr,
886                         char *buf)
887 {
888         const char              *p;
889         struct xhci_dbc         *dbc;
890         struct xhci_hcd         *xhci;
891
892         xhci = hcd_to_xhci(dev_get_drvdata(dev));
893         dbc = xhci->dbc;
894
895         switch (dbc->state) {
896         case DS_DISABLED:
897                 p = "disabled";
898                 break;
899         case DS_INITIALIZED:
900                 p = "initialized";
901                 break;
902         case DS_ENABLED:
903                 p = "enabled";
904                 break;
905         case DS_CONNECTED:
906                 p = "connected";
907                 break;
908         case DS_CONFIGURED:
909                 p = "configured";
910                 break;
911         case DS_STALLED:
912                 p = "stalled";
913                 break;
914         default:
915                 p = "unknown";
916         }
917
918         return sprintf(buf, "%s\n", p);
919 }
920
921 static ssize_t dbc_store(struct device *dev,
922                          struct device_attribute *attr,
923                          const char *buf, size_t count)
924 {
925         struct xhci_hcd         *xhci;
926
927         xhci = hcd_to_xhci(dev_get_drvdata(dev));
928
929         if (!strncmp(buf, "enable", 6))
930                 xhci_dbc_start(xhci);
931         else if (!strncmp(buf, "disable", 7))
932                 xhci_dbc_stop(xhci);
933         else
934                 return -EINVAL;
935
936         return count;
937 }
938
939 static DEVICE_ATTR_RW(dbc);
940
941 int xhci_dbc_init(struct xhci_hcd *xhci)
942 {
943         int                     ret;
944         struct device           *dev = xhci_to_hcd(xhci)->self.controller;
945
946         ret = xhci_do_dbc_init(xhci);
947         if (ret)
948                 goto init_err3;
949
950         ret = xhci_dbc_tty_register_driver(xhci);
951         if (ret)
952                 goto init_err2;
953
954         ret = device_create_file(dev, &dev_attr_dbc);
955         if (ret)
956                 goto init_err1;
957
958         return 0;
959
960 init_err1:
961         xhci_dbc_tty_unregister_driver();
962 init_err2:
963         xhci_do_dbc_exit(xhci);
964 init_err3:
965         return ret;
966 }
967
968 void xhci_dbc_exit(struct xhci_hcd *xhci)
969 {
970         struct device           *dev = xhci_to_hcd(xhci)->self.controller;
971
972         if (!xhci->dbc)
973                 return;
974
975         device_remove_file(dev, &dev_attr_dbc);
976         xhci_dbc_tty_unregister_driver();
977         xhci_dbc_stop(xhci);
978         xhci_do_dbc_exit(xhci);
979 }
980
981 #ifdef CONFIG_PM
982 int xhci_dbc_suspend(struct xhci_hcd *xhci)
983 {
984         struct xhci_dbc         *dbc = xhci->dbc;
985
986         if (!dbc)
987                 return 0;
988
989         if (dbc->state == DS_CONFIGURED)
990                 dbc->resume_required = 1;
991
992         xhci_dbc_stop(xhci);
993
994         return 0;
995 }
996
997 int xhci_dbc_resume(struct xhci_hcd *xhci)
998 {
999         int                     ret = 0;
1000         struct xhci_dbc         *dbc = xhci->dbc;
1001
1002         if (!dbc)
1003                 return 0;
1004
1005         if (dbc->resume_required) {
1006                 dbc->resume_required = 0;
1007                 xhci_dbc_start(xhci);
1008         }
1009
1010         return ret;
1011 }
1012 #endif /* CONFIG_PM */