xhci: dbc: Use dbc structure in the request completion instead of xhci_hcd
[platform/kernel/linux-rpi.git] drivers/usb/host/xhci-dbgcap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

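/*
 * Build the four DbC string descriptors (string0/LANGID, manufacturer,
 * product, serial) in the preallocated string table.  The returned u32
 * packs the descriptor lengths one per byte, low byte first (string0,
 * manufacturer, product, serial), which is the layout the DbC Info
 * Context expects in its string-length field.
 */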
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor	*s_desc;
	u32				string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		= s_desc->bLength;
	string_length		<<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* String0: */
	strings->string0[0]	= 4;
	strings->string0[1]	= USB_DT_STRING;
	strings->string0[2]	= 0x09;
	strings->string0[3]	= 0x04;
	string_length		+= 4;

	return string_length;
}

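/*
 * Program the DbC Info Context (DMA addresses of the four string
 * descriptors plus their packed lengths) and the two bulk endpoint
 * contexts, then point the controller at the context via the dccp
 * register.  1024 is the SuperSpeed bulk max packet size; the burst
 * size is whatever the controller advertises in its control register.
 */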
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context	*info;
	struct xhci_ep_ctx	*ep_ctx;
	u32			dev_info;
	dma_addr_t		deq, dma;
	unsigned int		max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info			= (struct dbc_info_context *)dbc->ctx->bytes;
	dma			= dbc->string_dma;
	info->string0		= cpu_to_le64(dma);
	info->manufacturer	= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length		= cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx			= dbc_bulkout_ctx(dbc);
	max_burst		= DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq			= dbc_bulkout_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx			= dbc_bulkin_ctx(dbc);
	deq			= dbc_bulkin_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	/* writel() handles endianness itself, so no cpu_to_le32() here: */
	dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
	writel(dev_info, &dbc->regs->devinfo2);
}

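/*
 * Complete a request: unlink it, record its final status, unmap its
 * buffer and invoke the completion callback.  dbc->lock is dropped
 * around the callback (hence the __releases/__acquires annotations)
 * so that the callback may queue further requests.
 */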
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct dbc_ep		*dep = req->dep;
	struct xhci_dbc		*dbc = dep->dbc;
	struct device		*dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(dep));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}

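/*
 * Cancel a request that is still on the ring: rewrite its TRB as a
 * no-op, preserving only the cycle bit so ring ownership stays
 * consistent, then give the request back with -ESHUTDOWN.
 */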
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb	*trb = req->trb;

	trb->generic.field[0]	= 0;
	trb->generic.field[1]	= 0;
	trb->generic.field[2]	= 0;
	trb->generic.field[3]	&= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3]	|= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request	*req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
	struct dbc_request	*req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->dep = dep;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = dep->direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

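/*
 * Write one TRB at the enqueue pointer and advance it.  DbC rings are
 * single-segment, so reaching the link TRB means toggling its cycle
 * bit, wrapping back to the start of the segment and flipping the
 * ring's cycle state.
 */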
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb		*trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0]	= cpu_to_le32(field1);
	trb->generic.field[1]	= cpu_to_le32(field2);
	trb->generic.field[2]	= cpu_to_le32(field3);
	trb->generic.field[3]	= cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

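/*
 * Queue a single Normal TRB for the request.  The TRB is first written
 * with an *inverted* cycle bit so the controller ignores it while the
 * other fields land; after the write barrier the cycle bit is flipped
 * to hand ownership to the hardware, and the doorbell is rung.
 */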
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64			addr;
	union xhci_trb		*trb;
	unsigned int		num_trbs;
	struct xhci_dbc		*dbc = dep->dbc;
	struct xhci_ring	*ring = dep->ring;
	u32			length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr	= req->dma;
	trb	= ring->enqueue;
	cycle	= ring->cycle_state;
	length	= TRB_LEN(req->length);
	control	= TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

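/*
 * Map the request buffer for DMA, put a TRB on the transfer ring and
 * track the request on the endpoint's pending list.  Called with
 * dbc->lock held.
 */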
static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
	int			ret;
	struct xhci_dbc		*dbc = dep->dbc;
	struct device		*dev = dbc->dev;

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual		= 0;
	req->status		= -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
		 gfp_t gfp_flags)
{
	unsigned long		flags;
	struct xhci_dbc		*dbc = dep->dbc;
	int			ret = -ESHUTDOWN;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(dep, req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep		*dep;

	dep			= &dbc->eps[direction];
	dep->dbc		= dbc;
	dep->direction		= direction;
	dep->ring		= direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

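/*
 * The DbC event ring is a single segment, so a one-entry Event Ring
 * Segment Table is enough.
 */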
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
		    struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
	return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	if (erst->entries)
		dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
				  erst->entries, erst->erst_dma_addr);
	erst->entries = NULL;
}

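/*
 * Allocate everything DbC needs (event/in/out rings, ERST, context and
 * string table), program the event ring registers and move the state
 * machine to DS_INITIALIZED.  The error labels unwind the allocations
 * in reverse order.
 */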
static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	int			ret;
	dma_addr_t		deq;
	u32			string_length;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	xhci_ring_free(xhci, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	xhci_ring_free(xhci, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	if (dbc->string) {
		dma_free_coherent(dbc->dev, dbc->string_size,
				  dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dev, &dbc->erst);
	xhci_ring_free(xhci, dbc->ring_out);
	xhci_ring_free(xhci, dbc->ring_in);
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

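/*
 * Enable the debug capability.  The first handshake waits for a
 * previous enable bit to clear after zeroing the control register;
 * the second waits for the controller to acknowledge the new enable.
 * Called with dbc->lock held, hence the GFP_ATOMIC allocations.
 */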
static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
	int			ret;
	u32			ctrl;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -1;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}

static int xhci_dbc_start(struct xhci_hcd *xhci)
{
	int			ret;
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;

	WARN_ON(!dbc);

	pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
	int			ret;
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	WARN_ON(!dbc);

	cancel_delayed_work_sync(&dbc->event_work);

	if (port->registered)
		xhci_dbc_tty_unregister_device(dbc);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (!ret) {
		xhci_dbc_mem_cleanup(xhci);
		pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
	}
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32			portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");
	/* The port reset change bit is cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

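/*
 * Handle a transfer event: derive the endpoint from the event's EP ID,
 * then match it to a pending request by comparing the event's buffer
 * field against the DMA address of each queued TRB.
 */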
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep		*dep;
	struct xhci_ring	*ring;
	int			ep_id;
	int			status;
	u32			comp_code;
	size_t			remain_length;
	struct dbc_request	*req = NULL, *r;

	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep		= (ep_id == EPID_OUT) ?
				get_out_ep(dbc) : get_in_ep(dbc);
	ring		= dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
	/* FALLTHROUGH */
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
}

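/*
 * Run the DbC state machine and, once configured, drain the event
 * ring.  The return value tells the work handler what to do next:
 * register the tty device (EVT_GSER), unregister it (EVT_DISC), keep
 * polling (EVT_DONE) or stop (EVT_ERR).
 */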
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t		deq;
	struct dbc_ep		*dep;
	union xhci_trb		*evt;
	u32			ctrl, portsc;
	bool			update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			dev_info(dbc->dev, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

static void xhci_dbc_handle_events(struct work_struct *work)
{
	int			ret;
	enum evtreturn		evtr;
	struct xhci_dbc		*dbc;
	unsigned long		flags;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		ret = xhci_dbc_tty_register_device(dbc);
		if (ret) {
			dev_err(dbc->dev, "failed to alloc tty device\n");
			break;
		}

		dev_info(dbc->dev, "DbC now attached to /dev/ttyDBC0\n");
		break;
	case EVT_DISC:
		xhci_dbc_tty_unregister_device(dbc);
		break;
	case EVT_DONE:
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
	unsigned long		flags;

	spin_lock_irqsave(&xhci->lock, flags);
	kfree(xhci->dbc);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

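/*
 * Locate the DbC extended capability and allocate the xhci_dbc
 * structure.  Bail out with -EBUSY if the capability is already
 * enabled (e.g. by firmware or an early debug user) or already
 * claimed.
 */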
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
	u32			reg;
	struct xhci_dbc		*dbc;
	unsigned long		flags;
	void __iomem		*base;
	int			dbc_cap_offs;

	base = &xhci->cap_regs->hc_capbase;
	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return -ENOMEM;

	dbc->regs = base + dbc_cap_offs;

	/* Don't use the DbC in the xhci driver if it's already in use: */
	reg = readl(&dbc->regs->control);
	if (reg & DBC_CTRL_DBC_ENABLE) {
		kfree(dbc);
		return -EBUSY;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->dbc) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(dbc);
		return -EBUSY;
	}
	xhci->dbc = dbc;
	spin_unlock_irqrestore(&xhci->lock, flags);

	dbc->xhci = xhci;
	dbc->dev = xhci_to_hcd(xhci)->self.sysdev;
	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	return 0;
}

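/*
 * sysfs interface: a "dbc" attribute on the host controller device.
 * Reading it reports the current DbC state; writing "enable" or
 * "disable" starts or stops the debug capability.  A typical session
 * (the PCI address below is only an illustration):
 *
 *   echo enable  > /sys/bus/pci/devices/0000:00:14.0/dbc
 *   cat /sys/bus/pci/devices/0000:00:14.0/dbc
 *   echo disable > /sys/bus/pci/devices/0000:00:14.0/dbc
 */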
static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char		*p;
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:
		p = "disabled";
		break;
	case DS_INITIALIZED:
		p = "initialized";
		break;
	case DS_ENABLED:
		p = "enabled";
		break;
	case DS_CONNECTED:
		p = "connected";
		break;
	case DS_CONFIGURED:
		p = "configured";
		break;
	case DS_STALLED:
		p = "stalled";
		break;
	default:
		p = "unknown";
	}

	return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(xhci);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(xhci);
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(dbc);

int xhci_dbc_init(struct xhci_hcd *xhci)
{
	int			ret;
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	ret = xhci_do_dbc_init(xhci);
	if (ret)
		goto init_err3;

	ret = xhci_dbc_tty_register_driver(xhci);
	if (ret)
		goto init_err2;

	ret = device_create_file(dev, &dev_attr_dbc);
	if (ret)
		goto init_err1;

	return 0;

init_err1:
	xhci_dbc_tty_unregister_driver();
init_err2:
	xhci_do_dbc_exit(xhci);
init_err3:
	return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	if (!xhci->dbc)
		return;

	device_remove_file(dev, &dev_attr_dbc);
	xhci_dbc_tty_unregister_driver();
	xhci_dbc_stop(xhci);
	xhci_do_dbc_exit(xhci);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(xhci);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int			ret = 0;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(xhci);
	}

	return ret;
}
#endif /* CONFIG_PM */