// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
	kfree(ctx);
}

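/*
 * Fill in the DbC string descriptor table (string0/LANGID, manufacturer,
 * product and serial) and return the descriptor lengths packed one byte per
 * string, ready to be written into the Info Context length field.
 */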
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor *s_desc;
	u32 string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length = s_desc->bLength;
	string_length <<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* String0 descriptor: LANGID 0x0409 (US English) */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;

	string_length += 4;

	return string_length;
}

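/*
 * Populate the three DbC contexts (Info, bulk OUT endpoint, bulk IN
 * endpoint), point the DbC Context Pointer register at them, and program
 * the devinfo registers with the protocol, vendor, product and revision IDs.
 */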
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context *info;
	struct xhci_ep_ctx *ep_ctx;
	u32 dev_info;
	dma_addr_t deq, dma;
	unsigned int max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info = (struct dbc_info_context *)dbc->ctx->bytes;
	dma = dbc->string_dma;
	info->string0 = cpu_to_le64(dma);
	info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx = dbc_bulkout_ctx(dbc);
	max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq = dbc_bulkout_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx = dbc_bulkin_ctx(dbc);
	deq = dbc_bulkin_enq(dbc);
	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	dev_info = cpu_to_le32((DBC_VENDOR_ID << 16) | DBC_PROTOCOL);
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = cpu_to_le32((DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID);
	writel(dev_info, &dbc->regs->devinfo2);
}

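/*
 * Complete a request: unmap its buffer and invoke its ->complete() callback.
 * The callback is called with dbc->lock dropped, hence the __releases and
 * __acquires annotations below; callers must hold dbc->lock.
 */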
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct dbc_ep *dep = req->dep;
	struct xhci_dbc *dbc = dep->dbc;
	struct device *dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(dep));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb *trb = req->trb;

	trb->generic.field[0] = 0;
	trb->generic.field[1] = 0;
	trb->generic.field[2] = 0;
	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
	struct dbc_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->dep = dep;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = dep->direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb *trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0] = cpu_to_le32(field1);
	trb->generic.field[1] = cpu_to_le32(field2);
	trb->generic.field[2] = cpu_to_le32(field3);
	trb->generic.field[3] = cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}

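/*
 * Queue a single Normal TRB for this request and ring the doorbell.  The
 * TRB's cycle bit is flipped to its final value only after the wmb(), so
 * the controller never sees a partially written TRB.
 */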
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64 addr;
	union xhci_trb *trb;
	unsigned int num_trbs;
	struct xhci_dbc *dbc = dep->dbc;
	struct xhci_ring *ring = dep->ring;
	u32 length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr = le64_to_cpu(req->dma);
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(req->length);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}

static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
	int ret;
	struct xhci_dbc *dbc = dep->dbc;
	struct device *dev = dbc->dev;

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual = 0;
	req->status = -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
		 gfp_t gfp_flags)
{
	unsigned long flags;
	struct xhci_dbc *dbc = dep->dbc;
	int ret = -ESHUTDOWN;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(dep, req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep *dep;

	dep = &dbc->eps[direction];
	dep->dbc = dbc;
	dep->direction = direction;
	dep->ring = direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;

	return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	if (erst->entries)
		dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
				  erst->entries, erst->erst_dma_addr);
	erst->entries = NULL;
}

static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	/* xHCI spec 7.6.9: all three contexts (info, ep-out and ep-in), 64 bytes each */
	ctx->size = 3 * DBC_CONTEXT_SIZE;
	ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}

	return ctx;
}

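/*
 * Allocate everything DbC needs before it can be enabled: the event and bulk
 * transfer rings, the ERST, the context data structure and the string table,
 * then program the event ring registers and populate the contexts.
 */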
static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	int ret;
	dma_addr_t deq;
	u32 string_length;
	struct xhci_dbc *dbc = xhci->dbc;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);
	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	xhci_ring_free(xhci, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	xhci_ring_free(xhci, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	if (dbc->string) {
		dma_free_coherent(dbc->dev, dbc->string_size,
				  dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dev, &dbc->erst);
	xhci_ring_free(xhci, dbc->ring_out);
	xhci_ring_free(xhci, dbc->ring_in);
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
	int ret;
	u32 ctrl;
	struct xhci_dbc *dbc = xhci->dbc;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -1;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}

static int xhci_dbc_start(struct xhci_hcd *xhci)
{
	int ret;
	unsigned long flags;
	struct xhci_dbc *dbc = xhci->dbc;

	WARN_ON(!dbc);

	pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
	int ret;
	unsigned long flags;
	struct xhci_dbc *dbc = xhci->dbc;
	struct dbc_port *port = &dbc->port;

	WARN_ON(!dbc);

	cancel_delayed_work_sync(&dbc->event_work);

	if (port->registered)
		xhci_dbc_tty_unregister_device(dbc);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (!ret) {
		xhci_dbc_mem_cleanup(xhci);
		pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
	}
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32 portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* The port reset change bit is cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

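/*
 * A transfer event carries the DMA address of the TRB it completes; use it
 * to find the matching request on the endpoint's pending list and give the
 * request back with the translated completion status.
 */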
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep *dep;
	struct xhci_ring *ring;
	int ep_id;
	int status;
	u32 comp_code;
	size_t remain_length;
	struct dbc_request *req = NULL, *r;

	comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep = (ep_id == EPID_OUT) ?
			get_out_ep(dbc) : get_in_ep(dbc);
	ring = dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}
	ring->dequeue++;
}

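/*
 * Run the DbC state machine and drain the event ring.  Returns EVT_GSER once
 * the port has just become configured (so the caller can register the tty
 * device), EVT_DISC on disconnect or port reset, EVT_ERR if DbC is not set
 * up yet, and EVT_DONE otherwise.
 */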
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t deq;
	struct dbc_ep *dep;
	union xhci_trb *evt;
	u32 ctrl, portsc;
	bool update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			dev_info(dbc->dev, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}

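/*
 * Delayed-work handler: DbC events are polled rather than interrupt driven
 * here, so the work re-arms itself unless the state machine reports an
 * unrecoverable state.
 */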
static void xhci_dbc_handle_events(struct work_struct *work)
{
	int ret;
	enum evtreturn evtr;
	struct xhci_dbc *dbc;
	unsigned long flags;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		ret = xhci_dbc_tty_register_device(dbc);
		if (ret) {
			dev_err(dbc->dev, "failed to alloc tty device\n");
			break;
		}

		dev_info(dbc->dev, "DbC now attached to /dev/ttyDBC0\n");
		break;
	case EVT_DISC:
		xhci_dbc_tty_unregister_device(dbc);
		break;
	case EVT_DONE:
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);
	kfree(xhci->dbc);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
	u32 reg;
	struct xhci_dbc *dbc;
	unsigned long flags;
	void __iomem *base;
	int dbc_cap_offs;

	base = &xhci->cap_regs->hc_capbase;
	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return -ENOMEM;

	dbc->regs = base + dbc_cap_offs;

	/* Don't touch DbC if it is already in use. */
	reg = readl(&dbc->regs->control);
	if (reg & DBC_CTRL_DBC_ENABLE) {
		kfree(dbc);
		return -EBUSY;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->dbc) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(dbc);
		return -EBUSY;
	}
	xhci->dbc = dbc;
	spin_unlock_irqrestore(&xhci->lock, flags);

	dbc->dev = xhci_to_hcd(xhci)->self.sysdev;
	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	return 0;
}

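/*
 * "dbc" sysfs attribute on the host controller device: reading it reports
 * the current DbC state; writing "enable" or "disable" starts or stops DbC,
 * e.g. "echo enable > /sys/bus/pci/devices/<xhci device>/dbc" (the exact
 * path depends on the platform).
 */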
static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char *p;
	struct xhci_dbc *dbc;
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:
		p = "disabled";
		break;
	case DS_INITIALIZED:
		p = "initialized";
		break;
	case DS_ENABLED:
		p = "enabled";
		break;
	case DS_CONNECTED:
		p = "connected";
		break;
	case DS_CONFIGURED:
		p = "configured";
		break;
	case DS_STALLED:
		p = "stalled";
		break;
	default:
		p = "unknown";
	}

	return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(xhci);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(xhci);
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(dbc);

int xhci_dbc_init(struct xhci_hcd *xhci)
{
	int ret;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	ret = xhci_do_dbc_init(xhci);
	if (ret)
		goto init_err3;

	ret = xhci_dbc_tty_register_driver(xhci);
	if (ret)
		goto init_err2;

	ret = device_create_file(dev, &dev_attr_dbc);
	if (ret)
		goto init_err1;

	return 0;

init_err1:
	xhci_dbc_tty_unregister_driver();
init_err2:
	xhci_do_dbc_exit(xhci);
init_err3:
	return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	if (!xhci->dbc)
		return;

	device_remove_file(dev, &dev_attr_dbc);
	xhci_dbc_tty_unregister_driver();
	xhci_dbc_stop(xhci);
	xhci_do_dbc_exit(xhci);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(xhci);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int ret = 0;
	struct xhci_dbc *dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(xhci);
	}

	return ret;
}
#endif /* CONFIG_PM */