// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"
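
/*
 * Populate the string descriptor table referenced by the DbC info context.
 * The DbC enumerates as a fixed-function debug device, so the string0
 * (LANGID), manufacturer, product and serial descriptors are built once
 * here. The return value packs the length of each descriptor into one
 * byte of the info context's string descriptor length field.
 */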
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
        struct usb_string_descriptor *s_desc;
        u32 string_length;

        /* Serial string: */
        s_desc = (struct usb_string_descriptor *)strings->serial;
        utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength = (strlen(DBC_STRING_SERIAL) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length = s_desc->bLength;
        string_length <<= 8;

        /* Product string: */
        s_desc = (struct usb_string_descriptor *)strings->product;
        utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length += s_desc->bLength;
        string_length <<= 8;

        /* Manufacturer string: */
        s_desc = (struct usb_string_descriptor *)strings->manufacturer;
        utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
                        strlen(DBC_STRING_MANUFACTURER),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length += s_desc->bLength;
        string_length <<= 8;

        /* String0 (LANGID 0x0409, US English): */
        strings->string0[0] = 4;
        strings->string0[1] = USB_DT_STRING;
        strings->string0[2] = 0x09;
        strings->string0[3] = 0x04;

        string_length += 4;

        return string_length;
}
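
/*
 * Initialize the DbC contexts: the info context points at the four string
 * descriptors above, and the two endpoint contexts describe the bulk OUT
 * and bulk IN rings (1024-byte max packet size, burst size taken from the
 * DbC control register).
 */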
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
        struct dbc_info_context *info;
        struct xhci_ep_ctx *ep_ctx;
        u32 dev_info;
        dma_addr_t deq, dma;
        unsigned int max_burst;

        if (!dbc)
                return;

        /* Populate info Context: */
        info = (struct dbc_info_context *)dbc->ctx->bytes;
        dma = dbc->string_dma;
        info->string0 = cpu_to_le64(dma);
        info->manufacturer = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
        info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
        info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
        info->length = cpu_to_le32(string_length);

        /* Populate bulk out endpoint context: */
        ep_ctx = dbc_bulkout_ctx(dbc);
        max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
        deq = dbc_bulkout_enq(dbc);
        ep_ctx->ep_info = 0;
        ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
        ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);

        /* Populate bulk in endpoint context: */
        ep_ctx = dbc_bulkin_ctx(dbc);
        deq = dbc_bulkin_enq(dbc);
        ep_ctx->ep_info = 0;
        ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
        ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);

        /* Set DbC context and info registers: */
        lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

        dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
        writel(dev_info, &dbc->regs->devinfo1);

        dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
        writel(dev_info, &dbc->regs->devinfo2);
}
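
/*
 * Complete a request back to its owner. dbc->lock is dropped around the
 * ->complete() callback (see the __releases/__acquires annotations) so
 * that the completion handler may queue new requests without deadlocking.
 */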
static void xhci_dbc_giveback(struct dbc_request *req, int status)
        __releases(&dbc->lock)
        __acquires(&dbc->lock)
{
        struct dbc_ep *dep = req->dep;
        struct xhci_dbc *dbc = dep->dbc;
        struct xhci_hcd *xhci = dbc->xhci;
        struct device *dev = xhci_to_hcd(dbc->xhci)->self.sysdev;

        list_del_init(&req->list_pending);
        req->trb_dma = 0;
        req->trb = NULL;

        if (req->status == -EINPROGRESS)
                req->status = status;

        trace_xhci_dbc_giveback_request(req);

        dma_unmap_single(dev,
                         req->dma,
                         req->length,
                         dbc_ep_dma_direction(dep));

        /* Give back the transfer request: */
        spin_unlock(&dbc->lock);
        req->complete(xhci, req);
        spin_lock(&dbc->lock);
}
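
/*
 * Abort a pending request: its TRB is rewritten in place as a no-op so
 * the ring stays consistent (only the cycle bit is preserved), then the
 * request is completed with -ESHUTDOWN.
 */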
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
        union xhci_trb *trb = req->trb;

        trb->generic.field[0] = 0;
        trb->generic.field[1] = 0;
        trb->generic.field[2] = 0;
        trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
        trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

        xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
        struct dbc_request *req, *tmp;

        list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
                xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}
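
/*
 * Allocate and free transfer requests. The caller owns the data buffer;
 * a request only carries the buffer pointer, its DMA mapping and the
 * list linkage used while it is pending.
 */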
struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
        struct dbc_request *req;

        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
                return NULL;

        req->dep = dep;
        INIT_LIST_HEAD(&req->list_pending);
        INIT_LIST_HEAD(&req->list_pool);
        req->direction = dep->direction;

        trace_xhci_dbc_alloc_request(req);

        return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
        trace_xhci_dbc_free_request(req);

        kfree(req);
}
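
/*
 * Write one TRB at the ring's enqueue pointer and advance the pointer.
 * When the next TRB is the segment's link TRB, hand its cycle bit over
 * to the controller and wrap the enqueue pointer back to the start of
 * the segment, toggling the producer cycle state.
 */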
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
                   u32 field2, u32 field3, u32 field4)
{
        union xhci_trb *trb, *next;

        trb = ring->enqueue;
        trb->generic.field[0] = cpu_to_le32(field1);
        trb->generic.field[1] = cpu_to_le32(field2);
        trb->generic.field[2] = cpu_to_le32(field3);
        trb->generic.field[3] = cpu_to_le32(field4);

        trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

        ring->num_trbs_free--;
        next = ++(ring->enqueue);
        if (TRB_TYPE_LINK_LE32(next->link.control)) {
                next->link.control ^= cpu_to_le32(TRB_CYCLE);
                ring->enqueue = ring->enq_seg->trbs;
                ring->cycle_state ^= 1;
        }
}
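
/*
 * Queue a single bulk transfer TRB. The TRB is first written with an
 * inverted cycle bit so the controller cannot consume it half-written;
 * after a write barrier the cycle bit is flipped to the correct value
 * and the doorbell is rung for the endpoint's direction.
 */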
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
                                  struct dbc_request *req)
{
        u64 addr;
        union xhci_trb *trb;
        unsigned int num_trbs;
        struct xhci_dbc *dbc = dep->dbc;
        struct xhci_ring *ring = dep->ring;
        u32 length, control, cycle;

        num_trbs = count_trbs(req->dma, req->length);
        WARN_ON(num_trbs != 1);
        if (ring->num_trbs_free < num_trbs)
                return -EBUSY;

        addr = req->dma;
        trb = ring->enqueue;
        cycle = ring->cycle_state;
        length = TRB_LEN(req->length);
        control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

        if (cycle)
                control &= ~TRB_CYCLE;
        else
                control |= TRB_CYCLE;

        req->trb = ring->enqueue;
        req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
        xhci_dbc_queue_trb(ring,
                           lower_32_bits(addr),
                           upper_32_bits(addr),
                           length, control);

        /*
         * Add a barrier between writes of trb fields and flipping
         * of cycle bit:
         */
        wmb();

        if (cycle)
                trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
        else
                trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

        writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

        return 0;
}
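
/*
 * Map the request buffer for DMA and put the transfer on the hardware
 * ring. Must be called with dbc->lock held; dbc_ep_queue() below is the
 * locked wrapper.
 */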
static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
        int ret;
        struct xhci_dbc *dbc = dep->dbc;
        struct device *dev = dbc->dev;

        if (!req->length || !req->buf)
                return -EINVAL;

        req->actual = 0;
        req->status = -EINPROGRESS;

        req->dma = dma_map_single(dev,
                                  req->buf,
                                  req->length,
                                  dbc_ep_dma_direction(dep));
        if (dma_mapping_error(dev, req->dma)) {
                dev_err(dbc->dev, "failed to map buffer\n");
                return -EFAULT;
        }

        ret = xhci_dbc_queue_bulk_tx(dep, req);
        if (ret) {
                dev_err(dbc->dev, "failed to queue trbs\n");
                dma_unmap_single(dev,
                                 req->dma,
                                 req->length,
                                 dbc_ep_dma_direction(dep));
                return -EFAULT;
        }

        list_add_tail(&req->list_pending, &dep->list_pending);

        return 0;
}
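
/*
 * External entry point for submitting a request. Requests are only
 * accepted in the DS_CONFIGURED state; the event work is kicked so that
 * completions are picked up promptly.
 */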
int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
                 gfp_t gfp_flags)
{
        unsigned long flags;
        struct xhci_dbc *dbc = dep->dbc;
        int ret = -ESHUTDOWN;

        spin_lock_irqsave(&dbc->lock, flags);
        if (dbc->state == DS_CONFIGURED)
                ret = dbc_ep_do_queue(dep, req);
        spin_unlock_irqrestore(&dbc->lock, flags);

        mod_delayed_work(system_wq, &dbc->event_work, 0);

        trace_xhci_dbc_queue_request(req);

        return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
{
        struct dbc_ep *dep;
        struct xhci_dbc *dbc = xhci->dbc;

        dep = &dbc->eps[direction];
        dep->dbc = dbc;
        dep->direction = direction;
        dep->ring = direction ? dbc->ring_in : dbc->ring_out;

        INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
{
        xhci_dbc_do_eps_init(xhci, BULK_OUT);
        xhci_dbc_do_eps_init(xhci, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
{
        struct xhci_dbc *dbc = xhci->dbc;

        memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}
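
/*
 * The DbC event ring uses a single segment, so its event ring segment
 * table (ERST) holds exactly one entry.
 */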
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
                          struct xhci_erst *erst, gfp_t flags)
{
        erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
                                           &erst->erst_dma_addr, flags);
        if (!erst->entries)
                return -ENOMEM;

        erst->num_entries = 1;
        erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
        erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
        erst->entries[0].rsvd = 0;

        return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
        if (erst->entries)
                dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
                                  erst->entries, erst->erst_dma_addr);
        erst->entries = NULL;
}
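
/*
 * Allocate everything the DbC needs: event ring, the two transfer rings,
 * the ERST, the context data structure and the string table; then program
 * the ERST registers and populate strings and contexts. Unwinding on
 * failure happens in reverse allocation order via the labels below.
 */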
static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
        int ret;
        dma_addr_t deq;
        u32 string_length;
        struct xhci_dbc *dbc = xhci->dbc;
        struct device *dev = xhci_to_hcd(xhci)->self.controller;

        /* Allocate various rings for events and transfers: */
        dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
        if (!dbc->ring_evt)
                goto evt_fail;

        dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
        if (!dbc->ring_in)
                goto in_fail;

        dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
        if (!dbc->ring_out)
                goto out_fail;

        /* Allocate and populate ERST: */
        ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
        if (ret)
                goto erst_fail;

        /* Allocate context data structure: */
        dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
        if (!dbc->ctx)
                goto ctx_fail;

        /* Allocate the string table: */
        dbc->string_size = sizeof(struct dbc_str_descs);
        dbc->string = dma_alloc_coherent(dev, dbc->string_size,
                                         &dbc->string_dma, flags);
        if (!dbc->string)
                goto string_fail;

        /* Setup ERST register: */
        writel(dbc->erst.erst_size, &dbc->regs->ersts);
        lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
        deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                   dbc->ring_evt->dequeue);
        lo_hi_writeq(deq, &dbc->regs->erdp);

        /* Setup strings and contexts: */
        string_length = xhci_dbc_populate_strings(dbc->string);
        xhci_dbc_init_contexts(dbc, string_length);

        xhci_dbc_eps_init(xhci);
        dbc->state = DS_INITIALIZED;
        return 0;

string_fail:
        xhci_free_container_ctx(xhci, dbc->ctx);
        dbc->ctx = NULL;
ctx_fail:
        dbc_erst_free(dev, &dbc->erst);
erst_fail:
        xhci_ring_free(xhci, dbc->ring_out);
        dbc->ring_out = NULL;
out_fail:
        xhci_ring_free(xhci, dbc->ring_in);
        dbc->ring_in = NULL;
in_fail:
        xhci_ring_free(xhci, dbc->ring_evt);
        dbc->ring_evt = NULL;
evt_fail:
        return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
        struct xhci_dbc *dbc = xhci->dbc;
        struct device *dev = xhci_to_hcd(xhci)->self.controller;

        if (!dbc)
                return;

        xhci_dbc_eps_exit(xhci);

        if (dbc->string) {
                dma_free_coherent(dbc->dev, dbc->string_size,
                                  dbc->string, dbc->string_dma);
                dbc->string = NULL;
        }

        xhci_free_container_ctx(xhci, dbc->ctx);
        dbc->ctx = NULL;

        dbc_erst_free(dev, &dbc->erst);
        xhci_ring_free(xhci, dbc->ring_out);
        xhci_ring_free(xhci, dbc->ring_in);
        xhci_ring_free(xhci, dbc->ring_evt);
        dbc->ring_in = NULL;
        dbc->ring_out = NULL;
        dbc->ring_evt = NULL;
}
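
/*
 * Enable the debug capability. The controller is first handshaked into
 * the disabled state, memory is set up (GFP_ATOMIC, since this runs under
 * dbc->lock), and then the DbC and debug port are enabled and polled
 * until the enable bit sticks.
 */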
static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
        int ret;
        u32 ctrl;
        struct xhci_dbc *dbc = xhci->dbc;

        if (dbc->state != DS_DISABLED)
                return -EINVAL;

        writel(0, &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             0, 1000);
        if (ret)
                return ret;

        ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
        if (ret)
                return ret;

        ctrl = readl(&dbc->regs->control);
        writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
               &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             DBC_CTRL_DBC_ENABLE, 1000);
        if (ret)
                return ret;

        dbc->state = DS_ENABLED;

        return 0;
}

static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
{
        struct xhci_dbc *dbc = xhci->dbc;

        if (dbc->state == DS_DISABLED)
                return -EINVAL;

        writel(0, &dbc->regs->control);
        dbc->state = DS_DISABLED;

        return 0;
}
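
/*
 * Starting the DbC takes a runtime PM reference so the host controller
 * stays powered while debugging; the reference is dropped again on a
 * failed start and in xhci_dbc_stop().
 */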
static int xhci_dbc_start(struct xhci_hcd *xhci)
{
        int ret;
        unsigned long flags;
        struct xhci_dbc *dbc = xhci->dbc;

        WARN_ON(!dbc);

        pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

        spin_lock_irqsave(&dbc->lock, flags);
        ret = xhci_do_dbc_start(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);

        if (ret) {
                pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
                return ret;
        }

        return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
        int ret;
        unsigned long flags;
        struct xhci_dbc *dbc = xhci->dbc;
        struct dbc_port *port = &dbc->port;

        WARN_ON(!dbc);

        cancel_delayed_work_sync(&dbc->event_work);

        if (port->registered)
                xhci_dbc_tty_unregister_device(xhci);

        spin_lock_irqsave(&dbc->lock, flags);
        ret = xhci_do_dbc_stop(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);

        if (!ret) {
                xhci_dbc_mem_cleanup(xhci);
                pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
        }
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
        u32 portsc;

        portsc = readl(&dbc->regs->portsc);
        if (portsc & DBC_PORTSC_CONN_CHANGE)
                dev_info(dbc->dev, "DbC port connect change\n");

        if (portsc & DBC_PORTSC_RESET_CHANGE)
                dev_info(dbc->dev, "DbC port reset change\n");

        if (portsc & DBC_PORTSC_LINK_CHANGE)
                dev_info(dbc->dev, "DbC port link status change\n");

        if (portsc & DBC_PORTSC_CONFIG_CHANGE)
                dev_info(dbc->dev, "DbC config error change\n");

        /* The port reset change bit is cleared elsewhere: */
        writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}
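
/*
 * Handle a transfer event: map the completion code to a status, then
 * match the event back to its request by comparing the event's TRB
 * pointer with the DMA address recorded when the TRB was queued.
 */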
static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
{
        struct dbc_ep *dep;
        struct xhci_ring *ring;
        int ep_id;
        int status;
        u32 comp_code;
        size_t remain_length;
        struct dbc_request *req = NULL, *r;
        struct xhci_dbc *dbc = xhci->dbc;

        comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
        remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
        ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
        dep = (ep_id == EPID_OUT) ?
                        get_out_ep(xhci) : get_in_ep(xhci);
        ring = dep->ring;

        switch (comp_code) {
        case COMP_SUCCESS:
                remain_length = 0;
                fallthrough;
        case COMP_SHORT_PACKET:
                status = 0;
                break;
        case COMP_TRB_ERROR:
        case COMP_BABBLE_DETECTED_ERROR:
        case COMP_USB_TRANSACTION_ERROR:
        case COMP_STALL_ERROR:
                dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
                status = -comp_code;
                break;
        default:
                dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
                status = -comp_code;
                break;
        }

        /* Match the pending request: */
        list_for_each_entry(r, &dep->list_pending, list_pending) {
                if (r->trb_dma == le64_to_cpu(event->trans_event.buffer)) {
                        req = r;
                        break;
                }
        }

        if (!req) {
                dev_warn(dbc->dev, "no matched request\n");
                return;
        }

        trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

        ring->num_trbs_free++;
        req->actual = req->length - remain_length;
        xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
        /* If on the last TRB of the segment go back to the beginning */
        if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
                ring->cycle_state ^= 1;
                ring->dequeue = ring->deq_seg->trbs;
                return;
        }

        ring->dequeue++;
}
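
/*
 * Poll the DbC state machine and its event ring. State transitions follow
 * the port and control registers: DS_ENABLED -> DS_CONNECTED on connect,
 * DS_CONNECTED -> DS_CONFIGURED once the run bit is set, back to
 * DS_ENABLED on unplug or port reset, and DS_STALLED while either bulk
 * transfer ring is halted. Transfer and port events are then drained from
 * the event ring and the dequeue pointer is written back to ERDP.
 */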
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
        dma_addr_t deq;
        struct dbc_ep *dep;
        union xhci_trb *evt;
        u32 ctrl, portsc;
        struct xhci_hcd *xhci = dbc->xhci;
        bool update_erdp = false;

        /* DbC state machine: */
        switch (dbc->state) {
        case DS_DISABLED:
        case DS_INITIALIZED:
                return EVT_ERR;
        case DS_ENABLED:
                portsc = readl(&dbc->regs->portsc);
                if (portsc & DBC_PORTSC_CONN_STATUS) {
                        dbc->state = DS_CONNECTED;
                        dev_info(dbc->dev, "DbC connected\n");
                }

                return EVT_DONE;
        case DS_CONNECTED:
                ctrl = readl(&dbc->regs->control);
                if (ctrl & DBC_CTRL_DBC_RUN) {
                        dbc->state = DS_CONFIGURED;
                        dev_info(dbc->dev, "DbC configured\n");
                        portsc = readl(&dbc->regs->portsc);
                        writel(portsc, &dbc->regs->portsc);
                        return EVT_GSER;
                }

                return EVT_DONE;
        case DS_CONFIGURED:
                /* Handle cable unplug event: */
                portsc = readl(&dbc->regs->portsc);
                if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
                    !(portsc & DBC_PORTSC_CONN_STATUS)) {
                        dev_info(dbc->dev, "DbC cable unplugged\n");
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle debug port reset event: */
                if (portsc & DBC_PORTSC_RESET_CHANGE) {
                        dev_info(dbc->dev, "DbC port reset\n");
                        writel(portsc, &dbc->regs->portsc);
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle endpoint stall event: */
                ctrl = readl(&dbc->regs->control);
                if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
                    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
                        dev_info(dbc->dev, "DbC Endpoint stall\n");
                        dbc->state = DS_STALLED;

                        if (ctrl & DBC_CTRL_HALT_IN_TR) {
                                dep = get_in_ep(xhci);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        if (ctrl & DBC_CTRL_HALT_OUT_TR) {
                                dep = get_out_ep(xhci);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        return EVT_DONE;
                }

                /* Clear DbC run change bit: */
                if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
                        writel(ctrl, &dbc->regs->control);
                        ctrl = readl(&dbc->regs->control);
                }

                break;
        case DS_STALLED:
                ctrl = readl(&dbc->regs->control);
                if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
                    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
                    (ctrl & DBC_CTRL_DBC_RUN)) {
                        dbc->state = DS_CONFIGURED;
                        break;
                }

                return EVT_DONE;
        default:
                dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
                break;
        }

        /* Handle the events in the event ring: */
        evt = dbc->ring_evt->dequeue;
        while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
                        dbc->ring_evt->cycle_state) {
                /*
                 * Add a barrier between reading the cycle flag and any
                 * reads of the event's flags/data below:
                 */
                rmb();

                trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

                switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
                case TRB_TYPE(TRB_PORT_STATUS):
                        dbc_handle_port_status(dbc, evt);
                        break;
                case TRB_TYPE(TRB_TRANSFER):
                        dbc_handle_xfer_event(xhci, evt);
                        break;
                default:
                        break;
                }

                inc_evt_deq(dbc->ring_evt);

                evt = dbc->ring_evt->dequeue;
                update_erdp = true;
        }

        /* Update event ring dequeue pointer: */
        if (update_erdp) {
                deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                           dbc->ring_evt->dequeue);
                lo_hi_writeq(deq, &dbc->regs->erdp);
        }

        return EVT_DONE;
}
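
/*
 * No interrupt is wired up for the DbC in this driver, so events are
 * polled from delayed work that re-arms itself until
 * xhci_dbc_do_handle_events() reports an error.
 */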
static void xhci_dbc_handle_events(struct work_struct *work)
{
        int ret;
        enum evtreturn evtr;
        struct xhci_dbc *dbc;
        unsigned long flags;
        struct xhci_hcd *xhci;

        dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
        xhci = dbc->xhci;

        spin_lock_irqsave(&dbc->lock, flags);
        evtr = xhci_dbc_do_handle_events(dbc);
        spin_unlock_irqrestore(&dbc->lock, flags);

        switch (evtr) {
        case EVT_GSER:
                ret = xhci_dbc_tty_register_device(xhci);
                if (ret) {
                        dev_err(dbc->dev, "failed to alloc tty device\n");
                        break;
                }

                dev_info(dbc->dev, "DbC now attached to /dev/ttyDBC0\n");
                break;
        case EVT_DISC:
                xhci_dbc_tty_unregister_device(xhci);
                break;
        case EVT_DONE:
                break;
        default:
                dev_info(dbc->dev, "stop handling dbc events\n");
                return;
        }

        mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
        unsigned long flags;

        spin_lock_irqsave(&xhci->lock, flags);
        kfree(xhci->dbc);
        xhci->dbc = NULL;
        spin_unlock_irqrestore(&xhci->lock, flags);
}
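
/*
 * Look up the Debug Capability in the controller's extended capability
 * list and allocate the driver state for it. This fails if the
 * capability is absent or some other agent already enabled it.
 */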
static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
        u32 reg;
        struct xhci_dbc *dbc;
        unsigned long flags;
        void __iomem *base;
        int dbc_cap_offs;

        base = &xhci->cap_regs->hc_capbase;
        dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
        if (!dbc_cap_offs)
                return -ENODEV;

        dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
        if (!dbc)
                return -ENOMEM;

        dbc->regs = base + dbc_cap_offs;

        /* Leave the DbC alone if it is already in use: */
        reg = readl(&dbc->regs->control);
        if (reg & DBC_CTRL_DBC_ENABLE) {
                kfree(dbc);
                return -EBUSY;
        }

        spin_lock_irqsave(&xhci->lock, flags);
        if (xhci->dbc) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                kfree(dbc);
                return -EBUSY;
        }
        xhci->dbc = dbc;
        spin_unlock_irqrestore(&xhci->lock, flags);

        dbc->xhci = xhci;
        dbc->dev = xhci_to_hcd(xhci)->self.sysdev;
        INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
        spin_lock_init(&dbc->lock);

        return 0;
}
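
/*
 * The "dbc" sysfs attribute of the xHCI device reports the current state
 * machine state ("disabled", "enabled", "configured", ...) and accepts
 * "enable" or "disable" to start or stop the debug capability.
 */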
static ssize_t dbc_show(struct device *dev,
                        struct device_attribute *attr,
                        char *buf)
{
        const char *p;
        struct xhci_dbc *dbc;
        struct xhci_hcd *xhci;

        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;

        switch (dbc->state) {
        case DS_DISABLED:
                p = "disabled";
                break;
        case DS_INITIALIZED:
                p = "initialized";
                break;
        case DS_ENABLED:
                p = "enabled";
                break;
        case DS_CONNECTED:
                p = "connected";
                break;
        case DS_CONFIGURED:
                p = "configured";
                break;
        case DS_STALLED:
                p = "stalled";
                break;
        default:
                p = "unknown";
        }

        return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
                         struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct xhci_hcd *xhci;

        xhci = hcd_to_xhci(dev_get_drvdata(dev));

        if (!strncmp(buf, "enable", 6))
                xhci_dbc_start(xhci);
        else if (!strncmp(buf, "disable", 7))
                xhci_dbc_stop(xhci);
        else
                return -EINVAL;

        return count;
}

static DEVICE_ATTR_RW(dbc);

int xhci_dbc_init(struct xhci_hcd *xhci)
{
        int ret;
        struct device *dev = xhci_to_hcd(xhci)->self.controller;

        ret = xhci_do_dbc_init(xhci);
        if (ret)
                goto init_err3;

        ret = xhci_dbc_tty_register_driver(xhci);
        if (ret)
                goto init_err2;

        ret = device_create_file(dev, &dev_attr_dbc);
        if (ret)
                goto init_err1;

        return 0;

init_err1:
        xhci_dbc_tty_unregister_driver();
init_err2:
        xhci_do_dbc_exit(xhci);
init_err3:
        return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
        struct device *dev = xhci_to_hcd(xhci)->self.controller;

        if (!xhci->dbc)
                return;

        device_remove_file(dev, &dev_attr_dbc);
        xhci_dbc_tty_unregister_driver();
        xhci_dbc_stop(xhci);
        xhci_do_dbc_exit(xhci);
}
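
/*
 * System suspend stops the DbC; if it was up and configured,
 * resume_required is set so that resume restarts it.
 */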
#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
        struct xhci_dbc *dbc = xhci->dbc;

        if (!dbc)
                return 0;

        if (dbc->state == DS_CONFIGURED)
                dbc->resume_required = 1;

        xhci_dbc_stop(xhci);

        return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
        int ret = 0;
        struct xhci_dbc *dbc = xhci->dbc;

        if (!dbc)
                return 0;

        if (dbc->resume_required) {
                dbc->resume_required = 0;
                xhci_dbc_start(xhci);
        }

        return ret;
}
#endif /* CONFIG_PM */