// SPDX-License-Identifier: GPL-2.0
/**
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"
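
/*
 * The xHCI Debug Capability (DbC, xHCI spec section 7.6) lets the host
 * controller present a debug device on one of its own ports, driven by a
 * dedicated register set and independent of the normal xHCI command and
 * event machinery. This file sets up the DbC contexts, rings and string
 * descriptors, runs the DbC state machine from a polled work item, and
 * hands completed requests to the TTY glue in xhci-dbgtty.c.
 */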

static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor	*s_desc;
	u32				string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		= s_desc->bLength;
	string_length		<<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* String0 descriptor, language ID 0x0409 (US English): */
	strings->string0[0]	= 4;
	strings->string0[1]	= USB_DT_STRING;
	strings->string0[2]	= 0x09;
	strings->string0[3]	= 0x04;
	string_length		+= 4;

	return string_length;
}
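
/*
 * After the three shifts above, each byte of the returned value holds one
 * descriptor length: string0 in the low byte, then manufacturer, product
 * and serial. That packed layout is what the DbC Info Context expects in
 * its string-descriptor-length field, written as info->length in
 * xhci_dbc_init_contexts() below.
 */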

static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context	*info;
	struct xhci_ep_ctx	*ep_ctx;
	u32			dev_info;
	dma_addr_t		deq, dma;
	unsigned int		max_burst;

	if (!dbc)
		return;

	/* Populate info Context: */
	info			= (struct dbc_info_context *)dbc->ctx->bytes;
	dma			= dbc->string_dma;
	info->string0		= cpu_to_le64(dma);
	info->manufacturer	= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length		= cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	ep_ctx			= dbc_bulkout_ctx(dbc);
	max_burst		= DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq			= dbc_bulkout_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context: */
	ep_ctx			= dbc_bulkin_ctx(dbc);
	deq			= dbc_bulkin_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	/* writel() does its own endian conversion, so pass CPU-order values: */
	dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
	writel(dev_info, &dbc->regs->devinfo2);
}

static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct dbc_ep		*dep = req->dep;
	struct xhci_dbc		*dbc = dep->dbc;
	struct device		*dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(dep));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}
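
/*
 * xhci_dbc_giveback() drops dbc->lock around the ->complete() callback on
 * purpose: the completion handler (the TTY glue) may requeue the request,
 * which takes the lock again, so invoking it with the lock held would
 * deadlock. The __releases/__acquires annotations document this for sparse.
 */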

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb	*trb = req->trb;

	/* Turn the queued TRB into a no-op, preserving its cycle bit: */
	trb->generic.field[0]	= 0;
	trb->generic.field[1]	= 0;
	trb->generic.field[2]	= 0;
	trb->generic.field[3]	&= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3]	|= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
	struct dbc_request	*req, *tmp;

	list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
		xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
	xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
{
	struct dbc_request	*req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->dep = dep;
	INIT_LIST_HEAD(&req->list_pending);
	INIT_LIST_HEAD(&req->list_pool);
	req->direction = dep->direction;

	trace_xhci_dbc_alloc_request(req);

	return req;
}

void
dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);

	kfree(req);
}

static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb		*trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0]	= cpu_to_le32(field1);
	trb->generic.field[1]	= cpu_to_le32(field2);
	trb->generic.field[2]	= cpu_to_le32(field3);
	trb->generic.field[3]	= cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}
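
/*
 * The transfer rings here are single-segment rings whose last entry is a
 * link TRB pointing back to the top. When the enqueue pointer reaches the
 * link TRB, its cycle bit is handed over to the controller, the enqueue
 * pointer wraps to the start of the segment, and the producer cycle state
 * flips, per the standard xHCI ring rules.
 */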

static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64			addr;
	union xhci_trb		*trb;
	unsigned int		num_trbs;
	struct xhci_dbc		*dbc = dep->dbc;
	struct xhci_ring	*ring = dep->ring;
	u32			length, control, cycle;

	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr	= req->dma;
	trb	= ring->enqueue;
	cycle	= ring->cycle_state;
	length	= TRB_LEN(req->length);
	control	= TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	/*
	 * Queue the TRB with an inverted cycle bit first, so the controller
	 * does not consider it valid yet. control is in CPU order here; the
	 * endian conversion happens in xhci_dbc_queue_trb():
	 */
	if (cycle)
		control &= ~TRB_CYCLE;
	else
		control |= TRB_CYCLE;

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	/* Hand the TRB to the controller by flipping its cycle bit: */
	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}
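
/*
 * A note on the single-TRB assumption above: the requests submitted by this
 * driver are sized to fit one TRB (the TTY glue allocates buffers no larger
 * than the 1024-byte max packet size programmed into the endpoint contexts),
 * so count_trbs() is expected to return 1 and anything else trips the
 * WARN_ON().
 */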

static int
dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
{
	int			ret;
	struct xhci_dbc		*dbc = dep->dbc;
	struct device		*dev = dbc->dev;

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual		= 0;
	req->status		= -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}

int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
		 gfp_t gfp_flags)
{
	unsigned long		flags;
	struct xhci_dbc		*dbc = dep->dbc;
	int			ret = -ESHUTDOWN;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(dep, req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}
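
/*
 * Since this driver polls the DbC event ring instead of taking interrupts,
 * queuing a request also kicks the event work with no delay so that the
 * completion is picked up promptly; xhci_dbc_handle_events() re-arms the
 * work once per jiffy otherwise.
 */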

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
	struct dbc_ep		*dep;

	dep			= &dbc->eps[direction];
	dep->dbc		= dbc;
	dep->direction		= direction;
	dep->ring		= direction ? dbc->ring_in : dbc->ring_out;

	INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
	xhci_dbc_do_eps_init(dbc, BULK_OUT);
	xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
	memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
			  struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;

	return 0;
}
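
/*
 * The DbC event ring uses the simplest possible layout: one segment,
 * described by a single-entry Event Ring Segment Table. Only the entry
 * count, the segment base address and the segment size need to be filled
 * in before the table is handed to the controller via ERSTBA.
 */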

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
	if (erst->entries)
		dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
				  erst->entries, erst->erst_dma_addr);
	erst->entries = NULL;
}

static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	int			ret;
	dma_addr_t		deq;
	u32			string_length;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(struct dbc_str_descs);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register (ERSTSZ takes the number of ERST entries): */
	writel(dbc->erst.num_entries, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

string_fail:
	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	xhci_ring_free(xhci, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	xhci_ring_free(xhci, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	if (dbc->string) {
		dma_free_coherent(dbc->dev, dbc->string_size,
				  dbc->string, dbc->string_dma);
		dbc->string = NULL;
	}

	xhci_free_container_ctx(xhci, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dev, &dbc->erst);
	xhci_ring_free(xhci, dbc->ring_out);
	xhci_ring_free(xhci, dbc->ring_in);
	xhci_ring_free(xhci, dbc->ring_evt);
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}

static int xhci_do_dbc_start(struct xhci_hcd *xhci)
{
	int			ret;
	u32			ctrl;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}
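
/*
 * Both xhci_handshake() calls above poll the control register for up to
 * 1000 microseconds: the first waits for the controller to observe the
 * write of 0 (DbC enable bit clear), the second waits for the enable bit
 * to read back as set after DbC and port enable have been requested.
 */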

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}

static int xhci_dbc_start(struct xhci_hcd *xhci)
{
	int			ret;
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;

	WARN_ON(!dbc);

	pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(xhci);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
		return ret;
	}

	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
	int			ret;
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	WARN_ON(!dbc);

	cancel_delayed_work_sync(&dbc->event_work);

	if (port->registered)
		xhci_dbc_tty_unregister_device(dbc);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (!ret) {
		xhci_dbc_mem_cleanup(xhci);
		pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
	}
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32			portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* Port reset change bit will be cleared elsewhere: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}
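
/*
 * The change bits in PORTSC are write-1-to-clear, so writing back the value
 * just read acknowledges all change events except port reset, which is
 * masked out above and left for the state machine in
 * xhci_dbc_do_handle_events() to observe and clear itself.
 */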

static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep		*dep;
	struct xhci_ring	*ring;
	int			ep_id;
	int			status;
	u32			comp_code;
	size_t			remain_length;
	struct dbc_request	*req = NULL, *r;

	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep		= (ep_id == EPID_OUT) ?
				get_out_ep(dbc) : get_in_ep(dbc);
	ring		= dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		status = -comp_code;
		break;
	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == le64_to_cpu(event->trans_event.buffer)) {
			req = r;
			break;
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
	/* If on the last TRB of the segment go back to the beginning */
	if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
		ring->cycle_state ^= 1;
		ring->dequeue = ring->deq_seg->trbs;
		return;
	}

	ring->dequeue++;
}
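
/*
 * Unlike the transfer rings, the event ring has no link TRB: the controller
 * is the producer there, so the consumer simply wraps at the end of the
 * single segment and toggles its cycle state to keep matching the producer.
 */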

static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t		deq;
	struct dbc_ep		*dep;
	union xhci_trb		*evt;
	u32			ctrl, portsc;
	bool			update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		return EVT_ERR;
	case DS_ENABLED:
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			dev_info(dbc->dev, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}
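
/*
 * Event TRB ownership follows the usual xHCI cycle protocol: a TRB belongs
 * to the consumer only while its cycle bit matches the ring's cycle state,
 * and the rmb() above orders that ownership check before any read of the
 * event's payload. ERDP is written once per batch rather than per event to
 * keep MMIO writes to a minimum.
 */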

static void xhci_dbc_handle_events(struct work_struct *work)
{
	int			ret;
	enum evtreturn		evtr;
	struct xhci_dbc		*dbc;
	unsigned long		flags;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	switch (evtr) {
	case EVT_GSER:
		ret = xhci_dbc_tty_register_device(dbc);
		if (ret) {
			dev_err(dbc->dev, "failed to alloc tty device\n");
			break;
		}

		dev_info(dbc->dev, "DbC now attached to /dev/ttyDBC0\n");
		break;
	case EVT_DISC:
		xhci_dbc_tty_unregister_device(dbc);
		break;
	case EVT_DONE:
		break;
	default:
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}
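
/*
 * The work item above is the DbC's only event source: this driver polls
 * rather than taking interrupts. Ring and register work is done under
 * dbc->lock, the TTY register/unregister transitions happen outside it,
 * and the work re-arms itself one jiffy later unless the state machine
 * reported an error, in which case event handling stops.
 */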

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
	unsigned long		flags;

	spin_lock_irqsave(&xhci->lock, flags);
	kfree(xhci->dbc);
	xhci->dbc = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
	u32			reg;
	struct xhci_dbc		*dbc;
	unsigned long		flags;
	void __iomem		*base;
	int			dbc_cap_offs;

	base = &xhci->cap_regs->hc_capbase;
	dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!dbc_cap_offs)
		return -ENODEV;

	dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
	if (!dbc)
		return -ENOMEM;

	dbc->regs = base + dbc_cap_offs;

	/* Don't touch the DbC if some other agent has already enabled it: */
	reg = readl(&dbc->regs->control);
	if (reg & DBC_CTRL_DBC_ENABLE) {
		kfree(dbc);
		return -EBUSY;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->dbc) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		kfree(dbc);
		return -EBUSY;
	}
	xhci->dbc = dbc;
	spin_unlock_irqrestore(&xhci->lock, flags);

	dbc->xhci = xhci;
	dbc->dev = xhci_to_hcd(xhci)->self.sysdev;
	INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
	spin_lock_init(&dbc->lock);

	return 0;
}

static ssize_t dbc_show(struct device *dev,
			struct device_attribute *attr,
			char *buf)
{
	const char		*p;
	struct xhci_dbc		*dbc;
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));
	dbc = xhci->dbc;

	switch (dbc->state) {
	case DS_DISABLED:	p = "disabled";		break;
	case DS_INITIALIZED:	p = "initialized";	break;
	case DS_ENABLED:	p = "enabled";		break;
	case DS_CONNECTED:	p = "connected";	break;
	case DS_CONFIGURED:	p = "configured";	break;
	case DS_STALLED:	p = "stalled";		break;
	default:		p = "unknown";
	}

	return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
			 struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct xhci_hcd		*xhci;

	xhci = hcd_to_xhci(dev_get_drvdata(dev));

	if (!strncmp(buf, "enable", 6))
		xhci_dbc_start(xhci);
	else if (!strncmp(buf, "disable", 7))
		xhci_dbc_stop(xhci);
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(dbc);
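
/*
 * The attribute pair above is the user-facing switch for the whole feature.
 * Per Documentation/driver-api/usb/usb3-debug-port.rst, the DbC is enabled
 * on a given host controller with something like (the PCI address is just
 * an example):
 *
 *	echo enable > /sys/bus/pci/devices/0000:00:14.0/dbc
 *
 * and reading the same attribute reports the current state machine state.
 */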

int xhci_dbc_init(struct xhci_hcd *xhci)
{
	int			ret;
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	ret = xhci_do_dbc_init(xhci);
	if (ret)
		goto init_err3;

	ret = xhci_dbc_tty_register_driver(xhci);
	if (ret)
		goto init_err2;

	ret = device_create_file(dev, &dev_attr_dbc);
	if (ret)
		goto init_err1;

	return 0;

init_err1:
	xhci_dbc_tty_unregister_driver();
init_err2:
	xhci_do_dbc_exit(xhci);
init_err3:
	return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
	struct device		*dev = xhci_to_hcd(xhci)->self.controller;

	if (!xhci->dbc)
		return;

	device_remove_file(dev, &dev_attr_dbc);
	xhci_dbc_tty_unregister_driver();
	xhci_dbc_stop(xhci);
	xhci_do_dbc_exit(xhci);
}

#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	/*
	 * Suspend tears the DbC down completely; remember whether it was
	 * live so that resume can rebuild and re-enable it from scratch.
	 */
	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(xhci);

	return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
	int			ret = 0;
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	if (dbc->resume_required) {
		dbc->resume_required = 0;
		xhci_dbc_start(xhci);
	}

	return ret;
}
#endif /* CONFIG_PM */