/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					       unsigned int cycle_state,
					       unsigned int max_packet,
					       gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;
	int i;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;

	seg->trbs = dma_pool_zalloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}

	if (max_packet) {
		seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
		if (!seg->bounce_buf) {
			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
			kfree(seg);
			return NULL;
		}
	}
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
	}
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (seg->trbs) {
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	kfree(seg->bounce_buf);
	kfree(seg);
}

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
				struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}
/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, enum xhci_ring_type type)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
				(type == TYPE_ISOC &&
				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
}
/*
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
	struct xhci_segment *next;

	if (!ring || !first || !last)
		return;

	next = ring->enq_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
	xhci_link_segments(xhci, last, next, ring->type);
	ring->num_segs += num_segs;
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
}
/*
 * We need a radix tree for mapping physical addresses of TRBs to which stream
 * ID they belong to.  We need to do this because the host controller won't tell
 * us which stream ring the TRB came from.  We could store the stream ID in an
 * event data TRB, but that doesn't help us for the cancellation case, since the
 * endpoint may stop before it reaches that event data TRB.
 *
 * The radix tree maps the upper portion of the TRB DMA address to a ring
 * segment that has the same upper portion of DMA addresses.  For example, say I
 * have segments of size 1KB, that are always 1KB aligned.  A segment may
 * start at 0x10c91000 and end at 0x10c913f0.  If I shift the DMA address right
 * by 10 bits (dropping the offset within the segment), the key for the stream
 * ID is 0x43244.  I can use the DMA address of the TRB to pass the radix tree
 * a key to get the right stream ID:
 *
 *	0x10c90fff >> 10 = 0x43243
 *	0x10c912c0 >> 10 = 0x43244
 *	0x10c91400 >> 10 = 0x43245
 *
 * Obviously, only those TRBs with DMA addresses that are within the segment
 * will make the radix tree return the stream ID for that ring.
 *
 * Caveats for the radix tree:
 *
 * The radix tree uses an unsigned long as a key.  On 32-bit systems, an
 * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
 * 64-bits.  Since we only request 32-bit DMA addresses, we can use that as the
 * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
 * PCI DMA addresses on a 64-bit system).  There might be a problem on 32-bit
 * extended systems (where the DMA address can be bigger than 32-bits),
 * if we allow the PCI dma mask to be bigger than 32-bits.  So don't do that.
 */
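/*
 * Note: in this driver the segments are actually TRB_SEGMENT_SIZE bytes and
 * TRB_SEGMENT_SIZE aligned, so the key used below is
 * "seg->dma >> TRB_SEGMENT_SHIFT", i.e. the DMA address with the in-segment
 * offset bits dropped; the 1KB / 10-bit numbers above are only an example.
 */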
static int xhci_insert_segment_mapping(struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *seg,
		gfp_t mem_flags)
{
	unsigned long key;
	int ret;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	/* Skip any segments that were already added. */
	if (radix_tree_lookup(trb_address_map, key))
		return 0;

	ret = radix_tree_maybe_preload(mem_flags);
	if (ret)
		return ret;
	ret = radix_tree_insert(trb_address_map,
			key, ring);
	radix_tree_preload_end();
	return ret;
}

static void xhci_remove_segment_mapping(struct radix_tree_root *trb_address_map,
		struct xhci_segment *seg)
{
	unsigned long key;

	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
	if (radix_tree_lookup(trb_address_map, key))
		radix_tree_delete(trb_address_map, key);
}
static int xhci_update_stream_segment_mapping(
		struct radix_tree_root *trb_address_map,
		struct xhci_ring *ring,
		struct xhci_segment *first_seg,
		struct xhci_segment *last_seg,
		gfp_t mem_flags)
{
	struct xhci_segment *seg;
	struct xhci_segment *failed_seg;
	int ret;

	if (WARN_ON_ONCE(trb_address_map == NULL))
		return 0;

	seg = first_seg;
	do {
		ret = xhci_insert_segment_mapping(trb_address_map,
				ring, seg, mem_flags);
		if (ret)
			goto remove_streams;
		if (seg == last_seg)
			return 0;
		seg = seg->next;
	} while (seg != first_seg);

	return 0;

remove_streams:
	failed_seg = seg;
	seg = first_seg;
	do {
		xhci_remove_segment_mapping(trb_address_map, seg);
		if (seg == failed_seg)
			return ret;
		seg = seg->next;
	} while (seg != first_seg);

	return ret;
}
static void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (WARN_ON_ONCE(ring->trb_address_map == NULL))
		return;

	seg = ring->first_seg;
	do {
		xhci_remove_segment_mapping(ring->trb_address_map, seg);
		seg = seg->next;
	} while (seg != ring->first_seg);
}

static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
	return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
			ring->first_seg, ring->last_seg, mem_flags);
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	trace_xhci_ring_free(ring);

	if (ring->first_seg) {
		if (ring->type == TYPE_STREAM)
			xhci_remove_stream_mapping(ring);
		xhci_free_segments_for_ring(xhci, ring->first_seg);
	}

	kfree(ring);
}
static void xhci_initialize_ring_info(struct xhci_ring *ring,
					unsigned int cycle_state)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	ring->cycle_state = cycle_state;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;

	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purpose
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
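/*
 * Worked example for the accounting above (illustrative numbers): with
 * TRBS_PER_SEGMENT == 256 and a two-segment ring, each segment contributes
 * 255 usable TRBs (one slot holds the link TRB) and one more TRB is held
 * back for software accounting, so num_trbs_free = 2 * 255 - 1 = 509.
 */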
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
		if (!next) {
			prev = *first;
			while (prev) {
				next = prev->next;
				xhci_segment_free(xhci, prev);
				prev = next;
			}
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}
/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
{
	struct xhci_ring *ring;
	int ret;

	ring = kzalloc(sizeof *(ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = num_segs;
	ring->bounce_buf_len = max_packet;
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
		return ring;

	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
			&ring->last_seg, num_segs, cycle_state, type,
			max_packet, flags);
	if (ret)
		goto fail;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
	}
	xhci_initialize_ring_info(ring, cycle_state);
	trace_xhci_ring_alloc(ring);
	return ring;

fail:
	kfree(ring);
	return NULL;
}
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index)
{
	int rings_cached;

	rings_cached = virt_dev->num_rings_cached;
	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
		virt_dev->ring_cache[rings_cached] =
			virt_dev->eps[ep_index].ring;
		virt_dev->num_rings_cached++;
		xhci_dbg(xhci, "Cached old ring, "
				"%d ring%s cached\n",
				virt_dev->num_rings_cached,
				(virt_dev->num_rings_cached > 1) ? "s" : "");
	} else {
		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
		xhci_dbg(xhci, "Ring cache full (%d rings), "
				"freeing ring\n",
				virt_dev->num_rings_cached);
	}
	virt_dev->eps[ep_index].ring = NULL;
}
/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
 * pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
			struct xhci_ring *ring, unsigned int cycle_state,
			enum xhci_ring_type type)
{
	struct xhci_segment *seg = ring->first_seg;
	int i;

	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		if (cycle_state == 0) {
			for (i = 0; i < TRBS_PER_SEGMENT; i++)
				seg->trbs[i].link.control |=
					cpu_to_le32(TRB_CYCLE);
		}
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, type);
		seg = seg->next;
	} while (seg != ring->first_seg);
	ring->type = type;
	xhci_initialize_ring_info(ring, cycle_state);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}
/*
 * Expand an existing ring.
 * Allocate a new ring which has same segment numbers and link the two rings.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
				unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment *first;
	struct xhci_segment *last;
	unsigned int num_segs;
	unsigned int num_segs_needed;
	int ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);

	/* Allocate number of segments we needed, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type,
			ring->bounce_buf_len, flags);
	if (ret)
		return -ENOMEM;

	if (ring->type == TYPE_STREAM)
		ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
						ring, first, last, flags);
	if (ret) {
		struct xhci_segment *next;
		do {
			next = first->next;
			xhci_segment_free(xhci, first);
			if (first == last)
				break;
			first = next;
		} while (true);
		return ret;
	}

	xhci_link_rings(xhci, ring, first, last, num_segs);
	trace_xhci_ring_expansion(ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
			"ring expansion succeed, now has %d segments",
			ring->num_segs);

	return 0;
}
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx;

	if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
		return NULL;

	ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}
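/*
 * Sizing note (derived from the code above): a device context is 32 entries
 * (one slot context plus 31 endpoint contexts) of CTX_SIZE bytes each, i.e.
 * 1024 bytes with 32-byte contexts or 2048 with 64-byte contexts; an input
 * context carries one extra entry (the input control context) in front,
 * hence the "+ CTX_SIZE" for XHCI_CTX_TYPE_INPUT.
 */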
static void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	if (!ctx)
		return;
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(
					      struct xhci_container_ctx *ctx)
{
	if (ctx->type != XHCI_CTX_TYPE_INPUT)
		return NULL;

	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
/***************** Streams structures manipulation *************************/

static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		dma_free_coherent(dev, size,
				stream_ctx, dma);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_free(xhci->small_streams_pool,
				stream_ctx, dma);
	else
		return dma_pool_free(xhci->medium_streams_pool,
				stream_ctx, dma);
}
/*
 * The stream context array for each endpoint with bulk streams enabled can
 * vary in size, based on:
 *  - how many streams the endpoint supports,
 *  - the maximum primary stream array size the host controller supports,
 *  - and how many streams the device driver asks for.
 *
 * The stream context array must be a power of 2, and can be as small as
 * 64 bytes or as large as 1MB.
 */
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs, dma_addr_t *dma,
		gfp_t mem_flags)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	size_t size = sizeof(struct xhci_stream_ctx) * num_stream_ctxs;

	if (size > MEDIUM_STREAM_ARRAY_SIZE)
		return dma_alloc_coherent(dev, size,
				dma, mem_flags);
	else if (size <= SMALL_STREAM_ARRAY_SIZE)
		return dma_pool_alloc(xhci->small_streams_pool,
				mem_flags, dma);
	else
		return dma_pool_alloc(xhci->medium_streams_pool,
				mem_flags, dma);
}
struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address)
{
	if (ep->ep_state & EP_HAS_STREAMS)
		return radix_tree_lookup(&ep->stream_info->trb_address_map,
				address >> TRB_SEGMENT_SHIFT);
	return ep->ring;
}

struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep = &dev->eps[ep_index];

	if (stream_id == 0)
		return ep->ring;
	if (!ep->stream_info)
		return NULL;

	if (stream_id > ep->stream_info->num_streams)
		return NULL;
	return ep->stream_info->stream_rings[stream_id];
}
/*
 * Change an endpoint's internal structure so it supports stream IDs.  The
 * number of requested streams includes stream 0, which cannot be used by
 * device drivers.
 *
 * The number of stream contexts in the stream context array may be bigger than
 * the number of streams the driver wants to use.  This is because the number of
 * stream context array entries must be a power of two.
 */
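/*
 * Example (illustrative): a driver asking for 6 streams (including the
 * reserved stream 0) ends up with a stream context array rounded up to the
 * next power of two, i.e. 8 entries, with the unused entries left zeroed.
 */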
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams,
		unsigned int max_packet, gfp_t mem_flags)
{
	struct xhci_stream_info *stream_info;
	u32 cur_stream;
	struct xhci_ring *cur_ring;
	u64 addr;
	int ret;

	xhci_dbg(xhci, "Allocating %u streams and %u "
			"stream context array entries.\n",
			num_streams, num_stream_ctxs);
	if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
		xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
		return NULL;
	}
	xhci->cmd_ring_reserved_trbs++;

	stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
	if (!stream_info)
		goto cleanup_trbs;

	stream_info->num_streams = num_streams;
	stream_info->num_stream_ctxs = num_stream_ctxs;

	/* Initialize the array of virtual pointers to stream rings. */
	stream_info->stream_rings = kzalloc(
			sizeof(struct xhci_ring *)*num_streams,
			mem_flags);
	if (!stream_info->stream_rings)
		goto cleanup_info;

	/* Initialize the array of DMA addresses for stream rings for the HW. */
	stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
			num_stream_ctxs, &stream_info->ctx_array_dma,
			mem_flags);
	if (!stream_info->stream_ctx_array)
		goto cleanup_ctx;
	memset(stream_info->stream_ctx_array, 0,
			sizeof(struct xhci_stream_ctx)*num_stream_ctxs);

	/* Allocate everything needed to free the stream rings later */
	stream_info->free_streams_command =
		xhci_alloc_command(xhci, true, true, mem_flags);
	if (!stream_info->free_streams_command)
		goto cleanup_ctx;

	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);

	/* Allocate rings for all the streams that the driver will use,
	 * and add their segment DMA addresses to the radix tree.
	 * Stream 0 is reserved.
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
					mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
			goto cleanup_rings;
		cur_ring->stream_id = cur_stream;
		cur_ring->trb_address_map = &stream_info->trb_address_map;
		/* Set deq ptr, cycle bit, and stream context type */
		addr = cur_ring->first_seg->dma |
			SCT_FOR_CTX(SCT_PRI_TR) |
			cur_ring->cycle_state;
		stream_info->stream_ctx_array[cur_stream].stream_ring =
			cpu_to_le64(addr);
		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
				cur_stream, (unsigned long long) addr);

		ret = xhci_update_stream_mapping(cur_ring, mem_flags);
		if (ret) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
			goto cleanup_rings;
		}
	}
	/* Leave the other unused stream ring pointers in the stream context
	 * array initialized to zero.  This will cause the xHC to give us an
	 * error if the device asks for a stream ID we don't have setup (if it
	 * was any other way, the host controller would assume the ring is
	 * "empty" and wait forever for data to be queued to that stream ID).
	 */

	return stream_info;

cleanup_rings:
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
	kfree(stream_info->stream_rings);
cleanup_info:
	kfree(stream_info);
cleanup_trbs:
	xhci->cmd_ring_reserved_trbs--;
	return NULL;
}
/*
 * Sets the MaxPStreams field and the Linear Stream Array field.
 * Sets the dequeue pointer to the stream context array.
 */
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info)
{
	u32 max_primary_streams;
	/* MaxPStreams is the number of stream context array entries, not the
	 * number we're actually using.  Must be in 2^(MaxPstreams + 1) format.
	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
	 */
	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			"Setting number of stream ctx array entries to %u",
			1 << (max_primary_streams + 1));
	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
				       | EP_HAS_LSA);
	ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
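/*
 * Worked example for the fls() math above (illustrative): with
 * num_stream_ctxs == 8, fls(8) == 4, so max_primary_streams = 4 - 2 = 2,
 * and the xHC decodes MaxPStreams back as 2^(2 + 1) = 8 array entries.
 */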
/*
 * Sets the MaxPStreams field and the Linear Stream Array field to 0.
 * Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
 * not at the beginning of the ring).
 */
void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep)
{
	dma_addr_t addr;
	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
	ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}
/* Frees all stream contexts associated with the endpoint,
 *
 * Caller should fix the endpoint context streams fields.
 */
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info)
{
	int cur_stream;
	struct xhci_ring *cur_ring;

	if (!stream_info)
		return;

	for (cur_stream = 1; cur_stream < stream_info->num_streams;
			cur_stream++) {
		cur_ring = stream_info->stream_rings[cur_stream];
		if (cur_ring) {
			xhci_ring_free(xhci, cur_ring);
			stream_info->stream_rings[cur_stream] = NULL;
		}
	}
	xhci_free_command(xhci, stream_info->free_streams_command);
	xhci->cmd_ring_reserved_trbs--;
	if (stream_info->stream_ctx_array)
		xhci_free_stream_ctx(xhci,
				stream_info->num_stream_ctxs,
				stream_info->stream_ctx_array,
				stream_info->ctx_array_dma);

	kfree(stream_info->stream_rings);
	kfree(stream_info);
}
/***************** Device context manipulation *************************/

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	setup_timer(&ep->stop_cmd_timer, xhci_stop_endpoint_command_watchdog,
		    (unsigned long)ep);
	ep->xhci = xhci;
}
static void xhci_free_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int slot_id)
{
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	bool slot_found = false;

	/* If the device never made it past the Set Address stage,
	 * it may not have the real_port set correctly.
	 */
	if (virt_dev->real_port == 0 ||
			virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "Bad real port.\n");
		return;
	}

	tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* Multi-TT hubs will have more than one entry */
		if (tt_info->slot_id == slot_id) {
			slot_found = true;
			list_del(&tt_info->tt_list);
			kfree(tt_info);
		} else if (slot_found) {
			break;
		}
	}
}
int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_tt_bw_info *tt_info;
	unsigned int num_ports;
	int i, j;

	if (!tt->multi)
		num_ports = 1;
	else
		num_ports = hdev->maxchild;

	for (i = 0; i < num_ports; i++, tt_info++) {
		struct xhci_interval_bw_table *bw_table;

		tt_info = kzalloc(sizeof(*tt_info), mem_flags);
		if (!tt_info)
			goto free_tts;
		INIT_LIST_HEAD(&tt_info->tt_list);
		list_add(&tt_info->tt_list,
				&xhci->rh_bw[virt_dev->real_port - 1].tts);
		tt_info->slot_id = virt_dev->udev->slot_id;
		if (tt->multi)
			tt_info->ttport = i+1;
		bw_table = &tt_info->bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	return 0;

free_tts:
	xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
	return -ENOMEM;
}
/* All the xhci_tds in the ring's TD list should be freed at this point.
 * Should be called with xhci->lock held if there is any chance the TT lists
 * will be manipulated by the configure endpoint, allocate device, or update
 * hub functions while this function is removing the TT entries from the list.
 */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;
	int old_active_eps = 0;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];

	trace_xhci_free_virt_device(dev);

	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	if (dev->tt_info)
		old_active_eps = dev->tt_info->active_eps;

	for (i = 0; i < 31; i++) {
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);
		if (dev->eps[i].stream_info)
			xhci_free_stream_info(xhci,
					dev->eps[i].stream_info);
		/* Endpoints on the TT/root port lists should have been removed
		 * when usb_disable_device() was called for the device.
		 * We can't drop them anyway, because the udev might have gone
		 * away by this point, and we can't tell what speed it was.
		 */
		if (!list_empty(&dev->eps[i].bw_endpoint_list))
			xhci_warn(xhci, "Slot %u endpoint %u "
					"not removed from BW list!\n",
					slot_id, i);
	}
	/* If this is a hub, free the TT(s) from the TT list */
	xhci_free_tt_info(xhci, dev, slot_id);
	/* If necessary, update the number of active TTs on this root port */
	xhci_update_tt_active_eps(xhci, dev, old_active_eps);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}
/*
 * Free a virt_device structure.
 * If the virt_device added a tt_info (a hub) and has children pointing to
 * that tt_info, then free the child first.  Recursive.
 * We can't rely on udev at this point to find child-parent relationships.
 */
void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct list_head *tt_list_head;
	struct xhci_tt_bw_info *tt_info, *next;
	int i;

	vdev = xhci->devs[slot_id];
	if (!vdev)
		return;

	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
		/* is this a hub device that added a tt_info to the tts list */
		if (tt_info->slot_id == slot_id) {
			/* are any devices using this tt_info? */
			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
				vdev = xhci->devs[i];
				if (vdev && (vdev->tt_info == tt_info))
					xhci_free_virt_devices_depth_first(
						xhci, i);
			}
		}
	}
	/* we are now at a leaf device */
	xhci_free_virt_device(xhci, slot_id);
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	dev->udev = udev;

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));

	trace_xhci_alloc_virt_device(dev);

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_ring *ep_ring;

	virt_dev = xhci->devs[udev->slot_id];
	ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
	ep_ring = virt_dev->eps[0].ring;
	/*
	 * FIXME we don't keep track of the dequeue pointer very well after a
	 * Set TR dequeue pointer, so we're setting the dequeue pointer of the
	 * host to our enqueue pointer.  This should only be called after a
	 * configured device has reset, so all control transfers should have
	 * been completed or cancelled before the reset.
	 */
	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
							ep_ring->enqueue)
				   | ep_ring->cycle_state);
}
/*
 * The xHCI roothub may have ports of differing speeds in any order in the port
 * status registers.  xhci->port_array provides an array of the port speed for
 * each offset into the port status registers.
 *
 * The xHCI hardware wants to know the roothub port number that the USB device
 * is attached to (or the roothub port its ancestor hub is attached to).  All we
 * know is the index of that port under either the USB 2.0 or the USB 3.0
 * roothub, but that doesn't give us the real index into the HW port status
 * registers.  Call xhci_find_raw_port_number() to get real index.
 */
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	struct usb_device *top_dev;
	struct usb_hcd *hcd;

	if (udev->speed >= USB_SPEED_SUPER)
		hcd = xhci->shared_hcd;
	else
		hcd = xhci->main_hcd;

	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;

	return xhci_find_raw_port_number(hcd, top_dev->portnum);
}
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct xhci_slot_ctx *slot_ctx;
	u32 port_num;
	u32 max_packets;
	struct usb_device *top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
	switch (udev->speed) {
	case USB_SPEED_SUPER_PLUS:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
		max_packets = MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
		max_packets = MAX_PACKET(64);
		break;
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
		max_packets = MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
		max_packets = MAX_PACKET(8);
		break;
	case USB_SPEED_WIRELESS:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		return -EINVAL;
	}
	/* Find the root hub port this device is under */
	port_num = xhci_find_real_port_number(xhci, udev);
	if (!port_num)
		return -EINVAL;
	slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
	/* Set the port number in the virtual_device to the faked port number */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->fake_port = top_dev->portnum;
	dev->real_port = port_num;
	xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
	xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);

	/* Find the right bandwidth table that this device will be a part of.
	 * If this is a full speed device attached directly to a root port (or a
	 * descendant of one), it counts as a primary bandwidth domain, not a
	 * secondary bandwidth domain under a TT.  An xhci_tt_info structure
	 * will never be created for the HS root hub.
	 */
	if (!udev->tt || !udev->tt->hub->parent) {
		dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
	} else {
		struct xhci_root_port_bw_info *rh_bw;
		struct xhci_tt_bw_info *tt_bw;

		rh_bw = &xhci->rh_bw[port_num - 1];
		/* Find the right TT. */
		list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
			if (tt_bw->slot_id != udev->tt->hub->slot_id)
				continue;

			if (!dev->udev->tt->multi ||
					(udev->tt->multi &&
					 tt_bw->ttport == dev->udev->ttport)) {
				dev->bw_table = &tt_bw->bw_table;
				dev->tt_info = tt_bw;
				break;
			}
		}
		if (!dev->tt_info)
			xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
	}

	/* Is this a LS/FS device under an external HS hub? */
	if (udev->tt && udev->tt->hub->parent) {
		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
						(udev->ttport << 8));
		if (udev->tt->multi)
			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));

	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
					 max_packets);

	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
				   dev->eps[0].ring->cycle_state);

	trace_xhci_setup_addressable_virt_device(dev);

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}
/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
			 "ep %#x - rounding interval to %d %sframes\n",
			 ep->desc.bEndpointAddress,
			 1 << interval,
			 udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes.  We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}
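/*
 * Worked example (illustrative): a full-speed isoc endpoint with
 * bInterval == 4 means 2^(4 - 1) = 8 frames.  The code above yields
 * interval = 3, then adds 3 for the frame-to-microframe conversion:
 * 2^6 * 125us = 8ms, which is indeed 8 frames.
 */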
/*
 * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
		struct usb_host_endpoint *ep, unsigned int desc_interval,
		unsigned int min_exponent, unsigned int max_exponent)
{
	unsigned int interval;

	interval = fls(desc_interval) - 1;
	interval = clamp_val(interval, min_exponent, max_exponent);
	if ((1 << interval) != desc_interval)
		dev_dbg(&udev->dev,
			"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
			ep->desc.bEndpointAddress,
			1 << interval,
			desc_interval);

	return interval;
}

static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (ep->desc.bInterval == 0)
		return 0;
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval, 0, 15);
}
static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	return xhci_microframes_to_exponent(udev, ep,
			ep->desc.bInterval * 8, 3, 10);
}
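/*
 * Worked example (illustrative): a low-speed interrupt endpoint with
 * bInterval == 10 (frames) becomes 80 microframes; fls(80) - 1 = 6, within
 * the clamp range [3, 10], so the endpoint is serviced every
 * 2^6 * 125us = 8ms, rounded down from the requested 10ms.
 */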
/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval field
 * is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
 * is set to 0.
 */
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
		    usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = xhci_parse_microframe_interval(udev, ep);
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER_PLUS:
	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
		    usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return interval;
}
/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed < USB_SPEED_SUPER ||
			!usb_endpoint_xfer_isoc(&ep->desc))
		return 0;
	return ep->ss_ep_comp.bmAttributes;
}
static u32 xhci_get_endpoint_max_burst(struct usb_device *udev,
				       struct usb_host_endpoint *ep)
{
	/* Super speed and Plus have max burst in ep companion desc */
	if (udev->speed >= USB_SPEED_SUPER)
		return ep->ss_ep_comp.bMaxBurst;

	if (udev->speed == USB_SPEED_HIGH &&
	    (usb_endpoint_xfer_isoc(&ep->desc) ||
	     usb_endpoint_xfer_int(&ep->desc)))
		return usb_endpoint_maxp_mult(&ep->desc) - 1;

	return 0;
}
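/*
 * Worked example (illustrative): a high-speed isoc endpoint whose
 * wMaxPacketSize advertises 2 additional transaction opportunities per
 * microframe has usb_endpoint_maxp_mult() == 3, so Max Burst is stored as
 * 3 - 1 = 2, because the endpoint context field is zero-based.
 */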
static u32 xhci_get_endpoint_type(struct usb_host_endpoint *ep)
{
	int in;

	in = usb_endpoint_dir_in(&ep->desc);

	switch (usb_endpoint_type(&ep->desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		return CTRL_EP;
	case USB_ENDPOINT_XFER_BULK:
		return in ? BULK_IN_EP : BULK_OUT_EP;
	case USB_ENDPOINT_XFER_ISOC:
		return in ? ISOC_IN_EP : ISOC_OUT_EP;
	case USB_ENDPOINT_XFER_INT:
		return in ? INT_IN_EP : INT_OUT_EP;
	}
	return 0;
}
/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static u32 xhci_get_max_esit_payload(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	/* SuperSpeedPlus Isoc ep sending over 48k per esit */
	if ((udev->speed >= USB_SPEED_SUPER_PLUS) &&
	    USB_SS_SSP_ISOC_COMP(ep->ss_ep_comp.bmAttributes))
		return le32_to_cpu(ep->ssp_isoc_ep_comp.dwBytesPerInterval);
	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per esit */
	else if (udev->speed >= USB_SPEED_SUPER)
		return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);

	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = usb_endpoint_maxp_mult(&ep->desc);
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * max_burst;
}
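/*
 * Worked example (illustrative): for a high-speed isoc endpoint with
 * usb_endpoint_maxp() == 1024 and usb_endpoint_maxp_mult() == 3, the max
 * ESIT payload is 1024 * 3 = 3072 bytes per service interval.
 */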
/* Set up an endpoint with one ring segment.  Do not allocate stream rings.
 * Drivers will have to call usb_alloc_streams() to do that.
 */
int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	enum xhci_ring_type ring_type;
	u32 max_esit_payload;
	u32 endpoint_type;
	unsigned int max_burst;
	unsigned int interval;
	unsigned int mult;
	unsigned int avg_trb_len;
	unsigned int err_count = 0;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	endpoint_type = xhci_get_endpoint_type(ep);
	if (!endpoint_type)
		return -EINVAL;

	ring_type = usb_endpoint_type(&ep->desc);

	/*
	 * Get values to fill the endpoint context, mostly from ep descriptor.
	 * The average TRB buffer length for bulk endpoints is unclear as we
	 * have no clue on scatter gather list entry size.  For Isoc and Int,
	 * set it to max available.  See xHCI 1.1 spec 4.14.1.1 for details.
	 */
	max_esit_payload = xhci_get_max_esit_payload(udev, ep);
	interval = xhci_get_endpoint_interval(udev, ep);
	mult = xhci_get_endpoint_mult(udev, ep);
	max_packet = usb_endpoint_maxp(&ep->desc);
	max_burst = xhci_get_endpoint_max_burst(udev, ep);
	avg_trb_len = max_esit_payload;

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc, set CErr = 3 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		err_count = 3;
	/* Some devices get this wrong */
	if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
		max_packet = 512;
	/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
		avg_trb_len = 8;
	/* xhci 1.1 with LEC support doesn't use mult field, use RsvdZ */
	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
		mult = 0;

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->num_rings_cached--;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
					1, ring_type);
	}
	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;

	/* Fill the endpoint context */
	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
				      EP_INTERVAL(interval) |
				      EP_MULT(mult));
	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
				       MAX_PACKET(max_packet) |
				       MAX_BURST(max_burst) |
				       ERROR_COUNT(err_count));
	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
				  ep_ring->cycle_state);

	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
				      EP_AVG_TRB_LENGTH(avg_trb_len));

	/* FIXME Debug endpoint context */
	return 0;
}
void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}
void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
	bw_info->ep_interval = 0;
	bw_info->mult = 0;
	bw_info->num_packets = 0;
	bw_info->max_packet_size = 0;
	bw_info->type = 0;
	bw_info->max_esit_payload = 0;
}
void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev)
{
	struct xhci_bw_info *bw_info;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_type;
	int i;

	for (i = 1; i < 31; i++) {
		bw_info = &virt_dev->eps[i].bw_info;

		/* We can't tell what endpoint type is being dropped, but
		 * unconditionally clearing the bandwidth info for non-periodic
		 * endpoints should be harmless because the info will never be
		 * set in the first place.
		 */
		if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
			/* Dropped endpoint */
			xhci_clear_endpoint_bw_info(bw_info);
			continue;
		}

		if (EP_IS_ADDED(ctrl_ctx, i)) {
			ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
			ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));

			/* Ignore non-periodic endpoints */
			if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
					ep_type != ISOC_IN_EP &&
					ep_type != INT_IN_EP)
				continue;

			/* Added or changed endpoint */
			bw_info->ep_interval = CTX_TO_EP_INTERVAL(
					le32_to_cpu(ep_ctx->ep_info));
			/* Number of packets and mult are zero-based in the
			 * input context, but we want one-based for the
			 * interval table.
			 */
			bw_info->mult = CTX_TO_EP_MULT(
					le32_to_cpu(ep_ctx->ep_info)) + 1;
			bw_info->num_packets = CTX_TO_MAX_BURST(
					le32_to_cpu(ep_ctx->ep_info2)) + 1;
			bw_info->max_packet_size = MAX_PACKET_DECODED(
					le32_to_cpu(ep_ctx->ep_info2));
			bw_info->type = ep_type;
			bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
					le32_to_cpu(ep_ctx->tx_info));
		}
	}
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
 * Useful when you want to change one particular aspect of the endpoint and then
 * issue a configure endpoint command.  Only the context entries field matters,
 * but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocating %d scratchpad buffers", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
				     num_sp * sizeof(u64),
				     &xhci->scratchpad->sp_dma, flags);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);

	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
				flags);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		dma_free_coherent(dev, xhci->page_size,
				    xhci->scratchpad->sp_buffers[i],
				    xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

fail_sp3:
	dma_free_coherent(dev, num_sp * sizeof(u64),
			    xhci->scratchpad->sp_array,
			    xhci->scratchpad->sp_dma);

fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}
static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		dma_free_coherent(dev, xhci->page_size,
				    xhci->scratchpad->sp_buffers[i],
				    xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	dma_free_coherent(dev, num_sp * sizeof(u64),
			    xhci->scratchpad->sp_array,
			    xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	if (allocate_in_ctx) {
		command->in_ctx =
			xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
					mem_flags);
		if (!command->in_ctx) {
			kfree(command);
			return NULL;
		}
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}
void xhci_urb_free_priv(struct urb_priv *urb_priv)
{
	kfree(urb_priv);
}

void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci,
			command->in_ctx);
	kfree(command->completion);
	kfree(command);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int size;
	int i, j, num_ports;

	cancel_delayed_work_sync(&xhci->cmd_timer);

	/* Free the Event Ring Segment Table and the actual Event Ring */
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		dma_free_coherent(dev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");

	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
	xhci->lpm_command = NULL;
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
	xhci_cleanup_command_queue(xhci);

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	for (i = 0; i < num_ports && xhci->rh_bw; i++) {
		struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
			struct list_head *ep = &bwt->interval_bw[j].endpoints;
			while (!list_empty(ep))
				list_del_init(ep->next);
		}
	}

	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
		xhci_free_virt_devices_depth_first(xhci, i);

	dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");

	dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");

	dma_pool_destroy(xhci->small_streams_pool);
	xhci->small_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed small stream array pool");

	dma_pool_destroy(xhci->medium_streams_pool);
	xhci->medium_streams_pool = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Freed medium stream array pool");

	if (xhci->dcbaa)
		dma_free_coherent(dev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);

	if (!xhci->rh_bw)
		goto no_bw;

	for (i = 0; i < num_ports; i++) {
		struct xhci_tt_bw_info *tt, *n;
		list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
			list_del(&tt->tt_list);
			kfree(tt);
		}
	}

no_bw:
	xhci->cmd_ring_reserved_trbs = 0;
	xhci->num_usb2_ports = 0;
	xhci->num_usb3_ports = 0;
	xhci->num_active_eps = 0;
	kfree(xhci->usb2_ports);
	kfree(xhci->usb3_ports);
	kfree(xhci->port_array);
	kfree(xhci->rh_bw);
	kfree(xhci->ext_caps);

	xhci->usb2_ports = NULL;
	xhci->usb3_ports = NULL;
	xhci->port_array = NULL;
	xhci->rh_bw = NULL;
	xhci->ext_caps = NULL;

	xhci->page_size = 0;
	xhci->page_shift = 0;
	xhci->bus_state[0].bus_suspended = 0;
	xhci->bus_state[1].bus_suspended = 0;
}
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
			  true);
		return -1;
	}
	return 0;
}
1973 /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
1974 static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
1977 dma_addr_t input_dma;
1978 struct xhci_segment *result_seg;
1979 } simple_test_vector [] = {
1980 /* A zeroed DMA field should fail */
1982 /* One TRB before the ring start should fail */
1983 { xhci->event_ring->first_seg->dma - 16, NULL },
1984 /* One byte before the ring start should fail */
1985 { xhci->event_ring->first_seg->dma - 1, NULL },
1986 /* Starting TRB should succeed */
1987 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
1988 /* Ending TRB should succeed */
1989 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
1990 xhci->event_ring->first_seg },
1991 /* One byte after the ring end should fail */
1992 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
1993 /* One TRB after the ring end should fail */
1994 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
1995 /* An address of all ones should fail */
1996 { (dma_addr_t) (~0), NULL },
	struct {
		struct xhci_segment	*input_seg;
		union xhci_trb		*start_trb;
		union xhci_trb		*end_trb;
		dma_addr_t		input_dma;
		struct xhci_segment	*result_seg;
	} complex_test_vector [] = {
		/* Test feeding a valid DMA address from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma +
				(TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};
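	/*
	 * The "wrapped TD" cases above model a TD whose start TRB sits near
	 * the end of the segment and whose end TRB has wrapped past the link
	 * TRB back to the top of the same segment.  trb_in_td() must then
	 * treat the middle of the segment (between end TRB 1 and start TRB
	 * TRBS_PER_SEGMENT - 3) as outside the TD, hence the NULL results.
	 */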
	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}
	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write event ring dequeue pointer, "
			"preserving EHB bit");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}
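/*
 * Register layout note (ERDP, xHCI spec section 5.5.2.3.3): ERST_PTR_MASK
 * covers the register's low flag bits (DESI and EHB) rather than address
 * bits.  "temp" keeps those flags from the current value, with EHB forced
 * to 0 in the written value: EHB is RW1C, so writing 1 back would
 * acknowledge and clear it while events may still be pending.
 */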
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, int max_caps)
{
	u32 temp, port_offset, port_count;
	int i;
	u8 major_revision;
	struct xhci_hub *rhub;

	temp = readl(addr);
	major_revision = XHCI_EXT_PORT_MAJOR(temp);

	if (major_revision == 0x03) {
		rhub = &xhci->usb3_rhub;
	} else if (major_revision <= 0x02) {
		rhub = &xhci->usb2_rhub;
	} else {
		xhci_warn(xhci, "Ignoring unknown port speed, "
				"Ext Cap %p, revision = 0x%x\n",
				addr, major_revision);
		/* Ignoring port protocol we can't understand.  FIXME */
		return;
	}
	rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp);
	rhub->min_rev = XHCI_EXT_PORT_MINOR(temp);
	/* Port offset and count in the third dword, see section 7.2 */
	temp = readl(addr + 2);
	port_offset = XHCI_EXT_PORT_OFF(temp);
	port_count = XHCI_EXT_PORT_COUNT(temp);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Ext Cap %p, port offset = %u, "
			"count = %u, revision = 0x%x",
			addr, port_offset, port_count, major_revision);
	/* Port count includes the current port offset */
	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
		/* WTF? "Valid values are ‘1’ to MaxPorts" */
		return;
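	/*
	 * Example of the range check: with num_ports = 8, a capability
	 * declaring port_offset = 5 and port_count = 4 covers ports 5..8
	 * and passes (5 + 4 - 1 = 8 <= 8); port_offset = 6 with the same
	 * count would reach a nonexistent port 9 and is ignored.
	 */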
	rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
	if (rhub->psi_count) {
		rhub->psi = kcalloc(rhub->psi_count, sizeof(*rhub->psi),
				    GFP_KERNEL);
		if (!rhub->psi)
			rhub->psi_count = 0;

		rhub->psi_uid_count++;
		for (i = 0; i < rhub->psi_count; i++) {
			rhub->psi[i] = readl(addr + 4 + i);

			/* count unique ID values, two consecutive entries can
			 * have the same ID if link is asymmetric
			 */
			if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
				  XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
				rhub->psi_uid_count++;

			xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
				  XHCI_EXT_PORT_PSIV(rhub->psi[i]),
				  XHCI_EXT_PORT_PSIE(rhub->psi[i]),
				  XHCI_EXT_PORT_PLT(rhub->psi[i]),
				  XHCI_EXT_PORT_PFD(rhub->psi[i]),
				  XHCI_EXT_PORT_LP(rhub->psi[i]),
				  XHCI_EXT_PORT_PSIM(rhub->psi[i]));
		}
	}
	/* cache usb2 port capabilities */
	if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
		xhci->ext_caps[xhci->num_ext_caps++] = temp;

	/* Check the host's USB2 LPM capability */
	if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
			(temp & XHCI_L1C)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 0.96: support USB2 software lpm");
		xhci->sw_lpm_support = 1;
	}

	if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI 1.0: support USB2 software lpm");
		xhci->sw_lpm_support = 1;
		if (temp & XHCI_HLC) {
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"xHCI 1.0: support USB2 hardware lpm");
			xhci->hw_lpm_support = 1;
		}
	}
	/* Port offset in the register is one-based; convert to an array index */
	port_offset--;
	for (i = port_offset; i < (port_offset + port_count); i++) {
		/* Duplicate entry.  Ignore the port if the revisions differ. */
		if (xhci->port_array[i] != 0) {
			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
					" port %u\n", addr, i);
			xhci_warn(xhci, "Port was marked as USB %u, "
					"duplicated as USB %u\n",
					xhci->port_array[i], major_revision);
			/* Only adjust the roothub port counts if we haven't
			 * found a similar duplicate.
			 */
			if (xhci->port_array[i] != major_revision &&
					xhci->port_array[i] != DUPLICATE_ENTRY) {
				if (xhci->port_array[i] == 0x03)
					xhci->num_usb3_ports--;
				else
					xhci->num_usb2_ports--;
				xhci->port_array[i] = DUPLICATE_ENTRY;
			}
			/* FIXME: Should we disable the port? */
			continue;
		}
		xhci->port_array[i] = major_revision;
		if (major_revision == 0x03)
			xhci->num_usb3_ports++;
		else
			xhci->num_usb2_ports++;
	}
	/* FIXME: Should we disable ports not in the Extended Capabilities? */
}
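/*
 * For reference, the Supported Protocol Capability parsed above is laid out
 * as consecutive 32-bit registers (spec section 7.2): dword 0 holds the
 * capability ID and the major/minor protocol revision, dword 2 holds the
 * compatible port offset, port count, and PSI count, and any Protocol Speed
 * ID dwords follow starting at addr + 4, which is where the PSI loop reads.
 */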
/*
 * Scan the Extended Capabilities for the "Supported Protocol Capabilities"
 * that specify what speeds each port is supposed to be.  We can't count on
 * the port speed bits in the PORTSC register being correct until a device is
 * connected, but we need to set up the two fake roothubs with the correct
 * number of USB 3.0 and USB 2.0 ports at host controller initialization time.
 */
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
	void __iomem *base;
	u32 offset;
	unsigned int num_ports;
	int i, j, port_index;
	int cap_count = 0;
	u32 cap_start;

	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
	if (!xhci->port_array)
		return -ENOMEM;

	xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
	if (!xhci->rh_bw)
		return -ENOMEM;
	for (i = 0; i < num_ports; i++) {
		struct xhci_interval_bw_table *bw_table;

		INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
		bw_table = &xhci->rh_bw[i].bw_table;
		for (j = 0; j < XHCI_MAX_INTERVAL; j++)
			INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
	}
	base = &xhci->cap_regs->hc_capbase;

	cap_start = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_PROTOCOL);
	if (!cap_start) {
		xhci_err(xhci, "No Extended Capability registers, unable to set up roothub\n");
		return -ENODEV;
	}

	offset = cap_start;
	/* count extended protocol capability entries for later caching */
	while (offset) {
		cap_count++;
		offset = xhci_find_next_ext_cap(base, offset,
				XHCI_EXT_CAPS_PROTOCOL);
	}

	xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
	if (!xhci->ext_caps)
		return -ENOMEM;

	offset = cap_start;
	while (offset) {
		xhci_add_in_port(xhci, num_ports, base + offset, cap_count);
		if (xhci->num_usb2_ports + xhci->num_usb3_ports == num_ports)
			break;
		offset = xhci_find_next_ext_cap(base, offset,
				XHCI_EXT_CAPS_PROTOCOL);
	}

	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
		xhci_warn(xhci, "No ports on the roothubs?\n");
		return -ENODEV;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Found %u USB 2.0 ports and %u USB 3.0 ports.",
			xhci->num_usb2_ports, xhci->num_usb3_ports);
	/* Place limits on the number of roothub ports so that the hub
	 * descriptors aren't longer than the USB core will allocate.
	 */
	if (xhci->num_usb3_ports > 15) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 3.0 roothub ports to 15.");
		xhci->num_usb3_ports = 15;
	}
	if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Limiting USB 2.0 roothub ports to %u.",
				USB_MAXCHILDREN);
		xhci->num_usb2_ports = USB_MAXCHILDREN;
	}

	/*
	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
	 * Not sure how the USB core will handle a hub with no ports...
	 */
	if (xhci->num_usb2_ports) {
		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
				xhci->num_usb2_ports, flags);
		if (!xhci->usb2_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++) {
			if (xhci->port_array[i] == 0x03 ||
					xhci->port_array[i] == 0 ||
					xhci->port_array[i] == DUPLICATE_ENTRY)
				continue;

			xhci->usb2_ports[port_index] =
				&xhci->op_regs->port_status_base +
				NUM_PORT_REGS*i;
			xhci_dbg_trace(xhci, trace_xhci_dbg_init,
					"USB 2.0 port at index %u, "
					"addr = %p", i,
					xhci->usb2_ports[port_index]);
			port_index++;
			if (port_index == xhci->num_usb2_ports)
				break;
		}
	}
	if (xhci->num_usb3_ports) {
		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
				xhci->num_usb3_ports, flags);
		if (!xhci->usb3_ports)
			return -ENOMEM;

		port_index = 0;
		for (i = 0; i < num_ports; i++)
			if (xhci->port_array[i] == 0x03) {
				xhci->usb3_ports[port_index] =
					&xhci->op_regs->port_status_base +
					NUM_PORT_REGS*i;
				xhci_dbg_trace(xhci, trace_xhci_dbg_init,
						"USB 3.0 port at index %u, "
						"addr = %p", i,
						xhci->usb3_ports[port_index]);
				port_index++;
				if (port_index == xhci->num_usb3_ports)
					break;
			}
	}
	return 0;
}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
	unsigned int	val, val2;
	u64		val_64;
	struct xhci_segment	*seg;
	u32 page_size, temp;
	int i;

	INIT_LIST_HEAD(&xhci->cmd_list);

	/* init command timeout work */
	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
	init_completion(&xhci->cmd_ring_stop_completion);

	page_size = readl(&xhci->op_regs->page_size);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size register = 0x%x", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Supported page size of %iK", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"HCD page size set to %iK", xhci->page_size / 1024);
	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// xHC can handle at most %d device slots.", val);
	val2 = readl(&xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting Max device slots reg = 0x%x.", val);
	writel(val, &xhci->op_regs->config_reg);

	/*
	 * Section 6.1 - the Device Context Base Address Array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
			flags);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Device context base array address = 0x%llx (DMA), %p (virt)",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
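	/*
	 * Note that entry 0 of the DCBAA is not a device slot: if the
	 * controller requests scratchpad buffers, scratchpad_alloc() (called
	 * later in this function) points dcbaa->dev_context_ptrs[0] at the
	 * scratchpad buffer array; device slot contexts start at index 1.
	 */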
	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments
	 * and our use of dma addresses in the trb_address_map radix tree needs
	 * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Linear stream context arrays don't have any boundary restrictions,
	 * and only need to be 16-byte aligned.
	 */
	xhci->small_streams_pool =
		dma_pool_create("xHCI 256 byte stream ctx arrays",
			dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
	xhci->medium_streams_pool =
		dma_pool_create("xHCI 1KB stream ctx arrays",
			dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
	/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
	 * will be allocated with dma_alloc_coherent()
	 */

	if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
		goto fail;
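	/*
	 * These pool sizes mirror how stream context arrays are allocated
	 * elsewhere in this file: arrays up to SMALL_STREAM_ARRAY_SIZE bytes
	 * come from the 256-byte pool, larger arrays up to
	 * MEDIUM_STREAM_ARRAY_SIZE from the 1KB pool, and anything bigger
	 * falls back to dma_alloc_coherent(), as the comment above notes.
	 */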
	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Allocated command ring at %p", xhci->cmd_ring);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%016llx", val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
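	/*
	 * CMD_RING_RSVD_BITS masks the low bits of CRCR (Ring Cycle State,
	 * Command Stop/Abort, Command Ring Running, plus reserved bits), so
	 * the read-modify-write above preserves those bits, installs the
	 * 64-byte-aligned segment address in the pointer field, and seeds
	 * the Ring Cycle State from the ring's allocated cycle_state.
	 */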
	xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
	if (!xhci->lpm_command)
		goto fail;

	/* Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs++;

	val = readl(&xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Doorbell array is located at offset 0x%x"
			" from cap regs base addr", val);
	xhci->dba = (void __iomem *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];
	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
					0, flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci) < 0)
		goto fail;

	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			flags);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Allocated event ring segment table at 0x%llx",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}
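	/*
	 * Each ERST entry describes one event ring segment as a
	 * { base address, size-in-TRBs } pair; with the defaults here every
	 * entry advertises TRBS_PER_SEGMENT TRBs.  The controller walks the
	 * table in order, which is why the loop follows the segment list
	 * (seg = seg->next) while filling consecutive entries.
	 */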
	/* set ERST count with the number of entries in the segment table */
	val = readl(&xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
			val);
	writel(val, &xhci->ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST entries to point to event ring.");
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST base address for ir_set 0 = 0x%llx",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wrote ERST address to ir_set 0.");
	xhci_print_ir_set(xhci, 0);
	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	for (i = 0; i < MAX_HC_SLOTS; i++)
		xhci->devs[i] = NULL;
	for (i = 0; i < USB_MAXCHILDREN; i++) {
		xhci->bus_state[0].resume_done[i] = 0;
		xhci->bus_state[1].resume_done[i] = 0;
		/* Only the USB 2.0 completions will ever be used. */
		init_completion(&xhci->bus_state[1].rexit_done[i]);
	}

	if (scratchpad_alloc(xhci, flags))
		goto fail;
	if (xhci_setup_port_arrays(xhci, flags))
		goto fail;

	/* Enable USB 3.0 device notifications for function remote wake, which
	 * is necessary for allowing USB 3.0 devices to do remote wakeup from
	 * U3 (device suspend).
	 */
	temp = readl(&xhci->op_regs->dev_notification);
	temp &= ~DEV_NOTE_MASK;
	temp |= DEV_NOTE_FWAKE;
	writel(temp, &xhci->op_regs->dev_notification);

	return 0;

fail:
	xhci_halt(xhci);
	xhci_reset(xhci);
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}