1 // SPDX-License-Identifier: GPL-2.0+
3 * USB HOST XHCI Controller stack
5 * Based on xHCI host controller driver in linux-kernel
8 * Copyright (C) 2008 Intel Corp.
11 * Copyright (C) 2013 Samsung Electronics Co.Ltd
12 * Authors: Vivek Gautam <gautam.vivek@samsung.com>
13 * Vikas Sajjan <vikas.sajjan@samsung.com>
20 #include <asm/byteorder.h>
23 #include <asm/cache.h>
24 #include <linux/bug.h>
25 #include <linux/errno.h>
29 #define CACHELINE_SIZE CONFIG_SYS_CACHELINE_SIZE
* Flushes the dcache for the memory region of length len starting at addr
* @param addr start address of the memory region to be flushed
* @param len length of the memory region to be flushed
37 void xhci_flush_cache(uintptr_t addr, u32 len)
39 BUG_ON((void *)addr == NULL || len == 0);
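/*
 * Round the start address down and the end address up to whole cache
 * lines so the flush covers the full region with aligned boundaries.
 */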
41 flush_dcache_range(addr & ~(CACHELINE_SIZE - 1),
42 ALIGN(addr + len, CACHELINE_SIZE));
* Invalidates the dcache for the memory region of length len starting at addr
* @param addr start address of the memory region to be invalidated
* @param len length of the memory region to be invalidated
52 void xhci_inval_cache(uintptr_t addr, u32 len)
54 BUG_ON((void *)addr == NULL || len == 0);
56 invalidate_dcache_range(addr & ~(CACHELINE_SIZE - 1),
57 ALIGN(addr + len, CACHELINE_SIZE));
* Frees the "segment" pointer passed
* @param seg pointer to the "segment" to be freed
67 static void xhci_segment_free(struct xhci_segment *seg)
* Frees the "ring" pointer passed
* @param ring pointer to the "ring" to be freed
81 static void xhci_ring_free(struct xhci_ring *ring)
83 struct xhci_segment *seg;
84 struct xhci_segment *first_seg;
88 first_seg = ring->first_seg;
89 seg = first_seg->next;
90 while (seg != first_seg) {
struct xhci_segment *next = seg->next;
xhci_segment_free(seg);
seg = next;
95 xhci_segment_free(first_seg);
101 * Free the scratchpad buffer array and scratchpad buffers
103 * @ctrl host controller data structure
106 static void xhci_scratchpad_free(struct xhci_ctrl *ctrl)
108 if (!ctrl->scratchpad)
111 ctrl->dcbaa->dev_context_ptrs[0] = 0;
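/*
 * DCBAA entry 0 pointed at the scratchpad array; it is cleared above
 * before the scratchpad pages and the array itself are freed below.
 */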
113 free((void *)(uintptr_t)le64_to_cpu(ctrl->scratchpad->sp_array[0]));
114 free(ctrl->scratchpad->sp_array);
115 free(ctrl->scratchpad);
116 ctrl->scratchpad = NULL;
* Frees the "xhci_container_ctx" pointer passed
* @param ctx pointer to the "xhci_container_ctx" to be freed
125 static void xhci_free_container_ctx(struct xhci_container_ctx *ctx)
* Frees the virtual devices of the "xhci_ctrl" pointer passed
* @param ctrl pointer to the "xhci_ctrl" whose virtual devices are to be freed
137 static void xhci_free_virt_devices(struct xhci_ctrl *ctrl)
141 struct xhci_virt_device *virt_dev;
144 * refactored here to loop through all virt_dev
145 * Slot ID 0 is reserved
147 for (slot_id = 0; slot_id < MAX_HC_SLOTS; slot_id++) {
148 virt_dev = ctrl->devs[slot_id];
152 ctrl->dcbaa->dev_context_ptrs[slot_id] = 0;
154 for (i = 0; i < 31; ++i)
155 if (virt_dev->eps[i].ring)
156 xhci_ring_free(virt_dev->eps[i].ring);
158 if (virt_dev->in_ctx)
159 xhci_free_container_ctx(virt_dev->in_ctx);
160 if (virt_dev->out_ctx)
161 xhci_free_container_ctx(virt_dev->out_ctx);
164 /* make sure we are pointing to NULL */
165 ctrl->devs[slot_id] = NULL;
* Frees all the memory allocated
* @param ctrl pointer to the "xhci_ctrl" to be cleaned up
175 void xhci_cleanup(struct xhci_ctrl *ctrl)
177 xhci_ring_free(ctrl->event_ring);
178 xhci_ring_free(ctrl->cmd_ring);
179 xhci_scratchpad_free(ctrl);
180 xhci_free_virt_devices(ctrl);
181 free(ctrl->erst.entries);
183 if (reset_valid(&ctrl->reset))
184 reset_free(&ctrl->reset);
185 memset(ctrl, '\0', sizeof(struct xhci_ctrl));
* Mallocs cache-line aligned, zeroed memory
* @param size size of memory to be allocated
* @return pointer to the allocated and zeroed memory, aligned to the cache line size
194 static void *xhci_malloc(unsigned int size)
197 size_t cacheline_size = max(XHCI_ALIGNMENT, CACHELINE_SIZE);
199 ptr = memalign(cacheline_size, ALIGN(size, cacheline_size));
201 memset(ptr, '\0', size);
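/*
 * Flush the freshly zeroed buffer so the controller, which accesses
 * this memory by DMA, never sees stale cache contents.
 */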
203 xhci_flush_cache((uintptr_t)ptr, size);
209 * Make the prev segment point to the next segment.
210 * Change the last TRB in the prev segment to be a Link TRB which points to the
211 * address of the next segment. The caller needs to set any Link TRB
212 * related flags, such as End TRB, Toggle Cycle, and no snoop.
214 * @param prev pointer to the previous segment
215 * @param next pointer to the next segment
216 * @param link_trbs flag to indicate whether to link the trbs or NOT
219 static void xhci_link_segments(struct xhci_segment *prev,
220 struct xhci_segment *next, bool link_trbs)
val_64 = virt_to_phys(next->trbs);
prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = cpu_to_le64(val_64);
234 * Set the last TRB in the segment to
235 * have a TRB type ID of Link TRB
237 val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
238 val &= ~TRB_TYPE_BITMASK;
239 val |= (TRB_LINK << TRB_TYPE_SHIFT);
241 prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
* Initialises the ring's enqueue, dequeue, enq_seg and deq_seg pointers
* @param ring pointer to the RING to be initialised
251 static void xhci_initialize_ring_info(struct xhci_ring *ring)
254 * The ring is empty, so the enqueue pointer == dequeue pointer
256 ring->enqueue = ring->first_seg->trbs;
257 ring->enq_seg = ring->first_seg;
258 ring->dequeue = ring->enqueue;
259 ring->deq_seg = ring->first_seg;
262 * The ring is initialized to 0. The producer must write 1 to the
263 * cycle bit to handover ownership of the TRB, so PCS = 1.
264 * The consumer must compare CCS to the cycle bit to
265 * check ownership, so CCS = 1.
267 ring->cycle_state = 1;
271 * Allocates a generic ring segment from the ring pool, sets the dma address,
272 * initializes the segment to zero, and sets the private next pointer to NULL.
274 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
277 * @return pointer to the newly allocated SEGMENT
279 static struct xhci_segment *xhci_segment_alloc(void)
281 struct xhci_segment *seg;
283 seg = (struct xhci_segment *)malloc(sizeof(struct xhci_segment));
286 seg->trbs = (union xhci_trb *)xhci_malloc(SEGMENT_SIZE);
294 * Create a new ring with zero or more segments.
295 * TODO: current code only uses one-time-allocated single-segment rings
296 * of 1KB anyway, so we might as well get rid of all the segment and
297 * linking code (and maybe increase the size a bit, e.g. 4KB).
300 * Link each segment together into a ring.
301 * Set the end flag and the cycle toggle bit on the last segment.
302 * See section 4.9.2 and figures 15 and 16 of XHCI spec rev1.0.
304 * @param num_segs number of segments in the ring
305 * @param link_trbs flag to indicate whether to link the trbs or NOT
306 * @return pointer to the newly created RING
308 struct xhci_ring *xhci_ring_alloc(unsigned int num_segs, bool link_trbs)
310 struct xhci_ring *ring;
311 struct xhci_segment *prev;
313 ring = (struct xhci_ring *)malloc(sizeof(struct xhci_ring));
319 ring->first_seg = xhci_segment_alloc();
320 BUG_ON(!ring->first_seg);
prev = ring->first_seg;
num_segs--;
while (num_segs > 0) {
struct xhci_segment *next;
next = xhci_segment_alloc();
xhci_link_segments(prev, next, link_trbs);
prev = next;
num_segs--;
}
336 xhci_link_segments(prev, ring->first_seg, link_trbs);
338 /* See section 4.9.2.1 and 6.4.4.1 */
339 prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
340 cpu_to_le32(LINK_TOGGLE);
342 xhci_initialize_ring_info(ring);
348 * Set up the scratchpad buffer array and scratchpad buffers
350 * @ctrl host controller data structure
351 * @return -ENOMEM if buffer allocation fails, 0 on success
353 static int xhci_scratchpad_alloc(struct xhci_ctrl *ctrl)
355 struct xhci_hccr *hccr = ctrl->hccr;
356 struct xhci_hcor *hcor = ctrl->hcor;
357 struct xhci_scratchpad *scratchpad;
363 num_sp = HCS_MAX_SCRATCHPAD(xhci_readl(&hccr->cr_hcsparams2));
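/*
 * HCSPARAMS2 reports how many scratchpad buffers the controller needs;
 * DCBAA entry 0 must point at an array of page-sized buffer pointers
 * the xHC can use as private working memory.
 */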
367 scratchpad = malloc(sizeof(*scratchpad));
370 ctrl->scratchpad = scratchpad;
372 scratchpad->sp_array = xhci_malloc(num_sp * sizeof(u64));
373 if (!scratchpad->sp_array)
375 ctrl->dcbaa->dev_context_ptrs[0] =
376 cpu_to_le64((uintptr_t)scratchpad->sp_array);
378 xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[0],
379 sizeof(ctrl->dcbaa->dev_context_ptrs[0]));
381 page_size = xhci_readl(&hcor->or_pagesize) & 0xffff;
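/*
 * The PAGESIZE register is a bitmask of supported page sizes: bit n
 * set means 2^(n+12) bytes (e.g. 0x0001 means 4 KiB). The loop below
 * picks the smallest supported page size.
 */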
382 for (i = 0; i < 16; i++) {
if ((0x1 & page_size) != 0)
break;
page_size = page_size >> 1;
}
389 page_size = 1 << (i + 12);
390 buf = memalign(page_size, num_sp * page_size);
393 memset(buf, '\0', num_sp * page_size);
394 xhci_flush_cache((uintptr_t)buf, num_sp * page_size);
396 for (i = 0; i < num_sp; i++) {
397 uintptr_t ptr = (uintptr_t)buf + i * page_size;
398 scratchpad->sp_array[i] = cpu_to_le64(ptr);
401 xhci_flush_cache((uintptr_t)scratchpad->sp_array,
402 sizeof(u64) * num_sp);
407 free(scratchpad->sp_array);
411 ctrl->scratchpad = NULL;
418 * Allocates the Container context
420 * @param ctrl Host controller data structure
421 * @param type type of XHCI Container Context
* @return pointer to the allocated context on success, else NULL on failure
424 static struct xhci_container_ctx
425 *xhci_alloc_container_ctx(struct xhci_ctrl *ctrl, int type)
427 struct xhci_container_ctx *ctx;
429 ctx = (struct xhci_container_ctx *)
430 malloc(sizeof(struct xhci_container_ctx));
433 BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
435 ctx->size = (MAX_EP_CTX_NUM + 1) *
436 CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
437 if (type == XHCI_CTX_TYPE_INPUT)
438 ctx->size += CTX_SIZE(readl(&ctrl->hccr->cr_hccparams));
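/*
 * A device context is one slot context followed by 31 endpoint
 * contexts; an input context additionally carries the input control
 * context in front, hence the extra CTX_SIZE above.
 */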
440 ctx->bytes = (u8 *)xhci_malloc(ctx->size);
* Allocates a virtual device for the given slot
* @param ctrl host controller data structure
* @param slot_id slot ID of the device, as assigned by the controller
* @return 0 on success else negative value on failure
451 int xhci_alloc_virt_device(struct xhci_ctrl *ctrl, unsigned int slot_id)
454 struct xhci_virt_device *virt_dev;
456 /* Slot ID 0 is reserved */
457 if (ctrl->devs[slot_id]) {
458 printf("Virt dev for slot[%d] already allocated\n", slot_id);
462 ctrl->devs[slot_id] = (struct xhci_virt_device *)
463 malloc(sizeof(struct xhci_virt_device));
465 if (!ctrl->devs[slot_id]) {
466 puts("Failed to allocate virtual device\n");
470 memset(ctrl->devs[slot_id], 0, sizeof(struct xhci_virt_device));
471 virt_dev = ctrl->devs[slot_id];
473 /* Allocate the (output) device context that will be used in the HC. */
474 virt_dev->out_ctx = xhci_alloc_container_ctx(ctrl,
475 XHCI_CTX_TYPE_DEVICE);
476 if (!virt_dev->out_ctx) {
477 puts("Failed to allocate out context for virt dev\n");
481 /* Allocate the (input) device context for address device command */
482 virt_dev->in_ctx = xhci_alloc_container_ctx(ctrl,
483 XHCI_CTX_TYPE_INPUT);
484 if (!virt_dev->in_ctx) {
485 puts("Failed to allocate in context for virt dev\n");
489 /* Allocate endpoint 0 ring */
490 virt_dev->eps[0].ring = xhci_ring_alloc(1, true);
492 byte_64 = virt_to_phys(virt_dev->out_ctx->bytes);
494 /* Point to output device context in dcbaa. */
495 ctrl->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(byte_64);
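/*
 * Flush the updated DCBAA entry so the controller, which reads the
 * array by DMA, picks up the new output device context pointer.
 */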
xhci_flush_cache((uintptr_t)&ctrl->dcbaa->dev_context_ptrs[slot_id],
sizeof(ctrl->dcbaa->dev_context_ptrs[slot_id]));
503 * Allocates the necessary data structures
504 * for XHCI host controller
506 * @param ctrl Host controller data structure
507 * @param hccr pointer to HOST Controller Control Registers
508 * @param hcor pointer to HOST Controller Operational Registers
509 * @return 0 if successful else -1 on failure
511 int xhci_mem_init(struct xhci_ctrl *ctrl, struct xhci_hccr *hccr,
512 struct xhci_hcor *hcor)
519 struct xhci_segment *seg;
521 /* DCBAA initialization */
522 ctrl->dcbaa = (struct xhci_device_context_array *)
523 xhci_malloc(sizeof(struct xhci_device_context_array));
524 if (ctrl->dcbaa == NULL) {
525 puts("unable to allocate DCBA\n");
529 val_64 = virt_to_phys(ctrl->dcbaa);
530 /* Set the pointer in DCBAA register */
531 xhci_writeq(&hcor->or_dcbaap, val_64);
533 /* Command ring control pointer register initialization */
534 ctrl->cmd_ring = xhci_ring_alloc(1, true);
536 /* Set the address in the Command Ring Control register */
537 trb_64 = virt_to_phys(ctrl->cmd_ring->first_seg->trbs);
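/*
 * CRCR: preserve the reserved bits, program the physical dequeue
 * pointer of the command ring and set the Ring Cycle State bit to
 * match the ring's initial cycle state.
 */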
538 val_64 = xhci_readq(&hcor->or_crcr);
539 val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
540 (trb_64 & (u64) ~CMD_RING_RSVD_BITS) |
541 ctrl->cmd_ring->cycle_state;
542 xhci_writeq(&hcor->or_crcr, val_64);
544 /* write the address of db register */
545 val = xhci_readl(&hccr->cr_dboff);
547 ctrl->dba = (struct xhci_doorbell_array *)((char *)hccr + val);
549 /* write the address of runtime register */
550 val = xhci_readl(&hccr->cr_rtsoff);
552 ctrl->run_regs = (struct xhci_run_regs *)((char *)hccr + val);
/* writing the address of the ir_set structure */
555 ctrl->ir_set = &ctrl->run_regs->ir_set[0];
557 /* Event ring does not maintain link TRB */
558 ctrl->event_ring = xhci_ring_alloc(ERST_NUM_SEGS, false);
559 ctrl->erst.entries = (struct xhci_erst_entry *)
560 xhci_malloc(sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS);
562 ctrl->erst.num_entries = ERST_NUM_SEGS;
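/*
 * Fill the Event Ring Segment Table: each entry holds the base
 * address of one event ring segment and its size in TRBs.
 */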
for (val = 0, seg = ctrl->event_ring->first_seg;
val < ERST_NUM_SEGS;
val++) {
struct xhci_erst_entry *entry = &ctrl->erst.entries[val];
trb_64 = virt_to_phys(seg->trbs);
entry->seg_addr = cpu_to_le64(trb_64);
entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
seg = seg->next;
}
574 xhci_flush_cache((uintptr_t)ctrl->erst.entries,
575 ERST_NUM_SEGS * sizeof(struct xhci_erst_entry));
577 deq = virt_to_phys(ctrl->event_ring->dequeue);
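/*
 * The low bits of ERDP hold the Dequeue ERST Segment Index and the
 * Event Handler Busy flag, so only the pointer bits above
 * ERST_PTR_MASK are written below.
 */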
579 /* Update HC event ring dequeue pointer */
580 xhci_writeq(&ctrl->ir_set->erst_dequeue,
581 (u64)deq & (u64)~ERST_PTR_MASK);
583 /* set ERST count with the number of entries in the segment table */
584 val = xhci_readl(&ctrl->ir_set->erst_size);
585 val &= ERST_SIZE_MASK;
586 val |= ERST_NUM_SEGS;
587 xhci_writel(&ctrl->ir_set->erst_size, val);
589 /* this is the event ring segment table pointer */
590 val_64 = xhci_readq(&ctrl->ir_set->erst_base);
591 val_64 &= ERST_PTR_MASK;
592 val_64 |= virt_to_phys(ctrl->erst.entries) & ~ERST_PTR_MASK;
594 xhci_writeq(&ctrl->ir_set->erst_base, val_64);
596 /* set up the scratchpad buffer array and scratchpad buffers */
597 xhci_scratchpad_alloc(ctrl);
599 /* initializing the virtual devices to NULL */
600 for (i = 0; i < MAX_HC_SLOTS; ++i)
601 ctrl->devs[i] = NULL;
* Zero this register completely, otherwise some spurious Device
* Notification Events might cause trouble here.
608 xhci_writel(&hcor->or_dnctrl, 0x0);
* Gets the input control context from the passed container context
* @param ctx pointer to the container context
617 * @return pointer to the Input control context data
619 struct xhci_input_control_ctx
620 *xhci_get_input_control_ctx(struct xhci_container_ctx *ctx)
622 BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
623 return (struct xhci_input_control_ctx *)ctx->bytes;
* Gets the slot context from the passed container context
629 * @param ctrl Host controller data structure
630 * @param ctx pointer to the context
631 * @return pointer to the slot control context data
633 struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_ctrl *ctrl,
634 struct xhci_container_ctx *ctx)
636 if (ctx->type == XHCI_CTX_TYPE_DEVICE)
637 return (struct xhci_slot_ctx *)ctx->bytes;
639 return (struct xhci_slot_ctx *)
640 (ctx->bytes + CTX_SIZE(readl(&ctrl->hccr->cr_hccparams)));
* Gets the EP context from the container context, based on ep_index
646 * @param ctrl Host controller data structure
647 * @param ctx context container
648 * @param ep_index index of the endpoint
649 * @return pointer to the End point context
651 struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_ctrl *ctrl,
652 struct xhci_container_ctx *ctx,
653 unsigned int ep_index)
/* increment ep index by offset of start of ep ctx array */
ep_index++;
if (ctx->type == XHCI_CTX_TYPE_INPUT)
ep_index++;
return (struct xhci_ep_ctx *)
(ctx->bytes +
(ep_index * CTX_SIZE(readl(&ctrl->hccr->cr_hccparams))));
666 * Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
667 * Useful when you want to change one particular aspect of the endpoint
668 * and then issue a configure endpoint command.
670 * @param ctrl Host controller data structure
671 * @param in_ctx contains the input context
* @param out_ctx contains the output context
673 * @param ep_index index of the end point
676 void xhci_endpoint_copy(struct xhci_ctrl *ctrl,
677 struct xhci_container_ctx *in_ctx,
678 struct xhci_container_ctx *out_ctx,
679 unsigned int ep_index)
681 struct xhci_ep_ctx *out_ep_ctx;
682 struct xhci_ep_ctx *in_ep_ctx;
684 out_ep_ctx = xhci_get_ep_ctx(ctrl, out_ctx, ep_index);
685 in_ep_ctx = xhci_get_ep_ctx(ctrl, in_ctx, ep_index);
687 in_ep_ctx->ep_info = out_ep_ctx->ep_info;
688 in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
689 in_ep_ctx->deq = out_ep_ctx->deq;
690 in_ep_ctx->tx_info = out_ep_ctx->tx_info;
694 * Copy output xhci_slot_ctx to the input xhci_slot_ctx.
695 * Useful when you want to change one particular aspect of the endpoint
696 * and then issue a configure endpoint command.
697 * Only the context entries field matters, but
698 * we'll copy the whole thing anyway.
700 * @param ctrl Host controller data structure
* @param in_ctx contains the input context
* @param out_ctx contains the output context
705 void xhci_slot_copy(struct xhci_ctrl *ctrl, struct xhci_container_ctx *in_ctx,
706 struct xhci_container_ctx *out_ctx)
708 struct xhci_slot_ctx *in_slot_ctx;
709 struct xhci_slot_ctx *out_slot_ctx;
711 in_slot_ctx = xhci_get_slot_ctx(ctrl, in_ctx);
712 out_slot_ctx = xhci_get_slot_ctx(ctrl, out_ctx);
714 in_slot_ctx->dev_info = out_slot_ctx->dev_info;
715 in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
716 in_slot_ctx->tt_info = out_slot_ctx->tt_info;
717 in_slot_ctx->dev_state = out_slot_ctx->dev_state;
721 * Setup an xHCI virtual device for a Set Address command
* @param ctrl host controller data structure
* @param udev pointer to the Device Data Structure
* @param hop_portnr root hub port number the device is attached to
* @return none
726 void xhci_setup_addressable_virt_dev(struct xhci_ctrl *ctrl,
727 struct usb_device *udev, int hop_portnr)
729 struct xhci_virt_device *virt_dev;
730 struct xhci_ep_ctx *ep0_ctx;
731 struct xhci_slot_ctx *slot_ctx;
734 int slot_id = udev->slot_id;
735 int speed = udev->speed;
737 #if CONFIG_IS_ENABLED(DM_USB)
738 struct usb_device *dev = udev;
739 struct usb_hub_device *hub;
742 virt_dev = ctrl->devs[slot_id];
746 /* Extract the EP0 and Slot Ctrl */
747 ep0_ctx = xhci_get_ep_ctx(ctrl, virt_dev->in_ctx, 0);
748 slot_ctx = xhci_get_slot_ctx(ctrl, virt_dev->in_ctx);
750 /* Only the control endpoint is valid - one endpoint context */
751 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
753 #if CONFIG_IS_ENABLED(DM_USB)
754 /* Calculate the route string for this device */
755 port_num = dev->portnr;
756 while (!usb_hub_is_root_hub(dev->dev)) {
757 hub = dev_get_uclass_priv(dev->dev);
759 * Each hub in the topology is expected to have no more than
760 * 15 ports in order for the route string of a device to be
761 * unique. SuperSpeed hubs are restricted to only having 15
762 * ports, but FS/LS/HS hubs are not. The xHCI specification
* says that if the port number the device is connected to is greater
* than 15, that portion of the route string shall be set to 15.
*/
if (port_num > 15)
port_num = 15;
route |= port_num << (hub->hub_depth * 4);
769 dev = dev_get_parent_priv(dev->dev);
770 port_num = dev->portnr;
771 dev = dev_get_parent_priv(dev->dev->parent);
774 debug("route string %x\n", route);
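/*
 * The route string occupies the low 20 bits of dev_info in the slot
 * context, four bits per hub tier, built from the port numbers
 * collected while walking up towards the root hub above.
 */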
776 slot_ctx->dev_info |= cpu_to_le32(route);
779 case USB_SPEED_SUPER:
780 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
783 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
786 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
789 slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
792 /* Speed was set earlier, this shouldn't happen. */
796 #if CONFIG_IS_ENABLED(DM_USB)
797 /* Set up TT fields to support FS/LS devices */
798 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
799 struct udevice *parent = udev->dev;
803 port_num = dev->portnr;
804 dev = dev_get_parent_priv(parent);
805 if (usb_hub_is_root_hub(dev->dev))
807 parent = dev->dev->parent;
808 } while (dev->speed != USB_SPEED_HIGH);
810 if (!usb_hub_is_root_hub(dev->dev)) {
811 hub = dev_get_uclass_priv(dev->dev);
813 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
814 slot_ctx->tt_info |= cpu_to_le32(TT_PORT(port_num));
815 slot_ctx->tt_info |= cpu_to_le32(TT_SLOT(dev->slot_id));
820 port_num = hop_portnr;
821 debug("port_num = %d\n", port_num);
823 slot_ctx->dev_info2 |=
824 cpu_to_le32(((port_num & ROOT_HUB_PORT_MASK) <<
825 ROOT_HUB_PORT_SHIFT));
827 /* Step 4 - ring already allocated */
829 ep0_ctx->ep_info2 = cpu_to_le32(CTRL_EP << EP_TYPE_SHIFT);
830 debug("SPEED = %d\n", speed);
833 case USB_SPEED_SUPER:
ep0_ctx->ep_info2 |= cpu_to_le32(((512 & MAX_PACKET_MASK) <<
MAX_PACKET_SHIFT));
debug("Setting Packet size = 512bytes\n");
/* USB core guesses at a 64-byte max packet first for FS devices */
ep0_ctx->ep_info2 |= cpu_to_le32(((64 & MAX_PACKET_MASK) <<
MAX_PACKET_SHIFT));
debug("Setting Packet size = 64bytes\n");
ep0_ctx->ep_info2 |= cpu_to_le32(((8 & MAX_PACKET_MASK) <<
MAX_PACKET_SHIFT));
debug("Setting Packet size = 8bytes\n");
855 /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
ep0_ctx->ep_info2 |=
cpu_to_le32(((0 & MAX_BURST_MASK) << MAX_BURST_SHIFT) |
((3 & ERROR_COUNT_MASK) << ERROR_COUNT_SHIFT));
860 trb_64 = virt_to_phys(virt_dev->eps[0].ring->first_seg->trbs);
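/*
 * Bit 0 of the TR Dequeue Pointer field carries the Dequeue Cycle
 * State (DCS), which is why the ring's cycle state is OR'ed in below.
 */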
861 ep0_ctx->deq = cpu_to_le64(trb_64 | virt_dev->eps[0].ring->cycle_state);
865 * software shall set 'Average TRB Length' to 8 for control endpoints.
867 ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(8));
869 /* Steps 7 and 8 were done in xhci_alloc_virt_device() */
871 xhci_flush_cache((uintptr_t)ep0_ctx, sizeof(struct xhci_ep_ctx));
872 xhci_flush_cache((uintptr_t)slot_ctx, sizeof(struct xhci_slot_ctx));
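/*
 * Flush both contexts so the controller sees up-to-date data when the
 * Address Device command referencing them is issued later.
 */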