From: Mathias Nyman <mathias.nyman@linux.intel.com>
Date: Fri, 29 Jan 2021 13:00:18 +0000 (+0200)
Subject: xhci: Avoid parsing transfer events several times
X-Git-Tag: v5.15~1778^2~85
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ab58f3bb6aaaf98ba81d5c627ac25c08ff4ed4f1;p=platform%2Fkernel%2Flinux-starfive.git

xhci: Avoid parsing transfer events several times

When handling transfer events, the event is passed along the handling
call path and parsed again on several occasions. The event contains the
slot_id and endpoint index, from which the driver endpoint structure can
be found. There was, however, no way to get the endpoint index or the
parent USB device from this endpoint structure.

A lot of extra event parsing, and thus some DMA double-fetch cases, as
well as excess variables and code, can be avoided by adding the endpoint
index and a pointer to the parent USB virt device to the endpoint
structure.

Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20210129130044.206855-2-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---

diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 3589b49..d6e2ee1 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1012,6 +1012,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 
 	/* Initialize the cancellation list and watchdog timers for each ep */
 	for (i = 0; i < 31; i++) {
+		dev->eps[i].ep_index = i;
+		dev->eps[i].vdev = dev;
 		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
 		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
 		INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index cf0c93a9..cac79a3e 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1861,7 +1861,7 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
 	 * Avoid resetting endpoint if link is inactive. Can cause host hang.
 	 * Device will be reset soon to recover the link so don't do anything
 	 */
-	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR)
+	if (ep->vdev->flags & VDEV_PORT_ERROR)
 		return;
 
 	command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
@@ -1970,18 +1970,14 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	struct xhci_transfer_event *event,
 	struct xhci_virt_ep *ep, int *status)
 {
-	struct xhci_virt_device *xdev;
 	struct xhci_ep_ctx *ep_ctx;
 	struct xhci_ring *ep_ring;
 	unsigned int slot_id;
 	u32 trb_comp_code;
-	int ep_index;
 
 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
-	xdev = xhci->devs[slot_id];
-	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
-	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 
 	if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
@@ -2006,9 +2002,9 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 		 * stall later. Hub TT buffer should only be cleared for FS/LS
 		 * devices behind HS hubs for functional stalls.
 		 */
-		if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR))
+		if ((ep->ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR))
 			xhci_clear_hub_tt_buffer(xhci, td, ep);
-		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
+		xhci_cleanup_halted_endpoint(xhci, slot_id, ep->ep_index,
 					ep_ring->stream_id, td, EP_HARD_RESET);
 	} else {
 		/* Update ring dequeue pointer */
@@ -2042,19 +2038,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	union xhci_trb *ep_trb, struct xhci_transfer_event *event,
 	struct xhci_virt_ep *ep, int *status)
 {
-	struct xhci_virt_device *xdev;
-	unsigned int slot_id;
-	int ep_index;
 	struct xhci_ep_ctx *ep_ctx;
 	u32 trb_comp_code;
 	u32 remaining, requested;
 	u32 trb_type;
 
 	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
-	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
-	xdev = xhci->devs[slot_id];
-	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
-	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	requested = td->urb->transfer_buffer_length;
 	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
@@ -2102,7 +2092,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			ep_ctx, trb_comp_code))
 			break;
 		xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
-			 trb_comp_code, ep_index);
+			 trb_comp_code, ep->ep_index);
 		fallthrough;
 	case COMP_STALL_ERROR:
 		/* Did we transfer part of the data (middle) phase? */
@@ -2264,11 +2254,9 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	u32 trb_comp_code;
 	u32 remaining, requested, ep_trb_len;
 	unsigned int slot_id;
-	int ep_index;
 
 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
-	slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[slot_id]->out_ctx);
-	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
 	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
@@ -2306,7 +2294,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 			le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
 			break;
 		*status = 0;
-		xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
+		xhci_cleanup_halted_endpoint(xhci, slot_id, ep->ep_index,
 					ep_ring->stream_id, td, EP_SOFT_RESET);
 		return 0;
 	default:
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 25e57bc..c76381f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -918,6 +918,8 @@ struct xhci_bw_info {
 #define SS_BW_RESERVED		10
 
 struct xhci_virt_ep {
+	struct xhci_virt_device		*vdev;		/* parent */
+	unsigned int			ep_index;
 	struct xhci_ring		*ring;
 	/* Related to endpoints that are configured to use stream IDs only */
 	struct xhci_stream_info		*stream_info;
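
The idea behind the patch can be shown outside the driver as well. Below is a
minimal, standalone C sketch of the same pattern: cache a back-pointer to the
parent device and the endpoint's own index once, at allocation time, so that
code handed only the endpoint never has to re-parse the DMA-visible event to
recover them. All struct and function names in the sketch (virt_device,
virt_ep, init_virt_device, handle_endpoint) are invented for illustration and
are not the xhci driver's own types.

/*
 * Standalone illustration (not kernel code) of the back-pointer pattern:
 * each per-endpoint struct carries its parent pointer and its own index,
 * filled in exactly once when the device structure is set up.
 */
#include <stdio.h>

#define NUM_EPS 31

struct virt_device;                      /* forward declaration for the back-pointer */

struct virt_ep {
	struct virt_device *vdev;        /* parent device, cached at init time */
	unsigned int ep_index;           /* this endpoint's index, cached at init time */
};

struct virt_device {
	int slot_id;
	struct virt_ep eps[NUM_EPS];
};

/* Fill in the back-pointer and index for every endpoint exactly once. */
static void init_virt_device(struct virt_device *dev, int slot_id)
{
	dev->slot_id = slot_id;
	for (unsigned int i = 0; i < NUM_EPS; i++) {
		dev->eps[i].ep_index = i;
		dev->eps[i].vdev = dev;
	}
}

/*
 * A handler that only receives the endpoint can still reach the slot and
 * the endpoint index through the cached fields.
 */
static void handle_endpoint(struct virt_ep *ep)
{
	printf("slot %d, endpoint index %u\n", ep->vdev->slot_id, ep->ep_index);
}

int main(void)
{
	static struct virt_device dev;

	init_virt_device(&dev, 1);
	handle_endpoint(&dev.eps[4]);    /* prints: slot 1, endpoint index 4 */
	return 0;
}

In the driver this is what the xhci-mem.c hunk does in xhci_alloc_virt_device(),
and it is what lets the xhci-ring.c handlers drop their local slot_id, ep_index
and xdev recomputation from the event TRB.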