drv->check_bandwidth = over->check_bandwidth;
if (over->reset_bandwidth)
drv->reset_bandwidth = over->reset_bandwidth;
+ if (over->update_hub_device)
+ drv->update_hub_device = over->update_hub_device;
+ if (over->hub_control)
+ drv->hub_control = over->hub_control;
}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
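For context: a platform glue driver opts into the new hooks by filling them in its xhci_driver_overrides and handing that to xhci_init_driver() at module init, which copies each non-NULL hook into the shared hc_driver as above. A minimal sketch of that wiring, modeled on the existing xhci-plat pattern; all xhci_foo_* names are hypothetical:

#include <linux/module.h>
#include <linux/platform_device.h>
#include "xhci.h"	/* struct xhci_driver_overrides, xhci_init_driver() */

/* Hypothetical hooks; a possible hub_control body is sketched further below. */
static int xhci_foo_update_hub_device(struct usb_hcd *hcd,
				      struct usb_device *hdev,
				      struct usb_tt *tt, gfp_t mem_flags);
static int xhci_foo_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
				u16 wIndex, char *buf, u16 wLength);

static struct hc_driver __read_mostly xhci_foo_hc_driver;
static struct platform_driver xhci_foo_driver;	/* probe/remove elided */

static const struct xhci_driver_overrides xhci_foo_overrides __initconst = {
	.update_hub_device	= xhci_foo_update_hub_device,
	.hub_control		= xhci_foo_hub_control,
};

static int __init xhci_foo_init(void)
{
	/* Copies each non-NULL override into xhci_foo_hc_driver. */
	xhci_init_driver(&xhci_foo_hc_driver, &xhci_foo_overrides);
	return platform_driver_register(&xhci_foo_driver);
}
module_init(xhci_foo_init);

The matching members are added to struct xhci_driver_overrides in xhci.h: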
struct usb_host_endpoint *ep);
int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
+ int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
+ struct usb_tt *tt, gfp_t mem_flags);
+ int (*hub_control)(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength);
};
#define XHCI_CFC_DELAY 10
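The typical shape of a hub_control override is to intercept only the requests the platform cares about and chain to the core handler for everything else. The sketch below assumes the core xhci_hub_control() is exported alongside this change (a glue module could not chain to it otherwise) and uses the standard request/feature constants from hcd.h and ch11.h; the quirk shown is purely illustrative:

static int xhci_foo_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
				u16 wIndex, char *buf, u16 wLength)
{
	/*
	 * Handle (or tweak) only the platform-specific cases, e.g. a
	 * hypothetical quirk when a root-hub port enters test mode...
	 */
	if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_TEST)
		dev_dbg(hcd->self.controller, "entering test mode\n");

	/* ...then let the core xHCI root-hub code do the real work. */
	return xhci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
}

The teardown loop in ucsi_unregister() (drivers/usb/typec/ucsi/ucsi.c) changes next: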
ucsi_unregister_altmodes(&ucsi->connector[i],
UCSI_RECIPIENT_CON);
ucsi_unregister_port_psy(&ucsi->connector[i]);
- if (ucsi->connector[i].wq)
+
+ if (ucsi->connector[i].wq) {
+ struct ucsi_work *uwork;
+
+ mutex_lock(&ucsi->connector[i].lock);
+ /*
+ * queue delayed items immediately so they can execute
+ * and free themselves before the wq is destroyed
+ */
+ list_for_each_entry(uwork, &ucsi->connector[i].partner_tasks, node)
+ mod_delayed_work(ucsi->connector[i].wq, &uwork->work, 0);
+ mutex_unlock(&ucsi->connector[i].lock);
destroy_workqueue(ucsi->connector[i].wq);
+ }
+
+ usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_sink_caps);
+ ucsi->connector[i].port_sink_caps = NULL;
+ usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_source_caps);
+ ucsi->connector[i].port_source_caps = NULL;
+ usb_power_delivery_unregister(ucsi->connector[i].pd);
+ ucsi->connector[i].pd = NULL;
typec_unregister_port(ucsi->connector[i].port);
}
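The new flush exists because the entries on partner_tasks are delayed works that free themselves when they run: destroying the workqueue while any are still pending would leak them, and a timer that fires afterwards would queue work onto a freed queue. Re-arming each one with a zero delay under the connector lock forces it to execute, and destroy_workqueue() drains the queue before tearing it down. The same flush-then-destroy pattern in isolation, as a self-contained sketch (the foo_* names are illustrative, not from ucsi):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct foo_work {			/* stand-in for struct ucsi_work */
	struct delayed_work work;
	struct list_head node;		/* on the pending list until it runs */
};

static void foo_flush_and_destroy(struct workqueue_struct *wq,
				  struct list_head *pending,
				  struct mutex *lock)
{
	struct foo_work *fw;

	mutex_lock(lock);
	/* Zero delay: every pending item is queued to run right away. */
	list_for_each_entry(fw, pending, node)
		mod_delayed_work(wq, &fw->work, 0);
	mutex_unlock(lock);

	/*
	 * destroy_workqueue() drains the queue first, so each re-armed
	 * item has run (and freed itself) before the queue goes away.
	 */
	destroy_workqueue(wq);
}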