// SPDX-License-Identifier: GPL-2.0
 * Thunderbolt driver - switch/port utility functions
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/module.h>
/* Switch NVM support */
struct nvm_auth_status {
	struct list_head list;
static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
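/*
 * A minimal usage sketch, assuming the driver is built as the usual
 * "thunderbolt" module: the parameter is read-only at runtime (mode
 * 0444), so set it at load time, e.g. "modprobe thunderbolt clx=0",
 * or boot with "thunderbolt.clx=0" on the kernel command line.
 */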
 * Hold NVM authentication failure status per switch. This information
 * needs to stay around even when the switch gets power cycled, so we
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
	struct nvm_auth_status *st;
	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, sw->uuid))
static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
	struct nvm_auth_status *st;
	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);
	*status = st ? st->status : 0;
static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
	struct nvm_auth_status *st;
	if (WARN_ON(!sw->uuid))
	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
		st = kzalloc(sizeof(*st), GFP_KERNEL);
		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
		INIT_LIST_HEAD(&st->list);
		list_add_tail(&st->list, &nvm_auth_status_cache);
	mutex_unlock(&nvm_auth_status_lock);
static void nvm_clear_auth_status(const struct tb_switch *sw)
	struct nvm_auth_status *st;
	mutex_lock(&nvm_auth_status_lock);
	st = __nvm_get_auth_status(sw);
	mutex_unlock(&nvm_auth_status_lock);
static int nvm_validate_and_write(struct tb_switch *sw)
	unsigned int image_size, hdr_size;
	const u8 *buf = sw->nvm->buf;
	image_size = sw->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
	 * Read digital section size and check that it also fits inside
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
	if (!sw->safe_mode) {
		 * Make sure the device ID in the image matches the one
		 * we read from the switch config space.
		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
		if (device_id != sw->config.device_id)
		if (sw->generation < 3) {
			/* Write CSS headers first */
			ret = dma_port_flash_write(sw->dma_port,
				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
				DMA_PORT_CSS_MAX_SIZE);
		/* Skip headers in the image */
		image_size -= hdr_size;
	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
		ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
	sw->nvm->flushed = true;
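/*
 * A worked example of the validation above, with illustrative values
 * rather than ones from a real image: if the first dword of the image is
 * 0x00004000, the low 24 bits give hdr_size = 0x4000 (16 KiB), which is
 * 4k-aligned. The u16 at buf + 0x4000 is then the digital section size,
 * and the u16 at buf + 0x4000 + NVM_DEVID must match
 * sw->config.device_id before anything is written to the flash.
 */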
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
	 * Root switch NVM upgrade requires that we disconnect the
	 * existing paths first (in case it is not in safe mode
	if (!sw->safe_mode) {
		ret = tb_domain_disconnect_all_paths(sw->tb);
		 * The host controller goes away pretty soon after this if
		 * everything goes well, so getting a timeout is expected.
		ret = dma_port_flash_update_auth(sw->dma_port);
		if (!ret || ret == -ETIMEDOUT)
		 * Any error from the update auth operation requires power
		 * cycling of the host router.
		tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
		if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
			nvm_set_auth_status(sw, status);
	 * From safe mode we can get out by just power cycling the switch.
	dma_port_power_cycle(sw->dma_port);
static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
	int ret, retries = 10;
	ret = dma_port_flash_update_auth(sw->dma_port);
	/* Power cycle is required */
	 * Poll here for the authentication status. It takes some time
	 * for the device to respond (we get a timeout for a while). Once
	 * we get a response the device needs to be power cycled in order
	 * for the new NVM to be taken into use.
		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
		if (ret < 0 && ret != -ETIMEDOUT)
			tb_sw_warn(sw, "failed to authenticate NVM\n");
			nvm_set_auth_status(sw, status);
		tb_sw_info(sw, "power cycling the switch now\n");
		dma_port_power_cycle(sw->dma_port);
static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
	struct pci_dev *root_port;
	 * During host router NVM upgrade we should not allow root port to
	 * go into D3cold because some root ports cannot trigger PME
	 * themselves. To be on the safe side keep the root port in D0 during
	 * the whole upgrade process.
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
		pm_runtime_get_noresume(&root_port->dev);
static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
	struct pci_dev *root_port;
	root_port = pcie_find_root_port(sw->tb->nhi->pdev);
		pm_runtime_put(&root_port->dev);
static inline bool nvm_readable(struct tb_switch *sw)
	if (tb_switch_is_usb4(sw)) {
		 * USB4 devices must support NVM operations but they are
		 * optional for hosts. Therefore we query the NVM sector
		 * size here and if it is supported assume NVM
		 * operations are implemented.
		return usb4_switch_nvm_sector_size(sw) > 0;
	/* Thunderbolt 2 and 3 devices support NVM through DMA port */
	return !!sw->dma_port;
static inline bool nvm_upgradeable(struct tb_switch *sw)
	if (sw->no_nvm_upgrade)
	return nvm_readable(sw);
static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
	if (tb_switch_is_usb4(sw)) {
			ret = usb4_switch_nvm_set_offset(sw, 0);
		sw->nvm->authenticating = true;
		return usb4_switch_nvm_authenticate(sw);
	} else if (auth_only) {
	sw->nvm->authenticating = true;
		nvm_authenticate_start_dma_port(sw);
		ret = nvm_authenticate_host_dma_port(sw);
		ret = nvm_authenticate_device_dma_port(sw);
 * tb_switch_nvm_read() - Read router NVM
 * @sw: Router whose NVM to read
 * @address: Start address on the NVM
 * @buf: Buffer where the read data is copied
 * @size: Size of the buffer in bytes
 * Reads from router NVM and returns the requested data in @buf. Locking
 * is up to the caller. Returns %0 on success and negative errno in case
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
	if (tb_switch_is_usb4(sw))
		return usb4_switch_nvm_read(sw, address, buf, size);
	return dma_port_flash_read(sw->dma_port, address, buf, size);
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	pm_runtime_get_sync(&sw->dev);
	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
	ret = tb_switch_nvm_read(sw, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
	struct tb_nvm *nvm = priv;
	struct tb_switch *sw = tb_to_switch(nvm->dev);
	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();
	 * Since writing the NVM image might require some special steps,
	 * for example when CSS headers are written, we cache the image
	 * locally here and handle the special cases when the user asks
	 * us to authenticate the image.
	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&sw->tb->lock);
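/*
 * These callbacks back the NVMem devices exposed for the router in
 * sysfs. A minimal userspace flow sketch (the device name "0-1" is
 * illustrative):
 *
 *	cd /sys/bus/thunderbolt/devices/0-1
 *	dd if=firmware.bin of=nvm_non_active0/nvmem
 *	echo 1 > nvm_authenticate
 *
 * The image is only cached by nvm_write() above; it is validated and
 * flashed when nvm_authenticate is written.
 */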
static int tb_switch_nvm_add(struct tb_switch *sw)
	if (!nvm_readable(sw))
	 * The NVM format of non-Intel hardware is not known so we
	 * currently restrict NVM upgrade to Intel hardware. We may
	 * relax this in the future when we learn other NVM formats.
	if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
	    sw->config.vendor_id != 0x8087) {
			"NVM format of vendor %#x is not known, disabling NVM upgrade\n",
			sw->config.vendor_id);
	nvm = tb_nvm_alloc(&sw->dev);
	 * If the switch is in safe-mode the only accessible portion of
	 * the NVM is the non-active one where userspace is expected to
	 * write new functional NVM.
	if (!sw->safe_mode) {
		u32 nvm_size, hdr_size;
		ret = tb_switch_nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
		nvm_size = (SZ_1M << (val & 7)) / 8;
		nvm_size = (nvm_size - hdr_size) / 2;
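		/*
		 * A worked example of the size math above (illustrative
		 * value): val & 7 == 2 selects (SZ_1M << 2) / 8 = 512 KiB
		 * of flash. Subtracting the 16 KiB header used by
		 * generation 3+ and splitting the rest between the active
		 * and non-active halves gives (512K - 16K) / 2 = 248 KiB
		 * per NVM partition.
		 */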
		ret = tb_switch_nvm_read(sw, NVM_VERSION, &val, sizeof(val));
		nvm->major = (val >> 16) & 0xff;
		nvm->minor = (val >> 8) & 0xff;
		ret = tb_nvm_add_active(nvm, nvm_size, nvm_read);
	if (!sw->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, nvm_write);
static void tb_switch_nvm_remove(struct tb_switch *sw)
	/* Remove authentication status in case the switch is unplugged */
	if (!nvm->authenticating)
		nvm_clear_auth_status(sw);
/* port utility functions */
static const char *tb_port_type(const struct tb_regs_port_header *port)
	switch (port->type >> 16) {
		switch ((u8) port->type) {
static void tb_dump_port(struct tb *tb, const struct tb_port *port)
	const struct tb_regs_port_header *regs = &port->config;
	       " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
	       regs->port_number, regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version, tb_port_type(regs),
	tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
	       regs->max_in_hop_id, regs->max_out_hop_id);
	tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
	tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
	tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
 * tb_port_state() - get connectedness state of a port
 * @port: the port to check
 * The port must have a TB_CAP_PHY (i.e. it should be a real port).
 * Return: Returns an enum tb_port_state on success or an error code on failure.
int tb_port_state(struct tb_port *port)
	struct tb_cap_phy phy;
	if (port->cap_phy == 0) {
		tb_port_WARN(port, "does not have a PHY\n");
	res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
 * tb_wait_for_port() - wait for a port to become ready
 * @port: Port to wait for
 * @wait_if_unplugged: Wait also when port is unplugged
 * Wait up to 1 second for a port to reach state TB_PORT_UP. If
 * wait_if_unplugged is set then we also wait if the port is in state
 * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
 * switch resume). Otherwise we only wait if a device is registered but the link
 * has not yet been established.
 * Return: Returns an error code on failure. Returns 0 if the port is not
 * connected or failed to reach state TB_PORT_UP within one second. Returns 1
 * if the port is connected and in state TB_PORT_UP.
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
	if (!port->cap_phy) {
		tb_port_WARN(port, "does not have PHY\n");
	if (tb_is_upstream_port(port)) {
		tb_port_WARN(port, "is the upstream port\n");
		state = tb_port_state(port);
		if (state == TB_PORT_DISABLED) {
			tb_port_dbg(port, "is disabled (state: 0)\n");
		if (state == TB_PORT_UNPLUGGED) {
			if (wait_if_unplugged) {
				/* used during resume */
					"is unplugged (state: 7), retrying...\n");
			tb_port_dbg(port, "is unplugged (state: 7)\n");
		if (state == TB_PORT_UP) {
			tb_port_dbg(port, "is connected, link is up (state: 2)\n");
		 * After plug-in the state is TB_PORT_CONNECTING. Give it some
			"is connected, link is not up (state: %d), retrying...\n",
		"failed to reach state TB_PORT_UP. Ignoring port...\n");
 * tb_port_add_nfc_credits() - add/remove non-flow-controlled credits to port
 * @port: Port to add/remove NFC credits
 * @credits: Credits to add/remove
 * Change the number of NFC credits allocated to @port by @credits. To remove
 * NFC credits pass a negative amount of credits.
 * Return: Returns 0 on success or an error code on failure.
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
	if (credits == 0 || port->sw->is_unplugged)
	 * USB4 restricts programming NFC buffers to lane adapters only
	 * so skip other ports.
	if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
	nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
		credits = max_t(int, -nfc_credits, credits);
	nfc_credits += credits;
	tb_port_dbg(port, "adding %d NFC credits to %lu\n", credits,
		    port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
	port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
	port->config.nfc_credits |= nfc_credits;
	return tb_port_write(port, &port->config.nfc_credits,
			     TB_CFG_PORT, ADP_CS_4, 1);
 * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
 * @port: Port whose counters to clear
 * @counter: Counter index to clear
 * Return: Returns 0 on success or an error code on failure.
int tb_port_clear_counter(struct tb_port *port, int counter)
	u32 zero[3] = { 0, 0, 0 };
	tb_port_dbg(port, "clearing counter %d\n", counter);
	return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
 * tb_port_unlock() - Unlock downstream port
 * @port: Port to unlock
 * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
 * downstream router accessible for CM.
int tb_port_unlock(struct tb_port *port)
	if (tb_switch_is_icm(port->sw))
	if (!tb_port_is_null(port))
	if (tb_switch_is_usb4(port->sw))
		return usb4_port_unlock(port);
static int __tb_port_enable(struct tb_port *port, bool enable)
	if (!tb_port_is_null(port))
	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
		phy &= ~LANE_ADP_CS_1_LD;
		phy |= LANE_ADP_CS_1_LD;
	ret = tb_port_write(port, &phy, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis");
 * tb_port_enable() - Enable lane adapter
 * @port: Port to enable (can be %NULL)
 * This is used for lane 0 and 1 adapters to enable them.
int tb_port_enable(struct tb_port *port)
	return __tb_port_enable(port, true);
 * tb_port_disable() - Disable lane adapter
 * @port: Port to disable (can be %NULL)
 * This is used for lane 0 and 1 adapters to disable them.
int tb_port_disable(struct tb_port *port)
	return __tb_port_enable(port, false);
 * tb_init_port() - initialize a port
 * This is a helper method for tb_switch_alloc. Does not check or initialize
 * any downstream switches.
 * Return: Returns 0 on success or an error code on failure.
static int tb_init_port(struct tb_port *port)
	INIT_LIST_HEAD(&port->list);
	/* Control adapter does not have configuration space */
	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
	if (res == -ENODEV) {
		tb_dbg(port->sw->tb, " Port %d: not implemented\n",
		port->disabled = true;
	/* Port 0 is the switch itself and has no PHY. */
	if (port->config.type == TB_TYPE_PORT) {
		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
			tb_port_WARN(port, "non switch port without a PHY\n");
		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
			port->cap_usb4 = cap;
		 * For USB4 ports the buffers allocated for the control path
		 * can be read from the path config space. For legacy
		 * devices we use a hard-coded value.
		if (tb_switch_is_usb4(port->sw)) {
			struct tb_regs_hop hop;
			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
				port->ctl_credits = hop.initial_credits;
		if (!port->ctl_credits)
			port->ctl_credits = 2;
		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
			port->cap_adap = cap;
	port->total_credits =
		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
	tb_dump_port(port->sw->tb, port);
static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
		port_max_hopid = port->config.max_in_hop_id;
		ida = &port->in_hopids;
		port_max_hopid = port->config.max_out_hop_id;
		ida = &port->out_hopids;
	 * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
		min_hopid = TB_PATH_MIN_HOPID;
	if (max_hopid < 0 || max_hopid > port_max_hopid)
		max_hopid = port_max_hopid;
	return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
 * tb_port_alloc_in_hopid() - Allocate input HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable input HopID
 * @max_hopid: Maximum acceptable input HopID
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
	return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
 * tb_port_alloc_out_hopid() - Allocate output HopID from port
 * @port: Port to allocate HopID for
 * @min_hopid: Minimum acceptable output HopID
 * @max_hopid: Maximum acceptable output HopID
 * Return: HopID between @min_hopid and @max_hopid or negative errno in
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
	return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
 * tb_port_release_in_hopid() - Release allocated input HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
	ida_simple_remove(&port->in_hopids, hopid);
 * tb_port_release_out_hopid() - Release allocated output HopID from port
 * @port: Port whose HopID to release
 * @hopid: HopID to release
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
	ida_simple_remove(&port->out_hopids, hopid);
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
					  const struct tb_switch *sw)
	u64 mask = (1ULL << parent->config.depth * 8) - 1;
	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
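/*
 * A worked example of the mask math above, with illustrative route
 * strings: every hop takes 8 bits of the route string, so a parent at
 * depth 1 gives mask = (1ULL << 8) - 1 = 0xff. A parent with route 0x01
 * and a switch with route 0x0301 then agree in the low byte, so the
 * switch is reachable by walking down from the parent.
 */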
 * tb_next_port_on_path() - Return next port for given port on a path
 * @start: Start port of the walk
 * @end: End port of the walk
 * @prev: Previous port (%NULL if this is the first)
 * This function can be used to walk from one port to another if they
 * are connected through zero or more switches. If @prev is a dual
 * link port, the function follows that link and returns another end on
 * If the @end port has been reached, return %NULL.
 * Domain tb->lock must be held when this function is called.
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
				     struct tb_port *prev)
	struct tb_port *next;
	if (prev->sw == end->sw) {
	if (tb_switch_is_reachable(prev->sw, end->sw)) {
		next = tb_port_at(tb_route(end->sw), prev->sw);
		/* Walk down the topology if next == prev */
		    (next == prev || next->dual_link_port == prev))
	if (tb_is_upstream_port(prev)) {
		next = tb_upstream_port(prev->sw);
		 * Keep the same link if prev and next are both dual link ports.
		if (next->dual_link_port &&
		    next->link_nr != prev->link_nr) {
			next = next->dual_link_port;
	return next != prev ? next : NULL;
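/*
 * A minimal usage sketch, assuming @src and @dst are ports on the same
 * path as described above; passing %NULL as @prev starts the walk from
 * @src:
 *
 *	struct tb_port *p = NULL;
 *
 *	while ((p = tb_next_port_on_path(src, dst, p)))
 *		tb_port_dbg(p, "on path\n");
 */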
 * tb_port_get_link_speed() - Get current link speed
 * @port: Port to check (USB4 or CIO)
 * Returns link speed in Gb/s or negative errno in case of failure.
int tb_port_get_link_speed(struct tb_port *port)
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
		LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
	return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
 * tb_port_get_link_width() - Get current link width
 * @port: Port to check (USB4 or CIO)
 * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane)
 * or negative errno in case of failure.
int tb_port_get_link_width(struct tb_port *port)
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
		LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
static bool tb_port_is_width_supported(struct tb_port *port, int width)
	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
		 LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
	return !!(widths & width);
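/*
 * Note, an assumption based on the test above: the supported-width field
 * is a bitfield whose encoding matches the @width values used in this
 * file, bit 0 (value 1) for single lane and bit 1 (value 2) for dual
 * lane, which is why the plain "widths & width" check works.
 */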
 * tb_port_set_link_width() - Set target link width of the lane adapter
 * @port: Lane adapter
 * @width: Target link width (%1 or %2)
 * Sets the target link width of the lane adapter to @width. Does not
 * enable/disable lane bonding. For that call tb_port_set_lane_bonding().
 * Return: %0 in case of success and negative errno in case of error
int tb_port_set_link_width(struct tb_port *port, unsigned int width)
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
		val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
		val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
 * tb_port_set_lane_bonding() - Enable/disable lane bonding
 * @port: Lane adapter
 * @bonding: enable/disable bonding
 * Enables or disables lane bonding. This should be called after target
 * link width has been set (tb_port_set_link_width()). Note that in most
 * cases one should use tb_port_lane_bonding_enable() instead to enable
 * As a side effect sets @port->bonded accordingly (and does the same
 * Return: %0 in case of success and negative errno in case of error
int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
		val |= LANE_ADP_CS_1_LB;
		val &= ~LANE_ADP_CS_1_LB;
	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_phy + LANE_ADP_CS_1, 1);
	 * When lane 0 bonding is set it will affect lane 1 too so
	port->bonded = bonding;
	port->dual_link_port->bonded = bonding;
 * tb_port_lane_bonding_enable() - Enable bonding on port
 * @port: port to enable
 * Enable bonding by setting the link width of the port and the other
 * port in case of dual link port. Does not wait for the link to
 * actually reach the bonded state so caller needs to call
 * tb_port_wait_for_link_width() before enabling any paths through the
 * link to make sure the link is in expected state.
 * Return: %0 in case of success and negative errno in case of error
int tb_port_lane_bonding_enable(struct tb_port *port)
	 * Enable lane bonding for both links if not already enabled by,
	 * for example, the boot firmware.
	ret = tb_port_get_link_width(port);
		ret = tb_port_set_link_width(port, 2);
	ret = tb_port_get_link_width(port->dual_link_port);
		ret = tb_port_set_link_width(port->dual_link_port, 2);
	ret = tb_port_set_lane_bonding(port, true);
	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);
 * tb_port_lane_bonding_disable() - Disable bonding on port
 * @port: port to disable
 * Disable bonding by setting the link width of the port and the
 * other port in case of dual link port.
void tb_port_lane_bonding_disable(struct tb_port *port)
	tb_port_set_lane_bonding(port, false);
	tb_port_set_link_width(port->dual_link_port, 1);
	tb_port_set_link_width(port, 1);
 * tb_port_wait_for_link_width() - Wait until link reaches specific width
 * @port: Port to wait for
 * @width: Expected link width (%1 or %2)
 * @timeout_msec: Timeout in ms how long to wait
 * Should be used after both ends of the link have been bonded (or
 * bonding has been disabled) to wait until the link actually reaches
 * the expected state. Returns %-ETIMEDOUT if the @width was not reached
 * within the given timeout, %0 if it was.
int tb_port_wait_for_link_width(struct tb_port *port, int width,
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
		ret = tb_port_get_link_width(port);
			 * Sometimes we get a port locked error when
			 * polling the lanes so we can ignore it and
		} else if (ret == width) {
		usleep_range(1000, 2000);
	} while (ktime_before(ktime_get(), timeout));
static int tb_port_do_update_credits(struct tb_port *port)
	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
	if (nfc_credits != port->config.nfc_credits) {
		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
		tb_port_dbg(port, "total credits changed %u -> %u\n",
			    port->total_credits, total);
		port->config.nfc_credits = nfc_credits;
		port->total_credits = total;
 * tb_port_update_credits() - Re-read port total credits
 * @port: Port to update
 * After the link is bonded (or bonding was disabled) the port total
 * credits may change, so this function needs to be called to re-read
 * the credits. Updates also the second lane adapter.
int tb_port_update_credits(struct tb_port *port)
	ret = tb_port_do_update_credits(port);
	return tb_port_do_update_credits(port->dual_link_port);
static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
		phy |= LANE_ADP_CS_1_PMS;
		phy &= ~LANE_ADP_CS_1_PMS;
	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
static int tb_port_pm_secondary_enable(struct tb_port *port)
	return __tb_port_pm_secondary_set(port, true);
static int tb_port_pm_secondary_disable(struct tb_port *port)
	return __tb_port_pm_secondary_set(port, false);
/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx_mask)
	/* Don't enable CLx in case of two single-lane links */
	if (!port->bonded && port->dual_link_port)
	/* Don't enable CLx in case of inter-domain link */
	if (tb_switch_is_usb4(port->sw)) {
		if (!usb4_port_clx_supported(port))
	} else if (!tb_lc_is_clx_supported(port)) {
	if (clx_mask & TB_CL1) {
		/* CL0s and CL1 are enabled and supported together */
		mask |= LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
	if (clx_mask & TB_CL2)
		mask |= LANE_ADP_CS_0_CL2_SUPPORT;
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	return !!(val & mask);
static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
		/* CL0s and CL1 are enabled and supported together */
		mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
		/* For now we support only CL0s and CL1, not CL2 */
	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
	return __tb_port_clx_set(port, clx, false);
static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
	return __tb_port_clx_set(port, clx, true);
 * tb_port_is_clx_enabled() - Is given CL state enabled
 * @port: USB4 port to check
 * @clx_mask: Mask of CL states to check
 * Returns true if any of the given CL states is enabled for @port.
bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask)
	if (!tb_port_clx_supported(port, clx_mask))
	if (clx_mask & TB_CL1)
		mask |= LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
	if (clx_mask & TB_CL2)
		mask |= LANE_ADP_CS_1_CL2_ENABLE;
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	return !!(val & mask);
static int tb_port_start_lane_initialization(struct tb_port *port)
	if (tb_switch_is_usb4(port->sw))
	ret = tb_lc_start_lane_initialization(port);
	return ret == -EINVAL ? 0 : ret;
 * Returns true if the port had something (router, XDomain) connected
static bool tb_port_resume(struct tb_port *port)
	bool has_remote = tb_port_has_remote(port);
		usb4_port_device_resume(port->usb4);
	} else if (!has_remote) {
		 * For disconnected downstream lane adapters start lane
		 * initialization now so we detect future connects.
		 * For XDomain start the lane initialization now so the
		 * link gets re-established.
		 * This is only needed for non-USB4 ports.
		if (!tb_is_upstream_port(port) || port->xdomain)
			tb_port_start_lane_initialization(port);
	return has_remote || port->xdomain;
 * tb_port_is_enabled() - Is the adapter port enabled
 * @port: Port to check
bool tb_port_is_enabled(struct tb_port *port)
	switch (port->config.type) {
	case TB_TYPE_PCIE_UP:
	case TB_TYPE_PCIE_DOWN:
		return tb_pci_port_is_enabled(port);
	case TB_TYPE_DP_HDMI_IN:
	case TB_TYPE_DP_HDMI_OUT:
		return tb_dp_port_is_enabled(port);
	case TB_TYPE_USB3_UP:
	case TB_TYPE_USB3_DOWN:
		return tb_usb3_port_is_enabled(port);
 * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
 * @port: USB3 adapter port to check
bool tb_usb3_port_is_enabled(struct tb_port *port)
	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_USB3_CS_0, 1))
	return !!(data & ADP_USB3_CS_0_PE);
 * tb_usb3_port_enable() - Enable USB3 adapter port
 * @port: USB3 adapter port to enable
 * @enable: Enable/disable the USB3 adapter
int tb_usb3_port_enable(struct tb_port *port, bool enable)
	u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
	if (!port->cap_adap)
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_0, 1);
 * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
 * @port: PCIe port to check
bool tb_pci_port_is_enabled(struct tb_port *port)
	if (tb_port_read(port, &data, TB_CFG_PORT,
			 port->cap_adap + ADP_PCIE_CS_0, 1))
	return !!(data & ADP_PCIE_CS_0_PE);
 * tb_pci_port_enable() - Enable PCIe adapter port
 * @port: PCIe port to enable
 * @enable: Enable/disable the PCIe adapter
int tb_pci_port_enable(struct tb_port *port, bool enable)
	u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
	if (!port->cap_adap)
	return tb_port_write(port, &word, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_0, 1);
 * tb_dp_port_hpd_is_active() - Is HPD already active
 * @port: DP out port to check
 * Checks if the DP OUT adapter port has the HDP bit already set.
int tb_dp_port_hpd_is_active(struct tb_port *port)
	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	return !!(data & ADP_DP_CS_2_HDP);
 * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
 * @port: Port to clear HPD
 * If the DP IN port has HDP set, this function can be used to clear it.
int tb_dp_port_hpd_clear(struct tb_port *port)
	ret = tb_port_read(port, &data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_3, 1);
	data |= ADP_DP_CS_3_HDPC;
	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_3, 1);
 * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
 * @port: DP IN/OUT port to set hops
 * @video: Video Hop ID
 * @aux_tx: AUX TX Hop ID
 * @aux_rx: AUX RX Hop ID
 * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
 * router DP adapters too but does not program the values as the fields
 * are read-only.
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
			unsigned int aux_tx, unsigned int aux_rx)
	if (tb_switch_is_usb4(port->sw))
	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
		ADP_DP_CS_0_VIDEO_HOPID_MASK;
	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
		ADP_DP_CS_1_AUX_RX_HOPID_MASK;
	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
 * tb_dp_port_is_enabled() - Is DP adapter port enabled
 * @port: DP adapter port to check
bool tb_dp_port_is_enabled(struct tb_port *port)
	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
 * tb_dp_port_enable() - Enables/disables DP paths of a port
 * @port: DP IN/OUT port
 * @enable: Enable/disable DP path
 * Once Hop IDs are programmed DP paths can be enabled or disabled by
 * calling this function.
int tb_dp_port_enable(struct tb_port *port, bool enable)
	ret = tb_port_read(port, data, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
/* switch utility functions */
static const char *tb_switch_generation_name(const struct tb_switch *sw)
	switch (sw->generation) {
		return "Thunderbolt 1";
		return "Thunderbolt 2";
		return "Thunderbolt 3";
static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
	const struct tb_regs_switch_header *regs = &sw->config;
	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
	       regs->revision, regs->thunderbolt_version);
	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
	tb_dbg(tb, " Config:\n");
	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
	       regs->upstream_port_number, regs->depth,
	       (((u64) regs->route_hi) << 32) | regs->route_lo,
	       regs->enabled, regs->plug_events_delay);
	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
	       regs->__unknown1, regs->__unknown4);
 * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
 * @sw: Switch to reset
 * Return: Returns 0 on success or an error code on failure.
int tb_switch_reset(struct tb_switch *sw)
	struct tb_cfg_result res;
	if (sw->generation > 1)
	tb_sw_dbg(sw, "resetting switch\n");
	res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
			      TB_CFG_SWITCH, 2, 2);
	res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
 * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
 * @sw: Router to read the offset value from
 * @offset: Offset in the router config space to read from
 * @bit: Bit mask in the offset to wait for
 * @value: Value of the bits to wait for
 * @timeout_msec: Timeout in ms how long to wait
 * Wait until the specified bits in the specified offset reach the specified
 * value. Returns %0 in case of success, %-ETIMEDOUT if the @value was not
 * reached within the given timeout or a negative errno in case of failure.
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
			   u32 value, int timeout_msec)
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if ((val & bit) == value)
		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));
 * tb_plug_events_active() - enable/disable plug events on a switch
 * Also configures a sane plug_events_delay of 255ms.
 * Return: Returns 0 on success or an error code on failure.
static int tb_plug_events_active(struct tb_switch *sw, bool active)
	if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
	sw->config.plug_events_delay = 0xff;
	res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
	res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
		data = data & 0xFFFFFF83;
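		/*
		 * The 0xFFFFFF83 mask clears bits 2-6 of the register
		 * before the switch below sets the event-disable bits
		 * again for the controllers that tolerate it; the exact
		 * per-bit semantics are controller-specific and not
		 * spelled out here.
		 */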
		switch (sw->config.device_id) {
		case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
		case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
		case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
			 * Skip Alpine Ridge, it needs to have vendor
			 * specific USB hotplug event enabled for the
			 * internal xHCI to work.
			if (!tb_switch_is_alpine_ridge(sw))
				data |= TB_PLUG_EVENTS_USB_DISABLE;
	return tb_sw_write(sw, &data, TB_CFG_SWITCH,
			   sw->cap_plug_events + 1, 1);
static ssize_t authorized_show(struct device *dev,
			       struct device_attribute *attr,
	struct tb_switch *sw = tb_to_switch(dev);
	return sprintf(buf, "%u\n", sw->authorized);
static int disapprove_switch(struct device *dev, void *not_used)
	char *envp[] = { "AUTHORIZED=0", NULL };
	struct tb_switch *sw;
	sw = tb_to_switch(dev);
	if (sw && sw->authorized) {
		/* First children */
		ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
		ret = tb_domain_disapprove_switch(sw->tb, sw);
		kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
	char envp_string[13];
	char *envp[] = { envp_string, NULL };
	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();
	if (!!sw->authorized == !!val)
	/* Disapprove switch */
		ret = disapprove_switch(&sw->dev, NULL);
	/* Approve switch */
		ret = tb_domain_approve_switch_key(sw->tb, sw);
		ret = tb_domain_approve_switch(sw->tb, sw);
	/* Challenge switch */
		ret = tb_domain_challenge_switch_key(sw->tb, sw);
	sw->authorized = val;
	 * Notify userspace of the status change, i.e. the new
	 * value of /sys/bus/thunderbolt/devices/.../authorized.
	sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
	kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
	mutex_unlock(&sw->tb->lock);
static ssize_t authorized_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
	struct tb_switch *sw = tb_to_switch(dev);
	ret = kstrtouint(buf, 0, &val);
	pm_runtime_get_sync(&sw->dev);
	ret = tb_switch_set_authorized(sw, val);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
	return ret ? ret : count;
static DEVICE_ATTR_RW(authorized);
static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
	struct tb_switch *sw = tb_to_switch(dev);
	return sprintf(buf, "%u\n", sw->boot);
static DEVICE_ATTR_RO(boot);
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
	struct tb_switch *sw = tb_to_switch(dev);
	return sprintf(buf, "%#x\n", sw->device);
static DEVICE_ATTR_RO(device);
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct tb_switch *sw = tb_to_switch(dev);
	return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
static DEVICE_ATTR_RO(device_name);
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct tb_switch *sw = tb_to_switch(dev);
	return sprintf(buf, "%u\n", sw->generation);
static DEVICE_ATTR_RO(generation);
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
	struct tb_switch *sw = tb_to_switch(dev);
	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();
		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
		ret = sprintf(buf, "\n");
	mutex_unlock(&sw->tb->lock);
static ssize_t key_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
	struct tb_switch *sw = tb_to_switch(dev);
	u8 key[TB_SWITCH_KEY_SIZE];
	ssize_t ret = count;
	if (!strcmp(buf, "\n"))
	else if (hex2bin(key, buf, sizeof(key)))
	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();
	if (sw->authorized) {
		sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
	mutex_unlock(&sw->tb->lock);
static DEVICE_ATTR(key, 0600, key_show, key_store);
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
	struct tb_switch *sw = tb_to_switch(dev);
	return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
 * Currently all lanes must run at the same speed but we expose here
 * both directions to allow possible asymmetric links in the future.
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
	struct tb_switch *sw = tb_to_switch(dev);
	return sprintf(buf, "%u\n", sw->link_width);
 * Currently the link has the same number of lanes in both directions (1 or 2)
 * but we expose them separately to allow possible asymmetric links in the future.
static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
	struct tb_switch *sw = tb_to_switch(dev);
	nvm_get_auth_status(sw, &status);
	return sprintf(buf, "%#x\n", status);
static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
	struct tb_switch *sw = tb_to_switch(dev);
	pm_runtime_get_sync(&sw->dev);
	if (!mutex_trylock(&sw->tb->lock)) {
		ret = restart_syscall();
	/* If NVMem devices are not yet added */
	ret = kstrtoint(buf, 10, &val);
	/* Always clear the authentication status */
	nvm_clear_auth_status(sw);
		if (val == AUTHENTICATE_ONLY) {
			ret = nvm_authenticate(sw, true);
			if (!sw->nvm->flushed) {
				if (!sw->nvm->buf) {
				ret = nvm_validate_and_write(sw);
				if (ret || val == WRITE_ONLY)
			if (val == WRITE_AND_AUTHENTICATE) {
					ret = tb_lc_force_power(sw);
					ret = nvm_authenticate(sw, false);
	mutex_unlock(&sw->tb->lock);
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
	int ret = nvm_authenticate_sysfs(dev, buf, false);
static DEVICE_ATTR_RW(nvm_authenticate);
static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
	struct device_attribute *attr, char *buf)
	return nvm_authenticate_show(dev, attr, buf);
static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
	ret = nvm_authenticate_sysfs(dev, buf, true);
	return ret ? ret : count;
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
static ssize_t nvm_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
	struct tb_switch *sw = tb_to_switch(dev);
	if (!mutex_trylock(&sw->tb->lock))
		return restart_syscall();
		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
	mutex_unlock(&sw->tb->lock);
static DEVICE_ATTR_RO(nvm_version);
static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
	struct tb_switch *sw = tb_to_switch(dev);
	return sprintf(buf, "%#x\n", sw->vendor);
static DEVICE_ATTR_RO(vendor);
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct tb_switch *sw = tb_to_switch(dev);
	return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
static DEVICE_ATTR_RO(vendor_name);
static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
	struct tb_switch *sw = tb_to_switch(dev);
	return sprintf(buf, "%pUb\n", sw->uuid);
static DEVICE_ATTR_RO(unique_id);
static struct attribute *switch_attrs[] = {
	&dev_attr_authorized.attr,
	&dev_attr_boot.attr,
	&dev_attr_device.attr,
	&dev_attr_device_name.attr,
	&dev_attr_generation.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_authenticate_on_disconnect.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_rx_speed.attr,
	&dev_attr_rx_lanes.attr,
	&dev_attr_tx_speed.attr,
	&dev_attr_tx_lanes.attr,
	&dev_attr_vendor.attr,
	&dev_attr_vendor_name.attr,
	&dev_attr_unique_id.attr,
static umode_t switch_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
	struct device *dev = kobj_to_dev(kobj);
	struct tb_switch *sw = tb_to_switch(dev);
	if (attr == &dev_attr_authorized.attr) {
		if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
		    sw->tb->security_level == TB_SECURITY_DPONLY)
	} else if (attr == &dev_attr_device.attr) {
	} else if (attr == &dev_attr_device_name.attr) {
		if (!sw->device_name)
	} else if (attr == &dev_attr_vendor.attr) {
	} else if (attr == &dev_attr_vendor_name.attr) {
		if (!sw->vendor_name)
	} else if (attr == &dev_attr_key.attr) {
		    sw->tb->security_level == TB_SECURITY_SECURE &&
		    sw->security_level == TB_SECURITY_SECURE)
	} else if (attr == &dev_attr_rx_speed.attr ||
		   attr == &dev_attr_rx_lanes.attr ||
		   attr == &dev_attr_tx_speed.attr ||
		   attr == &dev_attr_tx_lanes.attr) {
	} else if (attr == &dev_attr_nvm_authenticate.attr) {
		if (nvm_upgradeable(sw))
	} else if (attr == &dev_attr_nvm_version.attr) {
		if (nvm_readable(sw))
	} else if (attr == &dev_attr_boot.attr) {
	} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
		if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
	return sw->safe_mode ? 0 : attr->mode;
static const struct attribute_group switch_group = {
	.is_visible = switch_attr_is_visible,
	.attrs = switch_attrs,
static const struct attribute_group *switch_groups[] = {
static void tb_switch_release(struct device *dev)
	struct tb_switch *sw = tb_to_switch(dev);
	struct tb_port *port;
	dma_port_free(sw->dma_port);
	tb_switch_for_each_port(sw, port) {
		ida_destroy(&port->in_hopids);
		ida_destroy(&port->out_hopids);
	kfree(sw->device_name);
	kfree(sw->vendor_name);
static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
	struct tb_switch *sw = tb_to_switch(dev);
	if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
		if (add_uevent_var(env, "USB4_VERSION=1.0"))
	if (!tb_route(sw)) {
		const struct tb_port *port;
		/* Device is a hub if it has any downstream ports */
		tb_switch_for_each_port(sw, port) {
			if (!port->disabled && !tb_is_upstream_port(port) &&
			    tb_port_is_null(port)) {
		type = hub ? "hub" : "device";
	if (add_uevent_var(env, "USB4_TYPE=%s", type))
 * Currently only need to provide the callbacks. Everything else is handled
 * in the connection manager.
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
	if (cm_ops->runtime_suspend_switch)
		return cm_ops->runtime_suspend_switch(sw);
static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
	if (cm_ops->runtime_resume_switch)
		return cm_ops->runtime_resume_switch(sw);
static const struct dev_pm_ops tb_switch_pm_ops = {
	SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
struct device_type tb_switch_type = {
	.name = "thunderbolt_device",
	.release = tb_switch_release,
	.uevent = tb_switch_uevent,
	.pm = &tb_switch_pm_ops,
static int tb_switch_get_generation(struct tb_switch *sw)
	switch (sw->config.device_id) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
	case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
	case PCI_DEVICE_ID_INTEL_ICL_NHI0:
	case PCI_DEVICE_ID_INTEL_ICL_NHI1:
	if (tb_switch_is_usb4(sw))
	 * For unknown switches assume generation to be 1 to be on the
	 * safe side.
	tb_sw_warn(sw, "unsupported switch device id %#x\n",
		   sw->config.device_id);
static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
	if (tb_switch_is_usb4(sw) ||
	    (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
		max_depth = USB4_SWITCH_MAX_DEPTH;
		max_depth = TB_SWITCH_MAX_DEPTH;
	return depth > max_depth;
 * tb_switch_alloc() - allocate a switch
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 * Allocates and initializes a switch. Will not upload configuration to
 * the switch. For that you need to call tb_switch_configure()
 * separately. The returned switch should be released by calling
 * tb_switch_put().
 * Return: Pointer to the allocated switch or ERR_PTR() in case of
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
	struct tb_switch *sw;
	/* Unlock the downstream port so we can access the switch below */
		struct tb_switch *parent_sw = tb_to_switch(parent);
		struct tb_port *down;
		down = tb_port_at(route, parent_sw);
		tb_port_unlock(down);
	depth = tb_route_length(route);
	upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
	if (upstream_port < 0)
		return ERR_PTR(upstream_port);
	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
		goto err_free_sw_ports;
	sw->generation = tb_switch_get_generation(sw);
	tb_dbg(tb, "current switch config:\n");
	tb_dump_switch(tb, sw);
	/* configure switch */
	sw->config.upstream_port_number = upstream_port;
	sw->config.depth = depth;
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->config.enabled = 0;
	/* Make sure we do not exceed maximum topology limit */
	if (tb_switch_exceeds_max_depth(sw, depth)) {
		ret = -EADDRNOTAVAIL;
		goto err_free_sw_ports;
	/* initialize ports */
	sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
		goto err_free_sw_ports;
	for (i = 0; i <= sw->config.max_port_number; i++) {
		/* minimum setup for tb_find_cap and tb_drom_read to work */
		sw->ports[i].sw = sw;
		sw->ports[i].port = i;
		/* Control port does not need HopID allocation */
			ida_init(&sw->ports[i].in_hopids);
			ida_init(&sw->ports[i].out_hopids);
	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
		sw->cap_plug_events = ret;
	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
		sw->cap_vsec_tmu = ret;
	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
	ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
	/* Root switch is always authorized */
		sw->authorized = true;
	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
	return ERR_PTR(ret);
 * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
 * @tb: Pointer to the owning domain
 * @parent: Parent device for this switch
 * @route: Route string for this switch
 * This creates a switch in safe mode. This means the switch pretty much
 * lacks all capabilities except the DMA configuration port until it is
 * flashed with a valid NVM firmware.
 * The returned switch must be released by calling tb_switch_put().
 * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
	struct tb_switch *sw;
	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	sw->config.depth = tb_route_length(route);
	sw->config.route_hi = upper_32_bits(route);
	sw->config.route_lo = lower_32_bits(route);
	sw->safe_mode = true;
	device_initialize(&sw->dev);
	sw->dev.parent = parent;
	sw->dev.bus = &tb_bus_type;
	sw->dev.type = &tb_switch_type;
	sw->dev.groups = switch_groups;
	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
 * tb_switch_configure() - Uploads configuration to the switch
 * @sw: Switch to configure
 * Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use. Can be called for the switch again after
 * resume from low power states to re-initialize it.
 * Return: %0 in case of success and negative errno in case of failure
int tb_switch_configure(struct tb_switch *sw)
	struct tb *tb = sw->tb;
	route = tb_route(sw);
	tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
	       sw->config.enabled ? "restoring" : "initializing", route,
	       tb_route_length(route), sw->config.upstream_port_number);
	sw->config.enabled = 1;
	if (tb_switch_is_usb4(sw)) {
		 * For USB4 devices, we need to program the CM version
		 * accordingly so that it knows to expose all the
		 * additional capabilities.
		sw->config.cmuv = USB4_VERSION_1_0;
		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
		ret = usb4_switch_setup(sw);
		if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
			tb_sw_warn(sw, "unknown switch vendor id %#x\n",
				   sw->config.vendor_id);
		if (!sw->cap_plug_events) {
			tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
		/* Enumerate the switch */
		ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
	return tb_plug_events_active(sw, true);
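/*
 * A minimal sketch of how a connection manager typically drives the
 * allocation/configuration pair above (error handling omitted;
 * tb_switch_add() lives elsewhere in this file):
 *
 *	struct tb_switch *sw;
 *
 *	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
 *	if (!IS_ERR(sw) && !tb_switch_configure(sw))
 *		tb_switch_add(sw);
 */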
static int tb_switch_set_uuid(struct tb_switch *sw)
	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_read_uid(sw, &sw->uid);
		 * The newer controllers include fused UUID as part of
		 * link controller specific registers
		ret = tb_lc_read_uuid(sw, uuid);
		 * ICM generates UUID based on UID and fills the upper
		 * two words with ones. This is not strictly following
		 * UUID format but we want to be compatible with it so
		 * we do the same here.
		uuid[0] = sw->uid & 0xffffffff;
		uuid[1] = (sw->uid >> 32) & 0xffffffff;
		uuid[2] = 0xffffffff;
		uuid[3] = 0xffffffff;
	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
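/*
 * A worked example of the ICM-compatible UUID above, with an
 * illustrative UID: sw->uid = 0x0123456789abcdef gives uuid[0] =
 * 0x89abcdef, uuid[1] = 0x01234567 and uuid[2] = uuid[3] = 0xffffffff,
 * i.e. the 64-bit UID in the low half and all ones in the high half.
 */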
static int tb_switch_add_dma_port(struct tb_switch *sw)
	switch (sw->generation) {
		/* Only root switch can be upgraded */
	ret = tb_switch_set_uuid(sw);
	 * DMA port is the only thing available when the switch
	 * is in safe mode.
	if (sw->no_nvm_upgrade)
	if (tb_switch_is_usb4(sw)) {
		ret = usb4_switch_nvm_authenticate_status(sw, &status);
			tb_sw_info(sw, "switch flash authentication failed\n");
			nvm_set_auth_status(sw, status);
	/* Root switch DMA port requires running firmware */
	if (!tb_route(sw) && !tb_switch_is_icm(sw))
	sw->dma_port = dma_port_alloc(sw);
2681 * If there is status already set then authentication failed
2682 * when the dma_port_flash_update_auth() returned. Power cycling
2683 * is not needed (it was done already) so only thing we do here
2684 * is to unblock runtime PM of the root port.
2686 nvm_get_auth_status(sw, &status);
2689 nvm_authenticate_complete_dma_port(sw);
2694 * Check status of the previous flash authentication. If there
2695 * is one we need to power cycle the switch in any case to make
2696 * it functional again.
2698 ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
2702 /* Now we can allow root port to suspend again */
2704 nvm_authenticate_complete_dma_port(sw);
2707 tb_sw_info(sw, "switch flash authentication failed\n");
2708 nvm_set_auth_status(sw, status);
2711 tb_sw_info(sw, "power cycling the switch now\n");
2712 dma_port_power_cycle(sw->dma_port);
2715 * We return error here which causes the switch adding failure.
2716 * It should appear back after power cycle is complete.

static void tb_switch_default_link_ports(struct tb_switch *sw)
{
	int i;

	for (i = 1; i <= sw->config.max_port_number; i++) {
		struct tb_port *port = &sw->ports[i];
		struct tb_port *subordinate;

		if (!tb_port_is_null(port))
			continue;

		/* Check for the subordinate port */
		if (i == sw->config.max_port_number ||
		    !tb_port_is_null(&sw->ports[i + 1]))
			continue;

		/* Link them if not already done so (by DROM) */
		subordinate = &sw->ports[i + 1];
		if (!port->dual_link_port && !subordinate->dual_link_port) {
			port->link_nr = 0;
			port->dual_link_port = subordinate;
			subordinate->link_nr = 1;
			subordinate->dual_link_port = port;

			tb_sw_dbg(sw, "linked ports %d <-> %d\n",
				  port->port, subordinate->port);
		}
	}
}

static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
	const struct tb_port *up = tb_upstream_port(sw);

	if (!up->dual_link_port || !up->dual_link_port->remote)
		return false;

	if (tb_switch_is_usb4(sw))
		return usb4_switch_lane_bonding_possible(sw);
	return tb_lc_lane_bonding_possible(sw);
}

static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
	struct tb_port *up;
	bool change = false;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);

	ret = tb_port_get_link_speed(up);
	if (ret < 0)
		return ret;
	if (sw->link_speed != ret)
		change = true;
	sw->link_speed = ret;

	ret = tb_port_get_link_width(up);
	if (ret < 0)
		return ret;
	if (sw->link_width != ret)
		change = true;
	sw->link_width = ret;

	/* Notify userspace that there is possible link attribute change */
	if (device_is_registered(&sw->dev) && change)
		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);

	return 0;
}

/**
 * tb_switch_lane_bonding_enable() - Enable lane bonding
 * @sw: Switch to enable lane bonding
 *
 * Connection manager can call this function to enable lane bonding of a
 * switch. If conditions are correct and both switches support the feature,
 * lanes are bonded. It is safe to call this for any switch.
 */
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;
	u64 route = tb_route(sw);
	int ret;

	if (!route)
		return 0;

	if (!tb_switch_lane_bonding_possible(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(route, parent);

	if (!tb_port_is_width_supported(up, 2) ||
	    !tb_port_is_width_supported(down, 2))
		return 0;

	ret = tb_port_lane_bonding_enable(up);
	if (ret) {
		tb_port_warn(up, "failed to enable lane bonding\n");
		return ret;
	}

	ret = tb_port_lane_bonding_enable(down);
	if (ret) {
		tb_port_warn(down, "failed to enable lane bonding\n");
		tb_port_lane_bonding_disable(up);
		return ret;
	}

	ret = tb_port_wait_for_link_width(down, 2, 100);
	if (ret) {
		tb_port_warn(down, "timeout enabling lane bonding\n");
		return ret;
	}

	tb_port_update_credits(down);
	tb_port_update_credits(up);
	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding enabled\n");
	return ret;
}

/**
 * tb_switch_lane_bonding_disable() - Disable lane bonding
 * @sw: Switch whose lane bonding to disable
 *
 * Disables lane bonding between @sw and parent. This can be called even
 * if lanes were not bonded originally.
 */
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_to_switch(sw->dev.parent);
	struct tb_port *up, *down;

	if (!tb_route(sw))
		return;

	up = tb_upstream_port(sw);
	if (!up->bonded)
		return;

	down = tb_port_at(tb_route(sw), parent);

	tb_port_lane_bonding_disable(up);
	tb_port_lane_bonding_disable(down);

	/*
	 * It is fine if we get other errors as the router might have
	 * been unplugged.
	 */
	if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT)
		tb_sw_warn(sw, "timeout disabling lane bonding\n");

	tb_port_update_credits(down);
	tb_port_update_credits(up);
	tb_switch_update_link_attributes(sw);

	tb_sw_dbg(sw, "lane bonding disabled\n");
}

/**
 * tb_switch_configure_link() - Set link configured
 * @sw: Switch whose link is configured
 *
 * Sets the link upstream from @sw configured (from both ends) so that
 * it will not be disconnected when the domain exits sleep. Can be
 * called for any switch.
 *
 * It is recommended that this is called after lane bonding is enabled.
 *
 * Return: %0 on success and negative errno in case of error.
 */
int tb_switch_configure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return 0;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		ret = usb4_port_configure(up);
	else
		ret = tb_lc_configure_port(up);
	if (ret)
		return ret;

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		return usb4_port_configure(down);
	return tb_lc_configure_port(down);
}
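
/*
 * Example (illustrative sketch, not code from this driver): a
 * connection manager would typically bond the lanes first and only
 * then mark the link configured, per the recommendation above. Lane
 * bonding failure is not fatal here; the link simply stays at single
 * lane width:
 *
 *	if (tb_switch_lane_bonding_enable(sw))
 *		tb_sw_warn(sw, "failed to enable lane bonding\n");
 *
 *	return tb_switch_configure_link(sw);
 */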

/**
 * tb_switch_unconfigure_link() - Unconfigure link
 * @sw: Switch whose link is unconfigured
 *
 * Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
 */
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
	struct tb_port *up, *down;

	if (sw->is_unplugged)
		return;
	if (!tb_route(sw) || tb_switch_is_icm(sw))
		return;

	up = tb_upstream_port(sw);
	if (tb_switch_is_usb4(up->sw))
		usb4_port_unconfigure(up);
	else
		tb_lc_unconfigure_port(up);

	down = up->remote;
	if (tb_switch_is_usb4(down->sw))
		usb4_port_unconfigure(down);
	else
		tb_lc_unconfigure_port(down);
}

static void tb_switch_credits_init(struct tb_switch *sw)
{
	if (tb_switch_is_icm(sw))
		return;
	if (!tb_switch_is_usb4(sw))
		return;
	if (usb4_switch_credits_init(sw))
		tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
}

/**
 * tb_switch_add() - Add a switch to the domain
 * @sw: Switch to add
 *
 * This is the last step in adding a switch to the domain. It reads
 * identification information from the DROM and initializes ports so
 * that they can be used to connect other switches. The switch is
 * exposed to userspace when this function successfully returns. To
 * remove and release the switch, call tb_switch_remove().
 *
 * Return: %0 in case of success and negative errno in case of failure
 */
int tb_switch_add(struct tb_switch *sw)
{
	int i, ret;

	/*
	 * Initialize DMA control port now before we read DROM. Recent
	 * host controllers have more complete DROM in NVM that includes
	 * vendor and model identification strings which we then expose
	 * to userspace. The NVM can be accessed through the DMA
	 * configuration based mailbox.
	 */
	ret = tb_switch_add_dma_port(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add DMA port\n");
		return ret;
	}

	if (!sw->safe_mode) {
		tb_switch_credits_init(sw);

		/* read drom */
		ret = tb_drom_read(sw);
		if (ret)
			dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
		tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

		tb_check_quirks(sw);

		ret = tb_switch_set_uuid(sw);
		if (ret) {
			dev_err(&sw->dev, "failed to set UUID\n");
			return ret;
		}

		for (i = 0; i <= sw->config.max_port_number; i++) {
			if (sw->ports[i].disabled) {
				tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
				continue;
			}
			ret = tb_init_port(&sw->ports[i]);
			if (ret) {
				dev_err(&sw->dev, "failed to initialize port %d\n", i);
				return ret;
			}
		}

		tb_switch_default_link_ports(sw);

		ret = tb_switch_update_link_attributes(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_init(sw);
		if (ret)
			return ret;
	}

	ret = device_add(&sw->dev);
	if (ret) {
		dev_err(&sw->dev, "failed to add device: %d\n", ret);
		return ret;
	}

	if (tb_route(sw)) {
		dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
			 sw->vendor, sw->device);
		if (sw->vendor_name && sw->device_name)
			dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
				 sw->device_name);
	}

	ret = usb4_switch_add_ports(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add USB4 ports\n");
		goto err_del;
	}

	ret = tb_switch_nvm_add(sw);
	if (ret) {
		dev_err(&sw->dev, "failed to add NVM devices\n");
		goto err_ports;
	}

	/*
	 * Thunderbolt routers do not generate wakeups themselves but
	 * they forward wakeups from tunneled protocols, so enable it
	 * here.
	 */
	device_init_wakeup(&sw->dev, true);

	pm_runtime_set_active(&sw->dev);
	if (sw->rpm) {
		pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
		pm_runtime_use_autosuspend(&sw->dev);
		pm_runtime_mark_last_busy(&sw->dev);
		pm_runtime_enable(&sw->dev);
		pm_request_autosuspend(&sw->dev);
	}

	tb_switch_debugfs_init(sw);
	return 0;

err_ports:
	usb4_switch_remove_ports(sw);
err_del:
	device_del(&sw->dev);

	return ret;
}
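
/*
 * Example (illustrative sketch): the usual sequence a connection
 * manager follows when a new router appears, condensed from the
 * kernel-doc above; tb_switch_alloc() is the regular (non safe mode)
 * allocation helper defined earlier in this file and error handling
 * is abbreviated:
 *
 *	sw = tb_switch_alloc(tb, parent, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	if (tb_switch_configure(sw) || tb_switch_add(sw)) {
 *		tb_switch_put(sw);
 *		return -ENODEV;
 *	}
 */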

/**
 * tb_switch_remove() - Remove and release a switch
 * @sw: Switch to remove
 *
 * This will remove the switch from the domain and release it after the
 * last reference drops to zero. If there are switches connected below
 * this switch, they will be removed as well.
 */
void tb_switch_remove(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_debugfs_remove(sw);

	if (sw->rpm) {
		pm_runtime_get_sync(&sw->dev);
		pm_runtime_disable(&sw->dev);
	}

	/* port 0 is the switch itself and never has a remote */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port)) {
			tb_switch_remove(port->remote->sw);
			port->remote = NULL;
		} else if (port->xdomain) {
			tb_xdomain_remove(port->xdomain);
			port->xdomain = NULL;
		}

		/* Remove any downstream retimers */
		tb_retimer_remove_all(port);
	}

	if (!sw->is_unplugged)
		tb_plug_events_active(sw, false);

	tb_switch_nvm_remove(sw);
	usb4_switch_remove_ports(sw);

	if (tb_route(sw))
		dev_info(&sw->dev, "device disconnected\n");
	device_unregister(&sw->dev);
}

/**
 * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
 * @sw: Router to mark unplugged
 */
void tb_sw_set_unplugged(struct tb_switch *sw)
{
	struct tb_port *port;

	if (sw == sw->tb->root_switch) {
		tb_sw_WARN(sw, "cannot unplug root switch\n");
		return;
	}
	if (sw->is_unplugged) {
		tb_sw_WARN(sw, "is_unplugged already set\n");
		return;
	}
	sw->is_unplugged = true;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_sw_set_unplugged(port->remote->sw);
		else if (port->xdomain)
			port->xdomain->is_unplugged = true;
	}
}

static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
	if (flags)
		tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
	else
		tb_sw_dbg(sw, "disabling wakeup\n");

	if (tb_switch_is_usb4(sw))
		return usb4_switch_set_wake(sw, flags);
	return tb_lc_set_wake(sw, flags);
}

int tb_switch_resume(struct tb_switch *sw)
{
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "resuming switch\n");

	/*
	 * Check for UID of the connected switches except for root
	 * switch which we assume cannot be removed.
	 */
	if (tb_route(sw)) {
		u64 uid;

		/*
		 * Check first that we can still read the switch config
		 * space. It may be that there is now another domain
		 * connected.
		 */
		err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
		if (err < 0) {
			tb_sw_info(sw, "switch not present anymore\n");
			return err;
		}

		/* We don't have any way to confirm this was the same device */
		if (!sw->uid)
			return -ENODEV;

		if (tb_switch_is_usb4(sw))
			err = usb4_switch_read_uid(sw, &uid);
		else
			err = tb_drom_read_uid_only(sw, &uid);
		if (err) {
			tb_sw_warn(sw, "uid read failed\n");
			return err;
		}
		if (sw->uid != uid) {
			tb_sw_info(sw,
				   "changed while suspended (uid %#llx -> %#llx)\n",
				   sw->uid, uid);
			return -ENODEV;
		}
	}

	err = tb_switch_configure(sw);
	if (err)
		return err;

	/* Disable wakes */
	tb_switch_set_wake(sw, 0);

	err = tb_switch_tmu_init(sw);
	if (err)
		return err;

	/* check for surviving downstream switches */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;

		if (!tb_port_resume(port))
			continue;

		if (tb_wait_for_port(port, true) <= 0) {
			tb_port_warn(port,
				     "lost during suspend, disconnecting\n");
			if (tb_port_has_remote(port))
				tb_sw_set_unplugged(port->remote->sw);
			else if (port->xdomain)
				port->xdomain->is_unplugged = true;
		} else {
			/*
			 * Always unlock the port so the downstream
			 * switch/domain is accessible.
			 */
			if (tb_port_unlock(port))
				tb_port_warn(port, "failed to unlock port\n");
			if (port->remote && tb_switch_resume(port->remote->sw)) {
				tb_port_warn(port,
					     "lost during suspend, disconnecting\n");
				tb_sw_set_unplugged(port->remote->sw);
			}
		}
	}
	return 0;
}

/**
 * tb_switch_suspend() - Put a switch to sleep
 * @sw: Switch to suspend
 * @runtime: Is this runtime suspend or system sleep
 *
 * Suspends router and all its children. Enables wakes according to
 * value of @runtime and then sets sleep bit for the router. If @sw is
 * host router the domain is ready to go to sleep once this function
 * returns.
 */
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	struct tb_port *port;
	int err;

	tb_sw_dbg(sw, "suspending switch\n");

	/*
	 * Actually only needed for Titan Ridge but for simplicity can be
	 * done for USB4 device too as CLx is re-enabled at resume.
	 * CL0s and CL1 are enabled and supported together.
	 */
	if (tb_switch_is_clx_enabled(sw, TB_CL1)) {
		if (tb_switch_disable_clx(sw, TB_CL1))
			tb_sw_warn(sw, "failed to disable %s on upstream port\n",
				   tb_switch_clx_name(TB_CL1));
	}

	err = tb_plug_events_active(sw, false);
	if (err)
		return;

	tb_switch_for_each_port(sw, port) {
		if (tb_port_has_remote(port))
			tb_switch_suspend(port->remote->sw, runtime);
	}

	if (runtime) {
		/* Trigger wake when something is plugged in/out */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4;
		flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
	} else if (device_may_wakeup(&sw->dev)) {
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	tb_switch_set_wake(sw, flags);

	if (tb_switch_is_usb4(sw))
		usb4_switch_set_sleep(sw);
	else
		tb_lc_set_sleep(sw);
}

/**
 * tb_switch_query_dp_resource() - Query availability of DP resource
 * @sw: Switch whose DP resource is queried
 * @in: DP IN port
 *
 * Queries availability of DP resource for DP tunneling using switch
 * specific means. Returns %true if resource is available.
 */
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	if (tb_switch_is_usb4(sw))
		return usb4_switch_query_dp_resource(sw, in);
	return tb_lc_dp_sink_query(sw, in);
}

/**
 * tb_switch_alloc_dp_resource() - Allocate available DP resource
 * @sw: Switch whose DP resource is allocated
 * @in: DP IN port
 *
 * Allocates DP resource for DP tunneling. The resource must be
 * available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 in case of success and negative errno otherwise.
 */
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_alloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_alloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);

	return ret;
}

/**
 * tb_switch_dealloc_dp_resource() - De-allocate DP resource
 * @sw: Switch whose DP resource is de-allocated
 * @in: DP IN port
 *
 * De-allocates DP resource that was previously allocated for DP
 * tunneling.
 */
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (tb_switch_is_usb4(sw))
		ret = usb4_switch_dealloc_dp_resource(sw, in);
	else
		ret = tb_lc_dp_sink_dealloc(sw, in);

	if (ret)
		tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
			   in->port);
	else
		tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
}
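
/*
 * Example (illustrative sketch): a connection manager would check
 * availability first, hold the resource for the lifetime of the DP
 * tunnel and release it when the tunnel is torn down:
 *
 *	if (!tb_switch_query_dp_resource(sw, in))
 *		return -EBUSY;
 *
 *	if (tb_switch_alloc_dp_resource(sw, in))
 *		return -EBUSY;
 *
 *	... set up the DP tunnel ...
 *
 *	tb_switch_dealloc_dp_resource(sw, in);
 */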

struct tb_sw_lookup {
	struct tb *tb;
	u8 link;
	u8 depth;
	const uuid_t *uuid;
	u64 route;
};

static int tb_switch_match(struct device *dev, const void *data)
{
	struct tb_switch *sw = tb_to_switch(dev);
	const struct tb_sw_lookup *lookup = data;

	if (!sw)
		return 0;
	if (sw->tb != lookup->tb)
		return 0;

	if (lookup->uuid)
		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));

	if (lookup->route) {
		return sw->config.route_lo == lower_32_bits(lookup->route) &&
		       sw->config.route_hi == upper_32_bits(lookup->route);
	}

	/* Root switch is matched only by depth */
	if (!lookup->depth)
		return !sw->depth;

	return sw->link == lookup->link && sw->depth == lookup->depth;
}

/**
 * tb_switch_find_by_link_depth() - Find switch by link and depth
 * @tb: Domain the switch belongs to
 * @link: Link number the switch is connected to
 * @depth: Depth of the switch in link
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.link = link;
	lookup.depth = depth;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_uuid() - Find switch by UUID
 * @tb: Domain the switch belongs to
 * @uuid: UUID to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.uuid = uuid;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}

/**
 * tb_switch_find_by_route() - Find switch by route string
 * @tb: Domain the switch belongs to
 * @route: Route string to look for
 *
 * Returned switch has reference count increased so the caller needs to
 * call tb_switch_put() when done with the switch.
 */
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
	struct tb_sw_lookup lookup;
	struct device *dev;

	if (!route)
		return tb_switch_get(tb->root_switch);

	memset(&lookup, 0, sizeof(lookup));
	lookup.tb = tb;
	lookup.route = route;

	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
	if (dev)
		return tb_to_switch(dev);

	return NULL;
}
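
/*
 * Example (illustrative sketch): references returned by the lookup
 * helpers above must be dropped with tb_switch_put() when no longer
 * needed:
 *
 *	struct tb_switch *sw;
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		... use the switch ...
 *		tb_switch_put(sw);
 *	}
 */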

/**
 * tb_switch_find_port() - return the first port of @type on @sw or NULL
 * @sw: Switch to find the port from
 * @type: Port type to look for
 */
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
				    enum tb_port_type type)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		if (port->config.type == type)
			return port;
	}

	return NULL;
}
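
/*
 * Example (illustrative sketch): finding the first PCIe downstream
 * adapter of a router; TB_TYPE_PCIE_DOWN is one of the adapter types
 * from tb.h:
 *
 *	struct tb_port *down;
 *
 *	down = tb_switch_find_port(sw, TB_TYPE_PCIE_DOWN);
 *	if (!down)
 *		return -ENODEV;
 */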

static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	if (!tb_route(sw))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	ret = tb_port_pm_secondary_enable(up);
	if (ret)
		return ret;

	return tb_port_pm_secondary_disable(down);
}

static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	bool up_clx_support, down_clx_support;
	struct tb_port *up, *down;
	int ret;

	if (!tb_switch_is_clx_supported(sw))
		return 0;

	/*
	 * Enable CLx for host router's downstream port as part of the
	 * downstream router enabling procedure.
	 */
	if (!tb_route(sw))
		return 0;

	/* Enable CLx only for first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	ret = tb_switch_pm_secondary_resolve(sw);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);

	up_clx_support = tb_port_clx_supported(up, clx);
	down_clx_support = tb_port_clx_supported(down, clx);

	tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx),
		    up_clx_support ? "" : "not ");
	tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx),
		    down_clx_support ? "" : "not ");

	if (!up_clx_support || !down_clx_support)
		return -EOPNOTSUPP;

	ret = tb_port_clx_enable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_enable(down, clx);
	if (ret) {
		tb_port_clx_disable(up, clx);
		return ret;
	}

	ret = tb_switch_mask_clx_objections(sw);
	if (ret) {
		tb_port_clx_disable(up, clx);
		tb_port_clx_disable(down, clx);
		return ret;
	}

	sw->clx = clx;

	tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx));
	return 0;
}

/**
 * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
 * @sw: Router to enable CLx for
 * @clx: The CLx state to enable
 *
 * Enable CLx state only for the first hop router. That is the most
 * common use-case; it is intended for better thermal management and so
 * helps to improve performance. CLx is enabled only if both sides of
 * the link support CLx, the link is not configured as two single lane
 * links and the link is not an inter-domain link. The complete set of
 * conditions is described in CM Guide 1.0 section 8.1.
 *
 * Return: %0 on success or an error code on failure.
 */
int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	struct tb_switch *root_sw = sw->tb->root_switch;

	if (!clx_enabled)
		return 0;

	/*
	 * CLx is not enabled and validated on Intel USB4 platforms before
	 * Alder Lake.
	 */
	if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
		return 0;

	switch (clx) {
	case TB_CL1:
		/* CL0s and CL1 are enabled and supported together */
		return __tb_switch_enable_clx(sw, clx);

	default:
		return -EOPNOTSUPP;
	}
}
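
/*
 * Example (illustrative sketch): enabling CL0s/CL1 on a first hop
 * router while tolerating links that do not support the states:
 *
 *	ret = tb_switch_enable_clx(sw, TB_CL1);
 *	if (ret && ret != -EOPNOTSUPP)
 *		tb_sw_warn(sw, "failed to enable %s\n",
 *			   tb_switch_clx_name(TB_CL1));
 */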

static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	if (!tb_switch_is_clx_supported(sw))
		return 0;

	/*
	 * Disable CLx for host router's downstream port as part of the
	 * downstream router enabling procedure.
	 */
	if (!tb_route(sw))
		return 0;

	/* Disable CLx only for first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	ret = tb_port_clx_disable(up, clx);
	if (ret)
		return ret;

	ret = tb_port_clx_disable(down, clx);
	if (ret)
		return ret;

	sw->clx = TB_CLX_DISABLE;

	tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx));
	return 0;
}

/**
 * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
 * @sw: Router to disable CLx for
 * @clx: The CLx state to disable
 *
 * Return: %0 on success or an error code on failure.
 */
int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	if (!clx_enabled)
		return 0;

	switch (clx) {
	case TB_CL1:
		/* CL0s and CL1 are enabled and supported together */
		return __tb_switch_disable_clx(sw, clx);

	default:
		return -EOPNOTSUPP;
	}
}

/**
 * tb_switch_mask_clx_objections() - Mask CLx objections for a router
 * @sw: Router to mask objections for
 *
 * Mask the objections coming from the second depth routers in order to
 * stop these objections from interfering with the CLx states of the
 * first depth link.
 */
int tb_switch_mask_clx_objections(struct tb_switch *sw)
{
	int up_port = sw->config.upstream_port_number;
	u32 offset, val[2], mask_obj, unmask_obj;
	int ret, i;

	/* Among pre-USB4 devices only Titan Ridge supports CLx states */
	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	if (!tb_route(sw))
		return 0;

	/*
	 * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
	 * Port A consists of lane adapters 1,2 and
	 * Port B consists of lane adapters 3,4
	 * If upstream port is A, (lanes are 1,2), we mask objections from
	 * port B (lanes 3,4) and unmask objections from Port A and vice-versa.
	 */
	if (up_port == 1) {
		mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
		unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
		offset = TB_LOW_PWR_C1_CL1;
	} else {
		mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
		unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
		offset = TB_LOW_PWR_C3_CL1;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lp + offset, ARRAY_SIZE(val));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] |= mask_obj;
		val[i] &= ~unmask_obj;
	}

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->cap_lp + offset, ARRAY_SIZE(val));
}

/*
 * Can be used to read/write a specified PCIe bridge for any Thunderbolt 3
 * device. For now used only for Titan Ridge.
 */
static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
				       unsigned int pcie_offset, u32 value)
{
	u32 offset, command, val;
	int ret;

	if (sw->generation != 3)
		return -EOPNOTSUPP;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
	ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
	command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
	command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
	command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
		   << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
	command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;

	offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;

	ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	ret = tb_switch_wait_for_bit(sw, offset,
				     TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
		return -ETIMEDOUT;

	return 0;
}

/**
 * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
 * @sw: Router to enable PCIe L1
 *
 * For a Titan Ridge switch to enter the CLx state, its PCIe bridges must
 * enable entry to the PCIe L1 state. Must be called after the upstream
 * PCIe tunnel has been configured. Due to an Intel platform limitation,
 * must be called only for the first hop switch.
 */
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	int ret;

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_titan_ridge(sw))
		return 0;

	/* Enable PCIe L1 only for first hop router (depth = 1) */
	if (tb_route(parent))
		return 0;

	/* Write to downstream PCIe bridge #5 aka Dn4 */
	ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
	if (ret)
		return ret;

	/* Write to Upstream PCIe bridge #0 aka Up0 */
	return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
}
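
/*
 * Example (illustrative sketch): assuming the connection manager has
 * just activated the PCIe tunnel to a first hop Titan Ridge router,
 * PCIe L1 entry would be enabled before CLx is turned on:
 *
 *	ret = tb_switch_pcie_l1_enable(sw);
 *	if (ret)
 *		tb_sw_warn(sw, "failed to enable PCIe L1\n");
 */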

/**
 * tb_switch_xhci_connect() - Connect internal xHCI
 * @sw: Router whose xHCI to connect
 *
 * Can be called for any router. For Alpine Ridge and Titan Ridge this
 * performs the special flows that bring the xHCI functional for any
 * device connected to the type-C port. Call only after PCIe tunnel has
 * been established. The function only does the connect if not done
 * already so can be called several times for the same router.
 */
int tb_switch_xhci_connect(struct tb_switch *sw)
{
	bool usb_port1, usb_port3, xhci_port1, xhci_port3;
	struct tb_port *port1, *port3;
	int ret;

	port1 = &sw->ports[1];
	port3 = &sw->ports[3];

	if (tb_switch_is_alpine_ridge(sw)) {
		usb_port1 = tb_lc_is_usb_plugged(port1);
		usb_port3 = tb_lc_is_usb_plugged(port3);
		xhci_port1 = tb_lc_is_xhci_connected(port1);
		xhci_port3 = tb_lc_is_xhci_connected(port3);

		/* Figure out correct USB port to connect */
		if (usb_port1 && !xhci_port1) {
			ret = tb_lc_xhci_connect(port1);
			if (ret)
				return ret;
		}
		if (usb_port3 && !xhci_port3)
			return tb_lc_xhci_connect(port3);
	} else if (tb_switch_is_titan_ridge(sw)) {
		ret = tb_lc_xhci_connect(port1);
		if (ret)
			return ret;
		return tb_lc_xhci_connect(port3);
	}

	return 0;
}

/**
 * tb_switch_xhci_disconnect() - Disconnect internal xHCI
 * @sw: Router whose xHCI to disconnect
 *
 * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
 * ports.
 */
void tb_switch_xhci_disconnect(struct tb_switch *sw)
{
	if (sw->generation == 3) {
		struct tb_port *port1 = &sw->ports[1];
		struct tb_port *port3 = &sw->ports[3];

		tb_lc_xhci_disconnect(port1);
		tb_port_dbg(port1, "disconnected xHCI\n");
		tb_lc_xhci_disconnect(port3);
		tb_port_dbg(port3, "disconnected xHCI\n");
	}
}
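
/*
 * Example (illustrative sketch): pairing the two helpers above, a
 * connection manager connects the internal xHCI once the PCIe tunnel
 * is up and disconnects it again when the tunnel is torn down:
 *
 *	if (tb_switch_xhci_connect(sw))
 *		tb_sw_warn(sw, "failed to connect internal xHCI\n");
 *
 *	... later, when the tunnel goes away ...
 *
 *	tb_switch_xhci_disconnect(sw);
 */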