// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt link controller support
 *
 * Copyright (C) 2019, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include "tb.h"

/**
 * tb_lc_read_uuid() - Read switch UUID from link controller common register
 * @sw: Switch whose UUID is read
 * @uuid: UUID is placed here
 */
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
}
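
/*
 * The link controller exposes a common register block at sw->cap_lc.
 * The TB_LC_DESC register read below describes how many link
 * controllers there are and where the per-port register blocks start;
 * find_port_lc_cap() uses that information to locate the registers of
 * an individual port.
 */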

static int read_lc_desc(struct tb_switch *sw, u32 *desc)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
}

static int find_port_lc_cap(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int start, phys, ret, size;
	u32 desc;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Start of port LC registers */
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
	phys = tb_phy_port_from_link(port->port);

	return sw->cap_lc + start + phys * size;
}
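
/*
 * The helpers below operate on the per-port TB_LC_SX_CTRL register
 * located with find_port_lc_cap(). It holds the lane configured bits
 * (L1C/L2C), the XDomain bits (L1D/L2D), the upstream bit, and the
 * wake, sleep and lane initialization bits used elsewhere in this
 * file.
 */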

static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
	bool upstream = tb_is_upstream_port(port);
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1C;
	else
		lane = TB_LC_SX_CTRL_L2C;

	if (configured) {
		ctrl |= lane;
		if (upstream)
			ctrl |= TB_LC_SX_CTRL_UPSTREAM;
	} else {
		ctrl &= ~lane;
		if (upstream)
			ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
	}

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_port() - Let LC know about configured port
 * @port: Port that is set as configured
 *
 * Sets the port configured for power management purposes.
 */
int tb_lc_configure_port(struct tb_port *port)
{
	return tb_lc_set_port_configured(port, true);
}

/**
 * tb_lc_unconfigure_port() - Let LC know about unconfigured port
 * @port: Port that is set as unconfigured
 *
 * Sets the port unconfigured for power management purposes.
 */
void tb_lc_unconfigure_port(struct tb_port *port)
{
	tb_lc_set_port_configured(port, false);
}

static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
{
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1D;
	else
		lane = TB_LC_SX_CTRL_L2D;

	if (configure)
		ctrl |= lane;
	else
		ctrl &= ~lane;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
 * @port: Switch downstream port connected to another host
 *
 * Sets the lane configured for XDomain accordingly so that the LC knows
 * about this. Returns %0 in success and negative errno in failure.
 */
int tb_lc_configure_xdomain(struct tb_port *port)
{
	return tb_lc_set_xdomain_configured(port, true);
}

/**
 * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
 * @port: Switch downstream port that was connected to another host
 *
 * Unsets the lane XDomain configuration.
 */
void tb_lc_unconfigure_xdomain(struct tb_port *port)
{
	tb_lc_set_xdomain_configured(port, false);
}

/**
 * tb_lc_start_lane_initialization() - Start lane initialization
 * @port: Device router lane 0 adapter
 *
 * Starts lane initialization for @port after the router resumed from
 * sleep. Should be called for those downstream lane adapters that were
 * not connected (tb_lc_configure_port() was not called) before sleep.
 *
 * Returns %0 in success and negative errno in case of failure.
 */
int tb_lc_start_lane_initialization(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int ret, cap;
	u32 ctrl;

	if (!tb_route(sw))
		return 0;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl |= TB_LC_SX_CTRL_SLI;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}
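
/*
 * The TB_WAKE_ON_* flags passed in by the caller are translated to the
 * corresponding LC wake enable bits in TB_LC_SX_CTRL below;
 * tb_lc_set_wake() then applies them to every link controller of the
 * router.
 */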

static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
			      unsigned int flags)
{
	u32 ctrl;
	int ret;

	/*
	 * Enable wake on PCIe and USB4 (wake coming from another
	 * router).
	 */
	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
			 offset + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Clear all wake enable bits first, then set the requested ones */
	ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WODPC |
		  TB_LC_SX_CTRL_WODPD | TB_LC_SX_CTRL_WOP | TB_LC_SX_CTRL_WOU4);

	if (flags & TB_WAKE_ON_CONNECT)
		ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
	if (flags & TB_WAKE_ON_USB4)
		ctrl |= TB_LC_SX_CTRL_WOU4;
	if (flags & TB_WAKE_ON_PCIE)
		ctrl |= TB_LC_SX_CTRL_WOP;
	if (flags & TB_WAKE_ON_DP)
		ctrl |= TB_LC_SX_CTRL_WODPC | TB_LC_SX_CTRL_WODPD;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_set_wake() - Enable/disable wake
 * @sw: Switch whose wakes to configure
 * @flags: Wakeup flags (%0 to disable)
 *
 * For each LC sets wake bits accordingly.
 */
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	if (!tb_route(sw))
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set the wake bits */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;

		ret = tb_lc_set_wake_one(sw, offset, flags);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
 * @sw: Switch to set sleep
 *
 * Let the switch link controllers know that the switch is going to
 * sleep.
 */
int tb_lc_set_sleep(struct tb_switch *sw)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set sleep bit */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;
		u32 ctrl;

		ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
				 offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;

		ctrl |= TB_LC_SX_CTRL_SLP;
		ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
				  offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
 * @sw: Switch to check
 *
 * Checks whether conditions for lane bonding from parent to @sw are
 * possible.
 */
bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int cap, ret;
	u32 val;

	if (sw->generation < 2)
		return false;

	up = tb_upstream_port(sw);
	cap = find_port_lc_cap(up);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_PORT_ATTR_BE);
}

static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
				   struct tb_port *in)
{
	struct tb_port *port;

	/* The first DP IN port is sink 0 and second is sink 1 */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpin(port))
			return in != port;
	}

	return -EINVAL;
}
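
/*
 * The TB_LC_SNK_ALLOCATION register carries one allocation field per DP
 * sink (SNK0 and SNK1). A value of zero means the sink is unallocated
 * and TB_LC_SNK_ALLOCATION_SNKx_CM means it is owned by the connection
 * manager; any other value means another entity holds the sink.
 */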

static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
{
	u32 val, alloc;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	/*
	 * Sink is available for CM/SW to use if the allocation value is
	 * either 0 or 1.
	 */
	if (!sink) {
		alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
			return 0;
	} else {
		alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
			return 0;
	}

	return -EBUSY;
}

/**
 * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
 * @sw: Switch whose DP sink is queried
 * @in: DP IN port to check
 *
 * Queries through LC SNK_ALLOCATION registers whether DP sink is available
 * for the given DP IN port or not.
 */
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
{
	int sink;

	/*
	 * For older generations sink is always available as there is no
	 * allocation mechanism.
	 */
	if (sw->generation < 3)
		return true;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return false;

	return !tb_lc_dp_sink_available(sw, sink);
}

/**
 * tb_lc_dp_sink_alloc() - Allocate DP sink
 * @sw: Switch whose DP sink is allocated
 * @in: DP IN port the DP sink is allocated for
 *
 * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
 * resource is available and allocation is successful returns %0. In all
 * other cases returns negative errno. In particular %-EBUSY is returned if
 * the resource was not available.
 */
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink) {
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
	} else {
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
	}

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d allocated\n", sink);
	return 0;
}

/**
 * tb_lc_dp_sink_dealloc() - De-allocate DP sink
 * @sw: Switch whose DP sink is de-allocated
 * @in: DP IN port whose DP sink is de-allocated
 *
 * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
 */
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	/* Needs to be owned by CM/SW */
	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink)
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
	else
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d de-allocated\n", sink);
	return 0;
}

/**
 * tb_lc_force_power() - Forces LC to be powered on
 * @sw: Thunderbolt switch
 *
 * This is useful to let authentication cycle pass even without
 * a Thunderbolt link present.
 */
int tb_lc_force_power(struct tb_switch *sw)
{
	/* Writing a non-zero value to TB_LC_POWER keeps the LC powered */
	u32 in = 0xffff;

	return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
}