author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 18:50:12 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-18 18:50:12 +0000
commit     8665bd53f2f2e27e5511d90428cb3f60e6d0ce15
tree       8d58900dc0ebd4a3011f92c128d2fe45bc7c4bf2 /drivers/thunderbolt
parent     Adding debian version 6.7.12-1.
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--  drivers/thunderbolt/domain.c   |   7
-rw-r--r--  drivers/thunderbolt/icm.c      |   4
-rw-r--r--  drivers/thunderbolt/lc.c       |  45
-rw-r--r--  drivers/thunderbolt/nhi.c      |  23
-rw-r--r--  drivers/thunderbolt/nhi.h      |   2
-rw-r--r--  drivers/thunderbolt/path.c     |  13
-rw-r--r--  drivers/thunderbolt/quirks.c   |  14
-rw-r--r--  drivers/thunderbolt/switch.c   | 211
-rw-r--r--  drivers/thunderbolt/tb.c       | 167
-rw-r--r--  drivers/thunderbolt/tb.h       |  32
-rw-r--r--  drivers/thunderbolt/tb_regs.h  |   6
-rw-r--r--  drivers/thunderbolt/tmu.c      |   2
-rw-r--r--  drivers/thunderbolt/tunnel.c   |  60
-rw-r--r--  drivers/thunderbolt/usb4.c     |  52
-rw-r--r--  drivers/thunderbolt/xdomain.c  |  54
15 files changed, 570 insertions(+), 122 deletions(-)
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index ec7b5f6580..df0d845e06 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c @@ -307,7 +307,7 @@ static const struct attribute_group *domain_attr_groups[] = { NULL, }; -struct bus_type tb_bus_type = { +const struct bus_type tb_bus_type = { .name = "thunderbolt", .match = tb_service_match, .probe = tb_service_probe, @@ -423,6 +423,7 @@ err_free: /** * tb_domain_add() - Add domain to the system * @tb: Domain to add + * @reset: Issue reset to the host router * * Starts the domain and adds it to the system. Hotplugging devices will * work after this has been returned successfully. In order to remove @@ -431,7 +432,7 @@ err_free: * * Return: %0 in case of success and negative errno in case of error */ -int tb_domain_add(struct tb *tb) +int tb_domain_add(struct tb *tb, bool reset) { int ret; @@ -460,7 +461,7 @@ int tb_domain_add(struct tb *tb) /* Start the domain */ if (tb->cm_ops->start) { - ret = tb->cm_ops->start(tb); + ret = tb->cm_ops->start(tb, reset); if (ret) goto err_domain_del; } diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index d8b9c734ab..baf10d099c 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -1020,7 +1020,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), - 1, 10, 2000); + 1, 10, 250); if (ret) return ret; @@ -2144,7 +2144,7 @@ static int icm_runtime_resume(struct tb *tb) return 0; } -static int icm_start(struct tb *tb) +static int icm_start(struct tb *tb, bool not_used) { struct icm *icm = tb_priv(tb); int ret; diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c index 633970fbe9..63cb4b6afb 100644 --- a/drivers/thunderbolt/lc.c +++ b/drivers/thunderbolt/lc.c @@ -6,6 +6,8 @@ * Author: Mika Westerberg <mika.westerberg@linux.intel.com> */ +#include <linux/delay.h> + #include "tb.h" /** @@ -45,6 +47,49 @@ static int find_port_lc_cap(struct tb_port *port) return sw->cap_lc + start + phys * size; } +/** + * tb_lc_reset_port() - Trigger downstream port reset through LC + * @port: Port that is reset + * + * Triggers downstream port reset through link controller registers. + * Returns %0 in case of success negative errno otherwise. Only supports + * non-USB4 routers with link controller (that's Thunderbolt 2 and + * Thunderbolt 3). 
+ */ +int tb_lc_reset_port(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + int cap, ret; + u32 mode; + + if (sw->generation < 2) + return -EINVAL; + + cap = find_port_lc_cap(port); + if (cap < 0) + return cap; + + ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1); + if (ret) + return ret; + + mode |= TB_LC_PORT_MODE_DPR; + + ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1); + if (ret) + return ret; + + fsleep(10000); + + ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1); + if (ret) + return ret; + + mode &= ~TB_LC_PORT_MODE_DPR; + + return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1); +} + static int tb_lc_set_port_configured(struct tb_port *port, bool configured) { bool upstream = tb_is_upstream_port(port); diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 4b7bec74e8..b22023fae6 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -1221,7 +1221,7 @@ static void nhi_check_iommu(struct tb_nhi *nhi) str_enabled_disabled(port_ok)); } -static void nhi_reset(struct tb_nhi *nhi) +static bool nhi_reset(struct tb_nhi *nhi) { ktime_t timeout; u32 val; @@ -1229,11 +1229,11 @@ static void nhi_reset(struct tb_nhi *nhi) val = ioread32(nhi->iobase + REG_CAPS); /* Reset only v2 and later routers */ if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2) - return; + return false; if (!host_reset) { dev_dbg(&nhi->pdev->dev, "skipping host router reset\n"); - return; + return false; } iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET); @@ -1244,12 +1244,14 @@ static void nhi_reset(struct tb_nhi *nhi) val = ioread32(nhi->iobase + REG_RESET); if (!(val & REG_RESET_HRR)) { dev_warn(&nhi->pdev->dev, "host router reset successful\n"); - return; + return true; } usleep_range(10, 20); } while (ktime_before(ktime_get(), timeout)); dev_warn(&nhi->pdev->dev, "timeout resetting host router\n"); + + return false; } static int nhi_init_msi(struct tb_nhi *nhi) @@ -1331,6 +1333,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct device *dev = &pdev->dev; struct tb_nhi *nhi; struct tb *tb; + bool reset; int res; if (!nhi_imr_valid(pdev)) @@ -1365,7 +1368,11 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) nhi_check_quirks(nhi); nhi_check_iommu(nhi); - nhi_reset(nhi); + /* + * Only USB4 v2 hosts support host reset so if we already did + * that then don't do it again when the domain is initialized. + */ + reset = nhi_reset(nhi) ? 
false : host_reset; res = nhi_init_msi(nhi); if (res) @@ -1392,7 +1399,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_dbg(dev, "NHI initialized, starting thunderbolt\n"); - res = tb_domain_add(tb); + res = tb_domain_add(tb, reset); if (res) { /* * At this point the RX/TX rings might already have been @@ -1517,6 +1524,10 @@ static struct pci_device_id nhi_ids[] = { .driver_data = (kernel_ulong_t)&icl_nhi_ops }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1), .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) }, diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h index 0f029ce758..7a07c7c1a9 100644 --- a/drivers/thunderbolt/nhi.h +++ b/drivers/thunderbolt/nhi.h @@ -90,6 +90,8 @@ extern const struct tb_nhi_ops icl_nhi_ops; #define PCI_DEVICE_ID_INTEL_TGL_H_NHI1 0x9a21 #define PCI_DEVICE_ID_INTEL_RPL_NHI0 0xa73e #define PCI_DEVICE_ID_INTEL_RPL_NHI1 0xa76d +#define PCI_DEVICE_ID_INTEL_LNL_NHI0 0xa833 +#define PCI_DEVICE_ID_INTEL_LNL_NHI1 0xa834 #define PCI_CLASS_SERIAL_USB_USB4 0x0c0340 diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c index 091a81bbdb..f760e54cd9 100644 --- a/drivers/thunderbolt/path.c +++ b/drivers/thunderbolt/path.c @@ -446,6 +446,19 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index, return -ETIMEDOUT; } +/** + * tb_path_deactivate_hop() - Deactivate one path in path config space + * @port: Lane or protocol adapter + * @hop_index: HopID of the path to be cleared + * + * This deactivates or clears a single path config space entry at + * @hop_index. Returns %0 in success and negative errno otherwise. + */ +int tb_path_deactivate_hop(struct tb_port *port, int hop_index) +{ + return __tb_path_deactivate_hop(port, hop_index, true); +} + static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop) { int i, res; diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c index e6bfa63b40..e81de9c30e 100644 --- a/drivers/thunderbolt/quirks.c +++ b/drivers/thunderbolt/quirks.c @@ -43,6 +43,12 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw) } } +static void quirk_block_rpm_in_redrive(struct tb_switch *sw) +{ + sw->quirks |= QUIRK_KEEP_POWER_IN_DP_REDRIVE; + tb_sw_dbg(sw, "preventing runtime PM in DP redrive mode\n"); +} + struct tb_quirk { u16 hw_vendor_id; u16 hw_device_id; @@ -87,6 +93,14 @@ static const struct tb_quirk tb_quirks[] = { { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000, quirk_usb3_maximum_bandwidth }, /* + * Block Runtime PM in DP redrive mode for Intel Barlow Ridge host + * controllers. + */ + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000, + quirk_block_rpm_in_redrive }, + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000, + quirk_block_rpm_in_redrive }, + /* * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms. 
*/ { 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable }, diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index a3c68c808e..7b086923ce 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -676,6 +676,13 @@ int tb_port_disable(struct tb_port *port) return __tb_port_enable(port, false); } +static int tb_port_reset(struct tb_port *port) +{ + if (tb_switch_is_usb4(port->sw)) + return port->cap_usb4 ? usb4_port_reset(port) : 0; + return tb_lc_reset_port(port); +} + /* * tb_init_port() - initialize a port * @@ -941,22 +948,6 @@ int tb_port_get_link_generation(struct tb_port *port) } } -static const char *width_name(enum tb_link_width width) -{ - switch (width) { - case TB_LINK_WIDTH_SINGLE: - return "symmetric, single lane"; - case TB_LINK_WIDTH_DUAL: - return "symmetric, dual lanes"; - case TB_LINK_WIDTH_ASYM_TX: - return "asymmetric, 3 transmitters, 1 receiver"; - case TB_LINK_WIDTH_ASYM_RX: - return "asymmetric, 3 receivers, 1 transmitter"; - default: - return "unknown"; - } -} - /** * tb_port_get_link_width() - Get current link width * @port: Port to check (USB4 or CIO) @@ -1550,29 +1541,124 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw) regs->__unknown1, regs->__unknown4); } +static int tb_switch_reset_host(struct tb_switch *sw) +{ + if (sw->generation > 1) { + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + int i, ret; + + /* + * For lane adapters we issue downstream port + * reset and clear up path config spaces. + * + * For protocol adapters we disable the path and + * clear path config space one by one (from 8 to + * Max Input HopID of the adapter). + */ + if (tb_port_is_null(port) && !tb_is_upstream_port(port)) { + ret = tb_port_reset(port); + if (ret) + return ret; + } else if (tb_port_is_usb3_down(port) || + tb_port_is_usb3_up(port)) { + tb_usb3_port_enable(port, false); + } else if (tb_port_is_dpin(port) || + tb_port_is_dpout(port)) { + tb_dp_port_enable(port, false); + } else if (tb_port_is_pcie_down(port) || + tb_port_is_pcie_up(port)) { + tb_pci_port_enable(port, false); + } else { + continue; + } + + /* Cleanup path config space of protocol adapter */ + for (i = TB_PATH_MIN_HOPID; + i <= port->config.max_in_hop_id; i++) { + ret = tb_path_deactivate_hop(port, i); + if (ret) + return ret; + } + } + } else { + struct tb_cfg_result res; + + /* Thunderbolt 1 uses the "reset" config space packet */ + res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2, + TB_CFG_SWITCH, 2, 2); + if (res.err) + return res.err; + res = tb_cfg_reset(sw->tb->ctl, tb_route(sw)); + if (res.err > 0) + return -EIO; + else if (res.err < 0) + return res.err; + } + + return 0; +} + +static int tb_switch_reset_device(struct tb_switch *sw) +{ + return tb_port_reset(tb_switch_downstream_port(sw)); +} + +static bool tb_switch_enumerated(struct tb_switch *sw) +{ + u32 val; + int ret; + + /* + * Read directly from the hardware because we use this also + * during system sleep where sw->config.enabled is already set + * by us. + */ + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1); + if (ret) + return false; + + return !!(val & ROUTER_CS_3_V); +} + /** - * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET - * @sw: Switch to reset + * tb_switch_reset() - Perform reset to the router + * @sw: Router to reset * - * Return: Returns 0 on success or an error code on failure. + * Issues reset to the router @sw. Can be used for any router. 
For host + * routers, resets all the downstream ports and cleans up path config + * spaces accordingly. For device routers issues downstream port reset + * through the parent router, so as side effect there will be unplug + * soon after this is finished. + * + * If the router is not enumerated does nothing. + * + * Returns %0 on success or negative errno in case of failure. */ int tb_switch_reset(struct tb_switch *sw) { - struct tb_cfg_result res; + int ret; - if (sw->generation > 1) + /* + * We cannot access the port config spaces unless the router is + * already enumerated. If the router is not enumerated it is + * equal to being reset so we can skip that here. + */ + if (!tb_switch_enumerated(sw)) return 0; - tb_sw_dbg(sw, "resetting switch\n"); + tb_sw_dbg(sw, "resetting\n"); + + if (tb_route(sw)) + ret = tb_switch_reset_device(sw); + else + ret = tb_switch_reset_host(sw); + + if (ret) + tb_sw_warn(sw, "failed to reset\n"); - res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2, - TB_CFG_SWITCH, 2, 2); - if (res.err) - return res.err; - res = tb_cfg_reset(sw->tb->ctl, tb_route(sw)); - if (res.err > 0) - return -EIO; - return res.err; + return ret; } /** @@ -2772,7 +2858,7 @@ static void tb_switch_link_init(struct tb_switch *sw) return; tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed); - tb_sw_dbg(sw, "current link width %s\n", width_name(sw->link_width)); + tb_sw_dbg(sw, "current link width %s\n", tb_width_name(sw->link_width)); bonded = sw->link_width >= TB_LINK_WIDTH_DUAL; @@ -2792,6 +2878,19 @@ static void tb_switch_link_init(struct tb_switch *sw) if (down->dual_link_port) down->dual_link_port->bonded = bonded; tb_port_update_credits(down); + + if (tb_port_get_link_generation(up) < 4) + return; + + /* + * Set the Gen 4 preferred link width. This is what the router + * prefers when the link is brought up. If the router does not + * support asymmetric link configuration, this also will be set + * to TB_LINK_WIDTH_DUAL. + */ + sw->preferred_link_width = sw->link_width; + tb_sw_dbg(sw, "preferred link width %s\n", + tb_width_name(sw->preferred_link_width)); } /** @@ -3032,7 +3131,7 @@ int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width) tb_switch_update_link_attributes(sw); - tb_sw_dbg(sw, "link width set to %s\n", width_name(width)); + tb_sw_dbg(sw, "link width set to %s\n", tb_width_name(width)); return ret; } @@ -3081,22 +3180,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw) { struct tb_port *up, *down; - if (sw->is_unplugged) - return; if (!tb_route(sw) || tb_switch_is_icm(sw)) return; + /* + * Unconfigure downstream port so that wake-on-connect can be + * configured after router unplug. No need to unconfigure upstream port + * since its router is unplugged. 
+ */ up = tb_upstream_port(sw); - if (tb_switch_is_usb4(up->sw)) - usb4_port_unconfigure(up); - else - tb_lc_unconfigure_port(up); - down = up->remote; if (tb_switch_is_usb4(down->sw)) usb4_port_unconfigure(down); else tb_lc_unconfigure_port(down); + + if (sw->is_unplugged) + return; + + up = tb_upstream_port(sw); + if (tb_switch_is_usb4(up->sw)) + usb4_port_unconfigure(up); + else + tb_lc_unconfigure_port(up); } static void tb_switch_credits_init(struct tb_switch *sw) @@ -3342,7 +3448,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) return tb_lc_set_wake(sw, flags); } -int tb_switch_resume(struct tb_switch *sw) +static void tb_switch_check_wakes(struct tb_switch *sw) +{ + if (device_may_wakeup(&sw->dev)) { + if (tb_switch_is_usb4(sw)) + usb4_switch_check_wakes(sw); + } +} + +/** + * tb_switch_resume() - Resume a switch after sleep + * @sw: Switch to resume + * @runtime: Is this resume from runtime suspend or system sleep + * + * Resumes and re-enumerates router (and all its children), if still plugged + * after suspend. Don't enumerate device router whose UID was changed during + * suspend. If this is resume from system sleep, notifies PM core about the + * wakes occurred during suspend. Disables all wakes, except USB4 wake of + * upstream port for USB4 routers that shall be always enabled. + */ +int tb_switch_resume(struct tb_switch *sw, bool runtime) { struct tb_port *port; int err; @@ -3391,6 +3516,9 @@ int tb_switch_resume(struct tb_switch *sw) if (err) return err; + if (!runtime) + tb_switch_check_wakes(sw); + /* Disable wakes */ tb_switch_set_wake(sw, 0); @@ -3420,7 +3548,8 @@ int tb_switch_resume(struct tb_switch *sw) */ if (tb_port_unlock(port)) tb_port_warn(port, "failed to unlock port\n"); - if (port->remote && tb_switch_resume(port->remote->sw)) { + if (port->remote && + tb_switch_resume(port->remote->sw, runtime)) { tb_port_warn(port, "lost during suspend, disconnecting\n"); tb_sw_set_unplugged(port->remote->sw); diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index fd49f86e03..525f515e8b 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -513,8 +513,6 @@ static void tb_port_unconfigure_xdomain(struct tb_port *port) usb4_port_unconfigure_xdomain(port); else tb_lc_unconfigure_xdomain(port); - - tb_port_enable(port->dual_link_port); } static void tb_scan_xdomain(struct tb_port *port) @@ -1087,15 +1085,14 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port, struct tb_port *dst_port, int requested_up, int requested_down) { + bool clx = false, clx_disabled = false, downstream; struct tb_switch *sw; - bool clx, downstream; struct tb_port *up; int ret = 0; if (!asym_threshold) return 0; - /* Disable CL states before doing any transitions */ downstream = tb_port_path_direction_downstream(src_port, dst_port); /* Pick up router deepest in the hierarchy */ if (downstream) @@ -1103,11 +1100,10 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port, else sw = src_port->sw; - clx = tb_disable_clx(sw); - tb_for_each_upstream_port_on_path(src_port, dst_port, up) { + struct tb_port *down = tb_switch_downstream_port(up->sw); + enum tb_link_width width_up, width_down; int consumed_up, consumed_down; - enum tb_link_width width; ret = tb_consumed_dp_bandwidth(tb, src_port, dst_port, up, &consumed_up, &consumed_down); @@ -1128,7 +1124,8 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port, if (consumed_down + requested_down < asym_threshold) continue; - width = 
TB_LINK_WIDTH_ASYM_RX; + width_up = TB_LINK_WIDTH_ASYM_RX; + width_down = TB_LINK_WIDTH_ASYM_TX; } else { /* Upstream, the opposite of above */ if (consumed_down + requested_down >= TB_ASYM_MIN) { @@ -1138,22 +1135,34 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port, if (consumed_up + requested_up < asym_threshold) continue; - width = TB_LINK_WIDTH_ASYM_TX; + width_up = TB_LINK_WIDTH_ASYM_TX; + width_down = TB_LINK_WIDTH_ASYM_RX; } - if (up->sw->link_width == width) + if (up->sw->link_width == width_up) continue; - if (!tb_port_width_supported(up, width)) + if (!tb_port_width_supported(up, width_up) || + !tb_port_width_supported(down, width_down)) continue; + /* + * Disable CL states before doing any transitions. We + * delayed it until now that we know there is a real + * transition taking place. + */ + if (!clx_disabled) { + clx = tb_disable_clx(sw); + clx_disabled = true; + } + tb_sw_dbg(up->sw, "configuring asymmetric link\n"); /* * Here requested + consumed > threshold so we need to * transtion the link into asymmetric now. */ - ret = tb_switch_set_link_width(up->sw, width); + ret = tb_switch_set_link_width(up->sw, width_up); if (ret) { tb_sw_warn(up->sw, "failed to set link width\n"); break; @@ -1174,24 +1183,24 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port, * @dst_port: Destination adapter * @requested_up: New lower bandwidth request upstream (Mb/s) * @requested_down: New lower bandwidth request downstream (Mb/s) + * @keep_asym: Keep asymmetric link if preferred * * Goes over each link from @src_port to @dst_port and tries to * transition the link to symmetric if the currently consumed bandwidth - * allows. + * allows and link asymmetric preference is ignored (if @keep_asym is %false). */ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port, struct tb_port *dst_port, int requested_up, - int requested_down) + int requested_down, bool keep_asym) { + bool clx = false, clx_disabled = false, downstream; struct tb_switch *sw; - bool clx, downstream; struct tb_port *up; int ret = 0; if (!asym_threshold) return 0; - /* Disable CL states before doing any transitions */ downstream = tb_port_path_direction_downstream(src_port, dst_port); /* Pick up router deepest in the hierarchy */ if (downstream) @@ -1199,8 +1208,6 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port, else sw = src_port->sw; - clx = tb_disable_clx(sw); - tb_for_each_upstream_port_on_path(src_port, dst_port, up) { int consumed_up, consumed_down; @@ -1233,6 +1240,25 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port, if (up->sw->link_width == TB_LINK_WIDTH_DUAL) continue; + /* + * Here consumed < threshold so we can transition the + * link to symmetric. + * + * However, if the router prefers asymmetric link we + * honor that (unless @keep_asym is %false). 
+ */ + if (keep_asym && + up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) { + tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n"); + continue; + } + + /* Disable CL states before doing any transitions */ + if (!clx_disabled) { + clx = tb_disable_clx(sw); + clx_disabled = true; + } + tb_sw_dbg(up->sw, "configuring symmetric link\n"); ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL); @@ -1280,7 +1306,7 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up, struct tb_port *host_port; host_port = tb_port_at(tb_route(sw), tb->root_switch); - tb_configure_sym(tb, host_port, up, 0, 0); + tb_configure_sym(tb, host_port, up, 0, 0, false); } /* Set the link configured */ @@ -1465,7 +1491,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) * If bandwidth on a link is < asym_threshold * transition the link to symmetric. */ - tb_configure_sym(tb, src_port, dst_port, 0, 0); + tb_configure_sym(tb, src_port, dst_port, 0, 0, true); /* Now we can allow the domain to runtime suspend again */ pm_runtime_mark_last_busy(&dst_port->sw->dev); pm_runtime_put_autosuspend(&dst_port->sw->dev); @@ -1691,6 +1717,12 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) continue; } + /* Needs to be on different routers */ + if (in->sw == port->sw) { + tb_port_dbg(port, "skipping DP OUT on same router\n"); + continue; + } + tb_port_dbg(port, "DP OUT available\n"); /* @@ -1861,6 +1893,49 @@ static void tb_tunnel_dp(struct tb *tb) ; } +static void tb_enter_redrive(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + + if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) + return; + + /* + * If we get hot-unplug for the DP IN port of the host router + * and the DP resource is not available anymore it means there + * is a monitor connected directly to the Type-C port and we are + * in "redrive" mode. For this to work we cannot enter RTD3 so + * we bump up the runtime PM reference count here. + */ + if (!tb_port_is_dpin(port)) + return; + if (tb_route(sw)) + return; + if (!tb_switch_query_dp_resource(sw, port)) { + port->redrive = true; + pm_runtime_get(&sw->dev); + tb_port_dbg(port, "enter redrive mode, keeping powered\n"); + } +} + +static void tb_exit_redrive(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + + if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) + return; + + if (!tb_port_is_dpin(port)) + return; + if (tb_route(sw)) + return; + if (port->redrive && tb_switch_query_dp_resource(sw, port)) { + port->redrive = false; + pm_runtime_put(&sw->dev); + tb_port_dbg(port, "exit redrive mode\n"); + } +} + static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) { struct tb_port *in, *out; @@ -1877,7 +1952,10 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) } tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out); - tb_deactivate_and_free_tunnel(tunnel); + if (tunnel) + tb_deactivate_and_free_tunnel(tunnel); + else + tb_enter_redrive(port); list_del_init(&port->list); /* @@ -1901,9 +1979,10 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) return; } - tb_port_dbg(port, "DP %s resource available\n", + tb_port_dbg(port, "DP %s resource available after hotplug\n", tb_port_is_dpin(port) ? 
"IN" : "OUT"); list_add_tail(&port->list, &tcm->dp_resources); + tb_exit_redrive(port); /* Look for suitable DP IN <-> DP OUT pairs now */ tb_tunnel_dp(tb); @@ -2287,7 +2366,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, * If bandwidth on a link is < asym_threshold transition * the link to symmetric. */ - tb_configure_sym(tb, in, out, *requested_up, *requested_down); + tb_configure_sym(tb, in, out, *requested_up, *requested_down, true); /* * If requested bandwidth is less or equal than what is * currently allocated to that tunnel we simply change @@ -2330,7 +2409,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, ret = tb_configure_asym(tb, in, out, *requested_up, *requested_down); if (ret) { - tb_configure_sym(tb, in, out, 0, 0); + tb_configure_sym(tb, in, out, 0, 0, true); return ret; } @@ -2338,7 +2417,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, requested_down); if (ret) { tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n"); - tb_configure_sym(tb, in, out, 0, 0); + tb_configure_sym(tb, in, out, 0, 0, true); } } else { ret = -ENOBUFS; @@ -2555,7 +2634,7 @@ static int tb_scan_finalize_switch(struct device *dev, void *data) return 0; } -static int tb_start(struct tb *tb) +static int tb_start(struct tb *tb, bool reset) { struct tb_cm *tcm = tb_priv(tb); int ret; @@ -2596,12 +2675,24 @@ static int tb_start(struct tb *tb) tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES); /* Enable TMU if it is off */ tb_switch_tmu_enable(tb->root_switch); - /* Full scan to discover devices added before the driver was loaded. */ - tb_scan_switch(tb->root_switch); - /* Find out tunnels created by the boot firmware */ - tb_discover_tunnels(tb); - /* Add DP resources from the DP tunnels created by the boot firmware */ - tb_discover_dp_resources(tb); + + /* + * Boot firmware might have created tunnels of its own. Since we + * cannot be sure they are usable for us, tear them down and + * reset the ports to handle it as new hotplug for USB4 v1 + * routers (for USB4 v2 and beyond we already do host reset). + */ + if (reset && usb4_switch_version(tb->root_switch) == 1) { + tb_switch_reset(tb->root_switch); + } else { + /* Full scan to discover devices added before the driver was loaded. */ + tb_scan_switch(tb->root_switch); + /* Find out tunnels created by the boot firmware */ + tb_discover_tunnels(tb); + /* Add DP resources from the DP tunnels created by the boot firmware */ + tb_discover_dp_resources(tb); + } + /* * If the boot firmware did not create USB 3.x tunnels create them * now for the whole topology. @@ -2672,10 +2763,14 @@ static int tb_resume_noirq(struct tb *tb) tb_dbg(tb, "resuming...\n"); - /* remove any pci devices the firmware might have setup */ - tb_switch_reset(tb->root_switch); + /* + * For non-USB4 hosts (Apple systems) remove any PCIe devices + * the firmware might have setup. 
+ */ + if (!tb_switch_is_usb4(tb->root_switch)) + tb_switch_reset(tb->root_switch); - tb_switch_resume(tb->root_switch); + tb_switch_resume(tb->root_switch, false); tb_free_invalid_tunnels(tb); tb_free_unplugged_children(tb->root_switch); tb_restore_children(tb->root_switch); @@ -2801,7 +2896,7 @@ static int tb_runtime_resume(struct tb *tb) struct tb_tunnel *tunnel, *n; mutex_lock(&tb->lock); - tb_switch_resume(tb->root_switch); + tb_switch_resume(tb->root_switch, true); tb_free_invalid_tunnels(tb); tb_restore_children(tb->root_switch); list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index e299e53473..7706f8e08c 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -23,6 +23,8 @@ #define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0) /* Disable CLx if not supported */ #define QUIRK_NO_CLX BIT(1) +/* Need to keep power on while USB4 port is in redrive mode */ +#define QUIRK_KEEP_POWER_IN_DP_REDRIVE BIT(2) /** * struct tb_nvm - Structure holding NVM information @@ -125,6 +127,7 @@ struct tb_switch_tmu { * @device_name: Name of the device (or %NULL if not known) * @link_speed: Speed of the link in Gb/s * @link_width: Width of the upstream facing link + * @preferred_link_width: Router preferred link width (only set for Gen 4 links) * @link_usb4: Upstream link is USB4 * @generation: Switch Thunderbolt generation * @cap_plug_events: Offset to the plug events capability (%0 if not found) @@ -178,6 +181,7 @@ struct tb_switch { const char *device_name; unsigned int link_speed; enum tb_link_width link_width; + enum tb_link_width preferred_link_width; bool link_usb4; unsigned int generation; int cap_plug_events; @@ -256,6 +260,7 @@ struct tb_bandwidth_group { * @group_list: The adapter is linked to the group's list of ports through this * @max_bw: Maximum possible bandwidth through this adapter if set to * non-zero. + * @redrive: For DP IN, if true the adapter is in redrive mode. * * In USB4 terminology this structure represents an adapter (protocol or * lane adapter). 
@@ -284,6 +289,7 @@ struct tb_port { struct tb_bandwidth_group *group; struct list_head group_list; unsigned int max_bw; + bool redrive; }; /** @@ -481,7 +487,7 @@ struct tb_path { */ struct tb_cm_ops { int (*driver_ready)(struct tb *tb); - int (*start)(struct tb *tb); + int (*start)(struct tb *tb, bool reset); void (*stop)(struct tb *tb); int (*suspend_noirq)(struct tb *tb); int (*resume_noirq)(struct tb *tb); @@ -568,6 +574,22 @@ static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw) return &sw->ports[port]; } +static inline const char *tb_width_name(enum tb_link_width width) +{ + switch (width) { + case TB_LINK_WIDTH_SINGLE: + return "symmetric, single lane"; + case TB_LINK_WIDTH_DUAL: + return "symmetric, dual lanes"; + case TB_LINK_WIDTH_ASYM_TX: + return "asymmetric, 3 transmitters, 1 receiver"; + case TB_LINK_WIDTH_ASYM_RX: + return "asymmetric, 3 receivers, 1 transmitter"; + default: + return "unknown"; + } +} + /** * tb_port_has_remote() - Does the port have switch connected downstream * @port: Port to check @@ -728,7 +750,7 @@ int tb_xdomain_init(void); void tb_xdomain_exit(void); struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize); -int tb_domain_add(struct tb *tb); +int tb_domain_add(struct tb *tb, bool reset); void tb_domain_remove(struct tb *tb); int tb_domain_suspend_noirq(struct tb *tb); int tb_domain_resume_noirq(struct tb *tb); @@ -795,7 +817,7 @@ int tb_switch_configuration_valid(struct tb_switch *sw); int tb_switch_add(struct tb_switch *sw); void tb_switch_remove(struct tb_switch *sw); void tb_switch_suspend(struct tb_switch *sw, bool runtime); -int tb_switch_resume(struct tb_switch *sw); +int tb_switch_resume(struct tb_switch *sw, bool runtime); int tb_switch_reset(struct tb_switch *sw); int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, u32 value, int timeout_msec); @@ -1132,6 +1154,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, void tb_path_free(struct tb_path *path); int tb_path_activate(struct tb_path *path); void tb_path_deactivate(struct tb_path *path); +int tb_path_deactivate_hop(struct tb_port *port, int hop_index); bool tb_path_is_invalid(struct tb_path *path); bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port); @@ -1151,6 +1174,7 @@ int tb_drom_read(struct tb_switch *sw); int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid); +int tb_lc_reset_port(struct tb_port *port); int tb_lc_configure_port(struct tb_port *port); void tb_lc_unconfigure_port(struct tb_port *port); int tb_lc_configure_xdomain(struct tb_port *port); @@ -1254,6 +1278,7 @@ static inline bool tb_switch_is_usb4(const struct tb_switch *sw) return usb4_switch_version(sw) > 0; } +void usb4_switch_check_wakes(struct tb_switch *sw); int usb4_switch_setup(struct tb_switch *sw); int usb4_switch_configuration_valid(struct tb_switch *sw); int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid); @@ -1283,6 +1308,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw); int usb4_port_unlock(struct tb_port *port); int usb4_port_hotplug_enable(struct tb_port *port); +int usb4_port_reset(struct tb_port *port); int usb4_port_configure(struct tb_port *port); void usb4_port_unconfigure(struct tb_port *port); int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd); diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index 6f798f6a2b..4e43b47f9f 100644 --- 
a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -194,6 +194,8 @@ struct tb_regs_switch_header { #define USB4_VERSION_MAJOR_MASK GENMASK(7, 5) #define ROUTER_CS_1 0x01 +#define ROUTER_CS_3 0x03 +#define ROUTER_CS_3_V BIT(31) #define ROUTER_CS_4 0x04 /* Used with the router cmuv field */ #define ROUTER_CS_4_CMUV_V1 0x10 @@ -389,6 +391,7 @@ struct tb_regs_port_header { #define PORT_CS_18_CSA BIT(22) #define PORT_CS_18_TIP BIT(24) #define PORT_CS_19 0x13 +#define PORT_CS_19_DPR BIT(0) #define PORT_CS_19_PC BIT(3) #define PORT_CS_19_PID BIT(4) #define PORT_CS_19_WOC BIT(16) @@ -584,6 +587,9 @@ struct tb_regs_hop { #define TB_LC_POWER 0x740 /* Link controller registers */ +#define TB_LC_PORT_MODE 0x26 +#define TB_LC_PORT_MODE_DPR BIT(0) + #define TB_LC_CS_42 0x2a #define TB_LC_CS_42_USB_PLUGGED BIT(31) diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c index 11f2aec2a5..9a259c72e5 100644 --- a/drivers/thunderbolt/tmu.c +++ b/drivers/thunderbolt/tmu.c @@ -894,7 +894,7 @@ static int tb_switch_tmu_change_mode(struct tb_switch *sw) ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request); if (ret) - return ret; + goto out; /* Program the new mode and the downstream router lane adapter */ switch (sw->tmu.mode_request) { diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 7534cd3a81..4f09216b70 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -173,16 +173,28 @@ static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable) int ret; /* Only supported of both routers are at least USB4 v2 */ - if (tb_port_get_link_generation(port) < 4) + if ((usb4_switch_version(tunnel->src_port->sw) < 2) || + (usb4_switch_version(tunnel->dst_port->sw) < 2)) + return 0; + + if (enable && tb_port_get_link_generation(port) < 4) return 0; ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable); if (ret) return ret; + /* + * Downstream router could be unplugged so disable of encapsulation + * in upstream router is still possible. + */ ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable); - if (ret) - return ret; + if (ret) { + if (enable) + return ret; + if (ret != -ENODEV) + return ret; + } tb_tunnel_dbg(tunnel, "extended encapsulation %s\n", str_enabled_disabled(enable)); @@ -199,14 +211,21 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate) return res; } - res = tb_pci_port_enable(tunnel->src_port, activate); + if (activate) + res = tb_pci_port_enable(tunnel->dst_port, activate); + else + res = tb_pci_port_enable(tunnel->src_port, activate); if (res) return res; - if (tb_port_is_pcie_up(tunnel->dst_port)) { - res = tb_pci_port_enable(tunnel->dst_port, activate); + + if (activate) { + res = tb_pci_port_enable(tunnel->src_port, activate); if (res) return res; + } else { + /* Downstream router could be unplugged */ + tb_pci_port_enable(tunnel->dst_port, activate); } return activate ? 
0 : tb_pci_set_ext_encapsulation(tunnel, activate); @@ -1067,8 +1086,7 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, return 0; } -static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes, - int timeout_msec) +static int tb_dp_wait_dprx(struct tb_tunnel *tunnel, int timeout_msec) { ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); struct tb_port *in = tunnel->src_port; @@ -1087,15 +1105,13 @@ static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes, return ret; if (val & DP_COMMON_CAP_DPRX_DONE) { - *rate = tb_dp_cap_get_rate(val); - *lanes = tb_dp_cap_get_lanes(val); - tb_tunnel_dbg(tunnel, "DPRX read done\n"); return 0; } usleep_range(100, 150); } while (ktime_before(ktime_get(), timeout)); + tb_tunnel_dbg(tunnel, "DPRX read timeout\n"); return -ETIMEDOUT; } @@ -1110,6 +1126,7 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate, switch (cap) { case DP_LOCAL_CAP: case DP_REMOTE_CAP: + case DP_COMMON_CAP: break; default: @@ -1179,17 +1196,16 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, /* * Then see if the DPRX negotiation is ready and if yes * return that bandwidth (it may be smaller than the - * reduced one). Otherwise return the remote (possibly - * reduced) caps. + * reduced one). According to VESA spec, the DPRX + * negotiation shall compete in 5 seconds after tunnel + * established. We give it 100ms extra just in case. */ - ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150); - if (ret) { - if (ret == -ETIMEDOUT) - ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, - &rate, &lanes); - if (ret) - return ret; - } + ret = tb_dp_wait_dprx(tunnel, 5100); + if (ret) + return ret; + ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes); + if (ret) + return ret; } else if (sw->generation >= 2) { ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes); if (ret) @@ -1313,8 +1329,6 @@ static void tb_dp_dump(struct tb_tunnel *tunnel) "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", rate, lanes, tb_dp_bandwidth(rate, lanes)); - out = tunnel->dst_port; - if (tb_port_read(out, &dp_cap, TB_CFG_PORT, out->cap_adap + DP_LOCAL_CAP, 1)) return; diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 1515eff8cc..a74c9ea67b 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -155,7 +155,13 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode, tx_dwords, rx_data, rx_dwords); } -static void usb4_switch_check_wakes(struct tb_switch *sw) +/** + * usb4_switch_check_wakes() - Check for wakes and notify PM core about them + * @sw: Router whose wakes to check + * + * Checks wakes occurred during suspend and notify the PM core about them. + */ +void usb4_switch_check_wakes(struct tb_switch *sw) { bool wakeup_usb4 = false; struct usb4_port *usb4; @@ -163,9 +169,6 @@ static void usb4_switch_check_wakes(struct tb_switch *sw) bool wakeup = false; u32 val; - if (!device_may_wakeup(&sw->dev)) - return; - if (tb_route(sw)) { if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1)) return; @@ -244,8 +247,6 @@ int usb4_switch_setup(struct tb_switch *sw) u32 val = 0; int ret; - usb4_switch_check_wakes(sw); - if (!tb_route(sw)) return 0; @@ -1113,6 +1114,45 @@ int usb4_port_hotplug_enable(struct tb_port *port) return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1); } +/** + * usb4_port_reset() - Issue downstream port reset + * @port: USB4 port to reset + * + * Issues downstream port reset to @port. 
+ */ +int usb4_port_reset(struct tb_port *port) +{ + int ret; + u32 val; + + if (!port->cap_usb4) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + val |= PORT_CS_19_DPR; + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + fsleep(10000); + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + val &= ~PORT_CS_19_DPR; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); +} + static int usb4_port_set_configured(struct tb_port *port, bool configured) { int ret; diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index 9803f0bbf2..9495742913 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -1462,6 +1462,11 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd) tb_port_disable(port->dual_link_port); } + dev_dbg(&xd->dev, "current link speed %u.0 Gb/s\n", + xd->link_speed); + dev_dbg(&xd->dev, "current link width %s\n", + tb_width_name(xd->link_width)); + if (device_add(&xd->dev)) { dev_err(&xd->dev, "failed to add XDomain device\n"); return -ENODEV; @@ -1895,6 +1900,50 @@ struct device_type tb_xdomain_type = { }; EXPORT_SYMBOL_GPL(tb_xdomain_type); +static void tb_xdomain_link_init(struct tb_xdomain *xd, struct tb_port *down) +{ + if (!down->dual_link_port) + return; + + /* + * Gen 4 links come up already as bonded so only update the port + * structures here. + */ + if (tb_port_get_link_generation(down) >= 4) { + down->bonded = true; + down->dual_link_port->bonded = true; + } else { + xd->bonding_possible = true; + } +} + +static void tb_xdomain_link_exit(struct tb_xdomain *xd) +{ + struct tb_port *down = tb_xdomain_downstream_port(xd); + + if (!down->dual_link_port) + return; + + if (tb_port_get_link_generation(down) >= 4) { + down->bonded = false; + down->dual_link_port->bonded = false; + } else if (xd->link_width > TB_LINK_WIDTH_SINGLE) { + /* + * Just return port structures back to way they were and + * update credits. No need to update userspace because + * the XDomain is removed soon anyway. + */ + tb_port_lane_bonding_disable(down); + tb_port_update_credits(down); + } else if (down->dual_link_port) { + /* + * Re-enable the lane 1 adapter we disabled at the end + * of tb_xdomain_get_properties(). + */ + tb_port_enable(down->dual_link_port); + } +} + /** * tb_xdomain_alloc() - Allocate new XDomain object * @tb: Domain where the XDomain belongs @@ -1945,7 +1994,8 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent, goto err_free_local_uuid; } else { xd->needs_uuid = true; - xd->bonding_possible = !!down->dual_link_port; + + tb_xdomain_link_init(xd, down); } device_initialize(&xd->dev); @@ -2014,6 +2064,8 @@ void tb_xdomain_remove(struct tb_xdomain *xd) device_for_each_child_reverse(&xd->dev, xd, unregister_service); + tb_xdomain_link_exit(xd); + /* * Undo runtime PM here explicitly because it is possible that * the XDomain was never added to the bus and thus device_del() |