Diffstat (limited to 'drivers/thunderbolt')
37 files changed, 35786 insertions, 0 deletions
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
new file mode 100644
index 0000000000..448fd2ec8f
--- /dev/null
+++ b/drivers/thunderbolt/Kconfig
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig USB4
+	tristate "Unified support for USB4 and Thunderbolt"
+	depends on PCI
+	select APPLE_PROPERTIES if EFI_STUB && X86
+	select CRC32
+	select CRYPTO
+	select CRYPTO_HASH
+	select NVMEM
+	help
+	  USB4 and Thunderbolt driver. USB4 is the public specification
+	  based on the Thunderbolt 3 protocol. This driver is required if
+	  you want to hotplug Thunderbolt and USB4 compliant devices on
+	  Apple hardware or on PCs with Intel Falcon Ridge or newer.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called thunderbolt.
+
+if USB4
+
+config USB4_DEBUGFS_WRITE
+	bool "Enable write by debugfs to configuration spaces (DANGEROUS)"
+	help
+	  Enables writing to device configuration registers through
+	  debugfs interface.
+
+	  Only enable this if you know what you are doing! Never enable
+	  this for production systems or distro kernels.
+
+config USB4_DEBUGFS_MARGINING
+	bool "Expose receiver lane margining operations under USB4 ports (DANGEROUS)"
+	depends on DEBUG_FS
+	depends on USB4_DEBUGFS_WRITE
+	help
+	  Enables hardware and software based receiver lane margining support
+	  under each USB4 port. Used for electrical quality and robustness
+	  validation during manufacturing. Should not be enabled by distro
+	  kernels.
+
+config USB4_KUNIT_TEST
+	bool "KUnit tests" if !KUNIT_ALL_TESTS
+	depends on USB4 && KUNIT=y
+	default KUNIT_ALL_TESTS
+
+config USB4_DMA_TEST
+	tristate "DMA traffic test driver"
+	depends on DEBUG_FS
+	help
+	  This allows sending and receiving DMA traffic through loopback
+	  connection. Loopback connection can be done by either special
+	  dongle that has TX/RX lines crossed, or by simply connecting a
+	  cable back to the host. Only enable this if you know what you
+	  are doing. Normal users and distro kernels should say N here.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called thunderbolt_dma_test.
+ +endif # USB4 diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile new file mode 100644 index 0000000000..c8b3d7b780 --- /dev/null +++ b/drivers/thunderbolt/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-${CONFIG_USB4} := thunderbolt.o +thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o +thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o +thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o clx.o + +thunderbolt-${CONFIG_ACPI} += acpi.o +thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o +thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o +CFLAGS_test.o += $(DISABLE_STRUCTLEAK_PLUGIN) + +thunderbolt_dma_test-${CONFIG_USB4_DMA_TEST} += dma_test.o +obj-$(CONFIG_USB4_DMA_TEST) += thunderbolt_dma_test.o diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c new file mode 100644 index 0000000000..c9b6bb4611 --- /dev/null +++ b/drivers/thunderbolt/acpi.c @@ -0,0 +1,386 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ACPI support + * + * Copyright (C) 2020, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/acpi.h> +#include <linux/pm_runtime.h> + +#include "tb.h" + +static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data, + void **ret) +{ + struct acpi_device *adev = acpi_fetch_acpi_dev(handle); + struct fwnode_handle *fwnode; + struct tb_nhi *nhi = data; + struct pci_dev *pdev; + struct device *dev; + + if (!adev) + return AE_OK; + + fwnode = fwnode_find_reference(acpi_fwnode_handle(adev), "usb4-host-interface", 0); + if (IS_ERR(fwnode)) + return AE_OK; + + /* It needs to reference this NHI */ + if (dev_fwnode(&nhi->pdev->dev) != fwnode) + goto out_put; + + /* + * Try to find physical device walking upwards to the hierarcy. + * We need to do this because the xHCI driver might not yet be + * bound so the USB3 SuperSpeed ports are not yet created. + */ + do { + dev = acpi_get_first_physical_node(adev); + if (dev) + break; + + adev = acpi_dev_parent(adev); + } while (adev); + + /* + * Check that the device is PCIe. This is because USB3 + * SuperSpeed ports have this property and they are not power + * managed with the xHCI and the SuperSpeed hub so we create the + * link from xHCI instead. + */ + while (dev && !dev_is_pci(dev)) + dev = dev->parent; + + if (!dev) + goto out_put; + + /* + * Check that this actually matches the type of device we + * expect. It should either be xHCI or PCIe root/downstream + * port. + */ + pdev = to_pci_dev(dev); + if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI || + (pci_is_pcie(pdev) && + (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) { + const struct device_link *link; + + /* + * Make them both active first to make sure the NHI does + * not runtime suspend before the consumer. The + * pm_runtime_put() below then allows the consumer to + * runtime suspend again (which then allows NHI runtime + * suspend too now that the device link is established). 
+ */ + pm_runtime_get_sync(&pdev->dev); + + link = device_link_add(&pdev->dev, &nhi->pdev->dev, + DL_FLAG_AUTOREMOVE_SUPPLIER | + DL_FLAG_RPM_ACTIVE | + DL_FLAG_PM_RUNTIME); + if (link) { + dev_dbg(&nhi->pdev->dev, "created link from %s\n", + dev_name(&pdev->dev)); + *(bool *)ret = true; + } else { + dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", + dev_name(&pdev->dev)); + } + + pm_runtime_put(&pdev->dev); + } + +out_put: + fwnode_handle_put(fwnode); + return AE_OK; +} + +/** + * tb_acpi_add_links() - Add device links based on ACPI description + * @nhi: Pointer to NHI + * + * Goes over ACPI namespace finding tunneled ports that reference to + * @nhi ACPI node. For each reference a device link is added. The link + * is automatically removed by the driver core. + * + * Returns %true if at least one link was created. + */ +bool tb_acpi_add_links(struct tb_nhi *nhi) +{ + acpi_status status; + bool ret = false; + + if (!has_acpi_companion(&nhi->pdev->dev)) + return false; + + /* + * Find all devices that have usb4-host-controller interface + * property that references to this NHI. + */ + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 32, + tb_acpi_add_link, NULL, nhi, (void **)&ret); + if (ACPI_FAILURE(status)) { + dev_warn(&nhi->pdev->dev, "failed to enumerate tunneled ports\n"); + return false; + } + + return ret; +} + +/** + * tb_acpi_is_native() - Did the platform grant native TBT/USB4 control + * + * Returns %true if the platform granted OS native control over + * TBT/USB4. In this case software based connection manager can be used, + * otherwise there is firmware based connection manager running. + */ +bool tb_acpi_is_native(void) +{ + return osc_sb_native_usb4_support_confirmed && + osc_sb_native_usb4_control; +} + +/** + * tb_acpi_may_tunnel_usb3() - Is USB3 tunneling allowed by the platform + * + * When software based connection manager is used, this function + * returns %true if platform allows native USB3 tunneling. + */ +bool tb_acpi_may_tunnel_usb3(void) +{ + if (tb_acpi_is_native()) + return osc_sb_native_usb4_control & OSC_USB_USB3_TUNNELING; + return true; +} + +/** + * tb_acpi_may_tunnel_dp() - Is DisplayPort tunneling allowed by the platform + * + * When software based connection manager is used, this function + * returns %true if platform allows native DP tunneling. + */ +bool tb_acpi_may_tunnel_dp(void) +{ + if (tb_acpi_is_native()) + return osc_sb_native_usb4_control & OSC_USB_DP_TUNNELING; + return true; +} + +/** + * tb_acpi_may_tunnel_pcie() - Is PCIe tunneling allowed by the platform + * + * When software based connection manager is used, this function + * returns %true if platform allows native PCIe tunneling. + */ +bool tb_acpi_may_tunnel_pcie(void) +{ + if (tb_acpi_is_native()) + return osc_sb_native_usb4_control & OSC_USB_PCIE_TUNNELING; + return true; +} + +/** + * tb_acpi_is_xdomain_allowed() - Are XDomain connections allowed + * + * When software based connection manager is used, this function + * returns %true if platform allows XDomain connections. 
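/*
 * Illustrative sketch, not part of the upstream diff: a software
 * connection manager can gate tunnel creation on the _OSC policy
 * helpers above. The wrapper below and the enum tb_tunnel_type values
 * (assumed from the driver's tunnel.h) are only for illustration.
 */
static bool example_tunnel_allowed(enum tb_tunnel_type type)
{
	switch (type) {
	case TB_TUNNEL_USB3:
		return tb_acpi_may_tunnel_usb3();
	case TB_TUNNEL_DP:
		return tb_acpi_may_tunnel_dp();
	case TB_TUNNEL_PCI:
		return tb_acpi_may_tunnel_pcie();
	default:
		return true;
	}
}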
+ */ +bool tb_acpi_is_xdomain_allowed(void) +{ + if (tb_acpi_is_native()) + return osc_sb_native_usb4_control & OSC_USB_XDOMAIN; + return true; +} + +/* UUID for retimer _DSM: e0053122-795b-4122-8a5e-57be1d26acb3 */ +static const guid_t retimer_dsm_guid = + GUID_INIT(0xe0053122, 0x795b, 0x4122, + 0x8a, 0x5e, 0x57, 0xbe, 0x1d, 0x26, 0xac, 0xb3); + +#define RETIMER_DSM_QUERY_ONLINE_STATE 1 +#define RETIMER_DSM_SET_ONLINE_STATE 2 + +static int tb_acpi_retimer_set_power(struct tb_port *port, bool power) +{ + struct usb4_port *usb4 = port->usb4; + union acpi_object argv4[2]; + struct acpi_device *adev; + union acpi_object *obj; + int ret; + + if (!usb4->can_offline) + return 0; + + adev = ACPI_COMPANION(&usb4->dev); + if (WARN_ON(!adev)) + return 0; + + /* Check if we are already powered on (and in correct mode) */ + obj = acpi_evaluate_dsm_typed(adev->handle, &retimer_dsm_guid, 1, + RETIMER_DSM_QUERY_ONLINE_STATE, NULL, + ACPI_TYPE_INTEGER); + if (!obj) { + tb_port_warn(port, "ACPI: query online _DSM failed\n"); + return -EIO; + } + + ret = obj->integer.value; + ACPI_FREE(obj); + + if (power == ret) + return 0; + + tb_port_dbg(port, "ACPI: calling _DSM to power %s retimers\n", + power ? "on" : "off"); + + argv4[0].type = ACPI_TYPE_PACKAGE; + argv4[0].package.count = 1; + argv4[0].package.elements = &argv4[1]; + argv4[1].integer.type = ACPI_TYPE_INTEGER; + argv4[1].integer.value = power; + + obj = acpi_evaluate_dsm_typed(adev->handle, &retimer_dsm_guid, 1, + RETIMER_DSM_SET_ONLINE_STATE, argv4, + ACPI_TYPE_INTEGER); + if (!obj) { + tb_port_warn(port, + "ACPI: set online state _DSM evaluation failed\n"); + return -EIO; + } + + ret = obj->integer.value; + ACPI_FREE(obj); + + if (ret >= 0) { + if (power) + return ret == 1 ? 0 : -EBUSY; + return 0; + } + + tb_port_warn(port, "ACPI: set online state _DSM failed with error %d\n", ret); + return -EIO; +} + +/** + * tb_acpi_power_on_retimers() - Call platform to power on retimers + * @port: USB4 port + * + * Calls platform to turn on power to all retimers behind this USB4 + * port. After this function returns successfully the caller can + * continue with the normal retimer flows (as specified in the USB4 + * spec). Note if this returns %-EBUSY it means the type-C port is in + * non-USB4/TBT mode (there is non-USB4/TBT device connected). + * + * This should only be called if the USB4/TBT link is not up. + * + * Returns %0 on success. + */ +int tb_acpi_power_on_retimers(struct tb_port *port) +{ + return tb_acpi_retimer_set_power(port, true); +} + +/** + * tb_acpi_power_off_retimers() - Call platform to power off retimers + * @port: USB4 port + * + * This is the opposite of tb_acpi_power_on_retimers(). After returning + * successfully the normal operations with the @port can continue. + * + * Returns %0 on success. + */ +int tb_acpi_power_off_retimers(struct tb_port *port) +{ + return tb_acpi_retimer_set_power(port, false); +} + +static bool tb_acpi_bus_match(struct device *dev) +{ + return tb_is_switch(dev) || tb_is_usb4_port_device(dev); +} + +static struct acpi_device *tb_acpi_switch_find_companion(struct tb_switch *sw) +{ + struct tb_switch *parent_sw = tb_switch_parent(sw); + struct acpi_device *adev = NULL; + + /* + * Device routers exists under the downstream facing USB4 port + * of the parent router. Their _ADR is always 0. 
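/*
 * Illustrative sketch, not part of the upstream diff: the offline
 * retimer access sequence built on the two helpers above. It assumes
 * no USB4/TBT link is up on the port; example_scan_retimers() is a
 * hypothetical stand-in for the driver's retimer enumeration.
 */
static int example_offline_retimer_access(struct tb_port *port)
{
	int ret;

	ret = tb_acpi_power_on_retimers(port);
	if (ret)
		return ret;	/* -EBUSY: connector is in non-USB4/TBT mode */

	ret = example_scan_retimers(port);	/* hypothetical */

	tb_acpi_power_off_retimers(port);
	return ret;
}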
+ */ + if (parent_sw) { + struct tb_port *port = tb_switch_downstream_port(sw); + struct acpi_device *port_adev; + + port_adev = acpi_find_child_by_adr(ACPI_COMPANION(&parent_sw->dev), + port->port); + if (port_adev) + adev = acpi_find_child_device(port_adev, 0, false); + } else { + struct tb_nhi *nhi = sw->tb->nhi; + struct acpi_device *parent_adev; + + parent_adev = ACPI_COMPANION(&nhi->pdev->dev); + if (parent_adev) + adev = acpi_find_child_device(parent_adev, 0, false); + } + + return adev; +} + +static struct acpi_device *tb_acpi_find_companion(struct device *dev) +{ + /* + * The Thunderbolt/USB4 hierarchy looks like following: + * + * Device (NHI) + * Device (HR) // Host router _ADR == 0 + * Device (DFP0) // Downstream port _ADR == lane 0 adapter + * Device (DR) // Device router _ADR == 0 + * Device (UFP) // Upstream port _ADR == lane 0 adapter + * Device (DFP1) // Downstream port _ADR == lane 0 adapter number + * + * At the moment we bind the host router to the corresponding + * Linux device. + */ + if (tb_is_switch(dev)) + return tb_acpi_switch_find_companion(tb_to_switch(dev)); + if (tb_is_usb4_port_device(dev)) + return acpi_find_child_by_adr(ACPI_COMPANION(dev->parent), + tb_to_usb4_port_device(dev)->port->port); + return NULL; +} + +static void tb_acpi_setup(struct device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + struct usb4_port *usb4 = tb_to_usb4_port_device(dev); + + if (!adev || !usb4) + return; + + if (acpi_check_dsm(adev->handle, &retimer_dsm_guid, 1, + BIT(RETIMER_DSM_QUERY_ONLINE_STATE) | + BIT(RETIMER_DSM_SET_ONLINE_STATE))) + usb4->can_offline = true; +} + +static struct acpi_bus_type tb_acpi_bus = { + .name = "thunderbolt", + .match = tb_acpi_bus_match, + .find_companion = tb_acpi_find_companion, + .setup = tb_acpi_setup, +}; + +int tb_acpi_init(void) +{ + return register_acpi_bus_type(&tb_acpi_bus); +} + +void tb_acpi_exit(void) +{ + unregister_acpi_bus_type(&tb_acpi_bus); +} diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c new file mode 100644 index 0000000000..8ecd610c62 --- /dev/null +++ b/drivers/thunderbolt/cap.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt driver - capabilities lookup + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2018, Intel Corporation + */ + +#include <linux/slab.h> +#include <linux/errno.h> + +#include "tb.h" + +#define CAP_OFFSET_MAX 0xff +#define VSE_CAP_OFFSET_MAX 0xffff +#define TMU_ACCESS_EN BIT(20) + +static int tb_port_enable_tmu(struct tb_port *port, bool enable) +{ + struct tb_switch *sw = port->sw; + u32 value, offset; + int ret; + + /* + * Legacy devices need to have TMU access enabled before port + * space can be fully accessed. + */ + if (tb_switch_is_light_ridge(sw)) + offset = 0x26; + else if (tb_switch_is_eagle_ridge(sw)) + offset = 0x2a; + else + return 0; + + ret = tb_sw_read(sw, &value, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + if (enable) + value |= TMU_ACCESS_EN; + else + value &= ~TMU_ACCESS_EN; + + return tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1); +} + +static void tb_port_dummy_read(struct tb_port *port) +{ + /* + * When reading from next capability pointer location in port + * config space the read data is not cleared on LR. To avoid + * reading stale data on next read perform one dummy read after + * port capabilities are walked. 
+ */ + if (tb_switch_is_light_ridge(port->sw)) { + u32 dummy; + + tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1); + } +} + +/** + * tb_port_next_cap() - Return next capability in the linked list + * @port: Port to find the capability for + * @offset: Previous capability offset (%0 for start) + * + * Returns dword offset of the next capability in port config space + * capability list and returns it. Passing %0 returns the first entry in + * the capability list. If no next capability is found returns %0. In case + * of failure returns negative errno. + */ +int tb_port_next_cap(struct tb_port *port, unsigned int offset) +{ + struct tb_cap_any header; + int ret; + + if (!offset) + return port->config.first_cap_offset; + + ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1); + if (ret) + return ret; + + return header.basic.next; +} + +static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap) +{ + int offset = 0; + + do { + struct tb_cap_any header; + int ret; + + offset = tb_port_next_cap(port, offset); + if (offset < 0) + return offset; + + ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1); + if (ret) + return ret; + + if (header.basic.cap == cap) + return offset; + } while (offset > 0); + + return -ENOENT; +} + +/** + * tb_port_find_cap() - Find port capability + * @port: Port to find the capability for + * @cap: Capability to look + * + * Returns offset to start of capability or %-ENOENT if no such + * capability was found. Negative errno is returned if there was an + * error. + */ +int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap) +{ + int ret; + + ret = tb_port_enable_tmu(port, true); + if (ret) + return ret; + + ret = __tb_port_find_cap(port, cap); + + tb_port_dummy_read(port); + tb_port_enable_tmu(port, false); + + return ret; +} + +/** + * tb_switch_next_cap() - Return next capability in the linked list + * @sw: Switch to find the capability for + * @offset: Previous capability offset (%0 for start) + * + * Finds dword offset of the next capability in router config space + * capability list and returns it. Passing %0 returns the first entry in + * the capability list. If no next capability is found returns %0. In case + * of failure returns negative errno. + */ +int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset) +{ + struct tb_cap_any header; + int ret; + + if (!offset) + return sw->config.first_cap_offset; + + ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2); + if (ret) + return ret; + + switch (header.basic.cap) { + case TB_SWITCH_CAP_TMU: + ret = header.basic.next; + break; + + case TB_SWITCH_CAP_VSE: + if (!header.extended_short.length) + ret = header.extended_long.next; + else + ret = header.extended_short.next; + break; + + default: + tb_sw_dbg(sw, "unknown capability %#x at %#x\n", + header.basic.cap, offset); + ret = -EINVAL; + break; + } + + return ret >= VSE_CAP_OFFSET_MAX ? 0 : ret; +} + +/** + * tb_switch_find_cap() - Find switch capability + * @sw: Switch to find the capability for + * @cap: Capability to look + * + * Returns offset to start of capability or %-ENOENT if no such + * capability was found. Negative errno is returned if there was an + * error. 
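/*
 * Illustrative sketch, not part of the upstream diff: typical use of
 * tb_port_find_cap() to cache a capability offset. TB_PORT_CAP_PHY is
 * assumed from tb_regs.h; the cached offset is later used as a base
 * for lane adapter register accesses such as LANE_ADP_CS_1 in clx.c.
 */
static int example_cache_phy_cap(struct tb_port *port)
{
	int ret;

	ret = tb_port_find_cap(port, TB_PORT_CAP_PHY);
	if (ret < 0)
		return ret;	/* -ENOENT if the adapter has no PHY capability */

	port->cap_phy = ret;	/* dword offset into port config space */
	return 0;
}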
+ */ +int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap) +{ + int offset = 0; + + do { + struct tb_cap_any header; + int ret; + + offset = tb_switch_next_cap(sw, offset); + if (offset < 0) + return offset; + + ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + if (header.basic.cap == cap) + return offset; + } while (offset); + + return -ENOENT; +} + +/** + * tb_switch_find_vse_cap() - Find switch vendor specific capability + * @sw: Switch to find the capability for + * @vsec: Vendor specific capability to look + * + * Functions enumerates vendor specific capabilities (VSEC) of a switch + * and returns offset when capability matching @vsec is found. If no + * such capability is found returns %-ENOENT. In case of error returns + * negative errno. + */ +int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec) +{ + int offset = 0; + + do { + struct tb_cap_any header; + int ret; + + offset = tb_switch_next_cap(sw, offset); + if (offset < 0) + return offset; + + ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + if (header.extended_short.cap == TB_SWITCH_CAP_VSE && + header.extended_short.vsec_id == vsec) + return offset; + } while (offset); + + return -ENOENT; +} diff --git a/drivers/thunderbolt/clx.c b/drivers/thunderbolt/clx.c new file mode 100644 index 0000000000..13d217ae98 --- /dev/null +++ b/drivers/thunderbolt/clx.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * CLx support + * + * Copyright (C) 2020 - 2023, Intel Corporation + * Authors: Gil Fine <gil.fine@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/module.h> + +#include "tb.h" + +static bool clx_enabled = true; +module_param_named(clx, clx_enabled, bool, 0444); +MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)"); + +static const char *clx_name(unsigned int clx) +{ + switch (clx) { + case TB_CL0S | TB_CL1 | TB_CL2: + return "CL0s/CL1/CL2"; + case TB_CL1 | TB_CL2: + return "CL1/CL2"; + case TB_CL0S | TB_CL2: + return "CL0s/CL2"; + case TB_CL0S | TB_CL1: + return "CL0s/CL1"; + case TB_CL0S: + return "CL0s"; + case 0: + return "disabled"; + default: + return "unknown"; + } +} + +static int tb_port_pm_secondary_set(struct tb_port *port, bool secondary) +{ + u32 phy; + int ret; + + ret = tb_port_read(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + if (secondary) + phy |= LANE_ADP_CS_1_PMS; + else + phy &= ~LANE_ADP_CS_1_PMS; + + return tb_port_write(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); +} + +static int tb_port_pm_secondary_enable(struct tb_port *port) +{ + return tb_port_pm_secondary_set(port, true); +} + +static int tb_port_pm_secondary_disable(struct tb_port *port) +{ + return tb_port_pm_secondary_set(port, false); +} + +/* Called for USB4 or Titan Ridge routers only */ +static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx) +{ + u32 val, mask = 0; + bool ret; + + /* Don't enable CLx in case of two single-lane links */ + if (!port->bonded && port->dual_link_port) + return false; + + /* Don't enable CLx in case of inter-domain link */ + if (port->xdomain) + return false; + + if (tb_switch_is_usb4(port->sw)) { + if (!usb4_port_clx_supported(port)) + return false; + } else if (!tb_lc_is_clx_supported(port)) { + return false; + } + + if (clx & TB_CL0S) + mask |= LANE_ADP_CS_0_CL0S_SUPPORT; + if (clx & TB_CL1) + mask |= LANE_ADP_CS_0_CL1_SUPPORT; + if (clx 
& TB_CL2) + mask |= LANE_ADP_CS_0_CL2_SUPPORT; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_0, 1); + if (ret) + return false; + + return !!(val & mask); +} + +static int tb_port_clx_set(struct tb_port *port, unsigned int clx, bool enable) +{ + u32 phy, mask = 0; + int ret; + + if (clx & TB_CL0S) + mask |= LANE_ADP_CS_1_CL0S_ENABLE; + if (clx & TB_CL1) + mask |= LANE_ADP_CS_1_CL1_ENABLE; + if (clx & TB_CL2) + mask |= LANE_ADP_CS_1_CL2_ENABLE; + + if (!mask) + return -EOPNOTSUPP; + + ret = tb_port_read(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + if (enable) + phy |= mask; + else + phy &= ~mask; + + return tb_port_write(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); +} + +static int tb_port_clx_disable(struct tb_port *port, unsigned int clx) +{ + return tb_port_clx_set(port, clx, false); +} + +static int tb_port_clx_enable(struct tb_port *port, unsigned int clx) +{ + return tb_port_clx_set(port, clx, true); +} + +static int tb_port_clx(struct tb_port *port) +{ + u32 val; + int ret; + + if (!tb_port_clx_supported(port, TB_CL0S | TB_CL1 | TB_CL2)) + return 0; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + if (val & LANE_ADP_CS_1_CL0S_ENABLE) + ret |= TB_CL0S; + if (val & LANE_ADP_CS_1_CL1_ENABLE) + ret |= TB_CL1; + if (val & LANE_ADP_CS_1_CL2_ENABLE) + ret |= TB_CL2; + + return ret; +} + +/** + * tb_port_clx_is_enabled() - Is given CL state enabled + * @port: USB4 port to check + * @clx: Mask of CL states to check + * + * Returns true if any of the given CL states is enabled for @port. + */ +bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx) +{ + return !!(tb_port_clx(port) & clx); +} + +/** + * tb_switch_clx_init() - Initialize router CL states + * @sw: Router + * + * Can be called for any router. Initializes the current CL state by + * reading it from the hardware. + * + * Returns %0 in case of success and negative errno in case of failure. 
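/*
 * Illustrative sketch, not part of the upstream diff: a caller that is
 * about to retrain or margin a link would first check whether any CL
 * state is active on the lane adapter and drop them all. The wrapper
 * name is hypothetical and the error handling is minimal.
 */
static int example_prepare_for_retrain(struct tb_switch *sw, struct tb_port *up)
{
	int ret = 0;

	if (tb_port_clx_is_enabled(up, TB_CL0S | TB_CL1 | TB_CL2))
		ret = tb_switch_clx_disable(sw);

	return ret < 0 ? ret : 0;
}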
+ */ +int tb_switch_clx_init(struct tb_switch *sw) +{ + struct tb_port *up, *down; + unsigned int clx, tmp; + + if (tb_switch_is_icm(sw)) + return 0; + + if (!tb_route(sw)) + return 0; + + if (!tb_switch_clx_is_supported(sw)) + return 0; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + + clx = tb_port_clx(up); + tmp = tb_port_clx(down); + if (clx != tmp) + tb_sw_warn(sw, "CLx: inconsistent configuration %#x != %#x\n", + clx, tmp); + + tb_sw_dbg(sw, "CLx: current mode: %s\n", clx_name(clx)); + + sw->clx = clx; + return 0; +} + +static int tb_switch_pm_secondary_resolve(struct tb_switch *sw) +{ + struct tb_port *up, *down; + int ret; + + if (!tb_route(sw)) + return 0; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + ret = tb_port_pm_secondary_enable(up); + if (ret) + return ret; + + return tb_port_pm_secondary_disable(down); +} + +static int tb_switch_mask_clx_objections(struct tb_switch *sw) +{ + int up_port = sw->config.upstream_port_number; + u32 offset, val[2], mask_obj, unmask_obj; + int ret, i; + + /* Only Titan Ridge of pre-USB4 devices support CLx states */ + if (!tb_switch_is_titan_ridge(sw)) + return 0; + + if (!tb_route(sw)) + return 0; + + /* + * In Titan Ridge there are only 2 dual-lane Thunderbolt ports: + * Port A consists of lane adapters 1,2 and + * Port B consists of lane adapters 3,4 + * If upstream port is A, (lanes are 1,2), we mask objections from + * port B (lanes 3,4) and unmask objections from Port A and vice-versa. + */ + if (up_port == 1) { + mask_obj = TB_LOW_PWR_C0_PORT_B_MASK; + unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK; + offset = TB_LOW_PWR_C1_CL1; + } else { + mask_obj = TB_LOW_PWR_C1_PORT_A_MASK; + unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK; + offset = TB_LOW_PWR_C3_CL1; + } + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->cap_lp + offset, ARRAY_SIZE(val)); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(val); i++) { + val[i] |= mask_obj; + val[i] &= ~unmask_obj; + } + + return tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->cap_lp + offset, ARRAY_SIZE(val)); +} + +/** + * tb_switch_clx_is_supported() - Is CLx supported on this type of router + * @sw: The router to check CLx support for + */ +bool tb_switch_clx_is_supported(const struct tb_switch *sw) +{ + if (!clx_enabled) + return false; + + if (sw->quirks & QUIRK_NO_CLX) + return false; + + /* + * CLx is not enabled and validated on Intel USB4 platforms + * before Alder Lake. + */ + if (tb_switch_is_tiger_lake(sw)) + return false; + + return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw); +} + +static bool validate_mask(unsigned int clx) +{ + /* Previous states need to be enabled */ + if (clx & TB_CL1) + return (clx & TB_CL0S) == TB_CL0S; + return true; +} + +/** + * tb_switch_clx_enable() - Enable CLx on upstream port of specified router + * @sw: Router to enable CLx for + * @clx: The CLx state to enable + * + * CLx is enabled only if both sides of the link support CLx, and if both sides + * of the link are not configured as two single lane links and only if the link + * is not inter-domain link. The complete set of conditions is described in CM + * Guide 1.0 section 8.1. + * + * Returns %0 on success or an error code on failure. 
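/*
 * Illustrative sketch, not part of the upstream diff: enabling the low
 * power states for a device router. Falling back from CL2 to CL0s/CL1
 * when the routers are not v2 (-EOPNOTSUPP) is an assumption, not a
 * documented policy of this driver.
 */
static void example_enable_clx(struct tb_switch *sw)
{
	int ret;

	ret = tb_switch_clx_enable(sw, TB_CL0S | TB_CL1 | TB_CL2);
	if (ret == -EOPNOTSUPP)
		ret = tb_switch_clx_enable(sw, TB_CL0S | TB_CL1);
	if (ret)
		tb_sw_dbg(sw, "CLx: not enabled (%d)\n", ret);
}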
+ */ +int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx) +{ + bool up_clx_support, down_clx_support; + struct tb_switch *parent_sw; + struct tb_port *up, *down; + int ret; + + if (!clx || sw->clx == clx) + return 0; + + if (!validate_mask(clx)) + return -EINVAL; + + parent_sw = tb_switch_parent(sw); + if (!parent_sw) + return 0; + + if (!tb_switch_clx_is_supported(parent_sw) || + !tb_switch_clx_is_supported(sw)) + return 0; + + /* Only support CL2 for v2 routers */ + if ((clx & TB_CL2) && + (usb4_switch_version(parent_sw) < 2 || + usb4_switch_version(sw) < 2)) + return -EOPNOTSUPP; + + ret = tb_switch_pm_secondary_resolve(sw); + if (ret) + return ret; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + + up_clx_support = tb_port_clx_supported(up, clx); + down_clx_support = tb_port_clx_supported(down, clx); + + tb_port_dbg(up, "CLx: %s %ssupported\n", clx_name(clx), + up_clx_support ? "" : "not "); + tb_port_dbg(down, "CLx: %s %ssupported\n", clx_name(clx), + down_clx_support ? "" : "not "); + + if (!up_clx_support || !down_clx_support) + return -EOPNOTSUPP; + + ret = tb_port_clx_enable(up, clx); + if (ret) + return ret; + + ret = tb_port_clx_enable(down, clx); + if (ret) { + tb_port_clx_disable(up, clx); + return ret; + } + + ret = tb_switch_mask_clx_objections(sw); + if (ret) { + tb_port_clx_disable(up, clx); + tb_port_clx_disable(down, clx); + return ret; + } + + sw->clx |= clx; + + tb_sw_dbg(sw, "CLx: %s enabled\n", clx_name(clx)); + return 0; +} + +/** + * tb_switch_clx_disable() - Disable CLx on upstream port of specified router + * @sw: Router to disable CLx for + * + * Disables all CL states of the given router. Can be called on any + * router and if the states were not enabled already does nothing. + * + * Returns the CL states that were disabled or negative errno in case of + * failure. 
+ */ +int tb_switch_clx_disable(struct tb_switch *sw) +{ + unsigned int clx = sw->clx; + struct tb_port *up, *down; + int ret; + + if (!tb_switch_clx_is_supported(sw)) + return 0; + + if (!clx) + return 0; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + + ret = tb_port_clx_disable(up, clx); + if (ret) + return ret; + + ret = tb_port_clx_disable(down, clx); + if (ret) + return ret; + + sw->clx = 0; + + tb_sw_dbg(sw, "CLx: %s disabled\n", clx_name(clx)); + return clx; +} diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c new file mode 100644 index 0000000000..d997a4c545 --- /dev/null +++ b/drivers/thunderbolt/ctl.c @@ -0,0 +1,1144 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt driver - control channel and configuration commands + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2018, Intel Corporation + */ + +#include <linux/crc32.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/dmapool.h> +#include <linux/workqueue.h> + +#include "ctl.h" + + +#define TB_CTL_RX_PKG_COUNT 10 +#define TB_CTL_RETRIES 4 + +/** + * struct tb_ctl - Thunderbolt control channel + * @nhi: Pointer to the NHI structure + * @tx: Transmit ring + * @rx: Receive ring + * @frame_pool: DMA pool for control messages + * @rx_packets: Received control messages + * @request_queue_lock: Lock protecting @request_queue + * @request_queue: List of outstanding requests + * @running: Is the control channel running at the moment + * @timeout_msec: Default timeout for non-raw control messages + * @callback: Callback called when hotplug message is received + * @callback_data: Data passed to @callback + */ +struct tb_ctl { + struct tb_nhi *nhi; + struct tb_ring *tx; + struct tb_ring *rx; + + struct dma_pool *frame_pool; + struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT]; + struct mutex request_queue_lock; + struct list_head request_queue; + bool running; + + int timeout_msec; + event_cb callback; + void *callback_data; +}; + + +#define tb_ctl_WARN(ctl, format, arg...) \ + dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg) + +#define tb_ctl_err(ctl, format, arg...) \ + dev_err(&(ctl)->nhi->pdev->dev, format, ## arg) + +#define tb_ctl_warn(ctl, format, arg...) \ + dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg) + +#define tb_ctl_info(ctl, format, arg...) \ + dev_info(&(ctl)->nhi->pdev->dev, format, ## arg) + +#define tb_ctl_dbg(ctl, format, arg...) \ + dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg) + +static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue); +/* Serializes access to request kref_get/put */ +static DEFINE_MUTEX(tb_cfg_request_lock); + +/** + * tb_cfg_request_alloc() - Allocates a new config request + * + * This is refcounted object so when you are done with this, call + * tb_cfg_request_put() to it. 
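/*
 * Illustrative sketch, not part of the upstream diff: request objects
 * are reference counted. The creator owns the reference returned by
 * tb_cfg_request_alloc() and drops it with tb_cfg_request_put(); the
 * control channel takes and releases its own reference while the
 * request is queued.
 */
static void example_request_lifetime(void)
{
	struct tb_cfg_request *req;

	req = tb_cfg_request_alloc();
	if (!req)
		return;

	/* ... fill in request/response fields and submit, see tb_cfg_reset() ... */

	tb_cfg_request_put(req);	/* drop the creator's reference */
}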
+ */ +struct tb_cfg_request *tb_cfg_request_alloc(void) +{ + struct tb_cfg_request *req; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return NULL; + + kref_init(&req->kref); + + return req; +} + +/** + * tb_cfg_request_get() - Increase refcount of a request + * @req: Request whose refcount is increased + */ +void tb_cfg_request_get(struct tb_cfg_request *req) +{ + mutex_lock(&tb_cfg_request_lock); + kref_get(&req->kref); + mutex_unlock(&tb_cfg_request_lock); +} + +static void tb_cfg_request_destroy(struct kref *kref) +{ + struct tb_cfg_request *req = container_of(kref, typeof(*req), kref); + + kfree(req); +} + +/** + * tb_cfg_request_put() - Decrease refcount and possibly release the request + * @req: Request whose refcount is decreased + * + * Call this function when you are done with the request. When refcount + * goes to %0 the object is released. + */ +void tb_cfg_request_put(struct tb_cfg_request *req) +{ + mutex_lock(&tb_cfg_request_lock); + kref_put(&req->kref, tb_cfg_request_destroy); + mutex_unlock(&tb_cfg_request_lock); +} + +static int tb_cfg_request_enqueue(struct tb_ctl *ctl, + struct tb_cfg_request *req) +{ + WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags)); + WARN_ON(req->ctl); + + mutex_lock(&ctl->request_queue_lock); + if (!ctl->running) { + mutex_unlock(&ctl->request_queue_lock); + return -ENOTCONN; + } + req->ctl = ctl; + list_add_tail(&req->list, &ctl->request_queue); + set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags); + mutex_unlock(&ctl->request_queue_lock); + return 0; +} + +static void tb_cfg_request_dequeue(struct tb_cfg_request *req) +{ + struct tb_ctl *ctl = req->ctl; + + mutex_lock(&ctl->request_queue_lock); + list_del(&req->list); + clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags); + if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags)) + wake_up(&tb_cfg_request_cancel_queue); + mutex_unlock(&ctl->request_queue_lock); +} + +static bool tb_cfg_request_is_active(struct tb_cfg_request *req) +{ + return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags); +} + +static struct tb_cfg_request * +tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg) +{ + struct tb_cfg_request *req = NULL, *iter; + + mutex_lock(&pkg->ctl->request_queue_lock); + list_for_each_entry(iter, &pkg->ctl->request_queue, list) { + tb_cfg_request_get(iter); + if (iter->match(iter, pkg)) { + req = iter; + break; + } + tb_cfg_request_put(iter); + } + mutex_unlock(&pkg->ctl->request_queue_lock); + + return req; +} + +/* utility functions */ + + +static int check_header(const struct ctl_pkg *pkg, u32 len, + enum tb_cfg_pkg_type type, u64 route) +{ + struct tb_cfg_header *header = pkg->buffer; + + /* check frame, TODO: frame flags */ + if (WARN(len != pkg->frame.size, + "wrong framesize (expected %#x, got %#x)\n", + len, pkg->frame.size)) + return -EIO; + if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n", + type, pkg->frame.eof)) + return -EIO; + if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n", + pkg->frame.sof)) + return -EIO; + + /* check header */ + if (WARN(header->unknown != 1 << 9, + "header->unknown is %#x\n", header->unknown)) + return -EIO; + if (WARN(route != tb_cfg_get_route(header), + "wrong route (expected %llx, got %llx)", + route, tb_cfg_get_route(header))) + return -EIO; + return 0; +} + +static int check_config_address(struct tb_cfg_address addr, + enum tb_cfg_space space, u32 offset, + u32 length) +{ + if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero)) + return -EIO; + if (WARN(space != addr.space, "wrong space (expected %x, got %x\n)", 
+ space, addr.space)) + return -EIO; + if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x\n)", + offset, addr.offset)) + return -EIO; + if (WARN(length != addr.length, "wrong space (expected %x, got %x\n)", + length, addr.length)) + return -EIO; + /* + * We cannot check addr->port as it is set to the upstream port of the + * sender. + */ + return 0; +} + +static struct tb_cfg_result decode_error(const struct ctl_pkg *response) +{ + struct cfg_error_pkg *pkg = response->buffer; + struct tb_cfg_result res = { 0 }; + res.response_route = tb_cfg_get_route(&pkg->header); + res.response_port = 0; + res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR, + tb_cfg_get_route(&pkg->header)); + if (res.err) + return res; + + res.err = 1; + res.tb_error = pkg->error; + res.response_port = pkg->port; + return res; + +} + +static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len, + enum tb_cfg_pkg_type type, u64 route) +{ + struct tb_cfg_header *header = pkg->buffer; + struct tb_cfg_result res = { 0 }; + + if (pkg->frame.eof == TB_CFG_PKG_ERROR) + return decode_error(pkg); + + res.response_port = 0; /* will be updated later for cfg_read/write */ + res.response_route = tb_cfg_get_route(header); + res.err = check_header(pkg, len, type, route); + return res; +} + +static void tb_cfg_print_error(struct tb_ctl *ctl, + const struct tb_cfg_result *res) +{ + WARN_ON(res->err != 1); + switch (res->tb_error) { + case TB_CFG_ERROR_PORT_NOT_CONNECTED: + /* Port is not connected. This can happen during surprise + * removal. Do not warn. */ + return; + case TB_CFG_ERROR_INVALID_CONFIG_SPACE: + /* + * Invalid cfg_space/offset/length combination in + * cfg_read/cfg_write. + */ + tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n", + res->response_route, res->response_port); + return; + case TB_CFG_ERROR_NO_SUCH_PORT: + /* + * - The route contains a non-existent port. + * - The route contains a non-PHY port (e.g. PCIe). + * - The port in cfg_read/cfg_write does not exist. + */ + tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n", + res->response_route, res->response_port); + return; + case TB_CFG_ERROR_LOOP: + tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n", + res->response_route, res->response_port); + return; + case TB_CFG_ERROR_LOCK: + tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n", + res->response_route, res->response_port); + return; + default: + /* 5,6,7,9 and 11 are also valid error codes */ + tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n", + res->response_route, res->response_port); + return; + } +} + +static __be32 tb_crc(const void *data, size_t len) +{ + return cpu_to_be32(~__crc32c_le(~0, data, len)); +} + +static void tb_ctl_pkg_free(struct ctl_pkg *pkg) +{ + if (pkg) { + dma_pool_free(pkg->ctl->frame_pool, + pkg->buffer, pkg->frame.buffer_phy); + kfree(pkg); + } +} + +static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl) +{ + struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL); + if (!pkg) + return NULL; + pkg->ctl = ctl; + pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL, + &pkg->frame.buffer_phy); + if (!pkg->buffer) { + kfree(pkg); + return NULL; + } + return pkg; +} + + +/* RX/TX handling */ + +static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame, + bool canceled) +{ + struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame); + tb_ctl_pkg_free(pkg); +} + +/* + * tb_cfg_tx() - transmit a packet on the control channel + * + * len must be a multiple of four. 
+ * + * Return: Returns 0 on success or an error code on failure. + */ +static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len, + enum tb_cfg_pkg_type type) +{ + int res; + struct ctl_pkg *pkg; + if (len % 4 != 0) { /* required for le->be conversion */ + tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len); + return -EINVAL; + } + if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */ + tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n", + len, TB_FRAME_SIZE - 4); + return -EINVAL; + } + pkg = tb_ctl_pkg_alloc(ctl); + if (!pkg) + return -ENOMEM; + pkg->frame.callback = tb_ctl_tx_callback; + pkg->frame.size = len + 4; + pkg->frame.sof = type; + pkg->frame.eof = type; + cpu_to_be32_array(pkg->buffer, data, len / 4); + *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len); + + res = tb_ring_tx(ctl->tx, &pkg->frame); + if (res) /* ring is stopped */ + tb_ctl_pkg_free(pkg); + return res; +} + +/* + * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback + */ +static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type, + struct ctl_pkg *pkg, size_t size) +{ + return ctl->callback(ctl->callback_data, type, pkg->buffer, size); +} + +static void tb_ctl_rx_submit(struct ctl_pkg *pkg) +{ + tb_ring_rx(pkg->ctl->rx, &pkg->frame); /* + * We ignore failures during stop. + * All rx packets are referenced + * from ctl->rx_packets, so we do + * not loose them. + */ +} + +static int tb_async_error(const struct ctl_pkg *pkg) +{ + const struct cfg_error_pkg *error = pkg->buffer; + + if (pkg->frame.eof != TB_CFG_PKG_ERROR) + return false; + + switch (error->error) { + case TB_CFG_ERROR_LINK_ERROR: + case TB_CFG_ERROR_HEC_ERROR_DETECTED: + case TB_CFG_ERROR_FLOW_CONTROL_ERROR: + case TB_CFG_ERROR_DP_BW: + case TB_CFG_ERROR_ROP_CMPLT: + case TB_CFG_ERROR_POP_CMPLT: + case TB_CFG_ERROR_PCIE_WAKE: + case TB_CFG_ERROR_DP_CON_CHANGE: + case TB_CFG_ERROR_DPTX_DISCOVERY: + case TB_CFG_ERROR_LINK_RECOVERY: + case TB_CFG_ERROR_ASYM_LINK: + return true; + + default: + return false; + } +} + +static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame, + bool canceled) +{ + struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame); + struct tb_cfg_request *req; + __be32 crc32; + + if (canceled) + return; /* + * ring is stopped, packet is referenced from + * ctl->rx_packets. 
+ */ + + if (frame->size < 4 || frame->size % 4 != 0) { + tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n", + frame->size); + goto rx; + } + + frame->size -= 4; /* remove checksum */ + crc32 = tb_crc(pkg->buffer, frame->size); + be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4); + + switch (frame->eof) { + case TB_CFG_PKG_READ: + case TB_CFG_PKG_WRITE: + case TB_CFG_PKG_ERROR: + case TB_CFG_PKG_OVERRIDE: + case TB_CFG_PKG_RESET: + if (*(__be32 *)(pkg->buffer + frame->size) != crc32) { + tb_ctl_err(pkg->ctl, + "RX: checksum mismatch, dropping packet\n"); + goto rx; + } + if (tb_async_error(pkg)) { + tb_ctl_handle_event(pkg->ctl, frame->eof, + pkg, frame->size); + goto rx; + } + break; + + case TB_CFG_PKG_EVENT: + case TB_CFG_PKG_XDOMAIN_RESP: + case TB_CFG_PKG_XDOMAIN_REQ: + if (*(__be32 *)(pkg->buffer + frame->size) != crc32) { + tb_ctl_err(pkg->ctl, + "RX: checksum mismatch, dropping packet\n"); + goto rx; + } + fallthrough; + case TB_CFG_PKG_ICM_EVENT: + if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size)) + goto rx; + break; + + default: + break; + } + + /* + * The received packet will be processed only if there is an + * active request and that the packet is what is expected. This + * prevents packets such as replies coming after timeout has + * triggered from messing with the active requests. + */ + req = tb_cfg_request_find(pkg->ctl, pkg); + if (req) { + if (req->copy(req, pkg)) + schedule_work(&req->work); + tb_cfg_request_put(req); + } + +rx: + tb_ctl_rx_submit(pkg); +} + +static void tb_cfg_request_work(struct work_struct *work) +{ + struct tb_cfg_request *req = container_of(work, typeof(*req), work); + + if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags)) + req->callback(req->callback_data); + + tb_cfg_request_dequeue(req); + tb_cfg_request_put(req); +} + +/** + * tb_cfg_request() - Start control request not waiting for it to complete + * @ctl: Control channel to use + * @req: Request to start + * @callback: Callback called when the request is completed + * @callback_data: Data to be passed to @callback + * + * This queues @req on the given control channel without waiting for it + * to complete. When the request completes @callback is called. + */ +int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req, + void (*callback)(void *), void *callback_data) +{ + int ret; + + req->flags = 0; + req->callback = callback; + req->callback_data = callback_data; + INIT_WORK(&req->work, tb_cfg_request_work); + INIT_LIST_HEAD(&req->list); + + tb_cfg_request_get(req); + ret = tb_cfg_request_enqueue(ctl, req); + if (ret) + goto err_put; + + ret = tb_ctl_tx(ctl, req->request, req->request_size, + req->request_type); + if (ret) + goto err_dequeue; + + if (!req->response) + schedule_work(&req->work); + + return 0; + +err_dequeue: + tb_cfg_request_dequeue(req); +err_put: + tb_cfg_request_put(req); + + return ret; +} + +/** + * tb_cfg_request_cancel() - Cancel a control request + * @req: Request to cancel + * @err: Error to assign to the request + * + * This function can be used to cancel ongoing request. It will wait + * until the request is not active anymore. 
+ */ +void tb_cfg_request_cancel(struct tb_cfg_request *req, int err) +{ + set_bit(TB_CFG_REQUEST_CANCELED, &req->flags); + schedule_work(&req->work); + wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req)); + req->result.err = err; +} + +static void tb_cfg_request_complete(void *data) +{ + complete(data); +} + +/** + * tb_cfg_request_sync() - Start control request and wait until it completes + * @ctl: Control channel to use + * @req: Request to start + * @timeout_msec: Timeout how long to wait @req to complete + * + * Starts a control request and waits until it completes. If timeout + * triggers the request is canceled before function returns. Note the + * caller needs to make sure only one message for given switch is active + * at a time. + */ +struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl, + struct tb_cfg_request *req, + int timeout_msec) +{ + unsigned long timeout = msecs_to_jiffies(timeout_msec); + struct tb_cfg_result res = { 0 }; + DECLARE_COMPLETION_ONSTACK(done); + int ret; + + ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done); + if (ret) { + res.err = ret; + return res; + } + + if (!wait_for_completion_timeout(&done, timeout)) + tb_cfg_request_cancel(req, -ETIMEDOUT); + + flush_work(&req->work); + + return req->result; +} + +/* public interface, alloc/start/stop/free */ + +/** + * tb_ctl_alloc() - allocate a control channel + * @nhi: Pointer to NHI + * @timeout_msec: Default timeout used with non-raw control messages + * @cb: Callback called for plug events + * @cb_data: Data passed to @cb + * + * cb will be invoked once for every hot plug event. + * + * Return: Returns a pointer on success or NULL on failure. + */ +struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb, + void *cb_data) +{ + int i; + struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); + if (!ctl) + return NULL; + ctl->nhi = nhi; + ctl->timeout_msec = timeout_msec; + ctl->callback = cb; + ctl->callback_data = cb_data; + + mutex_init(&ctl->request_queue_lock); + INIT_LIST_HEAD(&ctl->request_queue); + ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev, + TB_FRAME_SIZE, 4, 0); + if (!ctl->frame_pool) + goto err; + + ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND); + if (!ctl->tx) + goto err; + + ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff, + 0xffff, NULL, NULL); + if (!ctl->rx) + goto err; + + for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) { + ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl); + if (!ctl->rx_packets[i]) + goto err; + ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback; + } + + tb_ctl_dbg(ctl, "control channel created\n"); + return ctl; +err: + tb_ctl_free(ctl); + return NULL; +} + +/** + * tb_ctl_free() - free a control channel + * @ctl: Control channel to free + * + * Must be called after tb_ctl_stop. + * + * Must NOT be called from ctl->callback. 
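/*
 * Illustrative sketch, not part of the upstream diff: control channel
 * lifecycle as seen by a connection manager. The event callback body
 * and the 1000 ms default timeout are placeholders.
 */
static bool example_event_cb(void *data, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
{
	/* handle hot plug and other notifications here */
	return true;
}

static void example_ctl_lifecycle(struct tb_nhi *nhi)
{
	struct tb_ctl *ctl;

	ctl = tb_ctl_alloc(nhi, 1000, example_event_cb, NULL);
	if (!ctl)
		return;

	tb_ctl_start(ctl);
	/* ... issue config space reads/writes, handle events ... */
	tb_ctl_stop(ctl);	/* no more callback invocations after this */
	tb_ctl_free(ctl);	/* only after tb_ctl_stop() */
}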
+ */ +void tb_ctl_free(struct tb_ctl *ctl) +{ + int i; + + if (!ctl) + return; + + if (ctl->rx) + tb_ring_free(ctl->rx); + if (ctl->tx) + tb_ring_free(ctl->tx); + + /* free RX packets */ + for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) + tb_ctl_pkg_free(ctl->rx_packets[i]); + + + dma_pool_destroy(ctl->frame_pool); + kfree(ctl); +} + +/** + * tb_ctl_start() - start/resume the control channel + * @ctl: Control channel to start + */ +void tb_ctl_start(struct tb_ctl *ctl) +{ + int i; + tb_ctl_dbg(ctl, "control channel starting...\n"); + tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */ + tb_ring_start(ctl->rx); + for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) + tb_ctl_rx_submit(ctl->rx_packets[i]); + + ctl->running = true; +} + +/** + * tb_ctl_stop() - pause the control channel + * @ctl: Control channel to stop + * + * All invocations of ctl->callback will have finished after this method + * returns. + * + * Must NOT be called from ctl->callback. + */ +void tb_ctl_stop(struct tb_ctl *ctl) +{ + mutex_lock(&ctl->request_queue_lock); + ctl->running = false; + mutex_unlock(&ctl->request_queue_lock); + + tb_ring_stop(ctl->rx); + tb_ring_stop(ctl->tx); + + if (!list_empty(&ctl->request_queue)) + tb_ctl_WARN(ctl, "dangling request in request_queue\n"); + INIT_LIST_HEAD(&ctl->request_queue); + tb_ctl_dbg(ctl, "control channel stopped\n"); +} + +/* public interface, commands */ + +/** + * tb_cfg_ack_notification() - Ack notification + * @ctl: Control channel to use + * @route: Router that originated the event + * @error: Pointer to the notification package + * + * Call this as response for non-plug notification to ack it. Returns + * %0 on success or an error code on failure. + */ +int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route, + const struct cfg_error_pkg *error) +{ + struct cfg_ack_pkg pkg = { + .header = tb_cfg_make_header(route), + }; + const char *name; + + switch (error->error) { + case TB_CFG_ERROR_LINK_ERROR: + name = "link error"; + break; + case TB_CFG_ERROR_HEC_ERROR_DETECTED: + name = "HEC error"; + break; + case TB_CFG_ERROR_FLOW_CONTROL_ERROR: + name = "flow control error"; + break; + case TB_CFG_ERROR_DP_BW: + name = "DP_BW"; + break; + case TB_CFG_ERROR_ROP_CMPLT: + name = "router operation completion"; + break; + case TB_CFG_ERROR_POP_CMPLT: + name = "port operation completion"; + break; + case TB_CFG_ERROR_PCIE_WAKE: + name = "PCIe wake"; + break; + case TB_CFG_ERROR_DP_CON_CHANGE: + name = "DP connector change"; + break; + case TB_CFG_ERROR_DPTX_DISCOVERY: + name = "DPTX discovery"; + break; + case TB_CFG_ERROR_LINK_RECOVERY: + name = "link recovery"; + break; + case TB_CFG_ERROR_ASYM_LINK: + name = "asymmetric link"; + break; + default: + name = "unknown"; + break; + } + + tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name, + error->error, route); + + return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK); +} + +/** + * tb_cfg_ack_plug() - Ack hot plug/unplug event + * @ctl: Control channel to use + * @route: Router that originated the event + * @port: Port where the hot plug/unplug happened + * @unplug: Ack hot plug or unplug + * + * Call this as response for hot plug/unplug event to ack it. + * Returns %0 on success or an error code on failure. + */ +int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug) +{ + struct cfg_error_pkg pkg = { + .header = tb_cfg_make_header(route), + .port = port, + .error = TB_CFG_ERROR_ACK_PLUG_EVENT, + .pg = unplug ? 
TB_CFG_ERROR_PG_HOT_UNPLUG + : TB_CFG_ERROR_PG_HOT_PLUG, + }; + tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n", + unplug ? "un" : "", route, port); + return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR); +} + +static bool tb_cfg_match(const struct tb_cfg_request *req, + const struct ctl_pkg *pkg) +{ + u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63); + + if (pkg->frame.eof == TB_CFG_PKG_ERROR) + return true; + + if (pkg->frame.eof != req->response_type) + return false; + if (route != tb_cfg_get_route(req->request)) + return false; + if (pkg->frame.size != req->response_size) + return false; + + if (pkg->frame.eof == TB_CFG_PKG_READ || + pkg->frame.eof == TB_CFG_PKG_WRITE) { + const struct cfg_read_pkg *req_hdr = req->request; + const struct cfg_read_pkg *res_hdr = pkg->buffer; + + if (req_hdr->addr.seq != res_hdr->addr.seq) + return false; + } + + return true; +} + +static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) +{ + struct tb_cfg_result res; + + /* Now make sure it is in expected format */ + res = parse_header(pkg, req->response_size, req->response_type, + tb_cfg_get_route(req->request)); + if (!res.err) + memcpy(req->response, pkg->buffer, req->response_size); + + req->result = res; + + /* Always complete when first response is received */ + return true; +} + +/** + * tb_cfg_reset() - send a reset packet and wait for a response + * @ctl: Control channel pointer + * @route: Router string for the router to send reset + * + * If the switch at route is incorrectly configured then we will not receive a + * reply (even though the switch will reset). The caller should check for + * -ETIMEDOUT and attempt to reconfigure the switch. + */ +struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route) +{ + struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) }; + struct tb_cfg_result res = { 0 }; + struct tb_cfg_header reply; + struct tb_cfg_request *req; + + req = tb_cfg_request_alloc(); + if (!req) { + res.err = -ENOMEM; + return res; + } + + req->match = tb_cfg_match; + req->copy = tb_cfg_copy; + req->request = &request; + req->request_size = sizeof(request); + req->request_type = TB_CFG_PKG_RESET; + req->response = &reply; + req->response_size = sizeof(reply); + req->response_type = TB_CFG_PKG_RESET; + + res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec); + + tb_cfg_request_put(req); + + return res; +} + +/** + * tb_cfg_read_raw() - read from config space into buffer + * @ctl: Pointer to the control channel + * @buffer: Buffer where the data is read + * @route: Route string of the router + * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise + * @space: Config space selector + * @offset: Dword word offset of the register to start reading + * @length: Number of dwords to read + * @timeout_msec: Timeout in ms how long to wait for the response + * + * Reads from router config space without translating the possible error. 
+ */ +struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer, + u64 route, u32 port, enum tb_cfg_space space, + u32 offset, u32 length, int timeout_msec) +{ + struct tb_cfg_result res = { 0 }; + struct cfg_read_pkg request = { + .header = tb_cfg_make_header(route), + .addr = { + .port = port, + .space = space, + .offset = offset, + .length = length, + }, + }; + struct cfg_write_pkg reply; + int retries = 0; + + while (retries < TB_CTL_RETRIES) { + struct tb_cfg_request *req; + + req = tb_cfg_request_alloc(); + if (!req) { + res.err = -ENOMEM; + return res; + } + + request.addr.seq = retries++; + + req->match = tb_cfg_match; + req->copy = tb_cfg_copy; + req->request = &request; + req->request_size = sizeof(request); + req->request_type = TB_CFG_PKG_READ; + req->response = &reply; + req->response_size = 12 + 4 * length; + req->response_type = TB_CFG_PKG_READ; + + res = tb_cfg_request_sync(ctl, req, timeout_msec); + + tb_cfg_request_put(req); + + if (res.err != -ETIMEDOUT) + break; + + /* Wait a bit (arbitrary time) until we send a retry */ + usleep_range(10, 100); + } + + if (res.err) + return res; + + res.response_port = reply.addr.port; + res.err = check_config_address(reply.addr, space, offset, length); + if (!res.err) + memcpy(buffer, &reply.data, 4 * length); + return res; +} + +/** + * tb_cfg_write_raw() - write from buffer into config space + * @ctl: Pointer to the control channel + * @buffer: Data to write + * @route: Route string of the router + * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise + * @space: Config space selector + * @offset: Dword word offset of the register to start writing + * @length: Number of dwords to write + * @timeout_msec: Timeout in ms how long to wait for the response + * + * Writes to router config space without translating the possible error. + */ +struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer, + u64 route, u32 port, enum tb_cfg_space space, + u32 offset, u32 length, int timeout_msec) +{ + struct tb_cfg_result res = { 0 }; + struct cfg_write_pkg request = { + .header = tb_cfg_make_header(route), + .addr = { + .port = port, + .space = space, + .offset = offset, + .length = length, + }, + }; + struct cfg_read_pkg reply; + int retries = 0; + + memcpy(&request.data, buffer, length * 4); + + while (retries < TB_CTL_RETRIES) { + struct tb_cfg_request *req; + + req = tb_cfg_request_alloc(); + if (!req) { + res.err = -ENOMEM; + return res; + } + + request.addr.seq = retries++; + + req->match = tb_cfg_match; + req->copy = tb_cfg_copy; + req->request = &request; + req->request_size = 12 + 4 * length; + req->request_type = TB_CFG_PKG_WRITE; + req->response = &reply; + req->response_size = sizeof(reply); + req->response_type = TB_CFG_PKG_WRITE; + + res = tb_cfg_request_sync(ctl, req, timeout_msec); + + tb_cfg_request_put(req); + + if (res.err != -ETIMEDOUT) + break; + + /* Wait a bit (arbitrary time) until we send a retry */ + usleep_range(10, 100); + } + + if (res.err) + return res; + + res.response_port = reply.addr.port; + res.err = check_config_address(reply.addr, space, offset, length); + return res; +} + +static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space, + const struct tb_cfg_result *res) +{ + /* + * For unimplemented ports access to port config space may return + * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is + * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so + * that the caller can mark the port as disabled. 
+ */ + if (space == TB_CFG_PORT && + res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE) + return -ENODEV; + + tb_cfg_print_error(ctl, res); + + if (res->tb_error == TB_CFG_ERROR_LOCK) + return -EACCES; + if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED) + return -ENOTCONN; + + return -EIO; +} + +int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, + enum tb_cfg_space space, u32 offset, u32 length) +{ + struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port, + space, offset, length, ctl->timeout_msec); + switch (res.err) { + case 0: + /* Success */ + break; + + case 1: + /* Thunderbolt error, tb_error holds the actual number */ + return tb_cfg_get_error(ctl, space, &res); + + case -ETIMEDOUT: + tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n", + route, space, offset); + break; + + default: + WARN(1, "tb_cfg_read: %d\n", res.err); + break; + } + return res.err; +} + +int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port, + enum tb_cfg_space space, u32 offset, u32 length) +{ + struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port, + space, offset, length, ctl->timeout_msec); + switch (res.err) { + case 0: + /* Success */ + break; + + case 1: + /* Thunderbolt error, tb_error holds the actual number */ + return tb_cfg_get_error(ctl, space, &res); + + case -ETIMEDOUT: + tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n", + route, space, offset); + break; + + default: + WARN(1, "tb_cfg_write: %d\n", res.err); + break; + } + return res.err; +} + +/** + * tb_cfg_get_upstream_port() - get upstream port number of switch at route + * @ctl: Pointer to the control channel + * @route: Route string of the router + * + * Reads the first dword from the switches TB_CFG_SWITCH config area and + * returns the port number from which the reply originated. + * + * Return: Returns the upstream port number on success or an error code on + * failure. + */ +int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route) +{ + u32 dummy; + struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0, + TB_CFG_SWITCH, 0, 1, + ctl->timeout_msec); + if (res.err == 1) + return -EIO; + if (res.err) + return res.err; + return res.response_port; +} diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h new file mode 100644 index 0000000000..eec5c953c7 --- /dev/null +++ b/drivers/thunderbolt/ctl.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Thunderbolt driver - control channel and configuration commands + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2018, Intel Corporation + */ + +#ifndef _TB_CFG +#define _TB_CFG + +#include <linux/kref.h> +#include <linux/thunderbolt.h> + +#include "nhi.h" +#include "tb_msgs.h" + +/* control channel */ +struct tb_ctl; + +typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type, + const void *buf, size_t size); + +struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb, + void *cb_data); +void tb_ctl_start(struct tb_ctl *ctl); +void tb_ctl_stop(struct tb_ctl *ctl); +void tb_ctl_free(struct tb_ctl *ctl); + +/* configuration commands */ + +struct tb_cfg_result { + u64 response_route; + u32 response_port; /* + * If err = 1 then this is the port that send the + * error. + * If err = 0 and if this was a cfg_read/write then + * this is the upstream port of the responding + * switch. + * Otherwise the field is set to zero. 
+ */ + int err; /* negative errors, 0 for success, 1 for tb errors */ + enum tb_cfg_error tb_error; /* valid if err == 1 */ +}; + +struct ctl_pkg { + struct tb_ctl *ctl; + void *buffer; + struct ring_frame frame; +}; + +/** + * struct tb_cfg_request - Control channel request + * @kref: Reference count + * @ctl: Pointer to the control channel structure. Only set when the + * request is queued. + * @request_size: Size of the request packet (in bytes) + * @request_type: Type of the request packet + * @response: Response is stored here + * @response_size: Maximum size of one response packet + * @response_type: Expected type of the response packet + * @npackets: Number of packets expected to be returned with this request + * @match: Function used to match the incoming packet + * @copy: Function used to copy the incoming packet to @response + * @callback: Callback called when the request is finished successfully + * @callback_data: Data to be passed to @callback + * @flags: Flags for the request + * @work: Work item used to complete the request + * @result: Result after the request has been completed + * @list: Requests are queued using this field + * + * An arbitrary request over Thunderbolt control channel. For standard + * control channel message, one should use tb_cfg_read/write() and + * friends if possible. + */ +struct tb_cfg_request { + struct kref kref; + struct tb_ctl *ctl; + const void *request; + size_t request_size; + enum tb_cfg_pkg_type request_type; + void *response; + size_t response_size; + enum tb_cfg_pkg_type response_type; + size_t npackets; + bool (*match)(const struct tb_cfg_request *req, + const struct ctl_pkg *pkg); + bool (*copy)(struct tb_cfg_request *req, const struct ctl_pkg *pkg); + void (*callback)(void *callback_data); + void *callback_data; + unsigned long flags; + struct work_struct work; + struct tb_cfg_result result; + struct list_head list; +}; + +#define TB_CFG_REQUEST_ACTIVE 0 +#define TB_CFG_REQUEST_CANCELED 1 + +struct tb_cfg_request *tb_cfg_request_alloc(void); +void tb_cfg_request_get(struct tb_cfg_request *req); +void tb_cfg_request_put(struct tb_cfg_request *req); +int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req, + void (*callback)(void *), void *callback_data); +void tb_cfg_request_cancel(struct tb_cfg_request *req, int err); +struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl, + struct tb_cfg_request *req, int timeout_msec); + +static inline u64 tb_cfg_get_route(const struct tb_cfg_header *header) +{ + return (u64) header->route_hi << 32 | header->route_lo; +} + +static inline struct tb_cfg_header tb_cfg_make_header(u64 route) +{ + struct tb_cfg_header header = { + .route_hi = route >> 32, + .route_lo = route, + }; + /* check for overflow, route_hi is not 32 bits! 
*/ + WARN_ON(tb_cfg_get_route(&header) != route); + return header; +} + +int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route, + const struct cfg_error_pkg *error); +int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug); +struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route); +struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer, + u64 route, u32 port, + enum tb_cfg_space space, u32 offset, + u32 length, int timeout_msec); +struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer, + u64 route, u32 port, + enum tb_cfg_space space, u32 offset, + u32 length, int timeout_msec); +int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, + enum tb_cfg_space space, u32 offset, u32 length); +int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port, + enum tb_cfg_space space, u32 offset, u32 length); +int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route); + + +#endif diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c new file mode 100644 index 0000000000..e324cd8997 --- /dev/null +++ b/drivers/thunderbolt/debugfs.c @@ -0,0 +1,1600 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Debugfs interface + * + * Copyright (C) 2020, Intel Corporation + * Authors: Gil Fine <gil.fine@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/debugfs.h> +#include <linux/pm_runtime.h> +#include <linux/uaccess.h> + +#include "tb.h" +#include "sb_regs.h" + +#define PORT_CAP_V1_PCIE_LEN 1 +#define PORT_CAP_V2_PCIE_LEN 2 +#define PORT_CAP_POWER_LEN 2 +#define PORT_CAP_LANE_LEN 3 +#define PORT_CAP_USB3_LEN 5 +#define PORT_CAP_DP_V1_LEN 9 +#define PORT_CAP_DP_V2_LEN 14 +#define PORT_CAP_TMU_V1_LEN 8 +#define PORT_CAP_TMU_V2_LEN 10 +#define PORT_CAP_BASIC_LEN 9 +#define PORT_CAP_USB4_LEN 20 + +#define SWITCH_CAP_TMU_LEN 26 +#define SWITCH_CAP_BASIC_LEN 27 + +#define PATH_LEN 2 + +#define COUNTER_SET_LEN 3 + +#define DEBUGFS_ATTR(__space, __write) \ +static int __space ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __space ## _show, inode->i_private); \ +} \ + \ +static const struct file_operations __space ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __space ## _open, \ + .release = single_release, \ + .read = seq_read, \ + .write = __write, \ + .llseek = seq_lseek, \ +} + +#define DEBUGFS_ATTR_RO(__space) \ + DEBUGFS_ATTR(__space, NULL) + +#define DEBUGFS_ATTR_RW(__space) \ + DEBUGFS_ATTR(__space, __space ## _write) + +static struct dentry *tb_debugfs_root; + +static void *validate_and_copy_from_user(const void __user *user_buf, + size_t *count) +{ + size_t nbytes; + void *buf; + + if (!*count) + return ERR_PTR(-EINVAL); + + if (!access_ok(user_buf, *count)) + return ERR_PTR(-EFAULT); + + buf = (void *)get_zeroed_page(GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + nbytes = min_t(size_t, *count, PAGE_SIZE); + if (copy_from_user(buf, user_buf, nbytes)) { + free_page((unsigned long)buf); + return ERR_PTR(-EFAULT); + } + + *count = nbytes; + return buf; +} + +static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len, + int long_fmt_len) +{ + char *token; + u32 v[5]; + int ret; + + token = strsep(line, "\n"); + if (!token) + return false; + + /* + * For Adapter/Router configuration space: + * Short format is: offset value\n + * v[0] v[1] + * Long format as produced from the read side: + * offset relative_offset cap_id vs_cap_id value\n + * v[0] v[1] v[2] v[3] v[4] + * + * For Counter 
configuration space: + * Short format is: offset\n + * v[0] + * Long format as produced from the read side: + * offset relative_offset counter_id value\n + * v[0] v[1] v[2] v[3] + */ + ret = sscanf(token, "%i %i %i %i %i", &v[0], &v[1], &v[2], &v[3], &v[4]); + /* In case of Counters, clear counter, "val" content is NA */ + if (ret == short_fmt_len) { + *offs = v[0]; + *val = v[short_fmt_len - 1]; + return true; + } else if (ret == long_fmt_len) { + *offs = v[0]; + *val = v[long_fmt_len - 1]; + return true; + } + + return false; +} + +#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE) +static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct tb *tb = sw->tb; + char *line, *buf; + u32 val, offset; + int ret = 0; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out; + } + + /* User did hardware changes behind the driver's back */ + add_taint(TAINT_USER, LOCKDEP_STILL_OK); + + line = buf; + while (parse_line(&line, &offset, &val, 2, 5)) { + if (port) + ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1); + else + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1); + if (ret) + break; + } + + mutex_unlock(&tb->lock); + +out: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + free_page((unsigned long)buf); + + return ret < 0 ? ret : count; +} + +static ssize_t port_regs_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + + return regs_write(port->sw, port, user_buf, count, ppos); +} + +static ssize_t switch_regs_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_switch *sw = s->private; + + return regs_write(sw, NULL, user_buf, count, ppos); +} +#define DEBUGFS_MODE 0600 +#else +#define port_regs_write NULL +#define switch_regs_write NULL +#define DEBUGFS_MODE 0400 +#endif + +#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING) +/** + * struct tb_margining - Lane margining support + * @caps: Port lane margining capabilities + * @results: Last lane margining results + * @lanes: %0, %1 or %7 (all) + * @min_ber_level: Minimum supported BER level contour value + * @max_ber_level: Maximum supported BER level contour value + * @ber_level: Current BER level contour value + * @voltage_steps: Number of mandatory voltage steps + * @max_voltage_offset: Maximum mandatory voltage offset (in mV) + * @time_steps: Number of time margin steps + * @max_time_offset: Maximum time margin offset (in mUI) + * @software: %true if software margining is used instead of hardware + * @time: %true if time margining is used instead of voltage + * @right_high: %false if left/low margin test is performed, %true if + * right/high + */ +struct tb_margining { + u32 caps[2]; + u32 results[2]; + unsigned int lanes; + unsigned int min_ber_level; + unsigned int max_ber_level; + unsigned int ber_level; + unsigned int voltage_steps; + unsigned int max_voltage_offset; + unsigned int time_steps; + unsigned int max_time_offset; + bool software; + bool time; + bool right_high; +}; + +static bool supports_software(const struct usb4_port *usb4) +{ + return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW; +} + +static bool supports_hardware(const struct usb4_port *usb4) 
+{ + return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW; +} + +static bool both_lanes(const struct usb4_port *usb4) +{ + return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_2_LANES; +} + +static unsigned int independent_voltage_margins(const struct usb4_port *usb4) +{ + return (usb4->margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK) >> + USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT; +} + +static bool supports_time(const struct usb4_port *usb4) +{ + return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_TIME; +} + +/* Only applicable if supports_time() returns true */ +static unsigned int independent_time_margins(const struct usb4_port *usb4) +{ + return (usb4->margining->caps[1] & USB4_MARGIN_CAP_1_TIME_INDP_MASK) >> + USB4_MARGIN_CAP_1_TIME_INDP_SHIFT; +} + +static ssize_t +margining_ber_level_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + unsigned int val; + int ret = 0; + char *buf; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + if (usb4->margining->software) { + ret = -EINVAL; + goto out_unlock; + } + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto out_unlock; + } + + buf[count - 1] = '\0'; + + ret = kstrtouint(buf, 10, &val); + if (ret) + goto out_free; + + if (val < usb4->margining->min_ber_level || + val > usb4->margining->max_ber_level) { + ret = -EINVAL; + goto out_free; + } + + usb4->margining->ber_level = val; + +out_free: + free_page((unsigned long)buf); +out_unlock: + mutex_unlock(&tb->lock); + + return ret < 0 ? ret : count; +} + +static void ber_level_show(struct seq_file *s, unsigned int val) +{ + if (val % 2) + seq_printf(s, "3 * 1e%d (%u)\n", -12 + (val + 1) / 2, val); + else + seq_printf(s, "1e%d (%u)\n", -12 + val / 2, val); +} + +static int margining_ber_level_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + + if (usb4->margining->software) + return -EINVAL; + ber_level_show(s, usb4->margining->ber_level); + return 0; +} +DEBUGFS_ATTR_RW(margining_ber_level); + +static int margining_caps_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + u32 cap0, cap1; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + /* Dump the raw caps first */ + cap0 = usb4->margining->caps[0]; + seq_printf(s, "0x%08x\n", cap0); + cap1 = usb4->margining->caps[1]; + seq_printf(s, "0x%08x\n", cap1); + + seq_printf(s, "# software margining: %s\n", + supports_software(usb4) ? "yes" : "no"); + if (supports_hardware(usb4)) { + seq_puts(s, "# hardware margining: yes\n"); + seq_puts(s, "# minimum BER level contour: "); + ber_level_show(s, usb4->margining->min_ber_level); + seq_puts(s, "# maximum BER level contour: "); + ber_level_show(s, usb4->margining->max_ber_level); + } else { + seq_puts(s, "# hardware margining: no\n"); + } + + seq_printf(s, "# both lanes simultaneously: %s\n", + both_lanes(usb4) ? 
"yes" : "no"); + seq_printf(s, "# voltage margin steps: %u\n", + usb4->margining->voltage_steps); + seq_printf(s, "# maximum voltage offset: %u mV\n", + usb4->margining->max_voltage_offset); + + switch (independent_voltage_margins(usb4)) { + case USB4_MARGIN_CAP_0_VOLTAGE_MIN: + seq_puts(s, "# returns minimum between high and low voltage margins\n"); + break; + case USB4_MARGIN_CAP_0_VOLTAGE_HL: + seq_puts(s, "# returns high or low voltage margin\n"); + break; + case USB4_MARGIN_CAP_0_VOLTAGE_BOTH: + seq_puts(s, "# returns both high and low margins\n"); + break; + } + + if (supports_time(usb4)) { + seq_puts(s, "# time margining: yes\n"); + seq_printf(s, "# time margining is destructive: %s\n", + cap1 & USB4_MARGIN_CAP_1_TIME_DESTR ? "yes" : "no"); + + switch (independent_time_margins(usb4)) { + case USB4_MARGIN_CAP_1_TIME_MIN: + seq_puts(s, "# returns minimum between left and right time margins\n"); + break; + case USB4_MARGIN_CAP_1_TIME_LR: + seq_puts(s, "# returns left or right margin\n"); + break; + case USB4_MARGIN_CAP_1_TIME_BOTH: + seq_puts(s, "# returns both left and right margins\n"); + break; + } + + seq_printf(s, "# time margin steps: %u\n", + usb4->margining->time_steps); + seq_printf(s, "# maximum time offset: %u mUI\n", + usb4->margining->max_time_offset); + } else { + seq_puts(s, "# time margining: no\n"); + } + + mutex_unlock(&tb->lock); + return 0; +} +DEBUGFS_ATTR_RO(margining_caps); + +static ssize_t +margining_lanes_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + int ret = 0; + char *buf; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + buf[count - 1] = '\0'; + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_free; + } + + if (!strcmp(buf, "0")) { + usb4->margining->lanes = 0; + } else if (!strcmp(buf, "1")) { + usb4->margining->lanes = 1; + } else if (!strcmp(buf, "all")) { + /* Needs to be supported */ + if (both_lanes(usb4)) + usb4->margining->lanes = 7; + else + ret = -EINVAL; + } else { + ret = -EINVAL; + } + + mutex_unlock(&tb->lock); + +out_free: + free_page((unsigned long)buf); + return ret < 0 ? 
ret : count; +} + +static int margining_lanes_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + unsigned int lanes; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + lanes = usb4->margining->lanes; + if (both_lanes(usb4)) { + if (!lanes) + seq_puts(s, "[0] 1 all\n"); + else if (lanes == 1) + seq_puts(s, "0 [1] all\n"); + else + seq_puts(s, "0 1 [all]\n"); + } else { + if (!lanes) + seq_puts(s, "[0] 1\n"); + else + seq_puts(s, "0 [1]\n"); + } + + mutex_unlock(&tb->lock); + return 0; +} +DEBUGFS_ATTR_RW(margining_lanes); + +static ssize_t margining_mode_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + int ret = 0; + char *buf; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + buf[count - 1] = '\0'; + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_free; + } + + if (!strcmp(buf, "software")) { + if (supports_software(usb4)) + usb4->margining->software = true; + else + ret = -EINVAL; + } else if (!strcmp(buf, "hardware")) { + if (supports_hardware(usb4)) + usb4->margining->software = false; + else + ret = -EINVAL; + } else { + ret = -EINVAL; + } + + mutex_unlock(&tb->lock); + +out_free: + free_page((unsigned long)buf); + return ret ? ret : count; +} + +static int margining_mode_show(struct seq_file *s, void *not_used) +{ + const struct tb_port *port = s->private; + const struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + const char *space = ""; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + if (supports_software(usb4)) { + if (usb4->margining->software) + seq_puts(s, "[software]"); + else + seq_puts(s, "software"); + space = " "; + } + if (supports_hardware(usb4)) { + if (usb4->margining->software) + seq_printf(s, "%shardware", space); + else + seq_printf(s, "%s[hardware]", space); + } + + mutex_unlock(&tb->lock); + + seq_puts(s, "\n"); + return 0; +} +DEBUGFS_ATTR_RW(margining_mode); + +static int margining_run_write(void *data, u64 val) +{ + struct tb_port *port = data; + struct usb4_port *usb4 = port->usb4; + struct tb_switch *sw = port->sw; + struct tb_margining *margining; + struct tb_switch *down_sw; + struct tb *tb = sw->tb; + int ret, clx; + + if (val != 1) + return -EINVAL; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + if (tb_is_upstream_port(port)) + down_sw = sw; + else if (port->remote) + down_sw = port->remote->sw; + else + down_sw = NULL; + + if (down_sw) { + /* + * CL states may interfere with lane margining so + * disable them temporarily now. + */ + ret = tb_switch_clx_disable(down_sw); + if (ret < 0) { + tb_sw_warn(down_sw, "failed to disable CL states\n"); + goto out_unlock; + } + clx = ret; + } + + margining = usb4->margining; + + if (margining->software) { + tb_port_dbg(port, "running software %s lane margining for lanes %u\n", + margining->time ? 
"time" : "voltage", margining->lanes); + ret = usb4_port_sw_margin(port, margining->lanes, margining->time, + margining->right_high, + USB4_MARGIN_SW_COUNTER_CLEAR); + if (ret) + goto out_clx; + + ret = usb4_port_sw_margin_errors(port, &margining->results[0]); + } else { + tb_port_dbg(port, "running hardware %s lane margining for lanes %u\n", + margining->time ? "time" : "voltage", margining->lanes); + /* Clear the results */ + margining->results[0] = 0; + margining->results[1] = 0; + ret = usb4_port_hw_margin(port, margining->lanes, + margining->ber_level, margining->time, + margining->right_high, margining->results); + } + +out_clx: + if (down_sw) + tb_switch_clx_enable(down_sw, clx); +out_unlock: + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEFINE_DEBUGFS_ATTRIBUTE(margining_run_fops, NULL, margining_run_write, + "%llu\n"); + +static ssize_t margining_results_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + /* Just clear the results */ + usb4->margining->results[0] = 0; + usb4->margining->results[1] = 0; + + mutex_unlock(&tb->lock); + return count; +} + +static void voltage_margin_show(struct seq_file *s, + const struct tb_margining *margining, u8 val) +{ + unsigned int tmp, voltage; + + tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK; + voltage = tmp * margining->max_voltage_offset / margining->voltage_steps; + seq_printf(s, "%u mV (%u)", voltage, tmp); + if (val & USB4_MARGIN_HW_RES_1_EXCEEDS) + seq_puts(s, " exceeds maximum"); + seq_puts(s, "\n"); +} + +static void time_margin_show(struct seq_file *s, + const struct tb_margining *margining, u8 val) +{ + unsigned int tmp, interval; + + tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK; + interval = tmp * margining->max_time_offset / margining->time_steps; + seq_printf(s, "%u mUI (%u)", interval, tmp); + if (val & USB4_MARGIN_HW_RES_1_EXCEEDS) + seq_puts(s, " exceeds maximum"); + seq_puts(s, "\n"); +} + +static int margining_results_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb_margining *margining; + struct tb *tb = port->sw->tb; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + margining = usb4->margining; + /* Dump the raw results first */ + seq_printf(s, "0x%08x\n", margining->results[0]); + /* Only the hardware margining has two result dwords */ + if (!margining->software) { + unsigned int val; + + seq_printf(s, "0x%08x\n", margining->results[1]); + + if (margining->time) { + if (!margining->lanes || margining->lanes == 7) { + val = margining->results[1]; + seq_puts(s, "# lane 0 right time margin: "); + time_margin_show(s, margining, val); + val = margining->results[1] >> + USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT; + seq_puts(s, "# lane 0 left time margin: "); + time_margin_show(s, margining, val); + } + if (margining->lanes == 1 || margining->lanes == 7) { + val = margining->results[1] >> + USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT; + seq_puts(s, "# lane 1 right time margin: "); + time_margin_show(s, margining, val); + val = margining->results[1] >> + USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT; + seq_puts(s, "# lane 1 left time margin: "); + time_margin_show(s, margining, val); + } + } 
else { + if (!margining->lanes || margining->lanes == 7) { + val = margining->results[1]; + seq_puts(s, "# lane 0 high voltage margin: "); + voltage_margin_show(s, margining, val); + val = margining->results[1] >> + USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT; + seq_puts(s, "# lane 0 low voltage margin: "); + voltage_margin_show(s, margining, val); + } + if (margining->lanes == 1 || margining->lanes == 7) { + val = margining->results[1] >> + USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT; + seq_puts(s, "# lane 1 high voltage margin: "); + voltage_margin_show(s, margining, val); + val = margining->results[1] >> + USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT; + seq_puts(s, "# lane 1 low voltage margin: "); + voltage_margin_show(s, margining, val); + } + } + } + + mutex_unlock(&tb->lock); + return 0; +} +DEBUGFS_ATTR_RW(margining_results); + +static ssize_t margining_test_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + int ret = 0; + char *buf; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + buf[count - 1] = '\0'; + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_free; + } + + if (!strcmp(buf, "time") && supports_time(usb4)) + usb4->margining->time = true; + else if (!strcmp(buf, "voltage")) + usb4->margining->time = false; + else + ret = -EINVAL; + + mutex_unlock(&tb->lock); + +out_free: + free_page((unsigned long)buf); + return ret ? ret : count; +} + +static int margining_test_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + if (supports_time(usb4)) { + if (usb4->margining->time) + seq_puts(s, "voltage [time]\n"); + else + seq_puts(s, "[voltage] time\n"); + } else { + seq_puts(s, "[voltage]\n"); + } + + mutex_unlock(&tb->lock); + return 0; +} +DEBUGFS_ATTR_RW(margining_test); + +static ssize_t margining_margin_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + int ret = 0; + char *buf; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + buf[count - 1] = '\0'; + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_free; + } + + if (usb4->margining->time) { + if (!strcmp(buf, "left")) + usb4->margining->right_high = false; + else if (!strcmp(buf, "right")) + usb4->margining->right_high = true; + else + ret = -EINVAL; + } else { + if (!strcmp(buf, "low")) + usb4->margining->right_high = false; + else if (!strcmp(buf, "high")) + usb4->margining->right_high = true; + else + ret = -EINVAL; + } + + mutex_unlock(&tb->lock); + +out_free: + free_page((unsigned long)buf); + return ret ? 
ret : count; +} + +static int margining_margin_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + if (usb4->margining->time) { + if (usb4->margining->right_high) + seq_puts(s, "left [right]\n"); + else + seq_puts(s, "[left] right\n"); + } else { + if (usb4->margining->right_high) + seq_puts(s, "low [high]\n"); + else + seq_puts(s, "[low] high\n"); + } + + mutex_unlock(&tb->lock); + return 0; +} +DEBUGFS_ATTR_RW(margining_margin); + +static void margining_port_init(struct tb_port *port) +{ + struct tb_margining *margining; + struct dentry *dir, *parent; + struct usb4_port *usb4; + char dir_name[10]; + unsigned int val; + int ret; + + usb4 = port->usb4; + if (!usb4) + return; + + snprintf(dir_name, sizeof(dir_name), "port%d", port->port); + parent = debugfs_lookup(dir_name, port->sw->debugfs_dir); + + margining = kzalloc(sizeof(*margining), GFP_KERNEL); + if (!margining) + return; + + ret = usb4_port_margining_caps(port, margining->caps); + if (ret) { + kfree(margining); + return; + } + + usb4->margining = margining; + + /* Set the initial mode */ + if (supports_software(usb4)) + margining->software = true; + + val = (margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK) >> + USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT; + margining->voltage_steps = val; + val = (margining->caps[0] & USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK) >> + USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT; + margining->max_voltage_offset = 74 + val * 2; + + if (supports_time(usb4)) { + val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_STEPS_MASK) >> + USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT; + margining->time_steps = val; + val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_OFFSET_MASK) >> + USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT; + /* + * Store it as mUI (milli Unit Interval) because we want + * to keep it as integer. 
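+ * For example, a capability field value of 4 maps to
+ * 200 + 10 * 4 = 240 mUI, i.e. 0.24 UI.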
+ */ + margining->max_time_offset = 200 + 10 * val; + } + + dir = debugfs_create_dir("margining", parent); + if (supports_hardware(usb4)) { + val = (margining->caps[1] & USB4_MARGIN_CAP_1_MIN_BER_MASK) >> + USB4_MARGIN_CAP_1_MIN_BER_SHIFT; + margining->min_ber_level = val; + val = (margining->caps[1] & USB4_MARGIN_CAP_1_MAX_BER_MASK) >> + USB4_MARGIN_CAP_1_MAX_BER_SHIFT; + margining->max_ber_level = val; + + /* Set the default to minimum */ + margining->ber_level = margining->min_ber_level; + + debugfs_create_file("ber_level_contour", 0400, dir, port, + &margining_ber_level_fops); + } + debugfs_create_file("caps", 0400, dir, port, &margining_caps_fops); + debugfs_create_file("lanes", 0600, dir, port, &margining_lanes_fops); + debugfs_create_file("mode", 0600, dir, port, &margining_mode_fops); + debugfs_create_file("run", 0600, dir, port, &margining_run_fops); + debugfs_create_file("results", 0600, dir, port, &margining_results_fops); + debugfs_create_file("test", 0600, dir, port, &margining_test_fops); + if (independent_voltage_margins(usb4) || + (supports_time(usb4) && independent_time_margins(usb4))) + debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops); +} + +static void margining_port_remove(struct tb_port *port) +{ + struct dentry *parent; + char dir_name[10]; + + if (!port->usb4) + return; + + snprintf(dir_name, sizeof(dir_name), "port%d", port->port); + parent = debugfs_lookup(dir_name, port->sw->debugfs_dir); + if (parent) + debugfs_lookup_and_remove("margining", parent); + + kfree(port->usb4->margining); + port->usb4->margining = NULL; +} + +static void margining_switch_init(struct tb_switch *sw) +{ + struct tb_port *upstream, *downstream; + struct tb_switch *parent_sw; + u64 route = tb_route(sw); + + if (!route) + return; + + upstream = tb_upstream_port(sw); + parent_sw = tb_switch_parent(sw); + downstream = tb_port_at(route, parent_sw); + + margining_port_init(downstream); + margining_port_init(upstream); +} + +static void margining_switch_remove(struct tb_switch *sw) +{ + struct tb_port *upstream, *downstream; + struct tb_switch *parent_sw; + u64 route = tb_route(sw); + + if (!route) + return; + + upstream = tb_upstream_port(sw); + parent_sw = tb_switch_parent(sw); + downstream = tb_port_at(route, parent_sw); + + margining_port_remove(upstream); + margining_port_remove(downstream); +} + +static void margining_xdomain_init(struct tb_xdomain *xd) +{ + struct tb_switch *parent_sw; + struct tb_port *downstream; + + parent_sw = tb_xdomain_parent(xd); + downstream = tb_port_at(xd->route, parent_sw); + + margining_port_init(downstream); +} + +static void margining_xdomain_remove(struct tb_xdomain *xd) +{ + struct tb_switch *parent_sw; + struct tb_port *downstream; + + parent_sw = tb_xdomain_parent(xd); + downstream = tb_port_at(xd->route, parent_sw); + margining_port_remove(downstream); +} +#else +static inline void margining_switch_init(struct tb_switch *sw) { } +static inline void margining_switch_remove(struct tb_switch *sw) { } +static inline void margining_xdomain_init(struct tb_xdomain *xd) { } +static inline void margining_xdomain_remove(struct tb_xdomain *xd) { } +#endif + +static int port_clear_all_counters(struct tb_port *port) +{ + u32 *buf; + int ret; + + buf = kcalloc(COUNTER_SET_LEN * port->config.max_counters, sizeof(u32), + GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = tb_port_write(port, buf, TB_CFG_COUNTERS, 0, + COUNTER_SET_LEN * port->config.max_counters); + kfree(buf); + + return ret; +} + +static ssize_t counters_write(struct file *file, 
const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = port->sw->tb; + char *buf; + int ret; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out; + } + + /* If written delimiter only, clear all counters in one shot */ + if (buf[0] == '\n') { + ret = port_clear_all_counters(port); + } else { + char *line = buf; + u32 val, offset; + + ret = -EINVAL; + while (parse_line(&line, &offset, &val, 1, 4)) { + ret = tb_port_write(port, &val, TB_CFG_COUNTERS, + offset, 1); + if (ret) + break; + } + } + + mutex_unlock(&tb->lock); + +out: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + free_page((unsigned long)buf); + + return ret < 0 ? ret : count; +} + +static void cap_show_by_dw(struct seq_file *s, struct tb_switch *sw, + struct tb_port *port, unsigned int cap, + unsigned int offset, u8 cap_id, u8 vsec_id, + int dwords) +{ + int i, ret; + u32 data; + + for (i = 0; i < dwords; i++) { + if (port) + ret = tb_port_read(port, &data, TB_CFG_PORT, cap + offset + i, 1); + else + ret = tb_sw_read(sw, &data, TB_CFG_SWITCH, cap + offset + i, 1); + if (ret) { + seq_printf(s, "0x%04x <not accessible>\n", cap + offset + i); + continue; + } + + seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n", cap + offset + i, + offset + i, cap_id, vsec_id, data); + } +} + +static void cap_show(struct seq_file *s, struct tb_switch *sw, + struct tb_port *port, unsigned int cap, u8 cap_id, + u8 vsec_id, int length) +{ + int ret, offset = 0; + + while (length > 0) { + int i, dwords = min(length, TB_MAX_CONFIG_RW_LENGTH); + u32 data[TB_MAX_CONFIG_RW_LENGTH]; + + if (port) + ret = tb_port_read(port, data, TB_CFG_PORT, cap + offset, + dwords); + else + ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords); + if (ret) { + cap_show_by_dw(s, sw, port, cap, offset, cap_id, vsec_id, length); + return; + } + + for (i = 0; i < dwords; i++) { + seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n", + cap + offset + i, offset + i, + cap_id, vsec_id, data[i]); + } + + length -= dwords; + offset += dwords; + } +} + +static void port_cap_show(struct tb_port *port, struct seq_file *s, + unsigned int cap) +{ + struct tb_cap_any header; + u8 vsec_id = 0; + size_t length; + int ret; + + ret = tb_port_read(port, &header, TB_CFG_PORT, cap, 1); + if (ret) { + seq_printf(s, "0x%04x <capability read failed>\n", cap); + return; + } + + switch (header.basic.cap) { + case TB_PORT_CAP_PHY: + length = PORT_CAP_LANE_LEN; + break; + + case TB_PORT_CAP_TIME1: + if (usb4_switch_version(port->sw) < 2) + length = PORT_CAP_TMU_V1_LEN; + else + length = PORT_CAP_TMU_V2_LEN; + break; + + case TB_PORT_CAP_POWER: + length = PORT_CAP_POWER_LEN; + break; + + case TB_PORT_CAP_ADAP: + if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) { + if (usb4_switch_version(port->sw) < 2) + length = PORT_CAP_V1_PCIE_LEN; + else + length = PORT_CAP_V2_PCIE_LEN; + } else if (tb_port_is_dpin(port)) { + if (usb4_switch_version(port->sw) < 2) + length = PORT_CAP_DP_V1_LEN; + else + length = PORT_CAP_DP_V2_LEN; + } else if (tb_port_is_dpout(port)) { + length = PORT_CAP_DP_V1_LEN; + } else if (tb_port_is_usb3_down(port) || + tb_port_is_usb3_up(port)) { + length = PORT_CAP_USB3_LEN; + } else { + seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n", 
+ cap, header.basic.cap); + return; + } + break; + + case TB_PORT_CAP_VSE: + if (!header.extended_short.length) { + ret = tb_port_read(port, (u32 *)&header + 1, TB_CFG_PORT, + cap + 1, 1); + if (ret) { + seq_printf(s, "0x%04x <capability read failed>\n", + cap + 1); + return; + } + length = header.extended_long.length; + vsec_id = header.extended_short.vsec_id; + } else { + length = header.extended_short.length; + vsec_id = header.extended_short.vsec_id; + } + break; + + case TB_PORT_CAP_USB4: + length = PORT_CAP_USB4_LEN; + break; + + default: + seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n", + cap, header.basic.cap); + return; + } + + cap_show(s, NULL, port, cap, header.basic.cap, vsec_id, length); +} + +static void port_caps_show(struct tb_port *port, struct seq_file *s) +{ + int cap; + + cap = tb_port_next_cap(port, 0); + while (cap > 0) { + port_cap_show(port, s, cap); + cap = tb_port_next_cap(port, cap); + } +} + +static int port_basic_regs_show(struct tb_port *port, struct seq_file *s) +{ + u32 data[PORT_CAP_BASIC_LEN]; + int ret, i; + + ret = tb_port_read(port, data, TB_CFG_PORT, 0, ARRAY_SIZE(data)); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(data); i++) + seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]); + + return 0; +} + +static int port_regs_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = sw->tb; + int ret; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n"); + + ret = port_basic_regs_show(port, s); + if (ret) + goto out_unlock; + + port_caps_show(port, s); + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RW(port_regs); + +static void switch_cap_show(struct tb_switch *sw, struct seq_file *s, + unsigned int cap) +{ + struct tb_cap_any header; + int ret, length; + u8 vsec_id = 0; + + ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, cap, 1); + if (ret) { + seq_printf(s, "0x%04x <capability read failed>\n", cap); + return; + } + + if (header.basic.cap == TB_SWITCH_CAP_VSE) { + if (!header.extended_short.length) { + ret = tb_sw_read(sw, (u32 *)&header + 1, TB_CFG_SWITCH, + cap + 1, 1); + if (ret) { + seq_printf(s, "0x%04x <capability read failed>\n", + cap + 1); + return; + } + length = header.extended_long.length; + } else { + length = header.extended_short.length; + } + vsec_id = header.extended_short.vsec_id; + } else { + if (header.basic.cap == TB_SWITCH_CAP_TMU) { + length = SWITCH_CAP_TMU_LEN; + } else { + seq_printf(s, "0x%04x <unknown capability 0x%02x>\n", + cap, header.basic.cap); + return; + } + } + + cap_show(s, sw, NULL, cap, header.basic.cap, vsec_id, length); +} + +static void switch_caps_show(struct tb_switch *sw, struct seq_file *s) +{ + int cap; + + cap = tb_switch_next_cap(sw, 0); + while (cap > 0) { + switch_cap_show(sw, s, cap); + cap = tb_switch_next_cap(sw, cap); + } +} + +static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s) +{ + u32 data[SWITCH_CAP_BASIC_LEN]; + size_t dwords; + int ret, i; + + /* Only USB4 has the additional registers */ + if (tb_switch_is_usb4(sw)) + dwords = ARRAY_SIZE(data); + else + dwords = 7; + + ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords); + if (ret) + return ret; + + for (i = 0; i < dwords; i++) + seq_printf(s, "0x%04x %4d 
0x00 0x00 0x%08x\n", i, i, data[i]); + + return 0; +} + +static int switch_regs_show(struct seq_file *s, void *not_used) +{ + struct tb_switch *sw = s->private; + struct tb *tb = sw->tb; + int ret; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n"); + + ret = switch_basic_regs_show(sw, s); + if (ret) + goto out_unlock; + + switch_caps_show(sw, s); + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RW(switch_regs); + +static int path_show_one(struct tb_port *port, struct seq_file *s, int hopid) +{ + u32 data[PATH_LEN]; + int ret, i; + + ret = tb_port_read(port, data, TB_CFG_HOPS, hopid * PATH_LEN, + ARRAY_SIZE(data)); + if (ret) { + seq_printf(s, "0x%04x <not accessible>\n", hopid * PATH_LEN); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(data); i++) { + seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n", + hopid * PATH_LEN + i, i, hopid, data[i]); + } + + return 0; +} + +static int path_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = sw->tb; + int start, i, ret = 0; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + seq_puts(s, "# offset relative_offset in_hop_id value\n"); + + /* NHI and lane adapters have entry for path 0 */ + if (tb_port_is_null(port) || tb_port_is_nhi(port)) { + ret = path_show_one(port, s, 0); + if (ret) + goto out_unlock; + } + + start = tb_port_is_nhi(port) ? 1 : TB_PATH_MIN_HOPID; + + for (i = start; i <= port->config.max_in_hop_id; i++) { + ret = path_show_one(port, s, i); + if (ret) + break; + } + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RO(path); + +static int counter_set_regs_show(struct tb_port *port, struct seq_file *s, + int counter) +{ + u32 data[COUNTER_SET_LEN]; + int ret, i; + + ret = tb_port_read(port, data, TB_CFG_COUNTERS, + counter * COUNTER_SET_LEN, ARRAY_SIZE(data)); + if (ret) { + seq_printf(s, "0x%04x <not accessible>\n", + counter * COUNTER_SET_LEN); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(data); i++) { + seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n", + counter * COUNTER_SET_LEN + i, i, counter, data[i]); + } + + return 0; +} + +static int counters_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = sw->tb; + int i, ret = 0; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out; + } + + seq_puts(s, "# offset relative_offset counter_id value\n"); + + for (i = 0; i < port->config.max_counters; i++) { + ret = counter_set_regs_show(port, s, i); + if (ret) + break; + } + + mutex_unlock(&tb->lock); + +out: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RW(counters); + +/** + * tb_switch_debugfs_init() - Add debugfs entries for router + * @sw: Pointer to the router + * + * Adds debugfs directories and files for given router. 
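+ *
+ * Assuming debugfs is mounted at /sys/kernel/debug, the resulting layout
+ * is roughly the following (which entries appear depends on the router,
+ * its ports and the enabled config options):
+ *
+ *	/sys/kernel/debug/thunderbolt/<device>/regs
+ *	/sys/kernel/debug/thunderbolt/<device>/port<N>/regs
+ *	/sys/kernel/debug/thunderbolt/<device>/port<N>/path
+ *	/sys/kernel/debug/thunderbolt/<device>/port<N>/counters
+ *	/sys/kernel/debug/thunderbolt/<device>/port<N>/margining/...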
+ */ +void tb_switch_debugfs_init(struct tb_switch *sw) +{ + struct dentry *debugfs_dir; + struct tb_port *port; + + debugfs_dir = debugfs_create_dir(dev_name(&sw->dev), tb_debugfs_root); + sw->debugfs_dir = debugfs_dir; + debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw, + &switch_regs_fops); + + tb_switch_for_each_port(sw, port) { + struct dentry *debugfs_dir; + char dir_name[10]; + + if (port->disabled) + continue; + if (port->config.type == TB_TYPE_INACTIVE) + continue; + + snprintf(dir_name, sizeof(dir_name), "port%d", port->port); + debugfs_dir = debugfs_create_dir(dir_name, sw->debugfs_dir); + debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, + port, &port_regs_fops); + debugfs_create_file("path", 0400, debugfs_dir, port, + &path_fops); + if (port->config.counters_support) + debugfs_create_file("counters", 0600, debugfs_dir, port, + &counters_fops); + } + + margining_switch_init(sw); +} + +/** + * tb_switch_debugfs_remove() - Remove all router debugfs entries + * @sw: Pointer to the router + * + * Removes all previously added debugfs entries under this router. + */ +void tb_switch_debugfs_remove(struct tb_switch *sw) +{ + margining_switch_remove(sw); + debugfs_remove_recursive(sw->debugfs_dir); +} + +void tb_xdomain_debugfs_init(struct tb_xdomain *xd) +{ + margining_xdomain_init(xd); +} + +void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) +{ + margining_xdomain_remove(xd); +} + +/** + * tb_service_debugfs_init() - Add debugfs directory for service + * @svc: Thunderbolt service pointer + * + * Adds debugfs directory for service. + */ +void tb_service_debugfs_init(struct tb_service *svc) +{ + svc->debugfs_dir = debugfs_create_dir(dev_name(&svc->dev), + tb_debugfs_root); +} + +/** + * tb_service_debugfs_remove() - Remove service debugfs directory + * @svc: Thunderbolt service pointer + * + * Removes the previously created debugfs directory for @svc. 
+ */ +void tb_service_debugfs_remove(struct tb_service *svc) +{ + debugfs_remove_recursive(svc->debugfs_dir); + svc->debugfs_dir = NULL; +} + +void tb_debugfs_init(void) +{ + tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL); +} + +void tb_debugfs_exit(void) +{ + debugfs_remove_recursive(tb_debugfs_root); +} diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c new file mode 100644 index 0000000000..9f20c7bbf0 --- /dev/null +++ b/drivers/thunderbolt/dma_port.c @@ -0,0 +1,458 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt DMA configuration based mailbox support + * + * Copyright (C) 2017, Intel Corporation + * Authors: Michael Jamet <michael.jamet@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/delay.h> +#include <linux/slab.h> + +#include "dma_port.h" +#include "tb_regs.h" + +#define DMA_PORT_CAP 0x3e + +#define MAIL_DATA 1 +#define MAIL_DATA_DWORDS 16 + +#define MAIL_IN 17 +#define MAIL_IN_CMD_SHIFT 28 +#define MAIL_IN_CMD_MASK GENMASK(31, 28) +#define MAIL_IN_CMD_FLASH_WRITE 0x0 +#define MAIL_IN_CMD_FLASH_UPDATE_AUTH 0x1 +#define MAIL_IN_CMD_FLASH_READ 0x2 +#define MAIL_IN_CMD_POWER_CYCLE 0x4 +#define MAIL_IN_DWORDS_SHIFT 24 +#define MAIL_IN_DWORDS_MASK GENMASK(27, 24) +#define MAIL_IN_ADDRESS_SHIFT 2 +#define MAIL_IN_ADDRESS_MASK GENMASK(23, 2) +#define MAIL_IN_CSS BIT(1) +#define MAIL_IN_OP_REQUEST BIT(0) + +#define MAIL_OUT 18 +#define MAIL_OUT_STATUS_RESPONSE BIT(29) +#define MAIL_OUT_STATUS_CMD_SHIFT 4 +#define MAIL_OUT_STATUS_CMD_MASK GENMASK(7, 4) +#define MAIL_OUT_STATUS_MASK GENMASK(3, 0) +#define MAIL_OUT_STATUS_COMPLETED 0 +#define MAIL_OUT_STATUS_ERR_AUTH 1 +#define MAIL_OUT_STATUS_ERR_ACCESS 2 + +#define DMA_PORT_TIMEOUT 5000 /* ms */ +#define DMA_PORT_RETRIES 3 + +/** + * struct tb_dma_port - DMA control port + * @sw: Switch the DMA port belongs to + * @port: Switch port number where DMA capability is found + * @base: Start offset of the mailbox registers + * @buf: Temporary buffer to store a single block + */ +struct tb_dma_port { + struct tb_switch *sw; + u8 port; + u32 base; + u8 *buf; +}; + +/* + * When the switch is in safe mode it supports very little functionality + * so we don't validate that much here. 
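+ *
+ * Unlike tb_cfg_match(), the sequence number of read/write replies is
+ * not checked here.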
+ */ +static bool dma_port_match(const struct tb_cfg_request *req, + const struct ctl_pkg *pkg) +{ + u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63); + + if (pkg->frame.eof == TB_CFG_PKG_ERROR) + return true; + if (pkg->frame.eof != req->response_type) + return false; + if (route != tb_cfg_get_route(req->request)) + return false; + if (pkg->frame.size != req->response_size) + return false; + + return true; +} + +static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) +{ + memcpy(req->response, pkg->buffer, req->response_size); + return true; +} + +static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route, + u32 port, u32 offset, u32 length, int timeout_msec) +{ + struct cfg_read_pkg request = { + .header = tb_cfg_make_header(route), + .addr = { + .seq = 1, + .port = port, + .space = TB_CFG_PORT, + .offset = offset, + .length = length, + }, + }; + struct tb_cfg_request *req; + struct cfg_write_pkg reply; + struct tb_cfg_result res; + + req = tb_cfg_request_alloc(); + if (!req) + return -ENOMEM; + + req->match = dma_port_match; + req->copy = dma_port_copy; + req->request = &request; + req->request_size = sizeof(request); + req->request_type = TB_CFG_PKG_READ; + req->response = &reply; + req->response_size = 12 + 4 * length; + req->response_type = TB_CFG_PKG_READ; + + res = tb_cfg_request_sync(ctl, req, timeout_msec); + + tb_cfg_request_put(req); + + if (res.err) + return res.err; + + memcpy(buffer, &reply.data, 4 * length); + return 0; +} + +static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route, + u32 port, u32 offset, u32 length, int timeout_msec) +{ + struct cfg_write_pkg request = { + .header = tb_cfg_make_header(route), + .addr = { + .seq = 1, + .port = port, + .space = TB_CFG_PORT, + .offset = offset, + .length = length, + }, + }; + struct tb_cfg_request *req; + struct cfg_read_pkg reply; + struct tb_cfg_result res; + + memcpy(&request.data, buffer, length * 4); + + req = tb_cfg_request_alloc(); + if (!req) + return -ENOMEM; + + req->match = dma_port_match; + req->copy = dma_port_copy; + req->request = &request; + req->request_size = 12 + 4 * length; + req->request_type = TB_CFG_PKG_WRITE; + req->response = &reply; + req->response_size = sizeof(reply); + req->response_type = TB_CFG_PKG_WRITE; + + res = tb_cfg_request_sync(ctl, req, timeout_msec); + + tb_cfg_request_put(req); + + return res.err; +} + +static int dma_find_port(struct tb_switch *sw) +{ + static const int ports[] = { 3, 5, 7 }; + int i; + + /* + * The DMA (NHI) port is either 3, 5 or 7 depending on the + * controller. Try all of them. + */ + for (i = 0; i < ARRAY_SIZE(ports); i++) { + u32 type; + int ret; + + ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i], + 2, 1, DMA_PORT_TIMEOUT); + if (!ret && (type & 0xffffff) == TB_TYPE_NHI) + return ports[i]; + } + + return -ENODEV; +} + +/** + * dma_port_alloc() - Finds DMA control port from a switch pointed by route + * @sw: Switch from where find the DMA port + * + * Function checks if the switch NHI port supports DMA configuration + * based mailbox capability and if it does, allocates and initializes + * DMA port structure. Returns %NULL if the capabity was not found. + * + * The DMA control port is functional also when the switch is in safe + * mode. 
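+ *
+ * A typical call sequence on the caller's side (illustrative only,
+ * error handling omitted) might be:
+ *
+ *	struct tb_dma_port *dma = dma_port_alloc(sw);
+ *
+ *	if (dma) {
+ *		dma_port_flash_read(dma, address, buf, size);
+ *		...
+ *		dma_port_free(dma);
+ *	}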
+ */ +struct tb_dma_port *dma_port_alloc(struct tb_switch *sw) +{ + struct tb_dma_port *dma; + int port; + + port = dma_find_port(sw); + if (port < 0) + return NULL; + + dma = kzalloc(sizeof(*dma), GFP_KERNEL); + if (!dma) + return NULL; + + dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL); + if (!dma->buf) { + kfree(dma); + return NULL; + } + + dma->sw = sw; + dma->port = port; + dma->base = DMA_PORT_CAP; + + return dma; +} + +/** + * dma_port_free() - Release DMA control port structure + * @dma: DMA control port + */ +void dma_port_free(struct tb_dma_port *dma) +{ + if (dma) { + kfree(dma->buf); + kfree(dma); + } +} + +static int dma_port_wait_for_completion(struct tb_dma_port *dma, + unsigned int timeout) +{ + unsigned long end = jiffies + msecs_to_jiffies(timeout); + struct tb_switch *sw = dma->sw; + + do { + int ret; + u32 in; + + ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port, + dma->base + MAIL_IN, 1, 50); + if (ret) { + if (ret != -ETIMEDOUT) + return ret; + } else if (!(in & MAIL_IN_OP_REQUEST)) { + return 0; + } + + usleep_range(50, 100); + } while (time_before(jiffies, end)); + + return -ETIMEDOUT; +} + +static int status_to_errno(u32 status) +{ + switch (status & MAIL_OUT_STATUS_MASK) { + case MAIL_OUT_STATUS_COMPLETED: + return 0; + case MAIL_OUT_STATUS_ERR_AUTH: + return -EINVAL; + case MAIL_OUT_STATUS_ERR_ACCESS: + return -EACCES; + } + + return -EIO; +} + +static int dma_port_request(struct tb_dma_port *dma, u32 in, + unsigned int timeout) +{ + struct tb_switch *sw = dma->sw; + u32 out; + int ret; + + ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port, + dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT); + if (ret) + return ret; + + ret = dma_port_wait_for_completion(dma, timeout); + if (ret) + return ret; + + ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port, + dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT); + if (ret) + return ret; + + return status_to_errno(out); +} + +static int dma_port_flash_read_block(void *data, unsigned int dwaddress, + void *buf, size_t dwords) +{ + struct tb_dma_port *dma = data; + struct tb_switch *sw = dma->sw; + int ret; + u32 in; + + in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT; + if (dwords < MAIL_DATA_DWORDS) + in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK; + in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK; + in |= MAIL_IN_OP_REQUEST; + + ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT); + if (ret) + return ret; + + return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port, + dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT); +} + +static int dma_port_flash_write_block(void *data, unsigned int dwaddress, + const void *buf, size_t dwords) +{ + struct tb_dma_port *dma = data; + struct tb_switch *sw = dma->sw; + int ret; + u32 in; + + /* Write the block to MAIL_DATA registers */ + ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port, + dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT); + if (ret) + return ret; + + in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT; + + /* CSS header write is always done to the same magic address */ + if (dwaddress >= DMA_PORT_CSS_ADDRESS) + in |= MAIL_IN_CSS; + + in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK; + in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK; + in |= MAIL_IN_OP_REQUEST; + + return dma_port_request(dma, in, DMA_PORT_TIMEOUT); +} + +/** + * dma_port_flash_read() - Read from active flash region + * @dma: DMA control port + * @address: Address relative to the 
start of active region + * @buf: Buffer where the data is read + * @size: Size of the buffer + */ +int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address, + void *buf, size_t size) +{ + return tb_nvm_read_data(address, buf, size, DMA_PORT_RETRIES, + dma_port_flash_read_block, dma); +} + +/** + * dma_port_flash_write() - Write to non-active flash region + * @dma: DMA control port + * @address: Address relative to the start of non-active region + * @buf: Data to write + * @size: Size of the buffer + * + * Writes block of data to the non-active flash region of the switch. If + * the address is given as %DMA_PORT_CSS_ADDRESS the block is written + * using CSS command. + */ +int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address, + const void *buf, size_t size) +{ + if (address >= DMA_PORT_CSS_ADDRESS && size > DMA_PORT_CSS_MAX_SIZE) + return -E2BIG; + + return tb_nvm_write_data(address, buf, size, DMA_PORT_RETRIES, + dma_port_flash_write_block, dma); +} + +/** + * dma_port_flash_update_auth() - Starts flash authenticate cycle + * @dma: DMA control port + * + * Starts the flash update authentication cycle. If the image in the + * non-active area was valid, the switch starts upgrade process where + * active and non-active area get swapped in the end. Caller should call + * dma_port_flash_update_auth_status() to get status of this command. + * This is because if the switch in question is root switch the + * thunderbolt host controller gets reset as well. + */ +int dma_port_flash_update_auth(struct tb_dma_port *dma) +{ + u32 in; + + in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT; + in |= MAIL_IN_OP_REQUEST; + + return dma_port_request(dma, in, 150); +} + +/** + * dma_port_flash_update_auth_status() - Reads status of update auth command + * @dma: DMA control port + * @status: Status code of the operation + * + * The function checks if there is status available from the last update + * auth command. Returns %0 if there is no status and no further + * action is required. If there is status, %1 is returned instead and + * @status holds the failure code. + * + * Negative return means there was an error reading status from the + * switch. + */ +int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status) +{ + struct tb_switch *sw = dma->sw; + u32 out, cmd; + int ret; + + ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port, + dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT); + if (ret) + return ret; + + /* Check if the status relates to flash update auth */ + cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT; + if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) { + if (status) + *status = out & MAIL_OUT_STATUS_MASK; + + /* Reset is needed in any case */ + return 1; + } + + return 0; +} + +/** + * dma_port_power_cycle() - Power cycles the switch + * @dma: DMA control port + * + * Triggers power cycle to the switch. 
+ */ +int dma_port_power_cycle(struct tb_dma_port *dma) +{ + u32 in; + + in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT; + in |= MAIL_IN_OP_REQUEST; + + return dma_port_request(dma, in, 150); +} diff --git a/drivers/thunderbolt/dma_port.h b/drivers/thunderbolt/dma_port.h new file mode 100644 index 0000000000..7deadd97ce --- /dev/null +++ b/drivers/thunderbolt/dma_port.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Thunderbolt DMA configuration based mailbox support + * + * Copyright (C) 2017, Intel Corporation + * Authors: Michael Jamet <michael.jamet@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#ifndef DMA_PORT_H_ +#define DMA_PORT_H_ + +#include "tb.h" + +struct tb_switch; +struct tb_dma_port; + +#define DMA_PORT_CSS_ADDRESS 0x3fffff +#define DMA_PORT_CSS_MAX_SIZE SZ_128 + +struct tb_dma_port *dma_port_alloc(struct tb_switch *sw); +void dma_port_free(struct tb_dma_port *dma); +int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address, + void *buf, size_t size); +int dma_port_flash_update_auth(struct tb_dma_port *dma); +int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status); +int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address, + const void *buf, size_t size); +int dma_port_power_cycle(struct tb_dma_port *dma); + +#endif diff --git a/drivers/thunderbolt/dma_test.c b/drivers/thunderbolt/dma_test.c new file mode 100644 index 0000000000..39476fc488 --- /dev/null +++ b/drivers/thunderbolt/dma_test.c @@ -0,0 +1,764 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMA traffic test driver + * + * Copyright (C) 2020, Intel Corporation + * Authors: Isaac Hazan <isaac.hazan@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/completion.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/sizes.h> +#include <linux/thunderbolt.h> + +#define DMA_TEST_TX_RING_SIZE 64 +#define DMA_TEST_RX_RING_SIZE 256 +#define DMA_TEST_FRAME_SIZE SZ_4K +#define DMA_TEST_DATA_PATTERN 0x0123456789abcdefLL +#define DMA_TEST_MAX_PACKETS 1000 + +enum dma_test_frame_pdf { + DMA_TEST_PDF_FRAME_START = 1, + DMA_TEST_PDF_FRAME_END, +}; + +struct dma_test_frame { + struct dma_test *dma_test; + void *data; + struct ring_frame frame; +}; + +enum dma_test_test_error { + DMA_TEST_NO_ERROR, + DMA_TEST_INTERRUPTED, + DMA_TEST_BUFFER_ERROR, + DMA_TEST_DMA_ERROR, + DMA_TEST_CONFIG_ERROR, + DMA_TEST_SPEED_ERROR, + DMA_TEST_WIDTH_ERROR, + DMA_TEST_BONDING_ERROR, + DMA_TEST_PACKET_ERROR, +}; + +static const char * const dma_test_error_names[] = { + [DMA_TEST_NO_ERROR] = "no errors", + [DMA_TEST_INTERRUPTED] = "interrupted by signal", + [DMA_TEST_BUFFER_ERROR] = "no memory for packet buffers", + [DMA_TEST_DMA_ERROR] = "DMA ring setup failed", + [DMA_TEST_CONFIG_ERROR] = "configuration is not valid", + [DMA_TEST_SPEED_ERROR] = "unexpected link speed", + [DMA_TEST_WIDTH_ERROR] = "unexpected link width", + [DMA_TEST_BONDING_ERROR] = "lane bonding configuration error", + [DMA_TEST_PACKET_ERROR] = "packet check failed", +}; + +enum dma_test_result { + DMA_TEST_NOT_RUN, + DMA_TEST_SUCCESS, + DMA_TEST_FAIL, +}; + +static const char * const dma_test_result_names[] = { + [DMA_TEST_NOT_RUN] = "not run", + [DMA_TEST_SUCCESS] = "success", + [DMA_TEST_FAIL] = "failed", +}; + +/** + * struct dma_test - DMA test device driver private data + * @svc: XDomain service the driver is bound to + * @xd: XDomain the service belongs to + * @rx_ring: Software ring holding RX frames + * @rx_hopid: HopID used 
for receiving frames + * @tx_ring: Software ring holding TX frames + * @tx_hopid: HopID used for sending fames + * @packets_to_send: Number of packets to send + * @packets_to_receive: Number of packets to receive + * @packets_sent: Actual number of packets sent + * @packets_received: Actual number of packets received + * @link_speed: Expected link speed (Gb/s), %0 to use whatever is negotiated + * @link_width: Expected link width (Gb/s), %0 to use whatever is negotiated + * @crc_errors: Number of CRC errors during the test run + * @buffer_overflow_errors: Number of buffer overflow errors during the test + * run + * @result: Result of the last run + * @error_code: Error code of the last run + * @complete: Used to wait for the Rx to complete + * @lock: Lock serializing access to this structure + * @debugfs_dir: dentry of this dma_test + */ +struct dma_test { + const struct tb_service *svc; + struct tb_xdomain *xd; + struct tb_ring *rx_ring; + int rx_hopid; + struct tb_ring *tx_ring; + int tx_hopid; + unsigned int packets_to_send; + unsigned int packets_to_receive; + unsigned int packets_sent; + unsigned int packets_received; + unsigned int link_speed; + unsigned int link_width; + unsigned int crc_errors; + unsigned int buffer_overflow_errors; + enum dma_test_result result; + enum dma_test_test_error error_code; + struct completion complete; + struct mutex lock; + struct dentry *debugfs_dir; +}; + +/* DMA test property directory UUID: 3188cd10-6523-4a5a-a682-fdca07a248d8 */ +static const uuid_t dma_test_dir_uuid = + UUID_INIT(0x3188cd10, 0x6523, 0x4a5a, + 0xa6, 0x82, 0xfd, 0xca, 0x07, 0xa2, 0x48, 0xd8); + +static struct tb_property_dir *dma_test_dir; +static void *dma_test_pattern; + +static void dma_test_free_rings(struct dma_test *dt) +{ + if (dt->rx_ring) { + tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid); + tb_ring_free(dt->rx_ring); + dt->rx_ring = NULL; + } + if (dt->tx_ring) { + tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid); + tb_ring_free(dt->tx_ring); + dt->tx_ring = NULL; + } +} + +static int dma_test_start_rings(struct dma_test *dt) +{ + unsigned int flags = RING_FLAG_FRAME; + struct tb_xdomain *xd = dt->xd; + int ret, e2e_tx_hop = 0; + struct tb_ring *ring; + + /* + * If we are both sender and receiver (traffic goes over a + * special loopback dongle) enable E2E flow control. This avoids + * losing packets. + */ + if (dt->packets_to_send && dt->packets_to_receive) + flags |= RING_FLAG_E2E; + + if (dt->packets_to_send) { + ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE, + flags); + if (!ring) + return -ENOMEM; + + dt->tx_ring = ring; + e2e_tx_hop = ring->hop; + + ret = tb_xdomain_alloc_out_hopid(xd, -1); + if (ret < 0) { + dma_test_free_rings(dt); + return ret; + } + + dt->tx_hopid = ret; + } + + if (dt->packets_to_receive) { + u16 sof_mask, eof_mask; + + sof_mask = BIT(DMA_TEST_PDF_FRAME_START); + eof_mask = BIT(DMA_TEST_PDF_FRAME_END); + + ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE, + flags, e2e_tx_hop, sof_mask, eof_mask, + NULL, NULL); + if (!ring) { + dma_test_free_rings(dt); + return -ENOMEM; + } + + dt->rx_ring = ring; + + ret = tb_xdomain_alloc_in_hopid(xd, -1); + if (ret < 0) { + dma_test_free_rings(dt); + return ret; + } + + dt->rx_hopid = ret; + } + + ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid, + dt->tx_ring ? dt->tx_ring->hop : -1, + dt->rx_hopid, + dt->rx_ring ? 
dt->rx_ring->hop : -1); + if (ret) { + dma_test_free_rings(dt); + return ret; + } + + if (dt->tx_ring) + tb_ring_start(dt->tx_ring); + if (dt->rx_ring) + tb_ring_start(dt->rx_ring); + + return 0; +} + +static void dma_test_stop_rings(struct dma_test *dt) +{ + int ret; + + if (dt->rx_ring) + tb_ring_stop(dt->rx_ring); + if (dt->tx_ring) + tb_ring_stop(dt->tx_ring); + + ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid, + dt->tx_ring ? dt->tx_ring->hop : -1, + dt->rx_hopid, + dt->rx_ring ? dt->rx_ring->hop : -1); + if (ret) + dev_warn(&dt->svc->dev, "failed to disable DMA paths\n"); + + dma_test_free_rings(dt); +} + +static void dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame, + bool canceled) +{ + struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame); + struct dma_test *dt = tf->dma_test; + struct device *dma_dev = tb_ring_dma_device(dt->rx_ring); + + dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE, + DMA_FROM_DEVICE); + kfree(tf->data); + + if (canceled) { + kfree(tf); + return; + } + + dt->packets_received++; + dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received, + dt->packets_to_receive); + + if (tf->frame.flags & RING_DESC_CRC_ERROR) + dt->crc_errors++; + if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) + dt->buffer_overflow_errors++; + + kfree(tf); + + if (dt->packets_received == dt->packets_to_receive) + complete(&dt->complete); +} + +static int dma_test_submit_rx(struct dma_test *dt, size_t npackets) +{ + struct device *dma_dev = tb_ring_dma_device(dt->rx_ring); + int i; + + for (i = 0; i < npackets; i++) { + struct dma_test_frame *tf; + dma_addr_t dma_addr; + + tf = kzalloc(sizeof(*tf), GFP_KERNEL); + if (!tf) + return -ENOMEM; + + tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL); + if (!tf->data) { + kfree(tf); + return -ENOMEM; + } + + dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(dma_dev, dma_addr)) { + kfree(tf->data); + kfree(tf); + return -ENOMEM; + } + + tf->frame.buffer_phy = dma_addr; + tf->frame.callback = dma_test_rx_callback; + tf->dma_test = dt; + INIT_LIST_HEAD(&tf->frame.list); + + tb_ring_rx(dt->rx_ring, &tf->frame); + } + + return 0; +} + +static void dma_test_tx_callback(struct tb_ring *ring, struct ring_frame *frame, + bool canceled) +{ + struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame); + struct dma_test *dt = tf->dma_test; + struct device *dma_dev = tb_ring_dma_device(dt->tx_ring); + + dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE, + DMA_TO_DEVICE); + kfree(tf->data); + kfree(tf); +} + +static int dma_test_submit_tx(struct dma_test *dt, size_t npackets) +{ + struct device *dma_dev = tb_ring_dma_device(dt->tx_ring); + int i; + + for (i = 0; i < npackets; i++) { + struct dma_test_frame *tf; + dma_addr_t dma_addr; + + tf = kzalloc(sizeof(*tf), GFP_KERNEL); + if (!tf) + return -ENOMEM; + + tf->frame.size = 0; /* means 4096 */ + tf->dma_test = dt; + + tf->data = kmemdup(dma_test_pattern, DMA_TEST_FRAME_SIZE, GFP_KERNEL); + if (!tf->data) { + kfree(tf); + return -ENOMEM; + } + + dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, dma_addr)) { + kfree(tf->data); + kfree(tf); + return -ENOMEM; + } + + tf->frame.buffer_phy = dma_addr; + tf->frame.callback = dma_test_tx_callback; + tf->frame.sof = DMA_TEST_PDF_FRAME_START; + tf->frame.eof = DMA_TEST_PDF_FRAME_END; + INIT_LIST_HEAD(&tf->frame.list); + + dt->packets_sent++; + 
dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent, + dt->packets_to_send); + + tb_ring_tx(dt->tx_ring, &tf->frame); + } + + return 0; +} + +#define DMA_TEST_DEBUGFS_ATTR(__fops, __get, __validate, __set) \ +static int __fops ## _show(void *data, u64 *val) \ +{ \ + struct tb_service *svc = data; \ + struct dma_test *dt = tb_service_get_drvdata(svc); \ + int ret; \ + \ + ret = mutex_lock_interruptible(&dt->lock); \ + if (ret) \ + return ret; \ + __get(dt, val); \ + mutex_unlock(&dt->lock); \ + return 0; \ +} \ +static int __fops ## _store(void *data, u64 val) \ +{ \ + struct tb_service *svc = data; \ + struct dma_test *dt = tb_service_get_drvdata(svc); \ + int ret; \ + \ + ret = __validate(val); \ + if (ret) \ + return ret; \ + ret = mutex_lock_interruptible(&dt->lock); \ + if (ret) \ + return ret; \ + __set(dt, val); \ + mutex_unlock(&dt->lock); \ + return 0; \ +} \ +DEFINE_DEBUGFS_ATTRIBUTE(__fops ## _fops, __fops ## _show, \ + __fops ## _store, "%llu\n") + +static void lanes_get(const struct dma_test *dt, u64 *val) +{ + *val = dt->link_width; +} + +static int lanes_validate(u64 val) +{ + return val > 2 ? -EINVAL : 0; +} + +static void lanes_set(struct dma_test *dt, u64 val) +{ + dt->link_width = val; +} +DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set); + +static void speed_get(const struct dma_test *dt, u64 *val) +{ + *val = dt->link_speed; +} + +static int speed_validate(u64 val) +{ + switch (val) { + case 40: + case 20: + case 10: + case 0: + return 0; + default: + return -EINVAL; + } +} + +static void speed_set(struct dma_test *dt, u64 val) +{ + dt->link_speed = val; +} +DMA_TEST_DEBUGFS_ATTR(speed, speed_get, speed_validate, speed_set); + +static void packets_to_receive_get(const struct dma_test *dt, u64 *val) +{ + *val = dt->packets_to_receive; +} + +static int packets_to_receive_validate(u64 val) +{ + return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0; +} + +static void packets_to_receive_set(struct dma_test *dt, u64 val) +{ + dt->packets_to_receive = val; +} +DMA_TEST_DEBUGFS_ATTR(packets_to_receive, packets_to_receive_get, + packets_to_receive_validate, packets_to_receive_set); + +static void packets_to_send_get(const struct dma_test *dt, u64 *val) +{ + *val = dt->packets_to_send; +} + +static int packets_to_send_validate(u64 val) +{ + return val > DMA_TEST_MAX_PACKETS ? 
-EINVAL : 0; +} + +static void packets_to_send_set(struct dma_test *dt, u64 val) +{ + dt->packets_to_send = val; +} +DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get, + packets_to_send_validate, packets_to_send_set); + +static int dma_test_set_bonding(struct dma_test *dt) +{ + switch (dt->link_width) { + case 2: + return tb_xdomain_lane_bonding_enable(dt->xd); + case 1: + tb_xdomain_lane_bonding_disable(dt->xd); + fallthrough; + default: + return 0; + } +} + +static bool dma_test_validate_config(struct dma_test *dt) +{ + if (!dt->packets_to_send && !dt->packets_to_receive) + return false; + if (dt->packets_to_send && dt->packets_to_receive && + dt->packets_to_send != dt->packets_to_receive) + return false; + return true; +} + +static void dma_test_check_errors(struct dma_test *dt, int ret) +{ + if (!dt->error_code) { + if (dt->link_speed && dt->xd->link_speed != dt->link_speed) { + dt->error_code = DMA_TEST_SPEED_ERROR; + } else if (dt->link_width) { + const struct tb_xdomain *xd = dt->xd; + + if ((dt->link_width == 1 && xd->link_width != TB_LINK_WIDTH_SINGLE) || + (dt->link_width == 2 && xd->link_width < TB_LINK_WIDTH_DUAL)) + dt->error_code = DMA_TEST_WIDTH_ERROR; + } else if (dt->packets_to_send != dt->packets_sent || + dt->packets_to_receive != dt->packets_received || + dt->crc_errors || dt->buffer_overflow_errors) { + dt->error_code = DMA_TEST_PACKET_ERROR; + } else { + return; + } + } + + dt->result = DMA_TEST_FAIL; +} + +static int test_store(void *data, u64 val) +{ + struct tb_service *svc = data; + struct dma_test *dt = tb_service_get_drvdata(svc); + int ret; + + if (val != 1) + return -EINVAL; + + ret = mutex_lock_interruptible(&dt->lock); + if (ret) + return ret; + + dt->packets_sent = 0; + dt->packets_received = 0; + dt->crc_errors = 0; + dt->buffer_overflow_errors = 0; + dt->result = DMA_TEST_SUCCESS; + dt->error_code = DMA_TEST_NO_ERROR; + + dev_dbg(&svc->dev, "DMA test starting\n"); + if (dt->link_speed) + dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed); + if (dt->link_width) + dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width); + dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send); + dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive); + + if (!dma_test_validate_config(dt)) { + dev_err(&svc->dev, "invalid test configuration\n"); + dt->error_code = DMA_TEST_CONFIG_ERROR; + goto out_unlock; + } + + ret = dma_test_set_bonding(dt); + if (ret) { + dev_err(&svc->dev, "failed to set lanes\n"); + dt->error_code = DMA_TEST_BONDING_ERROR; + goto out_unlock; + } + + ret = dma_test_start_rings(dt); + if (ret) { + dev_err(&svc->dev, "failed to enable DMA rings\n"); + dt->error_code = DMA_TEST_DMA_ERROR; + goto out_unlock; + } + + if (dt->packets_to_receive) { + reinit_completion(&dt->complete); + ret = dma_test_submit_rx(dt, dt->packets_to_receive); + if (ret) { + dev_err(&svc->dev, "failed to submit receive buffers\n"); + dt->error_code = DMA_TEST_BUFFER_ERROR; + goto out_stop; + } + } + + if (dt->packets_to_send) { + ret = dma_test_submit_tx(dt, dt->packets_to_send); + if (ret) { + dev_err(&svc->dev, "failed to submit transmit buffers\n"); + dt->error_code = DMA_TEST_BUFFER_ERROR; + goto out_stop; + } + } + + if (dt->packets_to_receive) { + ret = wait_for_completion_interruptible(&dt->complete); + if (ret) { + dt->error_code = DMA_TEST_INTERRUPTED; + goto out_stop; + } + } + +out_stop: + dma_test_stop_rings(dt); +out_unlock: + dma_test_check_errors(dt, ret); + mutex_unlock(&dt->lock); + + dev_dbg(&svc->dev, "DMA test %s\n", 
dma_test_result_names[dt->result]); + return ret; +} +DEFINE_DEBUGFS_ATTRIBUTE(test_fops, NULL, test_store, "%llu\n"); + +static int status_show(struct seq_file *s, void *not_used) +{ + struct tb_service *svc = s->private; + struct dma_test *dt = tb_service_get_drvdata(svc); + int ret; + + ret = mutex_lock_interruptible(&dt->lock); + if (ret) + return ret; + + seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]); + if (dt->result == DMA_TEST_NOT_RUN) + goto out_unlock; + + seq_printf(s, "packets received: %u\n", dt->packets_received); + seq_printf(s, "packets sent: %u\n", dt->packets_sent); + seq_printf(s, "CRC errors: %u\n", dt->crc_errors); + seq_printf(s, "buffer overflow errors: %u\n", + dt->buffer_overflow_errors); + seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]); + +out_unlock: + mutex_unlock(&dt->lock); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(status); + +static void dma_test_debugfs_init(struct tb_service *svc) +{ + struct dma_test *dt = tb_service_get_drvdata(svc); + + dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir); + + debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops); + debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops); + debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc, + &packets_to_receive_fops); + debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc, + &packets_to_send_fops); + debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops); + debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops); +} + +static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id) +{ + struct tb_xdomain *xd = tb_service_parent(svc); + struct dma_test *dt; + + dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL); + if (!dt) + return -ENOMEM; + + dt->svc = svc; + dt->xd = xd; + mutex_init(&dt->lock); + init_completion(&dt->complete); + + tb_service_set_drvdata(svc, dt); + dma_test_debugfs_init(svc); + + return 0; +} + +static void dma_test_remove(struct tb_service *svc) +{ + struct dma_test *dt = tb_service_get_drvdata(svc); + + mutex_lock(&dt->lock); + debugfs_remove_recursive(dt->debugfs_dir); + mutex_unlock(&dt->lock); +} + +static int __maybe_unused dma_test_suspend(struct device *dev) +{ + /* + * No need to do anything special here. If userspace is writing + * to the test attribute when suspend started, it comes out from + * wait_for_completion_interruptible() with -ERESTARTSYS and the + * DMA test fails tearing down the rings. Once userspace is + * thawed the kernel restarts the write syscall effectively + * re-running the test. 
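+ *
+ * As an informal summary of the debugfs interface created above (no new
+ * behaviour implied): a run is configured by writing packets_to_send and
+ * packets_to_receive (optionally also speed and lanes), started by writing
+ * 1 to the test attribute, and its outcome is then read back from status.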
+ */ + return 0; +} + +static int __maybe_unused dma_test_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops dma_test_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(dma_test_suspend, dma_test_resume) +}; + +static const struct tb_service_id dma_test_ids[] = { + { TB_SERVICE("dma_test", 1) }, + { }, +}; +MODULE_DEVICE_TABLE(tbsvc, dma_test_ids); + +static struct tb_service_driver dma_test_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "thunderbolt_dma_test", + .pm = &dma_test_pm_ops, + }, + .probe = dma_test_probe, + .remove = dma_test_remove, + .id_table = dma_test_ids, +}; + +static int __init dma_test_init(void) +{ + u64 data_value = DMA_TEST_DATA_PATTERN; + int i, ret; + + dma_test_pattern = kmalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL); + if (!dma_test_pattern) + return -ENOMEM; + + for (i = 0; i < DMA_TEST_FRAME_SIZE / sizeof(data_value); i++) + ((u32 *)dma_test_pattern)[i] = data_value++; + + dma_test_dir = tb_property_create_dir(&dma_test_dir_uuid); + if (!dma_test_dir) { + ret = -ENOMEM; + goto err_free_pattern; + } + + tb_property_add_immediate(dma_test_dir, "prtcid", 1); + tb_property_add_immediate(dma_test_dir, "prtcvers", 1); + tb_property_add_immediate(dma_test_dir, "prtcrevs", 0); + tb_property_add_immediate(dma_test_dir, "prtcstns", 0); + + ret = tb_register_property_dir("dma_test", dma_test_dir); + if (ret) + goto err_free_dir; + + ret = tb_register_service_driver(&dma_test_driver); + if (ret) + goto err_unregister_dir; + + return 0; + +err_unregister_dir: + tb_unregister_property_dir("dma_test", dma_test_dir); +err_free_dir: + tb_property_free_dir(dma_test_dir); +err_free_pattern: + kfree(dma_test_pattern); + + return ret; +} +module_init(dma_test_init); + +static void __exit dma_test_exit(void) +{ + tb_unregister_service_driver(&dma_test_driver); + tb_unregister_property_dir("dma_test", dma_test_dir); + tb_property_free_dir(dma_test_dir); + kfree(dma_test_pattern); +} +module_exit(dma_test_exit); + +MODULE_AUTHOR("Isaac Hazan <isaac.hazan@intel.com>"); +MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); +MODULE_DESCRIPTION("Thunderbolt/USB4 DMA traffic test driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c new file mode 100644 index 0000000000..ec7b5f6580 --- /dev/null +++ b/drivers/thunderbolt/domain.c @@ -0,0 +1,902 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt bus support + * + * Copyright (C) 2017, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/device.h> +#include <linux/idr.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/random.h> +#include <crypto/hash.h> + +#include "tb.h" + +static DEFINE_IDA(tb_domain_ida); + +static bool match_service_id(const struct tb_service_id *id, + const struct tb_service *svc) +{ + if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) { + if (strcmp(id->protocol_key, svc->key)) + return false; + } + + if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) { + if (id->protocol_id != svc->prtcid) + return false; + } + + if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) { + if (id->protocol_version != svc->prtcvers) + return false; + } + + if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) { + if (id->protocol_revision != svc->prtcrevs) + return false; + } + + return true; +} + +static const struct tb_service_id *__tb_service_match(struct device *dev, + struct device_driver *drv) +{ + struct tb_service_driver *driver; + const 
struct tb_service_id *ids; + struct tb_service *svc; + + svc = tb_to_service(dev); + if (!svc) + return NULL; + + driver = container_of(drv, struct tb_service_driver, driver); + if (!driver->id_table) + return NULL; + + for (ids = driver->id_table; ids->match_flags != 0; ids++) { + if (match_service_id(ids, svc)) + return ids; + } + + return NULL; +} + +static int tb_service_match(struct device *dev, struct device_driver *drv) +{ + return !!__tb_service_match(dev, drv); +} + +static int tb_service_probe(struct device *dev) +{ + struct tb_service *svc = tb_to_service(dev); + struct tb_service_driver *driver; + const struct tb_service_id *id; + + driver = container_of(dev->driver, struct tb_service_driver, driver); + id = __tb_service_match(dev, &driver->driver); + + return driver->probe(svc, id); +} + +static void tb_service_remove(struct device *dev) +{ + struct tb_service *svc = tb_to_service(dev); + struct tb_service_driver *driver; + + driver = container_of(dev->driver, struct tb_service_driver, driver); + if (driver->remove) + driver->remove(svc); +} + +static void tb_service_shutdown(struct device *dev) +{ + struct tb_service_driver *driver; + struct tb_service *svc; + + svc = tb_to_service(dev); + if (!svc || !dev->driver) + return; + + driver = container_of(dev->driver, struct tb_service_driver, driver); + if (driver->shutdown) + driver->shutdown(svc); +} + +static const char * const tb_security_names[] = { + [TB_SECURITY_NONE] = "none", + [TB_SECURITY_USER] = "user", + [TB_SECURITY_SECURE] = "secure", + [TB_SECURITY_DPONLY] = "dponly", + [TB_SECURITY_USBONLY] = "usbonly", + [TB_SECURITY_NOPCIE] = "nopcie", +}; + +static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb *tb = container_of(dev, struct tb, dev); + uuid_t *uuids; + ssize_t ret; + int i; + + uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL); + if (!uuids) + return -ENOMEM; + + pm_runtime_get_sync(&tb->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out; + } + ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl); + if (ret) { + mutex_unlock(&tb->lock); + goto out; + } + mutex_unlock(&tb->lock); + + for (ret = 0, i = 0; i < tb->nboot_acl; i++) { + if (!uuid_is_null(&uuids[i])) + ret += sysfs_emit_at(buf, ret, "%pUb", &uuids[i]); + + ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n"); + } + +out: + pm_runtime_mark_last_busy(&tb->dev); + pm_runtime_put_autosuspend(&tb->dev); + kfree(uuids); + + return ret; +} + +static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct tb *tb = container_of(dev, struct tb, dev); + char *str, *s, *uuid_str; + ssize_t ret = 0; + uuid_t *acl; + int i = 0; + + /* + * Make sure the value is not bigger than tb->nboot_acl * UUID + * length + commas and optional "\n". Also the smallest allowable + * string is tb->nboot_acl * ",". 
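+ *
+ * For example (UUIDs below are purely illustrative), with tb->nboot_acl == 2
+ * both "de305d54-75b4-431b-adb2-eb6b9e546014,de305d54-75b4-431b-adb2-eb6b9e546013"
+ * and a bare "," are accepted; an empty field leaves the corresponding ACL
+ * slot as a null UUID.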
+ */ + if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1) + return -EINVAL; + if (count < tb->nboot_acl - 1) + return -EINVAL; + + str = kstrdup(buf, GFP_KERNEL); + if (!str) + return -ENOMEM; + + acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL); + if (!acl) { + ret = -ENOMEM; + goto err_free_str; + } + + uuid_str = strim(str); + while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) { + size_t len = strlen(s); + + if (len) { + if (len != UUID_STRING_LEN) { + ret = -EINVAL; + goto err_free_acl; + } + ret = uuid_parse(s, &acl[i]); + if (ret) + goto err_free_acl; + } + + i++; + } + + if (s || i < tb->nboot_acl) { + ret = -EINVAL; + goto err_free_acl; + } + + pm_runtime_get_sync(&tb->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto err_rpm_put; + } + ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl); + if (!ret) { + /* Notify userspace about the change */ + kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE); + } + mutex_unlock(&tb->lock); + +err_rpm_put: + pm_runtime_mark_last_busy(&tb->dev); + pm_runtime_put_autosuspend(&tb->dev); +err_free_acl: + kfree(acl); +err_free_str: + kfree(str); + + return ret ?: count; +} +static DEVICE_ATTR_RW(boot_acl); + +static ssize_t deauthorization_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + const struct tb *tb = container_of(dev, struct tb, dev); + bool deauthorization = false; + + /* Only meaningful if authorization is supported */ + if (tb->security_level == TB_SECURITY_USER || + tb->security_level == TB_SECURITY_SECURE) + deauthorization = !!tb->cm_ops->disapprove_switch; + + return sysfs_emit(buf, "%d\n", deauthorization); +} +static DEVICE_ATTR_RO(deauthorization); + +static ssize_t iommu_dma_protection_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tb *tb = container_of(dev, struct tb, dev); + + return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection); +} +static DEVICE_ATTR_RO(iommu_dma_protection); + +static ssize_t security_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb *tb = container_of(dev, struct tb, dev); + const char *name = "unknown"; + + if (tb->security_level < ARRAY_SIZE(tb_security_names)) + name = tb_security_names[tb->security_level]; + + return sysfs_emit(buf, "%s\n", name); +} +static DEVICE_ATTR_RO(security); + +static struct attribute *domain_attrs[] = { + &dev_attr_boot_acl.attr, + &dev_attr_deauthorization.attr, + &dev_attr_iommu_dma_protection.attr, + &dev_attr_security.attr, + NULL, +}; + +static umode_t domain_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct tb *tb = container_of(dev, struct tb, dev); + + if (attr == &dev_attr_boot_acl.attr) { + if (tb->nboot_acl && + tb->cm_ops->get_boot_acl && + tb->cm_ops->set_boot_acl) + return attr->mode; + return 0; + } + + return attr->mode; +} + +static const struct attribute_group domain_attr_group = { + .is_visible = domain_attr_is_visible, + .attrs = domain_attrs, +}; + +static const struct attribute_group *domain_attr_groups[] = { + &domain_attr_group, + NULL, +}; + +struct bus_type tb_bus_type = { + .name = "thunderbolt", + .match = tb_service_match, + .probe = tb_service_probe, + .remove = tb_service_remove, + .shutdown = tb_service_shutdown, +}; + +static void tb_domain_release(struct device *dev) +{ + struct tb *tb = container_of(dev, struct tb, dev); + + tb_ctl_free(tb->ctl); + destroy_workqueue(tb->wq); + ida_simple_remove(&tb_domain_ida, 
tb->index); + mutex_destroy(&tb->lock); + kfree(tb); +} + +struct device_type tb_domain_type = { + .name = "thunderbolt_domain", + .release = tb_domain_release, +}; + +static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type, + const void *buf, size_t size) +{ + struct tb *tb = data; + + if (!tb->cm_ops->handle_event) { + tb_warn(tb, "domain does not have event handler\n"); + return true; + } + + switch (type) { + case TB_CFG_PKG_XDOMAIN_REQ: + case TB_CFG_PKG_XDOMAIN_RESP: + if (tb_is_xdomain_enabled()) + return tb_xdomain_handle_request(tb, type, buf, size); + break; + + default: + tb->cm_ops->handle_event(tb, type, buf, size); + } + + return true; +} + +/** + * tb_domain_alloc() - Allocate a domain + * @nhi: Pointer to the host controller + * @timeout_msec: Control channel timeout for non-raw messages + * @privsize: Size of the connection manager private data + * + * Allocates and initializes a new Thunderbolt domain. Connection + * managers are expected to call this and then fill in @cm_ops + * accordingly. + * + * Call tb_domain_put() to release the domain before it has been added + * to the system. + * + * Return: allocated domain structure on %NULL in case of error + */ +struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize) +{ + struct tb *tb; + + /* + * Make sure the structure sizes map with that the hardware + * expects because bit-fields are being used. + */ + BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4); + BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4); + BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4); + + tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL); + if (!tb) + return NULL; + + tb->nhi = nhi; + mutex_init(&tb->lock); + + tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL); + if (tb->index < 0) + goto err_free; + + tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index); + if (!tb->wq) + goto err_remove_ida; + + tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb); + if (!tb->ctl) + goto err_destroy_wq; + + tb->dev.parent = &nhi->pdev->dev; + tb->dev.bus = &tb_bus_type; + tb->dev.type = &tb_domain_type; + tb->dev.groups = domain_attr_groups; + dev_set_name(&tb->dev, "domain%d", tb->index); + device_initialize(&tb->dev); + + return tb; + +err_destroy_wq: + destroy_workqueue(tb->wq); +err_remove_ida: + ida_simple_remove(&tb_domain_ida, tb->index); +err_free: + kfree(tb); + + return NULL; +} + +/** + * tb_domain_add() - Add domain to the system + * @tb: Domain to add + * + * Starts the domain and adds it to the system. Hotplugging devices will + * work after this has been returned successfully. In order to remove + * and release the domain after this function has been called, call + * tb_domain_remove(). + * + * Return: %0 in case of success and negative errno in case of error + */ +int tb_domain_add(struct tb *tb) +{ + int ret; + + if (WARN_ON(!tb->cm_ops)) + return -EINVAL; + + mutex_lock(&tb->lock); + /* + * tb_schedule_hotplug_handler may be called as soon as the config + * channel is started. Thats why we have to hold the lock here. 
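+ * The hotplug/notification handlers in the connection manager
+ * implementations take tb->lock themselves, so holding it until the domain
+ * is fully started below keeps them from running too early.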
+ */ + tb_ctl_start(tb->ctl); + + if (tb->cm_ops->driver_ready) { + ret = tb->cm_ops->driver_ready(tb); + if (ret) + goto err_ctl_stop; + } + + tb_dbg(tb, "security level set to %s\n", + tb_security_names[tb->security_level]); + + ret = device_add(&tb->dev); + if (ret) + goto err_ctl_stop; + + /* Start the domain */ + if (tb->cm_ops->start) { + ret = tb->cm_ops->start(tb); + if (ret) + goto err_domain_del; + } + + /* This starts event processing */ + mutex_unlock(&tb->lock); + + device_init_wakeup(&tb->dev, true); + + pm_runtime_no_callbacks(&tb->dev); + pm_runtime_set_active(&tb->dev); + pm_runtime_enable(&tb->dev); + pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY); + pm_runtime_mark_last_busy(&tb->dev); + pm_runtime_use_autosuspend(&tb->dev); + + return 0; + +err_domain_del: + device_del(&tb->dev); +err_ctl_stop: + tb_ctl_stop(tb->ctl); + mutex_unlock(&tb->lock); + + return ret; +} + +/** + * tb_domain_remove() - Removes and releases a domain + * @tb: Domain to remove + * + * Stops the domain, removes it from the system and releases all + * resources once the last reference has been released. + */ +void tb_domain_remove(struct tb *tb) +{ + mutex_lock(&tb->lock); + if (tb->cm_ops->stop) + tb->cm_ops->stop(tb); + /* Stop the domain control traffic */ + tb_ctl_stop(tb->ctl); + mutex_unlock(&tb->lock); + + flush_workqueue(tb->wq); + device_unregister(&tb->dev); +} + +/** + * tb_domain_suspend_noirq() - Suspend a domain + * @tb: Domain to suspend + * + * Suspends all devices in the domain and stops the control channel. + */ +int tb_domain_suspend_noirq(struct tb *tb) +{ + int ret = 0; + + /* + * The control channel interrupt is left enabled during suspend + * and taking the lock here prevents any events happening before + * we actually have stopped the domain and the control channel. + */ + mutex_lock(&tb->lock); + if (tb->cm_ops->suspend_noirq) + ret = tb->cm_ops->suspend_noirq(tb); + if (!ret) + tb_ctl_stop(tb->ctl); + mutex_unlock(&tb->lock); + + return ret; +} + +/** + * tb_domain_resume_noirq() - Resume a domain + * @tb: Domain to resume + * + * Re-starts the control channel, and resumes all devices connected to + * the domain. + */ +int tb_domain_resume_noirq(struct tb *tb) +{ + int ret = 0; + + mutex_lock(&tb->lock); + tb_ctl_start(tb->ctl); + if (tb->cm_ops->resume_noirq) + ret = tb->cm_ops->resume_noirq(tb); + mutex_unlock(&tb->lock); + + return ret; +} + +int tb_domain_suspend(struct tb *tb) +{ + return tb->cm_ops->suspend ? 
tb->cm_ops->suspend(tb) : 0; +} + +int tb_domain_freeze_noirq(struct tb *tb) +{ + int ret = 0; + + mutex_lock(&tb->lock); + if (tb->cm_ops->freeze_noirq) + ret = tb->cm_ops->freeze_noirq(tb); + if (!ret) + tb_ctl_stop(tb->ctl); + mutex_unlock(&tb->lock); + + return ret; +} + +int tb_domain_thaw_noirq(struct tb *tb) +{ + int ret = 0; + + mutex_lock(&tb->lock); + tb_ctl_start(tb->ctl); + if (tb->cm_ops->thaw_noirq) + ret = tb->cm_ops->thaw_noirq(tb); + mutex_unlock(&tb->lock); + + return ret; +} + +void tb_domain_complete(struct tb *tb) +{ + if (tb->cm_ops->complete) + tb->cm_ops->complete(tb); +} + +int tb_domain_runtime_suspend(struct tb *tb) +{ + if (tb->cm_ops->runtime_suspend) { + int ret = tb->cm_ops->runtime_suspend(tb); + if (ret) + return ret; + } + tb_ctl_stop(tb->ctl); + return 0; +} + +int tb_domain_runtime_resume(struct tb *tb) +{ + tb_ctl_start(tb->ctl); + if (tb->cm_ops->runtime_resume) { + int ret = tb->cm_ops->runtime_resume(tb); + if (ret) + return ret; + } + return 0; +} + +/** + * tb_domain_disapprove_switch() - Disapprove switch + * @tb: Domain the switch belongs to + * @sw: Switch to disapprove + * + * This will disconnect PCIe tunnel from parent to this @sw. + * + * Return: %0 on success and negative errno in case of failure. + */ +int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw) +{ + if (!tb->cm_ops->disapprove_switch) + return -EPERM; + + return tb->cm_ops->disapprove_switch(tb, sw); +} + +/** + * tb_domain_approve_switch() - Approve switch + * @tb: Domain the switch belongs to + * @sw: Switch to approve + * + * This will approve switch by connection manager specific means. In + * case of success the connection manager will create PCIe tunnel from + * parent to @sw. + */ +int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw) +{ + struct tb_switch *parent_sw; + + if (!tb->cm_ops->approve_switch) + return -EPERM; + + /* The parent switch must be authorized before this one */ + parent_sw = tb_to_switch(sw->dev.parent); + if (!parent_sw || !parent_sw->authorized) + return -EINVAL; + + return tb->cm_ops->approve_switch(tb, sw); +} + +/** + * tb_domain_approve_switch_key() - Approve switch and add key + * @tb: Domain the switch belongs to + * @sw: Switch to approve + * + * For switches that support secure connect, this function first adds + * key to the switch NVM using connection manager specific means. If + * adding the key is successful, the switch is approved and connected. + * + * Return: %0 on success and negative errno in case of failure. + */ +int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw) +{ + struct tb_switch *parent_sw; + int ret; + + if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key) + return -EPERM; + + /* The parent switch must be authorized before this one */ + parent_sw = tb_to_switch(sw->dev.parent); + if (!parent_sw || !parent_sw->authorized) + return -EINVAL; + + ret = tb->cm_ops->add_switch_key(tb, sw); + if (ret) + return ret; + + return tb->cm_ops->approve_switch(tb, sw); +} + +/** + * tb_domain_challenge_switch_key() - Challenge and approve switch + * @tb: Domain the switch belongs to + * @sw: Switch to approve + * + * For switches that support secure connect, this function generates + * random challenge and sends it to the switch. The switch responds to + * this and if the response matches our random challenge, the switch is + * approved and connected. + * + * Return: %0 on success and negative errno in case of failure. 
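+ *
+ * In other words (matching the implementation below), the switch is approved
+ * only if the response it returns equals the HMAC-SHA256 of the random
+ * challenge keyed with sw->key.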
+ */ +int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw) +{ + u8 challenge[TB_SWITCH_KEY_SIZE]; + u8 response[TB_SWITCH_KEY_SIZE]; + u8 hmac[TB_SWITCH_KEY_SIZE]; + struct tb_switch *parent_sw; + struct crypto_shash *tfm; + struct shash_desc *shash; + int ret; + + if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key) + return -EPERM; + + /* The parent switch must be authorized before this one */ + parent_sw = tb_to_switch(sw->dev.parent); + if (!parent_sw || !parent_sw->authorized) + return -EINVAL; + + get_random_bytes(challenge, sizeof(challenge)); + ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response); + if (ret) + return ret; + + tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE); + if (ret) + goto err_free_tfm; + + shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm), + GFP_KERNEL); + if (!shash) { + ret = -ENOMEM; + goto err_free_tfm; + } + + shash->tfm = tfm; + + memset(hmac, 0, sizeof(hmac)); + ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac); + if (ret) + goto err_free_shash; + + /* The returned HMAC must match the one we calculated */ + if (memcmp(response, hmac, sizeof(hmac))) { + ret = -EKEYREJECTED; + goto err_free_shash; + } + + crypto_free_shash(tfm); + kfree(shash); + + return tb->cm_ops->approve_switch(tb, sw); + +err_free_shash: + kfree(shash); +err_free_tfm: + crypto_free_shash(tfm); + + return ret; +} + +/** + * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths + * @tb: Domain whose PCIe paths to disconnect + * + * This needs to be called in preparation for NVM upgrade of the host + * controller. Makes sure all PCIe paths are disconnected. + * + * Return %0 on success and negative errno in case of error. + */ +int tb_domain_disconnect_pcie_paths(struct tb *tb) +{ + if (!tb->cm_ops->disconnect_pcie_paths) + return -EPERM; + + return tb->cm_ops->disconnect_pcie_paths(tb); +} + +/** + * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain + * @tb: Domain enabling the DMA paths + * @xd: XDomain DMA paths are created to + * @transmit_path: HopID we are using to send out packets + * @transmit_ring: DMA ring used to send out packets + * @receive_path: HopID the other end is using to send packets to us + * @receive_ring: DMA ring used to receive packets from @receive_path + * + * Calls connection manager specific method to enable DMA paths to the + * XDomain in question. + * + * Return: 0% in case of success and negative errno otherwise. In + * particular returns %-ENOTSUPP if the connection manager + * implementation does not support XDomains. 
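+ *
+ * The DMA test driver above, for example, reaches this through
+ * tb_xdomain_enable_paths(): the HopIDs it reserved with
+ * tb_xdomain_alloc_out_hopid()/tb_xdomain_alloc_in_hopid() become
+ * @transmit_path/@receive_path and the NHI ring HopIDs become
+ * @transmit_ring/@receive_ring.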
+ */ +int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) +{ + if (!tb->cm_ops->approve_xdomain_paths) + return -ENOTSUPP; + + return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path, + transmit_ring, receive_path, receive_ring); +} + +/** + * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain + * @tb: Domain disabling the DMA paths + * @xd: XDomain whose DMA paths are disconnected + * @transmit_path: HopID we are using to send out packets + * @transmit_ring: DMA ring used to send out packets + * @receive_path: HopID the other end is using to send packets to us + * @receive_ring: DMA ring used to receive packets from @receive_path + * + * Calls connection manager specific method to disconnect DMA paths to + * the XDomain in question. + * + * Return: 0% in case of success and negative errno otherwise. In + * particular returns %-ENOTSUPP if the connection manager + * implementation does not support XDomains. + */ +int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) +{ + if (!tb->cm_ops->disconnect_xdomain_paths) + return -ENOTSUPP; + + return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path, + transmit_ring, receive_path, receive_ring); +} + +static int disconnect_xdomain(struct device *dev, void *data) +{ + struct tb_xdomain *xd; + struct tb *tb = data; + int ret = 0; + + xd = tb_to_xdomain(dev); + if (xd && xd->tb == tb) + ret = tb_xdomain_disable_all_paths(xd); + + return ret; +} + +/** + * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain + * @tb: Domain whose paths are disconnected + * + * This function can be used to disconnect all paths (PCIe, XDomain) for + * example in preparation for host NVM firmware upgrade. After this is + * called the paths cannot be established without resetting the switch. + * + * Return: %0 in case of success and negative errno otherwise. 
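+ *
+ * Implementation-wise this first disconnects the PCIe paths and then walks
+ * every XDomain on the bus, calling tb_xdomain_disable_all_paths() for each.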
+ */ +int tb_domain_disconnect_all_paths(struct tb *tb) +{ + int ret; + + ret = tb_domain_disconnect_pcie_paths(tb); + if (ret) + return ret; + + return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain); +} + +int tb_domain_init(void) +{ + int ret; + + tb_debugfs_init(); + tb_acpi_init(); + + ret = tb_xdomain_init(); + if (ret) + goto err_acpi; + ret = bus_register(&tb_bus_type); + if (ret) + goto err_xdomain; + + return 0; + +err_xdomain: + tb_xdomain_exit(); +err_acpi: + tb_acpi_exit(); + tb_debugfs_exit(); + + return ret; +} + +void tb_domain_exit(void) +{ + bus_unregister(&tb_bus_type); + ida_destroy(&tb_domain_ida); + tb_nvm_exit(); + tb_xdomain_exit(); + tb_acpi_exit(); + tb_debugfs_exit(); +} diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c new file mode 100644 index 0000000000..eb241b270f --- /dev/null +++ b/drivers/thunderbolt/eeprom.c @@ -0,0 +1,712 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt driver - eeprom access + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2018, Intel Corporation + */ + +#include <linux/crc32.h> +#include <linux/delay.h> +#include <linux/property.h> +#include <linux/slab.h> +#include "tb.h" + +/* + * tb_eeprom_ctl_write() - write control word + */ +static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl) +{ + return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + ROUTER_CS_4, 1); +} + +/* + * tb_eeprom_ctl_write() - read control word + */ +static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl) +{ + return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + ROUTER_CS_4, 1); +} + +enum tb_eeprom_transfer { + TB_EEPROM_IN, + TB_EEPROM_OUT, +}; + +/* + * tb_eeprom_active - enable rom access + * + * WARNING: Always disable access after usage. Otherwise the controller will + * fail to reprobe. + */ +static int tb_eeprom_active(struct tb_switch *sw, bool enable) +{ + struct tb_eeprom_ctl ctl; + int res = tb_eeprom_ctl_read(sw, &ctl); + if (res) + return res; + if (enable) { + ctl.bit_banging_enable = 1; + res = tb_eeprom_ctl_write(sw, &ctl); + if (res) + return res; + ctl.fl_cs = 0; + return tb_eeprom_ctl_write(sw, &ctl); + } else { + ctl.fl_cs = 1; + res = tb_eeprom_ctl_write(sw, &ctl); + if (res) + return res; + ctl.bit_banging_enable = 0; + return tb_eeprom_ctl_write(sw, &ctl); + } +} + +/* + * tb_eeprom_transfer - transfer one bit + * + * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->fl_do. + * If TB_EEPROM_OUT is passed, then ctl->fl_di will be written. 
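+ *
+ * In both directions the bit is clocked by raising and then lowering
+ * ctl->fl_sk: for TB_EEPROM_OUT the value in ctl->fl_di is written out before
+ * the rising edge, for TB_EEPROM_IN ctl->fl_do is sampled while the clock is
+ * high.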
+ */ +static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl, + enum tb_eeprom_transfer direction) +{ + int res; + if (direction == TB_EEPROM_OUT) { + res = tb_eeprom_ctl_write(sw, ctl); + if (res) + return res; + } + ctl->fl_sk = 1; + res = tb_eeprom_ctl_write(sw, ctl); + if (res) + return res; + if (direction == TB_EEPROM_IN) { + res = tb_eeprom_ctl_read(sw, ctl); + if (res) + return res; + } + ctl->fl_sk = 0; + return tb_eeprom_ctl_write(sw, ctl); +} + +/* + * tb_eeprom_out - write one byte to the bus + */ +static int tb_eeprom_out(struct tb_switch *sw, u8 val) +{ + struct tb_eeprom_ctl ctl; + int i; + int res = tb_eeprom_ctl_read(sw, &ctl); + if (res) + return res; + for (i = 0; i < 8; i++) { + ctl.fl_di = val & 0x80; + res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT); + if (res) + return res; + val <<= 1; + } + return 0; +} + +/* + * tb_eeprom_in - read one byte from the bus + */ +static int tb_eeprom_in(struct tb_switch *sw, u8 *val) +{ + struct tb_eeprom_ctl ctl; + int i; + int res = tb_eeprom_ctl_read(sw, &ctl); + if (res) + return res; + *val = 0; + for (i = 0; i < 8; i++) { + *val <<= 1; + res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN); + if (res) + return res; + *val |= ctl.fl_do; + } + return 0; +} + +/* + * tb_eeprom_get_drom_offset - get drom offset within eeprom + */ +static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset) +{ + struct tb_cap_plug_events cap; + int res; + + if (!sw->cap_plug_events) { + tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n"); + return -ENODEV; + } + res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events, + sizeof(cap) / 4); + if (res) + return res; + + if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) { + tb_sw_warn(sw, "no NVM\n"); + return -ENODEV; + } + + if (cap.drom_offset > 0xffff) { + tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n", + cap.drom_offset); + return -ENXIO; + } + *offset = cap.drom_offset; + return 0; +} + +/* + * tb_eeprom_read_n - read count bytes from offset into val + */ +static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val, + size_t count) +{ + u16 drom_offset; + int i, res; + + res = tb_eeprom_get_drom_offset(sw, &drom_offset); + if (res) + return res; + + offset += drom_offset; + + res = tb_eeprom_active(sw, true); + if (res) + return res; + res = tb_eeprom_out(sw, 3); + if (res) + return res; + res = tb_eeprom_out(sw, offset >> 8); + if (res) + return res; + res = tb_eeprom_out(sw, offset); + if (res) + return res; + for (i = 0; i < count; i++) { + res = tb_eeprom_in(sw, val + i); + if (res) + return res; + } + return tb_eeprom_active(sw, false); +} + +static u8 tb_crc8(u8 *data, int len) +{ + int i, j; + u8 val = 0xff; + for (i = 0; i < len; i++) { + val ^= data[i]; + for (j = 0; j < 8; j++) + val = (val << 1) ^ ((val & 0x80) ? 
7 : 0); + } + return val; +} + +static u32 tb_crc32(void *data, size_t len) +{ + return ~__crc32c_le(~0, data, len); +} + +#define TB_DROM_DATA_START 13 +#define TB_DROM_HEADER_SIZE 22 +#define USB4_DROM_HEADER_SIZE 16 + +struct tb_drom_header { + /* BYTE 0 */ + u8 uid_crc8; /* checksum for uid */ + /* BYTES 1-8 */ + u64 uid; + /* BYTES 9-12 */ + u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */ + /* BYTE 13 */ + u8 device_rom_revision; /* should be <= 1 */ + u16 data_len:12; + u8 reserved:4; + /* BYTES 16-21 - Only for TBT DROM, nonexistent in USB4 DROM */ + u16 vendor_id; + u16 model_id; + u8 model_rev; + u8 eeprom_rev; +} __packed; + +enum tb_drom_entry_type { + /* force unsigned to prevent "one-bit signed bitfield" warning */ + TB_DROM_ENTRY_GENERIC = 0U, + TB_DROM_ENTRY_PORT, +}; + +struct tb_drom_entry_header { + u8 len; + u8 index:6; + bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */ + enum tb_drom_entry_type type:1; +} __packed; + +struct tb_drom_entry_generic { + struct tb_drom_entry_header header; + u8 data[]; +} __packed; + +struct tb_drom_entry_port { + /* BYTES 0-1 */ + struct tb_drom_entry_header header; + /* BYTE 2 */ + u8 dual_link_port_rid:4; + u8 link_nr:1; + u8 unknown1:2; + bool has_dual_link_port:1; + + /* BYTE 3 */ + u8 dual_link_port_nr:6; + u8 unknown2:2; + + /* BYTES 4 - 5 TODO decode */ + u8 micro2:4; + u8 micro1:4; + u8 micro3; + + /* BYTES 6-7, TODO: verify (find hardware that has these set) */ + u8 peer_port_rid:4; + u8 unknown3:3; + bool has_peer_port:1; + u8 peer_port_nr:6; + u8 unknown4:2; +} __packed; + +/* USB4 product descriptor */ +struct tb_drom_entry_desc { + struct tb_drom_entry_header header; + u16 bcdUSBSpec; + u16 idVendor; + u16 idProduct; + u16 bcdProductFWRevision; + u32 TID; + u8 productHWRevision; +}; + +/** + * tb_drom_read_uid_only() - Read UID directly from DROM + * @sw: Router whose UID to read + * @uid: UID is placed here + * + * Does not use the cached copy in sw->drom. Used during resume to check switch + * identity. + */ +int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid) +{ + u8 data[9]; + u8 crc; + int res; + + /* read uid */ + res = tb_eeprom_read_n(sw, 0, data, 9); + if (res) + return res; + + crc = tb_crc8(data + 1, 8); + if (crc != data[0]) { + tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n", + data[0], crc); + return -EIO; + } + + *uid = *(u64 *)(data+1); + return 0; +} + +static int tb_drom_parse_entry_generic(struct tb_switch *sw, + struct tb_drom_entry_header *header) +{ + const struct tb_drom_entry_generic *entry = + (const struct tb_drom_entry_generic *)header; + + switch (header->index) { + case 1: + /* Length includes 2 bytes header so remove it before copy */ + sw->vendor_name = kstrndup(entry->data, + header->len - sizeof(*header), GFP_KERNEL); + if (!sw->vendor_name) + return -ENOMEM; + break; + + case 2: + sw->device_name = kstrndup(entry->data, + header->len - sizeof(*header), GFP_KERNEL); + if (!sw->device_name) + return -ENOMEM; + break; + case 9: { + const struct tb_drom_entry_desc *desc = + (const struct tb_drom_entry_desc *)entry; + + if (!sw->vendor && !sw->device) { + sw->vendor = desc->idVendor; + sw->device = desc->idProduct; + } + break; + } + } + + return 0; +} + +static int tb_drom_parse_entry_port(struct tb_switch *sw, + struct tb_drom_entry_header *header) +{ + struct tb_port *port; + int res; + enum tb_port_type type; + + /* + * Some DROMs list more ports than the controller actually has + * so we skip those but allow the parser to continue. 
+ */ + if (header->index > sw->config.max_port_number) { + dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n"); + return 0; + } + + port = &sw->ports[header->index]; + port->disabled = header->port_disabled; + if (port->disabled) + return 0; + + res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1); + if (res) + return res; + type &= 0xffffff; + + if (type == TB_TYPE_PORT) { + struct tb_drom_entry_port *entry = (void *) header; + if (header->len != sizeof(*entry)) { + tb_sw_warn(sw, + "port entry has size %#x (expected %#zx)\n", + header->len, sizeof(struct tb_drom_entry_port)); + return -EIO; + } + port->link_nr = entry->link_nr; + if (entry->has_dual_link_port) + port->dual_link_port = + &port->sw->ports[entry->dual_link_port_nr]; + } + return 0; +} + +/* + * tb_drom_parse_entries - parse the linked list of drom entries + * + * Drom must have been copied to sw->drom. + */ +static int tb_drom_parse_entries(struct tb_switch *sw, size_t header_size) +{ + struct tb_drom_header *header = (void *) sw->drom; + u16 pos = header_size; + u16 drom_size = header->data_len + TB_DROM_DATA_START; + int res; + + while (pos < drom_size) { + struct tb_drom_entry_header *entry = (void *) (sw->drom + pos); + if (pos + 1 == drom_size || pos + entry->len > drom_size + || !entry->len) { + tb_sw_warn(sw, "DROM buffer overrun\n"); + return -EIO; + } + + switch (entry->type) { + case TB_DROM_ENTRY_GENERIC: + res = tb_drom_parse_entry_generic(sw, entry); + break; + case TB_DROM_ENTRY_PORT: + res = tb_drom_parse_entry_port(sw, entry); + break; + } + if (res) + return res; + + pos += entry->len; + } + return 0; +} + +/* + * tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present + */ +static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size) +{ + struct device *dev = &sw->tb->nhi->pdev->dev; + int len, res; + + len = device_property_count_u8(dev, "ThunderboltDROM"); + if (len < 0 || len < sizeof(struct tb_drom_header)) + return -EINVAL; + + sw->drom = kmalloc(len, GFP_KERNEL); + if (!sw->drom) + return -ENOMEM; + + res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom, + len); + if (res) + goto err; + + *size = ((struct tb_drom_header *)sw->drom)->data_len + + TB_DROM_DATA_START; + if (*size > len) + goto err; + + return 0; + +err: + kfree(sw->drom); + sw->drom = NULL; + return -EINVAL; +} + +static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size) +{ + u16 drom_offset; + int ret; + + if (!sw->dma_port) + return -ENODEV; + + ret = tb_eeprom_get_drom_offset(sw, &drom_offset); + if (ret) + return ret; + + if (!drom_offset) + return -ENODEV; + + ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size, + sizeof(*size)); + if (ret) + return ret; + + /* Size includes CRC8 + UID + CRC32 */ + *size += 1 + 8 + 4; + sw->drom = kzalloc(*size, GFP_KERNEL); + if (!sw->drom) + return -ENOMEM; + + ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size); + if (ret) + goto err_free; + + /* + * Read UID from the minimal DROM because the one in NVM is just + * a placeholder. 
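+ * ("Minimal DROM" here means the one read directly from the EEPROM by the
+ * tb_drom_read_uid_only() call that follows.)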
+ */ + tb_drom_read_uid_only(sw, &sw->uid); + return 0; + +err_free: + kfree(sw->drom); + sw->drom = NULL; + return ret; +} + +static int usb4_copy_drom(struct tb_switch *sw, u16 *size) +{ + int ret; + + ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size)); + if (ret) + return ret; + + /* Size includes CRC8 + UID + CRC32 */ + *size += 1 + 8 + 4; + sw->drom = kzalloc(*size, GFP_KERNEL); + if (!sw->drom) + return -ENOMEM; + + ret = usb4_switch_drom_read(sw, 0, sw->drom, *size); + if (ret) { + kfree(sw->drom); + sw->drom = NULL; + } + + return ret; +} + +static int tb_drom_bit_bang(struct tb_switch *sw, u16 *size) +{ + int ret; + + ret = tb_eeprom_read_n(sw, 14, (u8 *)size, 2); + if (ret) + return ret; + + *size &= 0x3ff; + *size += TB_DROM_DATA_START; + + tb_sw_dbg(sw, "reading DROM (length: %#x)\n", *size); + if (*size < sizeof(struct tb_drom_header)) { + tb_sw_warn(sw, "DROM too small, aborting\n"); + return -EIO; + } + + sw->drom = kzalloc(*size, GFP_KERNEL); + if (!sw->drom) + return -ENOMEM; + + ret = tb_eeprom_read_n(sw, 0, sw->drom, *size); + if (ret) + goto err; + + return 0; + +err: + kfree(sw->drom); + sw->drom = NULL; + return ret; +} + +static int tb_drom_parse_v1(struct tb_switch *sw) +{ + const struct tb_drom_header *header = + (const struct tb_drom_header *)sw->drom; + u32 crc; + + crc = tb_crc8((u8 *) &header->uid, 8); + if (crc != header->uid_crc8) { + tb_sw_warn(sw, + "DROM UID CRC8 mismatch (expected: %#x, got: %#x)\n", + header->uid_crc8, crc); + return -EIO; + } + if (!sw->uid) + sw->uid = header->uid; + sw->vendor = header->vendor_id; + sw->device = header->model_id; + + crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len); + if (crc != header->data_crc32) { + tb_sw_warn(sw, + "DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n", + header->data_crc32, crc); + } + + return tb_drom_parse_entries(sw, TB_DROM_HEADER_SIZE); +} + +static int usb4_drom_parse(struct tb_switch *sw) +{ + const struct tb_drom_header *header = + (const struct tb_drom_header *)sw->drom; + u32 crc; + + crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len); + if (crc != header->data_crc32) { + tb_sw_warn(sw, + "DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n", + header->data_crc32, crc); + } + + return tb_drom_parse_entries(sw, USB4_DROM_HEADER_SIZE); +} + +static int tb_drom_parse(struct tb_switch *sw, u16 size) +{ + const struct tb_drom_header *header = (const void *)sw->drom; + int ret; + + if (header->data_len + TB_DROM_DATA_START != size) { + tb_sw_warn(sw, "DROM size mismatch\n"); + ret = -EIO; + goto err; + } + + tb_sw_dbg(sw, "DROM version: %d\n", header->device_rom_revision); + + switch (header->device_rom_revision) { + case 3: + ret = usb4_drom_parse(sw); + break; + default: + tb_sw_warn(sw, "DROM device_rom_revision %#x unknown\n", + header->device_rom_revision); + fallthrough; + case 1: + ret = tb_drom_parse_v1(sw); + break; + } + + if (ret) { + tb_sw_warn(sw, "parsing DROM failed\n"); + goto err; + } + + return 0; + +err: + kfree(sw->drom); + sw->drom = NULL; + + return ret; +} + +static int tb_drom_host_read(struct tb_switch *sw) +{ + u16 size; + + if (tb_switch_is_usb4(sw)) { + usb4_switch_read_uid(sw, &sw->uid); + if (!usb4_copy_drom(sw, &size)) + return tb_drom_parse(sw, size); + } else { + if (!tb_drom_copy_efi(sw, &size)) + return tb_drom_parse(sw, size); + + if (!tb_drom_copy_nvm(sw, &size)) + return tb_drom_parse(sw, size); + + tb_drom_read_uid_only(sw, &sw->uid); + } + + return 0; +} + +static int tb_drom_device_read(struct 
tb_switch *sw) +{ + u16 size; + int ret; + + if (tb_switch_is_usb4(sw)) { + usb4_switch_read_uid(sw, &sw->uid); + ret = usb4_copy_drom(sw, &size); + } else { + ret = tb_drom_bit_bang(sw, &size); + } + + if (ret) + return ret; + + return tb_drom_parse(sw, size); +} + +/** + * tb_drom_read() - Copy DROM to sw->drom and parse it + * @sw: Router whose DROM to read and parse + * + * This function reads router DROM and if successful parses the entries and + * populates the fields in @sw accordingly. Can be called for any router + * generation. + * + * Returns %0 in case of success and negative errno otherwise. + */ +int tb_drom_read(struct tb_switch *sw) +{ + if (sw->drom) + return 0; + + if (!tb_route(sw)) + return tb_drom_host_read(sw); + return tb_drom_device_read(sw); +} diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c new file mode 100644 index 0000000000..d8b9c734ab --- /dev/null +++ b/drivers/thunderbolt/icm.c @@ -0,0 +1,2555 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Internal Thunderbolt Connection Manager. This is a firmware running on + * the Thunderbolt host controller performing most of the low-level + * handling. + * + * Copyright (C) 2017, Intel Corporation + * Authors: Michael Jamet <michael.jamet@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/platform_data/x86/apple.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/workqueue.h> + +#include "ctl.h" +#include "nhi_regs.h" +#include "tb.h" + +#define PCIE2CIO_CMD 0x30 +#define PCIE2CIO_CMD_TIMEOUT BIT(31) +#define PCIE2CIO_CMD_START BIT(30) +#define PCIE2CIO_CMD_WRITE BIT(21) +#define PCIE2CIO_CMD_CS_MASK GENMASK(20, 19) +#define PCIE2CIO_CMD_CS_SHIFT 19 +#define PCIE2CIO_CMD_PORT_MASK GENMASK(18, 13) +#define PCIE2CIO_CMD_PORT_SHIFT 13 + +#define PCIE2CIO_WRDATA 0x34 +#define PCIE2CIO_RDDATA 0x38 + +#define PHY_PORT_CS1 0x37 +#define PHY_PORT_CS1_LINK_DISABLE BIT(14) +#define PHY_PORT_CS1_LINK_STATE_MASK GENMASK(29, 26) +#define PHY_PORT_CS1_LINK_STATE_SHIFT 26 + +#define ICM_TIMEOUT 5000 /* ms */ +#define ICM_RETRIES 3 +#define ICM_APPROVE_TIMEOUT 10000 /* ms */ +#define ICM_MAX_LINK 4 + +static bool start_icm; +module_param(start_icm, bool, 0444); +MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)"); + +/** + * struct usb4_switch_nvm_auth - Holds USB4 NVM_AUTH status + * @reply: Reply from ICM firmware is placed here + * @request: Request that is sent to ICM firmware + * @icm: Pointer to ICM private data + */ +struct usb4_switch_nvm_auth { + struct icm_usb4_switch_op_response reply; + struct icm_usb4_switch_op request; + struct icm *icm; +}; + +/** + * struct icm - Internal connection manager private data + * @request_lock: Makes sure only one message is send to ICM at time + * @rescan_work: Work used to rescan the surviving switches after resume + * @upstream_port: Pointer to the PCIe upstream port this host + * controller is connected. 
This is only set for systems + * where ICM needs to be started manually + * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides + * (only set when @upstream_port is not %NULL) + * @safe_mode: ICM is in safe mode + * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported) + * @rpm: Does the controller support runtime PM (RTD3) + * @can_upgrade_nvm: Can the NVM firmware be upgrade on this controller + * @proto_version: Firmware protocol version + * @last_nvm_auth: Last USB4 router NVM_AUTH result (or %NULL if not set) + * @veto: Is RTD3 veto in effect + * @is_supported: Checks if we can support ICM on this controller + * @cio_reset: Trigger CIO reset + * @get_mode: Read and return the ICM firmware mode (optional) + * @get_route: Find a route string for given switch + * @save_devices: Ask ICM to save devices to ACL when suspending (optional) + * @driver_ready: Send driver ready message to ICM + * @set_uuid: Set UUID for the root switch (optional) + * @device_connected: Handle device connected ICM message + * @device_disconnected: Handle device disconnected ICM message + * @xdomain_connected: Handle XDomain connected ICM message + * @xdomain_disconnected: Handle XDomain disconnected ICM message + * @rtd3_veto: Handle RTD3 veto notification ICM message + */ +struct icm { + struct mutex request_lock; + struct delayed_work rescan_work; + struct pci_dev *upstream_port; + int vnd_cap; + bool safe_mode; + size_t max_boot_acl; + bool rpm; + bool can_upgrade_nvm; + u8 proto_version; + struct usb4_switch_nvm_auth *last_nvm_auth; + bool veto; + bool (*is_supported)(struct tb *tb); + int (*cio_reset)(struct tb *tb); + int (*get_mode)(struct tb *tb); + int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route); + void (*save_devices)(struct tb *tb); + int (*driver_ready)(struct tb *tb, + enum tb_security_level *security_level, + u8 *proto_version, size_t *nboot_acl, bool *rpm); + void (*set_uuid)(struct tb *tb); + void (*device_connected)(struct tb *tb, + const struct icm_pkg_header *hdr); + void (*device_disconnected)(struct tb *tb, + const struct icm_pkg_header *hdr); + void (*xdomain_connected)(struct tb *tb, + const struct icm_pkg_header *hdr); + void (*xdomain_disconnected)(struct tb *tb, + const struct icm_pkg_header *hdr); + void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr); +}; + +struct icm_notification { + struct work_struct work; + struct icm_pkg_header *pkg; + struct tb *tb; +}; + +struct ep_name_entry { + u8 len; + u8 type; + u8 data[]; +}; + +#define EP_NAME_INTEL_VSS 0x10 + +/* Intel Vendor specific structure */ +struct intel_vss { + u16 vendor; + u16 model; + u8 mc; + u8 flags; + u16 pci_devid; + u32 nvm_version; +}; + +#define INTEL_VSS_FLAGS_RTD3 BIT(0) + +static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size) +{ + const void *end = ep_name + size; + + while (ep_name < end) { + const struct ep_name_entry *ep = ep_name; + + if (!ep->len) + break; + if (ep_name + ep->len > end) + break; + + if (ep->type == EP_NAME_INTEL_VSS) + return (const struct intel_vss *)ep->data; + + ep_name += ep->len; + } + + return NULL; +} + +static bool intel_vss_is_rtd3(const void *ep_name, size_t size) +{ + const struct intel_vss *vss; + + vss = parse_intel_vss(ep_name, size); + if (vss) + return !!(vss->flags & INTEL_VSS_FLAGS_RTD3); + + return false; +} + +static inline struct tb *icm_to_tb(struct icm *icm) +{ + return ((void *)icm - sizeof(struct tb)); +} + +static inline u8 phy_port_from_route(u64 route, u8 depth) +{ + u8 
link; + + link = depth ? route >> ((depth - 1) * 8) : route; + return tb_phy_port_from_link(link); +} + +static inline u8 dual_link_from_link(u8 link) +{ + return link ? ((link - 1) ^ 0x01) + 1 : 0; +} + +static inline u64 get_route(u32 route_hi, u32 route_lo) +{ + return (u64)route_hi << 32 | route_lo; +} + +static inline u64 get_parent_route(u64 route) +{ + int depth = tb_route_length(route); + return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0; +} + +static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec) +{ + unsigned long end = jiffies + msecs_to_jiffies(timeout_msec); + u32 cmd; + + do { + pci_read_config_dword(icm->upstream_port, + icm->vnd_cap + PCIE2CIO_CMD, &cmd); + if (!(cmd & PCIE2CIO_CMD_START)) { + if (cmd & PCIE2CIO_CMD_TIMEOUT) + break; + return 0; + } + + msleep(50); + } while (time_before(jiffies, end)); + + return -ETIMEDOUT; +} + +static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs, + unsigned int port, unsigned int index, u32 *data) +{ + struct pci_dev *pdev = icm->upstream_port; + int ret, vnd_cap = icm->vnd_cap; + u32 cmd; + + cmd = index; + cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; + cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; + cmd |= PCIE2CIO_CMD_START; + pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); + + ret = pci2cio_wait_completion(icm, 5000); + if (ret) + return ret; + + pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data); + return 0; +} + +static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs, + unsigned int port, unsigned int index, u32 data) +{ + struct pci_dev *pdev = icm->upstream_port; + int vnd_cap = icm->vnd_cap; + u32 cmd; + + pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data); + + cmd = index; + cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK; + cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK; + cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START; + pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd); + + return pci2cio_wait_completion(icm, 5000); +} + +static bool icm_match(const struct tb_cfg_request *req, + const struct ctl_pkg *pkg) +{ + const struct icm_pkg_header *res_hdr = pkg->buffer; + const struct icm_pkg_header *req_hdr = req->request; + + if (pkg->frame.eof != req->response_type) + return false; + if (res_hdr->code != req_hdr->code) + return false; + + return true; +} + +static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) +{ + const struct icm_pkg_header *hdr = pkg->buffer; + + if (hdr->packet_id < req->npackets) { + size_t offset = hdr->packet_id * req->response_size; + + memcpy(req->response + offset, pkg->buffer, req->response_size); + } + + return hdr->packet_id == hdr->total_packets - 1; +} + +static int icm_request(struct tb *tb, const void *request, size_t request_size, + void *response, size_t response_size, size_t npackets, + int retries, unsigned int timeout_msec) +{ + struct icm *icm = tb_priv(tb); + + do { + struct tb_cfg_request *req; + struct tb_cfg_result res; + + req = tb_cfg_request_alloc(); + if (!req) + return -ENOMEM; + + req->match = icm_match; + req->copy = icm_copy; + req->request = request; + req->request_size = request_size; + req->request_type = TB_CFG_PKG_ICM_CMD; + req->response = response; + req->npackets = npackets; + req->response_size = response_size; + req->response_type = TB_CFG_PKG_ICM_RESP; + + mutex_lock(&icm->request_lock); + res = tb_cfg_request_sync(tb->ctl, req, timeout_msec); + 
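/*
+ * The ICM firmware handles a single request at a time, which is what
+ * icm->request_lock enforces around this transaction; only timeouts
+ * are retried by the loop below, while a result error (res.err == 1)
+ * is mapped to -EIO.
+ */
+ 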
mutex_unlock(&icm->request_lock); + + tb_cfg_request_put(req); + + if (res.err != -ETIMEDOUT) + return res.err == 1 ? -EIO : res.err; + + usleep_range(20, 50); + } while (retries--); + + return -ETIMEDOUT; +} + +/* + * If rescan is queued to run (we are resuming), postpone it to give the + * firmware some more time to send device connected notifications for next + * devices in the chain. + */ +static void icm_postpone_rescan(struct tb *tb) +{ + struct icm *icm = tb_priv(tb); + + if (delayed_work_pending(&icm->rescan_work)) + mod_delayed_work(tb->wq, &icm->rescan_work, + msecs_to_jiffies(500)); +} + +static void icm_veto_begin(struct tb *tb) +{ + struct icm *icm = tb_priv(tb); + + if (!icm->veto) { + icm->veto = true; + /* Keep the domain powered while veto is in effect */ + pm_runtime_get(&tb->dev); + } +} + +static void icm_veto_end(struct tb *tb) +{ + struct icm *icm = tb_priv(tb); + + if (icm->veto) { + icm->veto = false; + /* Allow the domain suspend now */ + pm_runtime_mark_last_busy(&tb->dev); + pm_runtime_put_autosuspend(&tb->dev); + } +} + +static bool icm_firmware_running(const struct tb_nhi *nhi) +{ + u32 val; + + val = ioread32(nhi->iobase + REG_FW_STS); + return !!(val & REG_FW_STS_ICM_EN); +} + +static bool icm_fr_is_supported(struct tb *tb) +{ + return !x86_apple_machine; +} + +static inline int icm_fr_get_switch_index(u32 port) +{ + int index; + + if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT) + return 0; + + index = port >> ICM_PORT_INDEX_SHIFT; + return index != 0xff ? index : 0; +} + +static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) +{ + struct icm_fr_pkg_get_topology_response *switches, *sw; + struct icm_fr_pkg_get_topology request = { + .hdr = { .code = ICM_GET_TOPOLOGY }, + }; + size_t npackets = ICM_GET_TOPOLOGY_PACKETS; + int ret, index; + u8 i; + + switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL); + if (!switches) + return -ENOMEM; + + ret = icm_request(tb, &request, sizeof(request), switches, + sizeof(*switches), npackets, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + goto err_free; + + sw = &switches[0]; + index = icm_fr_get_switch_index(sw->ports[link]); + if (!index) { + ret = -ENODEV; + goto err_free; + } + + sw = &switches[index]; + for (i = 1; i < depth; i++) { + unsigned int j; + + if (!(sw->first_data & ICM_SWITCH_USED)) { + ret = -ENODEV; + goto err_free; + } + + for (j = 0; j < ARRAY_SIZE(sw->ports); j++) { + index = icm_fr_get_switch_index(sw->ports[j]); + if (index > sw->switch_index) { + sw = &switches[index]; + break; + } + } + } + + *route = get_route(sw->route_hi, sw->route_lo); + +err_free: + kfree(switches); + return ret; +} + +static void icm_fr_save_devices(struct tb *tb) +{ + nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0); +} + +static int +icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level, + u8 *proto_version, size_t *nboot_acl, bool *rpm) +{ + struct icm_fr_pkg_driver_ready_response reply; + struct icm_pkg_driver_ready request = { + .hdr.code = ICM_DRIVER_READY, + }; + int ret; + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (security_level) + *security_level = reply.security_level & ICM_FR_SLEVEL_MASK; + + return 0; +} + +static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw) +{ + struct icm_fr_pkg_approve_device request; + struct icm_fr_pkg_approve_device reply; + int ret; + + memset(&request, 0, sizeof(request)); + memcpy(&request.ep_uuid, 
sw->uuid, sizeof(request.ep_uuid)); + request.hdr.code = ICM_APPROVE_DEVICE; + request.connection_id = sw->connection_id; + request.connection_key = sw->connection_key; + + memset(&reply, 0, sizeof(reply)); + /* Use larger timeout as establishing tunnels can take some time */ + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_APPROVE_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) { + tb_warn(tb, "PCIe tunnel creation failed\n"); + return -EIO; + } + + return 0; +} + +static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw) +{ + struct icm_fr_pkg_add_device_key request; + struct icm_fr_pkg_add_device_key_response reply; + int ret; + + memset(&request, 0, sizeof(request)); + memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); + request.hdr.code = ICM_ADD_DEVICE_KEY; + request.connection_id = sw->connection_id; + request.connection_key = sw->connection_key; + memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) { + tb_warn(tb, "Adding key to switch failed\n"); + return -EIO; + } + + return 0; +} + +static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, + const u8 *challenge, u8 *response) +{ + struct icm_fr_pkg_challenge_device request; + struct icm_fr_pkg_challenge_device_response reply; + int ret; + + memset(&request, 0, sizeof(request)); + memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); + request.hdr.code = ICM_CHALLENGE_DEVICE; + request.connection_id = sw->connection_id; + request.connection_key = sw->connection_key; + memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) + return -EKEYREJECTED; + if (reply.hdr.flags & ICM_FLAGS_NO_KEY) + return -ENOKEY; + + memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); + + return 0; +} + +static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) +{ + struct icm_fr_pkg_approve_xdomain_response reply; + struct icm_fr_pkg_approve_xdomain request; + int ret; + + memset(&request, 0, sizeof(request)); + request.hdr.code = ICM_APPROVE_XDOMAIN; + request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link; + memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); + + request.transmit_path = transmit_path; + request.transmit_ring = transmit_ring; + request.receive_path = receive_path; + request.receive_ring = receive_ring; + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) + return -EIO; + + return 0; +} + +static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) +{ + u8 phy_port; + u8 cmd; + + phy_port = tb_phy_port_from_link(xd->link); + if (phy_port == 0) + cmd = NHI_MAILBOX_DISCONNECT_PA; + else + cmd = NHI_MAILBOX_DISCONNECT_PB; + + nhi_mailbox_cmd(tb->nhi, cmd, 1); + usleep_range(10, 50); + nhi_mailbox_cmd(tb->nhi, cmd, 
2); + return 0; +} + +static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route, + const uuid_t *uuid) +{ + struct tb *tb = parent_sw->tb; + struct tb_switch *sw; + + sw = tb_switch_alloc(tb, &parent_sw->dev, route); + if (IS_ERR(sw)) { + tb_warn(tb, "failed to allocate switch at %llx\n", route); + return sw; + } + + sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); + if (!sw->uuid) { + tb_switch_put(sw); + return ERR_PTR(-ENOMEM); + } + + init_completion(&sw->rpm_complete); + return sw; +} + +static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw) +{ + u64 route = tb_route(sw); + int ret; + + /* Link the two switches now */ + tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); + tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); + + ret = tb_switch_add(sw); + if (ret) + tb_port_at(tb_route(sw), parent_sw)->remote = NULL; + + return ret; +} + +static void update_switch(struct tb_switch *sw, u64 route, u8 connection_id, + u8 connection_key, u8 link, u8 depth, bool boot) +{ + struct tb_switch *parent_sw = tb_switch_parent(sw); + + /* Disconnect from parent */ + tb_switch_downstream_port(sw)->remote = NULL; + /* Re-connect via updated port */ + tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); + + /* Update with the new addressing information */ + sw->config.route_hi = upper_32_bits(route); + sw->config.route_lo = lower_32_bits(route); + sw->connection_id = connection_id; + sw->connection_key = connection_key; + sw->link = link; + sw->depth = depth; + sw->boot = boot; + + /* This switch still exists */ + sw->is_unplugged = false; + + /* Runtime resume is now complete */ + complete(&sw->rpm_complete); +} + +static void remove_switch(struct tb_switch *sw) +{ + tb_switch_downstream_port(sw)->remote = NULL; + tb_switch_remove(sw); +} + +static void add_xdomain(struct tb_switch *sw, u64 route, + const uuid_t *local_uuid, const uuid_t *remote_uuid, + u8 link, u8 depth) +{ + struct tb_xdomain *xd; + + pm_runtime_get_sync(&sw->dev); + + xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid); + if (!xd) + goto out; + + xd->link = link; + xd->depth = depth; + + tb_port_at(route, sw)->xdomain = xd; + + tb_xdomain_add(xd); + +out: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); +} + +static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link) +{ + xd->link = link; + xd->route = route; + xd->is_unplugged = false; +} + +static void remove_xdomain(struct tb_xdomain *xd) +{ + struct tb_switch *sw; + + sw = tb_to_switch(xd->dev.parent); + tb_port_at(xd->route, sw)->xdomain = NULL; + tb_xdomain_remove(xd); +} + +static void +icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) +{ + const struct icm_fr_event_device_connected *pkg = + (const struct icm_fr_event_device_connected *)hdr; + enum tb_security_level security_level; + struct tb_switch *sw, *parent_sw; + bool boot, dual_lane, speed_gen3; + struct icm *icm = tb_priv(tb); + bool authorized = false; + struct tb_xdomain *xd; + u8 link, depth; + u64 route; + int ret; + + icm_postpone_rescan(tb); + + link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; + depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> + ICM_LINK_INFO_DEPTH_SHIFT; + authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; + security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> + ICM_FLAGS_SLEVEL_SHIFT; + boot = pkg->link_info & ICM_LINK_INFO_BOOT; + dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; + speed_gen3 = pkg->hdr.flags & 
ICM_FLAGS_SPEED_GEN3; + + if (pkg->link_info & ICM_LINK_INFO_REJECTED) { + tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n", + link, depth); + return; + } + + sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); + if (sw) { + u8 phy_port, sw_phy_port; + + sw_phy_port = tb_phy_port_from_link(sw->link); + phy_port = tb_phy_port_from_link(link); + + /* + * On resume ICM will send us connected events for the + * devices that still are present. However, that + * information might have changed for example by the + * fact that a switch on a dual-link connection might + * have been enumerated using the other link now. Make + * sure our book keeping matches that. + */ + if (sw->depth == depth && sw_phy_port == phy_port && + !!sw->authorized == authorized) { + /* + * It was enumerated through another link so update + * route string accordingly. + */ + if (sw->link != link) { + ret = icm->get_route(tb, link, depth, &route); + if (ret) { + tb_err(tb, "failed to update route string for switch at %u.%u\n", + link, depth); + tb_switch_put(sw); + return; + } + } else { + route = tb_route(sw); + } + + update_switch(sw, route, pkg->connection_id, + pkg->connection_key, link, depth, boot); + tb_switch_put(sw); + return; + } + + /* + * User connected the same switch to another physical + * port or to another part of the topology. Remove the + * existing switch now before adding the new one. + */ + remove_switch(sw); + tb_switch_put(sw); + } + + /* + * If the switch was not found by UUID, look for a switch on + * same physical port (taking possible link aggregation into + * account) and depth. If we found one it is definitely a stale + * one so remove it first. + */ + sw = tb_switch_find_by_link_depth(tb, link, depth); + if (!sw) { + u8 dual_link; + + dual_link = dual_link_from_link(link); + if (dual_link) + sw = tb_switch_find_by_link_depth(tb, dual_link, depth); + } + if (sw) { + remove_switch(sw); + tb_switch_put(sw); + } + + /* Remove existing XDomain connection if found */ + xd = tb_xdomain_find_by_link_depth(tb, link, depth); + if (xd) { + remove_xdomain(xd); + tb_xdomain_put(xd); + } + + parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1); + if (!parent_sw) { + tb_err(tb, "failed to find parent switch for %u.%u\n", + link, depth); + return; + } + + ret = icm->get_route(tb, link, depth, &route); + if (ret) { + tb_err(tb, "failed to find route string for switch at %u.%u\n", + link, depth); + tb_switch_put(parent_sw); + return; + } + + pm_runtime_get_sync(&parent_sw->dev); + + sw = alloc_switch(parent_sw, route, &pkg->ep_uuid); + if (!IS_ERR(sw)) { + sw->connection_id = pkg->connection_id; + sw->connection_key = pkg->connection_key; + sw->link = link; + sw->depth = depth; + sw->authorized = authorized; + sw->security_level = security_level; + sw->boot = boot; + sw->link_speed = speed_gen3 ? 20 : 10; + sw->link_width = dual_lane ? 
TB_LINK_WIDTH_DUAL : + TB_LINK_WIDTH_SINGLE; + sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name)); + + if (add_switch(parent_sw, sw)) + tb_switch_put(sw); + } + + pm_runtime_mark_last_busy(&parent_sw->dev); + pm_runtime_put_autosuspend(&parent_sw->dev); + + tb_switch_put(parent_sw); +} + +static void +icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) +{ + const struct icm_fr_event_device_disconnected *pkg = + (const struct icm_fr_event_device_disconnected *)hdr; + struct tb_switch *sw; + u8 link, depth; + + link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; + depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> + ICM_LINK_INFO_DEPTH_SHIFT; + + if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) { + tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); + return; + } + + sw = tb_switch_find_by_link_depth(tb, link, depth); + if (!sw) { + tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link, + depth); + return; + } + + pm_runtime_get_sync(sw->dev.parent); + + remove_switch(sw); + + pm_runtime_mark_last_busy(sw->dev.parent); + pm_runtime_put_autosuspend(sw->dev.parent); + + tb_switch_put(sw); +} + +static void +icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) +{ + const struct icm_fr_event_xdomain_connected *pkg = + (const struct icm_fr_event_xdomain_connected *)hdr; + struct tb_xdomain *xd; + struct tb_switch *sw; + u8 link, depth; + u64 route; + + link = pkg->link_info & ICM_LINK_INFO_LINK_MASK; + depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >> + ICM_LINK_INFO_DEPTH_SHIFT; + + if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) { + tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth); + return; + } + + route = get_route(pkg->local_route_hi, pkg->local_route_lo); + + xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); + if (xd) { + u8 xd_phy_port, phy_port; + + xd_phy_port = phy_port_from_route(xd->route, xd->depth); + phy_port = phy_port_from_route(route, depth); + + if (xd->depth == depth && xd_phy_port == phy_port) { + update_xdomain(xd, route, link); + tb_xdomain_put(xd); + return; + } + + /* + * If we find an existing XDomain connection remove it + * now. We need to go through login handshake and + * everything anyway to be able to re-establish the + * connection. + */ + remove_xdomain(xd); + tb_xdomain_put(xd); + } + + /* + * Look if there already exists an XDomain in the same place + * than the new one and in that case remove it because it is + * most likely another host that got disconnected. + */ + xd = tb_xdomain_find_by_link_depth(tb, link, depth); + if (!xd) { + u8 dual_link; + + dual_link = dual_link_from_link(link); + if (dual_link) + xd = tb_xdomain_find_by_link_depth(tb, dual_link, + depth); + } + if (xd) { + remove_xdomain(xd); + tb_xdomain_put(xd); + } + + /* + * If the user disconnected a switch during suspend and + * connected another host to the same port, remove the switch + * first. 
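+ *
+ * Note that links come in pairs sharing one physical port, which is
+ * why the lookups above also try the sibling link:
+ * dual_link_from_link() maps 1 to 2, 2 to 1, 3 to 4 and 4 to 3.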
+ */ + sw = tb_switch_find_by_route(tb, route); + if (sw) { + remove_switch(sw); + tb_switch_put(sw); + } + + sw = tb_switch_find_by_link_depth(tb, link, depth); + if (!sw) { + tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link, + depth); + return; + } + + add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link, + depth); + tb_switch_put(sw); +} + +static void +icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) +{ + const struct icm_fr_event_xdomain_disconnected *pkg = + (const struct icm_fr_event_xdomain_disconnected *)hdr; + struct tb_xdomain *xd; + + /* + * If the connection is through one or multiple devices, the + * XDomain device is removed along with them so it is fine if we + * cannot find it here. + */ + xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); + if (xd) { + remove_xdomain(xd); + tb_xdomain_put(xd); + } +} + +static int icm_tr_cio_reset(struct tb *tb) +{ + return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1)); +} + +static int +icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, + u8 *proto_version, size_t *nboot_acl, bool *rpm) +{ + struct icm_tr_pkg_driver_ready_response reply; + struct icm_pkg_driver_ready request = { + .hdr.code = ICM_DRIVER_READY, + }; + int ret; + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, 10, 2000); + if (ret) + return ret; + + if (security_level) + *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK; + if (proto_version) + *proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >> + ICM_TR_INFO_PROTO_VERSION_SHIFT; + if (nboot_acl) + *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >> + ICM_TR_INFO_BOOT_ACL_SHIFT; + if (rpm) + *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3); + + return 0; +} + +static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw) +{ + struct icm_tr_pkg_approve_device request; + struct icm_tr_pkg_approve_device reply; + int ret; + + memset(&request, 0, sizeof(request)); + memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); + request.hdr.code = ICM_APPROVE_DEVICE; + request.route_lo = sw->config.route_lo; + request.route_hi = sw->config.route_hi; + request.connection_id = sw->connection_id; + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_APPROVE_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) { + tb_warn(tb, "PCIe tunnel creation failed\n"); + return -EIO; + } + + return 0; +} + +static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw) +{ + struct icm_tr_pkg_add_device_key_response reply; + struct icm_tr_pkg_add_device_key request; + int ret; + + memset(&request, 0, sizeof(request)); + memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); + request.hdr.code = ICM_ADD_DEVICE_KEY; + request.route_lo = sw->config.route_lo; + request.route_hi = sw->config.route_hi; + request.connection_id = sw->connection_id; + memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) { + tb_warn(tb, "Adding key to switch failed\n"); + return -EIO; + } + + return 0; +} + +static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, + const u8 *challenge, u8 *response) +{ + struct 
icm_tr_pkg_challenge_device_response reply; + struct icm_tr_pkg_challenge_device request; + int ret; + + memset(&request, 0, sizeof(request)); + memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); + request.hdr.code = ICM_CHALLENGE_DEVICE; + request.route_lo = sw->config.route_lo; + request.route_hi = sw->config.route_hi; + request.connection_id = sw->connection_id; + memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE); + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) + return -EKEYREJECTED; + if (reply.hdr.flags & ICM_FLAGS_NO_KEY) + return -ENOKEY; + + memcpy(response, reply.response, TB_SWITCH_KEY_SIZE); + + return 0; +} + +static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) +{ + struct icm_tr_pkg_approve_xdomain_response reply; + struct icm_tr_pkg_approve_xdomain request; + int ret; + + memset(&request, 0, sizeof(request)); + request.hdr.code = ICM_APPROVE_XDOMAIN; + request.route_hi = upper_32_bits(xd->route); + request.route_lo = lower_32_bits(xd->route); + request.transmit_path = transmit_path; + request.transmit_ring = transmit_ring; + request.receive_path = receive_path; + request.receive_ring = receive_ring; + memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) + return -EIO; + + return 0; +} + +static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd, + int stage) +{ + struct icm_tr_pkg_disconnect_xdomain_response reply; + struct icm_tr_pkg_disconnect_xdomain request; + int ret; + + memset(&request, 0, sizeof(request)); + request.hdr.code = ICM_DISCONNECT_XDOMAIN; + request.stage = stage; + request.route_hi = upper_32_bits(xd->route); + request.route_lo = lower_32_bits(xd->route); + memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) + return -EIO; + + return 0; +} + +static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) +{ + int ret; + + ret = icm_tr_xdomain_tear_down(tb, xd, 1); + if (ret) + return ret; + + usleep_range(10, 50); + return icm_tr_xdomain_tear_down(tb, xd, 2); +} + +static void +__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr, + bool force_rtd3) +{ + const struct icm_tr_event_device_connected *pkg = + (const struct icm_tr_event_device_connected *)hdr; + bool authorized, boot, dual_lane, speed_gen3; + enum tb_security_level security_level; + struct tb_switch *sw, *parent_sw; + struct tb_xdomain *xd; + u64 route; + + icm_postpone_rescan(tb); + + /* + * Currently we don't use the QoS information coming with the + * device connected message so simply just ignore that extra + * packet for now. 
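+ *
+ * The extra QoS data apparently arrives as a follow-up packet with a
+ * non-zero packet_id, which is what the check below filters out.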
+ */ + if (pkg->hdr.packet_id) + return; + + route = get_route(pkg->route_hi, pkg->route_lo); + authorized = pkg->link_info & ICM_LINK_INFO_APPROVED; + security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> + ICM_FLAGS_SLEVEL_SHIFT; + boot = pkg->link_info & ICM_LINK_INFO_BOOT; + dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; + speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; + + if (pkg->link_info & ICM_LINK_INFO_REJECTED) { + tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n", + route); + return; + } + + sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); + if (sw) { + /* Update the switch if it is still in the same place */ + if (tb_route(sw) == route && !!sw->authorized == authorized) { + update_switch(sw, route, pkg->connection_id, 0, 0, 0, + boot); + tb_switch_put(sw); + return; + } + + remove_switch(sw); + tb_switch_put(sw); + } + + /* Another switch with the same address */ + sw = tb_switch_find_by_route(tb, route); + if (sw) { + remove_switch(sw); + tb_switch_put(sw); + } + + /* XDomain connection with the same address */ + xd = tb_xdomain_find_by_route(tb, route); + if (xd) { + remove_xdomain(xd); + tb_xdomain_put(xd); + } + + parent_sw = tb_switch_find_by_route(tb, get_parent_route(route)); + if (!parent_sw) { + tb_err(tb, "failed to find parent switch for %llx\n", route); + return; + } + + pm_runtime_get_sync(&parent_sw->dev); + + sw = alloc_switch(parent_sw, route, &pkg->ep_uuid); + if (!IS_ERR(sw)) { + sw->connection_id = pkg->connection_id; + sw->authorized = authorized; + sw->security_level = security_level; + sw->boot = boot; + sw->link_speed = speed_gen3 ? 20 : 10; + sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL : + TB_LINK_WIDTH_SINGLE; + sw->rpm = force_rtd3; + if (!sw->rpm) + sw->rpm = intel_vss_is_rtd3(pkg->ep_name, + sizeof(pkg->ep_name)); + + if (add_switch(parent_sw, sw)) + tb_switch_put(sw); + } + + pm_runtime_mark_last_busy(&parent_sw->dev); + pm_runtime_put_autosuspend(&parent_sw->dev); + + tb_switch_put(parent_sw); +} + +static void +icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) +{ + __icm_tr_device_connected(tb, hdr, false); +} + +static void +icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) +{ + const struct icm_tr_event_device_disconnected *pkg = + (const struct icm_tr_event_device_disconnected *)hdr; + struct tb_switch *sw; + u64 route; + + route = get_route(pkg->route_hi, pkg->route_lo); + + sw = tb_switch_find_by_route(tb, route); + if (!sw) { + tb_warn(tb, "no switch exists at %llx, ignoring\n", route); + return; + } + pm_runtime_get_sync(sw->dev.parent); + + remove_switch(sw); + + pm_runtime_mark_last_busy(sw->dev.parent); + pm_runtime_put_autosuspend(sw->dev.parent); + + tb_switch_put(sw); +} + +static void +icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr) +{ + const struct icm_tr_event_xdomain_connected *pkg = + (const struct icm_tr_event_xdomain_connected *)hdr; + struct tb_xdomain *xd; + struct tb_switch *sw; + u64 route; + + if (!tb->root_switch) + return; + + route = get_route(pkg->local_route_hi, pkg->local_route_lo); + + xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid); + if (xd) { + if (xd->route == route) { + update_xdomain(xd, route, 0); + tb_xdomain_put(xd); + return; + } + + remove_xdomain(xd); + tb_xdomain_put(xd); + } + + /* An existing xdomain with the same address */ + xd = tb_xdomain_find_by_route(tb, route); + if (xd) { + remove_xdomain(xd); + tb_xdomain_put(xd); + } + + /* + * If the user disconnected a 
switch during suspend and + * connected another host to the same port, remove the switch + * first. + */ + sw = tb_switch_find_by_route(tb, route); + if (sw) { + remove_switch(sw); + tb_switch_put(sw); + } + + sw = tb_switch_find_by_route(tb, get_parent_route(route)); + if (!sw) { + tb_warn(tb, "no switch exists at %llx, ignoring\n", route); + return; + } + + add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0); + tb_switch_put(sw); +} + +static void +icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) +{ + const struct icm_tr_event_xdomain_disconnected *pkg = + (const struct icm_tr_event_xdomain_disconnected *)hdr; + struct tb_xdomain *xd; + u64 route; + + route = get_route(pkg->route_hi, pkg->route_lo); + + xd = tb_xdomain_find_by_route(tb, route); + if (xd) { + remove_xdomain(xd); + tb_xdomain_put(xd); + } +} + +static struct pci_dev *get_upstream_port(struct pci_dev *pdev) +{ + struct pci_dev *parent; + + parent = pci_upstream_bridge(pdev); + while (parent) { + if (!pci_is_pcie(parent)) + return NULL; + if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) + break; + parent = pci_upstream_bridge(parent); + } + + if (!parent) + return NULL; + + switch (parent->device) { + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: + return parent; + } + + return NULL; +} + +static bool icm_ar_is_supported(struct tb *tb) +{ + struct pci_dev *upstream_port; + struct icm *icm = tb_priv(tb); + + /* + * Starting from Alpine Ridge we can use ICM on Apple machines + * as well. We just need to reset and re-enable it first. + * However, only start it if explicitly asked by the user. + */ + if (icm_firmware_running(tb->nhi)) + return true; + if (!start_icm) + return false; + + /* + * Find the upstream PCIe port in case we need to do reset + * through its vendor specific registers. 
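+ *
+ * The vendor specific extended capability looked up below hosts the
+ * PCIe2CIO mailbox (PCIE2CIO_CMD, PCIE2CIO_WRDATA and PCIE2CIO_RDDATA
+ * at offsets 0x30, 0x34 and 0x38 from the capability) used by
+ * icm_ar_cio_reset() and icm_reset_phy_port(). For reference, the
+ * command word layout follows the PCIE2CIO_CMD_* defines above:
+ *
+ *	bit 31		timeout status, set by the hardware
+ *	bit 30		start
+ *	bit 21		write (otherwise a read)
+ *	bits 20:19	config space
+ *	bits 18:13	port number
+ *	low bits	register index within that config space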
+ */ + upstream_port = get_upstream_port(tb->nhi->pdev); + if (upstream_port) { + int cap; + + cap = pci_find_ext_capability(upstream_port, + PCI_EXT_CAP_ID_VNDR); + if (cap > 0) { + icm->upstream_port = upstream_port; + icm->vnd_cap = cap; + + return true; + } + } + + return false; +} + +static int icm_ar_cio_reset(struct tb *tb) +{ + return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9)); +} + +static int icm_ar_get_mode(struct tb *tb) +{ + struct tb_nhi *nhi = tb->nhi; + int retries = 60; + u32 val; + + do { + val = ioread32(nhi->iobase + REG_FW_STS); + if (val & REG_FW_STS_NVM_AUTH_DONE) + break; + msleep(50); + } while (--retries); + + if (!retries) { + dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n"); + return -ENODEV; + } + + return nhi_mailbox_mode(nhi); +} + +static int +icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level, + u8 *proto_version, size_t *nboot_acl, bool *rpm) +{ + struct icm_ar_pkg_driver_ready_response reply; + struct icm_pkg_driver_ready request = { + .hdr.code = ICM_DRIVER_READY, + }; + int ret; + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (security_level) + *security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK; + if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED)) + *nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >> + ICM_AR_INFO_BOOT_ACL_SHIFT; + if (rpm) + *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3); + + return 0; +} + +static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) +{ + struct icm_ar_pkg_get_route_response reply; + struct icm_ar_pkg_get_route request = { + .hdr = { .code = ICM_GET_ROUTE }, + .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link, + }; + int ret; + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) + return -EIO; + + *route = get_route(reply.route_hi, reply.route_lo); + return 0; +} + +static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids) +{ + struct icm_ar_pkg_preboot_acl_response reply; + struct icm_ar_pkg_preboot_acl request = { + .hdr = { .code = ICM_PREBOOT_ACL }, + }; + int ret, i; + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) + return -EIO; + + for (i = 0; i < nuuids; i++) { + u32 *uuid = (u32 *)&uuids[i]; + + uuid[0] = reply.acl[i].uuid_lo; + uuid[1] = reply.acl[i].uuid_hi; + + if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) { + /* Map empty entries to null UUID */ + uuid[0] = 0; + uuid[1] = 0; + } else if (uuid[0] != 0 || uuid[1] != 0) { + /* Upper two DWs are always one's */ + uuid[2] = 0xffffffff; + uuid[3] = 0xffffffff; + } + } + + return ret; +} + +static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids, + size_t nuuids) +{ + struct icm_ar_pkg_preboot_acl_response reply; + struct icm_ar_pkg_preboot_acl request = { + .hdr = { + .code = ICM_PREBOOT_ACL, + .flags = ICM_FLAGS_WRITE, + }, + }; + int ret, i; + + for (i = 0; i < nuuids; i++) { + const u32 *uuid = (const u32 *)&uuids[i]; + + if (uuid_is_null(&uuids[i])) { + /* + * Map null UUID to the empty (all one) entries + * for ICM. 
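+ *
+ * ICM keeps only the low 64 bits of each ACL UUID; the high 64
+ * bits must be all ones (checked in the else branch below) and
+ * an all-ones low half marks an empty slot. A minimal sketch of
+ * the mapping, viewing the uuid_t as four 32-bit words like the
+ * code here does (illustration only):
+ *
+ *	empty slot:  uuid_lo == 0xffffffff && uuid_hi == 0xffffffff
+ *	valid entry: uuid[2] == 0xffffffff && uuid[3] == 0xffffffff,
+ *		     uuid[0] and uuid[1] carried in uuid_lo/uuid_hi
+ *
+ * icm_ar_get_boot_acl() above applies the same convention in the
+ * other direction.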
+ */ + request.acl[i].uuid_lo = 0xffffffff; + request.acl[i].uuid_hi = 0xffffffff; + } else { + /* Two high DWs need to be set to all one */ + if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff) + return -EINVAL; + + request.acl[i].uuid_lo = uuid[0]; + request.acl[i].uuid_hi = uuid[1]; + } + } + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) + return -EIO; + + return 0; +} + +static int +icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, + u8 *proto_version, size_t *nboot_acl, bool *rpm) +{ + struct icm_tr_pkg_driver_ready_response reply; + struct icm_pkg_driver_ready request = { + .hdr.code = ICM_DRIVER_READY, + }; + int ret; + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, 20000); + if (ret) + return ret; + + if (proto_version) + *proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >> + ICM_TR_INFO_PROTO_VERSION_SHIFT; + + /* Ice Lake always supports RTD3 */ + if (rpm) + *rpm = true; + + return 0; +} + +static void icm_icl_set_uuid(struct tb *tb) +{ + struct tb_nhi *nhi = tb->nhi; + u32 uuid[4]; + + pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]); + pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]); + uuid[2] = 0xffffffff; + uuid[3] = 0xffffffff; + + tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); +} + +static void +icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) +{ + __icm_tr_device_connected(tb, hdr, true); +} + +static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr) +{ + const struct icm_icl_event_rtd3_veto *pkg = + (const struct icm_icl_event_rtd3_veto *)hdr; + + tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason); + + if (pkg->veto_reason) + icm_veto_begin(tb); + else + icm_veto_end(tb); +} + +static bool icm_tgl_is_supported(struct tb *tb) +{ + unsigned long end = jiffies + msecs_to_jiffies(10); + + do { + u32 val; + + val = ioread32(tb->nhi->iobase + REG_FW_STS); + if (val & REG_FW_STS_NVM_AUTH_DONE) + return true; + usleep_range(100, 500); + } while (time_before(jiffies, end)); + + return false; +} + +static void icm_handle_notification(struct work_struct *work) +{ + struct icm_notification *n = container_of(work, typeof(*n), work); + struct tb *tb = n->tb; + struct icm *icm = tb_priv(tb); + + mutex_lock(&tb->lock); + + /* + * When the domain is stopped we flush its workqueue but before + * that the root switch is removed. In that case we should treat + * the queued events as being canceled. 
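+ *
+ * The individual handlers may sleep (several of them send further
+ * ICM requests), which is presumably why icm_handle_event() defers
+ * them to the domain workqueue instead of running them directly
+ * from the event callback.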
+ */ + if (tb->root_switch) { + switch (n->pkg->code) { + case ICM_EVENT_DEVICE_CONNECTED: + icm->device_connected(tb, n->pkg); + break; + case ICM_EVENT_DEVICE_DISCONNECTED: + icm->device_disconnected(tb, n->pkg); + break; + case ICM_EVENT_XDOMAIN_CONNECTED: + if (tb_is_xdomain_enabled()) + icm->xdomain_connected(tb, n->pkg); + break; + case ICM_EVENT_XDOMAIN_DISCONNECTED: + if (tb_is_xdomain_enabled()) + icm->xdomain_disconnected(tb, n->pkg); + break; + case ICM_EVENT_RTD3_VETO: + icm->rtd3_veto(tb, n->pkg); + break; + } + } + + mutex_unlock(&tb->lock); + + kfree(n->pkg); + kfree(n); +} + +static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, + const void *buf, size_t size) +{ + struct icm_notification *n; + + n = kmalloc(sizeof(*n), GFP_KERNEL); + if (!n) + return; + + n->pkg = kmemdup(buf, size, GFP_KERNEL); + if (!n->pkg) { + kfree(n); + return; + } + + INIT_WORK(&n->work, icm_handle_notification); + n->tb = tb; + + queue_work(tb->wq, &n->work); +} + +static int +__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level, + u8 *proto_version, size_t *nboot_acl, bool *rpm) +{ + struct icm *icm = tb_priv(tb); + unsigned int retries = 50; + int ret; + + ret = icm->driver_ready(tb, security_level, proto_version, nboot_acl, + rpm); + if (ret) { + tb_err(tb, "failed to send driver ready to ICM\n"); + return ret; + } + + /* + * Hold on here until the switch config space is accessible so + * that we can read root switch config successfully. + */ + do { + struct tb_cfg_result res; + u32 tmp; + + res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH, + 0, 1, 100); + if (!res.err) + return 0; + + msleep(50); + } while (--retries); + + tb_err(tb, "failed to read root switch config space, giving up\n"); + return -ETIMEDOUT; +} + +static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi) +{ + struct icm *icm = tb_priv(tb); + u32 val; + + if (!icm->upstream_port) + return -ENODEV; + + /* Put ARC to wait for CIO reset event to happen */ + val = ioread32(nhi->iobase + REG_FW_STS); + val |= REG_FW_STS_CIO_RESET_REQ; + iowrite32(val, nhi->iobase + REG_FW_STS); + + /* Re-start ARC */ + val = ioread32(nhi->iobase + REG_FW_STS); + val |= REG_FW_STS_ICM_EN_INVERT; + val |= REG_FW_STS_ICM_EN_CPU; + iowrite32(val, nhi->iobase + REG_FW_STS); + + /* Trigger CIO reset now */ + return icm->cio_reset(tb); +} + +static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi) +{ + unsigned int retries = 10; + int ret; + u32 val; + + /* Check if the ICM firmware is already running */ + if (icm_firmware_running(nhi)) + return 0; + + dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n"); + + ret = icm_firmware_reset(tb, nhi); + if (ret) + return ret; + + /* Wait until the ICM firmware tells us it is up and running */ + do { + /* Check that the ICM firmware is running */ + val = ioread32(nhi->iobase + REG_FW_STS); + if (val & REG_FW_STS_NVM_AUTH_DONE) + return 0; + + msleep(300); + } while (--retries); + + return -ETIMEDOUT; +} + +static int icm_reset_phy_port(struct tb *tb, int phy_port) +{ + struct icm *icm = tb_priv(tb); + u32 state0, state1; + int port0, port1; + u32 val0, val1; + int ret; + + if (!icm->upstream_port) + return 0; + + if (phy_port) { + port0 = 3; + port1 = 4; + } else { + port0 = 1; + port1 = 2; + } + + /* + * Read link status of both null ports belonging to a single + * physical port. 
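+ *
+ * Physical port 0 maps to CIO ports 1 and 2 and physical port 1
+ * to ports 3 and 4 (see the port0/port1 selection above). If both
+ * links turn out to be up they are disabled, kept down briefly and
+ * then re-enabled further below.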
+ */ + ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0); + if (ret) + return ret; + ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1); + if (ret) + return ret; + + state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK; + state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; + state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK; + state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT; + + /* If they are both up we need to reset them now */ + if (state0 != TB_PORT_UP || state1 != TB_PORT_UP) + return 0; + + val0 |= PHY_PORT_CS1_LINK_DISABLE; + ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0); + if (ret) + return ret; + + val1 |= PHY_PORT_CS1_LINK_DISABLE; + ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1); + if (ret) + return ret; + + /* Wait a bit and then re-enable both ports */ + usleep_range(10, 100); + + ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0); + if (ret) + return ret; + ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1); + if (ret) + return ret; + + val0 &= ~PHY_PORT_CS1_LINK_DISABLE; + ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0); + if (ret) + return ret; + + val1 &= ~PHY_PORT_CS1_LINK_DISABLE; + return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1); +} + +static int icm_firmware_init(struct tb *tb) +{ + struct icm *icm = tb_priv(tb); + struct tb_nhi *nhi = tb->nhi; + int ret; + + ret = icm_firmware_start(tb, nhi); + if (ret) { + dev_err(&nhi->pdev->dev, "could not start ICM firmware\n"); + return ret; + } + + if (icm->get_mode) { + ret = icm->get_mode(tb); + + switch (ret) { + case NHI_FW_SAFE_MODE: + icm->safe_mode = true; + break; + + case NHI_FW_CM_MODE: + /* Ask ICM to accept all Thunderbolt devices */ + nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0); + break; + + default: + if (ret < 0) + return ret; + + tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret); + return -ENODEV; + } + } + + /* + * Reset both physical ports if there is anything connected to + * them already. + */ + ret = icm_reset_phy_port(tb, 0); + if (ret) + dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n"); + ret = icm_reset_phy_port(tb, 1); + if (ret) + dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n"); + + return 0; +} + +static int icm_driver_ready(struct tb *tb) +{ + struct icm *icm = tb_priv(tb); + int ret; + + ret = icm_firmware_init(tb); + if (ret) + return ret; + + if (icm->safe_mode) { + tb_info(tb, "Thunderbolt host controller is in safe mode.\n"); + tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n"); + tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n"); + return 0; + } + + ret = __icm_driver_ready(tb, &tb->security_level, &icm->proto_version, + &tb->nboot_acl, &icm->rpm); + if (ret) + return ret; + + /* + * Make sure the number of supported preboot ACL matches what we + * expect or disable the whole feature. + */ + if (tb->nboot_acl > icm->max_boot_acl) + tb->nboot_acl = 0; + + if (icm->proto_version >= 3) + tb_dbg(tb, "USB4 proxy operations supported\n"); + + return 0; +} + +static int icm_suspend(struct tb *tb) +{ + struct icm *icm = tb_priv(tb); + + if (icm->save_devices) + icm->save_devices(tb); + + nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); + return 0; +} + +/* + * Mark all switches (except root switch) below this one unplugged. ICM + * firmware will send us an updated list of switches after we have send + * it driver ready command. 
If a switch is not in that list it will be + * removed when we perform rescan. + */ +static void icm_unplug_children(struct tb_switch *sw) +{ + struct tb_port *port; + + if (tb_route(sw)) + sw->is_unplugged = true; + + tb_switch_for_each_port(sw, port) { + if (port->xdomain) + port->xdomain->is_unplugged = true; + else if (tb_port_has_remote(port)) + icm_unplug_children(port->remote->sw); + } +} + +static int complete_rpm(struct device *dev, void *data) +{ + struct tb_switch *sw = tb_to_switch(dev); + + if (sw) + complete(&sw->rpm_complete); + return 0; +} + +static void remove_unplugged_switch(struct tb_switch *sw) +{ + struct device *parent = get_device(sw->dev.parent); + + pm_runtime_get_sync(parent); + + /* + * Signal this and switches below for rpm_complete because + * tb_switch_remove() calls pm_runtime_get_sync() that then waits + * for it. + */ + complete_rpm(&sw->dev, NULL); + bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm); + tb_switch_remove(sw); + + pm_runtime_mark_last_busy(parent); + pm_runtime_put_autosuspend(parent); + + put_device(parent); +} + +static void icm_free_unplugged_children(struct tb_switch *sw) +{ + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + if (port->xdomain && port->xdomain->is_unplugged) { + tb_xdomain_remove(port->xdomain); + port->xdomain = NULL; + } else if (tb_port_has_remote(port)) { + if (port->remote->sw->is_unplugged) { + remove_unplugged_switch(port->remote->sw); + port->remote = NULL; + } else { + icm_free_unplugged_children(port->remote->sw); + } + } + } +} + +static void icm_rescan_work(struct work_struct *work) +{ + struct icm *icm = container_of(work, struct icm, rescan_work.work); + struct tb *tb = icm_to_tb(icm); + + mutex_lock(&tb->lock); + if (tb->root_switch) + icm_free_unplugged_children(tb->root_switch); + mutex_unlock(&tb->lock); +} + +static void icm_complete(struct tb *tb) +{ + struct icm *icm = tb_priv(tb); + + if (tb->nhi->going_away) + return; + + /* + * If RTD3 was vetoed before we entered system suspend allow it + * again now before driver ready is sent. Firmware sends a new RTD3 + * veto if it is still the case after we have sent it driver ready + * command. + */ + icm_veto_end(tb); + icm_unplug_children(tb->root_switch); + + /* + * Now all existing children should be resumed, start events + * from ICM to get updated status. + */ + __icm_driver_ready(tb, NULL, NULL, NULL, NULL); + + /* + * We do not get notifications of devices that have been + * unplugged during suspend so schedule rescan to clean them up + * if any. + */ + queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500)); +} + +static int icm_runtime_suspend(struct tb *tb) +{ + nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); + return 0; +} + +static int icm_runtime_suspend_switch(struct tb_switch *sw) +{ + if (tb_route(sw)) + reinit_completion(&sw->rpm_complete); + return 0; +} + +static int icm_runtime_resume_switch(struct tb_switch *sw) +{ + if (tb_route(sw)) { + if (!wait_for_completion_timeout(&sw->rpm_complete, + msecs_to_jiffies(500))) { + dev_dbg(&sw->dev, "runtime resuming timed out\n"); + } + } + return 0; +} + +static int icm_runtime_resume(struct tb *tb) +{ + /* + * We can reuse the same resume functionality than with system + * suspend. 
+ */ + icm_complete(tb); + return 0; +} + +static int icm_start(struct tb *tb) +{ + struct icm *icm = tb_priv(tb); + int ret; + + if (icm->safe_mode) + tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0); + else + tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); + if (IS_ERR(tb->root_switch)) + return PTR_ERR(tb->root_switch); + + tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm; + tb->root_switch->rpm = icm->rpm; + + if (icm->set_uuid) + icm->set_uuid(tb); + + ret = tb_switch_add(tb->root_switch); + if (ret) { + tb_switch_put(tb->root_switch); + tb->root_switch = NULL; + } + + return ret; +} + +static void icm_stop(struct tb *tb) +{ + struct icm *icm = tb_priv(tb); + + cancel_delayed_work(&icm->rescan_work); + tb_switch_remove(tb->root_switch); + tb->root_switch = NULL; + nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); + kfree(icm->last_nvm_auth); + icm->last_nvm_auth = NULL; +} + +static int icm_disconnect_pcie_paths(struct tb *tb) +{ + return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0); +} + +static void icm_usb4_switch_nvm_auth_complete(void *data) +{ + struct usb4_switch_nvm_auth *auth = data; + struct icm *icm = auth->icm; + struct tb *tb = icm_to_tb(icm); + + tb_dbg(tb, "NVM_AUTH response for %llx flags %#x status %#x\n", + get_route(auth->reply.route_hi, auth->reply.route_lo), + auth->reply.hdr.flags, auth->reply.status); + + mutex_lock(&tb->lock); + if (WARN_ON(icm->last_nvm_auth)) + kfree(icm->last_nvm_auth); + icm->last_nvm_auth = auth; + mutex_unlock(&tb->lock); +} + +static int icm_usb4_switch_nvm_authenticate(struct tb *tb, u64 route) +{ + struct usb4_switch_nvm_auth *auth; + struct icm *icm = tb_priv(tb); + struct tb_cfg_request *req; + int ret; + + auth = kzalloc(sizeof(*auth), GFP_KERNEL); + if (!auth) + return -ENOMEM; + + auth->icm = icm; + auth->request.hdr.code = ICM_USB4_SWITCH_OP; + auth->request.route_hi = upper_32_bits(route); + auth->request.route_lo = lower_32_bits(route); + auth->request.opcode = USB4_SWITCH_OP_NVM_AUTH; + + req = tb_cfg_request_alloc(); + if (!req) { + ret = -ENOMEM; + goto err_free_auth; + } + + req->match = icm_match; + req->copy = icm_copy; + req->request = &auth->request; + req->request_size = sizeof(auth->request); + req->request_type = TB_CFG_PKG_ICM_CMD; + req->response = &auth->reply; + req->npackets = 1; + req->response_size = sizeof(auth->reply); + req->response_type = TB_CFG_PKG_ICM_RESP; + + tb_dbg(tb, "NVM_AUTH request for %llx\n", route); + + mutex_lock(&icm->request_lock); + ret = tb_cfg_request(tb->ctl, req, icm_usb4_switch_nvm_auth_complete, + auth); + mutex_unlock(&icm->request_lock); + + tb_cfg_request_put(req); + if (ret) + goto err_free_auth; + return 0; + +err_free_auth: + kfree(auth); + return ret; +} + +static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, + u8 *status, const void *tx_data, size_t tx_data_len, + void *rx_data, size_t rx_data_len) +{ + struct icm_usb4_switch_op_response reply; + struct icm_usb4_switch_op request; + struct tb *tb = sw->tb; + struct icm *icm = tb_priv(tb); + u64 route = tb_route(sw); + int ret; + + /* + * USB4 router operation proxy is supported in firmware if the + * protocol version is 3 or higher. + */ + if (icm->proto_version < 3) + return -EOPNOTSUPP; + + /* + * NVM_AUTH is a special USB4 proxy operation that does not + * return immediately so handle it separately. 
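+ *
+ * Its reply arrives asynchronously through
+ * icm_usb4_switch_nvm_auth_complete() above and is cached in
+ * icm->last_nvm_auth until it is consumed by
+ * icm_usb4_switch_nvm_authenticate_status() below.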
+ */ + if (opcode == USB4_SWITCH_OP_NVM_AUTH) + return icm_usb4_switch_nvm_authenticate(tb, route); + + memset(&request, 0, sizeof(request)); + request.hdr.code = ICM_USB4_SWITCH_OP; + request.route_hi = upper_32_bits(route); + request.route_lo = lower_32_bits(route); + request.opcode = opcode; + if (metadata) + request.metadata = *metadata; + + if (tx_data_len) { + request.data_len_valid |= ICM_USB4_SWITCH_DATA_VALID; + if (tx_data_len < ARRAY_SIZE(request.data)) + request.data_len_valid = + tx_data_len & ICM_USB4_SWITCH_DATA_LEN_MASK; + memcpy(request.data, tx_data, tx_data_len * sizeof(u32)); + } + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_RETRIES, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) + return -EIO; + + if (status) + *status = reply.status; + + if (metadata) + *metadata = reply.metadata; + + if (rx_data_len) + memcpy(rx_data, reply.data, rx_data_len * sizeof(u32)); + + return 0; +} + +static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw, + u32 *status) +{ + struct usb4_switch_nvm_auth *auth; + struct tb *tb = sw->tb; + struct icm *icm = tb_priv(tb); + int ret = 0; + + if (icm->proto_version < 3) + return -EOPNOTSUPP; + + auth = icm->last_nvm_auth; + icm->last_nvm_auth = NULL; + + if (auth && auth->reply.route_hi == sw->config.route_hi && + auth->reply.route_lo == sw->config.route_lo) { + tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n", + tb_route(sw), auth->reply.hdr.flags, auth->reply.status); + if (auth->reply.hdr.flags & ICM_FLAGS_ERROR) + ret = -EIO; + else + *status = auth->reply.status; + } else { + *status = 0; + } + + kfree(auth); + return ret; +} + +/* Falcon Ridge */ +static const struct tb_cm_ops icm_fr_ops = { + .driver_ready = icm_driver_ready, + .start = icm_start, + .stop = icm_stop, + .suspend = icm_suspend, + .complete = icm_complete, + .handle_event = icm_handle_event, + .approve_switch = icm_fr_approve_switch, + .add_switch_key = icm_fr_add_switch_key, + .challenge_switch_key = icm_fr_challenge_switch_key, + .disconnect_pcie_paths = icm_disconnect_pcie_paths, + .approve_xdomain_paths = icm_fr_approve_xdomain_paths, + .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths, +}; + +/* Alpine Ridge */ +static const struct tb_cm_ops icm_ar_ops = { + .driver_ready = icm_driver_ready, + .start = icm_start, + .stop = icm_stop, + .suspend = icm_suspend, + .complete = icm_complete, + .runtime_suspend = icm_runtime_suspend, + .runtime_resume = icm_runtime_resume, + .runtime_suspend_switch = icm_runtime_suspend_switch, + .runtime_resume_switch = icm_runtime_resume_switch, + .handle_event = icm_handle_event, + .get_boot_acl = icm_ar_get_boot_acl, + .set_boot_acl = icm_ar_set_boot_acl, + .approve_switch = icm_fr_approve_switch, + .add_switch_key = icm_fr_add_switch_key, + .challenge_switch_key = icm_fr_challenge_switch_key, + .disconnect_pcie_paths = icm_disconnect_pcie_paths, + .approve_xdomain_paths = icm_fr_approve_xdomain_paths, + .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths, +}; + +/* Titan Ridge */ +static const struct tb_cm_ops icm_tr_ops = { + .driver_ready = icm_driver_ready, + .start = icm_start, + .stop = icm_stop, + .suspend = icm_suspend, + .complete = icm_complete, + .runtime_suspend = icm_runtime_suspend, + .runtime_resume = icm_runtime_resume, + .runtime_suspend_switch = icm_runtime_suspend_switch, + .runtime_resume_switch = icm_runtime_resume_switch, + .handle_event = icm_handle_event, + 
.get_boot_acl = icm_ar_get_boot_acl, + .set_boot_acl = icm_ar_set_boot_acl, + .approve_switch = icm_tr_approve_switch, + .add_switch_key = icm_tr_add_switch_key, + .challenge_switch_key = icm_tr_challenge_switch_key, + .disconnect_pcie_paths = icm_disconnect_pcie_paths, + .approve_xdomain_paths = icm_tr_approve_xdomain_paths, + .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, + .usb4_switch_op = icm_usb4_switch_op, + .usb4_switch_nvm_authenticate_status = + icm_usb4_switch_nvm_authenticate_status, +}; + +/* Ice Lake */ +static const struct tb_cm_ops icm_icl_ops = { + .driver_ready = icm_driver_ready, + .start = icm_start, + .stop = icm_stop, + .complete = icm_complete, + .runtime_suspend = icm_runtime_suspend, + .runtime_resume = icm_runtime_resume, + .handle_event = icm_handle_event, + .approve_xdomain_paths = icm_tr_approve_xdomain_paths, + .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, + .usb4_switch_op = icm_usb4_switch_op, + .usb4_switch_nvm_authenticate_status = + icm_usb4_switch_nvm_authenticate_status, +}; + +struct tb *icm_probe(struct tb_nhi *nhi) +{ + struct icm *icm; + struct tb *tb; + + tb = tb_domain_alloc(nhi, ICM_TIMEOUT, sizeof(struct icm)); + if (!tb) + return NULL; + + icm = tb_priv(tb); + INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work); + mutex_init(&icm->request_lock); + + switch (nhi->pdev->device) { + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: + icm->can_upgrade_nvm = true; + icm->is_supported = icm_fr_is_supported; + icm->get_route = icm_fr_get_route; + icm->save_devices = icm_fr_save_devices; + icm->driver_ready = icm_fr_driver_ready; + icm->device_connected = icm_fr_device_connected; + icm->device_disconnected = icm_fr_device_disconnected; + icm->xdomain_connected = icm_fr_xdomain_connected; + icm->xdomain_disconnected = icm_fr_xdomain_disconnected; + tb->cm_ops = &icm_fr_ops; + break; + + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI: + icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; + /* + * NVM upgrade has not been tested on Apple systems and + * they don't provide images publicly either. To be on + * the safe side prevent root switch NVM upgrade on Macs + * for now. 
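[Editorial note: each case in icm_probe() fills in the per-controller icm callbacks and points tb->cm_ops at one of the tb_cm_ops tables above. A hedged sketch of how the domain core then dispatches through those hooks; the real wrappers live in domain.c, which is not part of this file, so demo_domain_runtime_resume() is illustrative only.]

#include "tb.h"

static int demo_domain_runtime_resume(struct tb *tb)
{
	if (tb->cm_ops->runtime_resume)
		return tb->cm_ops->runtime_resume(tb);
	return 0;
}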
+ */ + icm->can_upgrade_nvm = !x86_apple_machine; + icm->is_supported = icm_ar_is_supported; + icm->cio_reset = icm_ar_cio_reset; + icm->get_mode = icm_ar_get_mode; + icm->get_route = icm_ar_get_route; + icm->save_devices = icm_fr_save_devices; + icm->driver_ready = icm_ar_driver_ready; + icm->device_connected = icm_fr_device_connected; + icm->device_disconnected = icm_fr_device_disconnected; + icm->xdomain_connected = icm_fr_xdomain_connected; + icm->xdomain_disconnected = icm_fr_xdomain_disconnected; + tb->cm_ops = &icm_ar_ops; + break; + + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI: + icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES; + icm->can_upgrade_nvm = !x86_apple_machine; + icm->is_supported = icm_ar_is_supported; + icm->cio_reset = icm_tr_cio_reset; + icm->get_mode = icm_ar_get_mode; + icm->driver_ready = icm_tr_driver_ready; + icm->device_connected = icm_tr_device_connected; + icm->device_disconnected = icm_tr_device_disconnected; + icm->xdomain_connected = icm_tr_xdomain_connected; + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; + tb->cm_ops = &icm_tr_ops; + break; + + case PCI_DEVICE_ID_INTEL_ICL_NHI0: + case PCI_DEVICE_ID_INTEL_ICL_NHI1: + icm->is_supported = icm_fr_is_supported; + icm->driver_ready = icm_icl_driver_ready; + icm->set_uuid = icm_icl_set_uuid; + icm->device_connected = icm_icl_device_connected; + icm->device_disconnected = icm_tr_device_disconnected; + icm->xdomain_connected = icm_tr_xdomain_connected; + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; + icm->rtd3_veto = icm_icl_rtd3_veto; + tb->cm_ops = &icm_icl_ops; + break; + + case PCI_DEVICE_ID_INTEL_TGL_NHI0: + case PCI_DEVICE_ID_INTEL_TGL_NHI1: + case PCI_DEVICE_ID_INTEL_TGL_H_NHI0: + case PCI_DEVICE_ID_INTEL_TGL_H_NHI1: + case PCI_DEVICE_ID_INTEL_ADL_NHI0: + case PCI_DEVICE_ID_INTEL_ADL_NHI1: + case PCI_DEVICE_ID_INTEL_RPL_NHI0: + case PCI_DEVICE_ID_INTEL_RPL_NHI1: + case PCI_DEVICE_ID_INTEL_MTL_M_NHI0: + case PCI_DEVICE_ID_INTEL_MTL_P_NHI0: + case PCI_DEVICE_ID_INTEL_MTL_P_NHI1: + icm->is_supported = icm_tgl_is_supported; + icm->driver_ready = icm_icl_driver_ready; + icm->set_uuid = icm_icl_set_uuid; + icm->device_connected = icm_icl_device_connected; + icm->device_disconnected = icm_tr_device_disconnected; + icm->xdomain_connected = icm_tr_xdomain_connected; + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; + icm->rtd3_veto = icm_icl_rtd3_veto; + tb->cm_ops = &icm_icl_ops; + break; + + case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI: + case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI: + icm->is_supported = icm_tgl_is_supported; + icm->get_mode = icm_ar_get_mode; + icm->driver_ready = icm_tr_driver_ready; + icm->device_connected = icm_tr_device_connected; + icm->device_disconnected = icm_tr_device_disconnected; + icm->xdomain_connected = icm_tr_xdomain_connected; + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; + tb->cm_ops = &icm_tr_ops; + break; + } + + if (!icm->is_supported || !icm->is_supported(tb)) { + dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n"); + tb_domain_put(tb); + return NULL; + } + + tb_dbg(tb, "using firmware connection manager\n"); + + return tb; +} diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c new file mode 100644 index 0000000000..633970fbe9 --- /dev/null +++ b/drivers/thunderbolt/lc.c @@ -0,0 +1,644 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt link controller support + * + * Copyright (C) 2019, Intel Corporation + * Author: Mika Westerberg 
<mika.westerberg@linux.intel.com> + */ + +#include "tb.h" + +/** + * tb_lc_read_uuid() - Read switch UUID from link controller common register + * @sw: Switch whose UUID is read + * @uuid: UUID is placed here + */ +int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid) +{ + if (!sw->cap_lc) + return -EINVAL; + return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4); +} + +static int read_lc_desc(struct tb_switch *sw, u32 *desc) +{ + if (!sw->cap_lc) + return -EINVAL; + return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1); +} + +static int find_port_lc_cap(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + int start, phys, ret, size; + u32 desc; + + ret = read_lc_desc(sw, &desc); + if (ret) + return ret; + + /* Start of port LC registers */ + start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT; + size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT; + phys = tb_phy_port_from_link(port->port); + + return sw->cap_lc + start + phys * size; +} + +static int tb_lc_set_port_configured(struct tb_port *port, bool configured) +{ + bool upstream = tb_is_upstream_port(port); + struct tb_switch *sw = port->sw; + u32 ctrl, lane; + int cap, ret; + + if (sw->generation < 2) + return 0; + + cap = find_port_lc_cap(port); + if (cap < 0) + return cap; + + ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); + if (ret) + return ret; + + /* Resolve correct lane */ + if (port->port % 2) + lane = TB_LC_SX_CTRL_L1C; + else + lane = TB_LC_SX_CTRL_L2C; + + if (configured) { + ctrl |= lane; + if (upstream) + ctrl |= TB_LC_SX_CTRL_UPSTREAM; + } else { + ctrl &= ~lane; + if (upstream) + ctrl &= ~TB_LC_SX_CTRL_UPSTREAM; + } + + return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); +} + +/** + * tb_lc_configure_port() - Let LC know about configured port + * @port: Port that is set as configured + * + * Sets the port configured for power management purposes. + */ +int tb_lc_configure_port(struct tb_port *port) +{ + return tb_lc_set_port_configured(port, true); +} + +/** + * tb_lc_unconfigure_port() - Let LC know about unconfigured port + * @port: Port that is set as configured + * + * Sets the port unconfigured for power management purposes. + */ +void tb_lc_unconfigure_port(struct tb_port *port) +{ + tb_lc_set_port_configured(port, false); +} + +static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure) +{ + struct tb_switch *sw = port->sw; + u32 ctrl, lane; + int cap, ret; + + if (sw->generation < 2) + return 0; + + cap = find_port_lc_cap(port); + if (cap < 0) + return cap; + + ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); + if (ret) + return ret; + + /* Resolve correct lane */ + if (port->port % 2) + lane = TB_LC_SX_CTRL_L1D; + else + lane = TB_LC_SX_CTRL_L2D; + + if (configure) + ctrl |= lane; + else + ctrl &= ~lane; + + return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); +} + +/** + * tb_lc_configure_xdomain() - Inform LC that the link is XDomain + * @port: Switch downstream port connected to another host + * + * Sets the lane configured for XDomain accordingly so that the LC knows + * about this. Returns %0 in success and negative errno in failure. + */ +int tb_lc_configure_xdomain(struct tb_port *port) +{ + return tb_lc_set_xdomain_configured(port, true); +} + +/** + * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port + * @port: Switch downstream port that was connected to another host + * + * Unsets the lane XDomain configuration. 
+ */ +void tb_lc_unconfigure_xdomain(struct tb_port *port) +{ + tb_lc_set_xdomain_configured(port, false); +} + +/** + * tb_lc_start_lane_initialization() - Start lane initialization + * @port: Device router lane 0 adapter + * + * Starts lane initialization for @port after the router resumed from + * sleep. Should be called for those downstream lane adapters that were + * not connected (tb_lc_configure_port() was not called) before sleep. + * + * Returns %0 in success and negative errno in case of failure. + */ +int tb_lc_start_lane_initialization(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + int ret, cap; + u32 ctrl; + + if (!tb_route(sw)) + return 0; + + if (sw->generation < 2) + return 0; + + cap = find_port_lc_cap(port); + if (cap < 0) + return cap; + + ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); + if (ret) + return ret; + + ctrl |= TB_LC_SX_CTRL_SLI; + + return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); +} + +/** + * tb_lc_is_clx_supported() - Check whether CLx is supported by the lane adapter + * @port: Lane adapter + * + * TB_LC_LINK_ATTR_CPS bit reflects if the link supports CLx including + * active cables (if connected on the link). + */ +bool tb_lc_is_clx_supported(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + int cap, ret; + u32 val; + + cap = find_port_lc_cap(port); + if (cap < 0) + return false; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_ATTR, 1); + if (ret) + return false; + + return !!(val & TB_LC_LINK_ATTR_CPS); +} + +/** + * tb_lc_is_usb_plugged() - Is there USB device connected to port + * @port: Device router lane 0 adapter + * + * Returns true if the @port has USB type-C device connected. + */ +bool tb_lc_is_usb_plugged(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + int cap, ret; + u32 val; + + if (sw->generation != 3) + return false; + + cap = find_port_lc_cap(port); + if (cap < 0) + return false; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_CS_42, 1); + if (ret) + return false; + + return !!(val & TB_LC_CS_42_USB_PLUGGED); +} + +/** + * tb_lc_is_xhci_connected() - Is the internal xHCI connected + * @port: Device router lane 0 adapter + * + * Returns true if the internal xHCI has been connected to @port. + */ +bool tb_lc_is_xhci_connected(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + int cap, ret; + u32 val; + + if (sw->generation != 3) + return false; + + cap = find_port_lc_cap(port); + if (cap < 0) + return false; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1); + if (ret) + return false; + + return !!(val & TB_LC_LINK_REQ_XHCI_CONNECT); +} + +static int __tb_lc_xhci_connect(struct tb_port *port, bool connect) +{ + struct tb_switch *sw = port->sw; + int cap, ret; + u32 val; + + if (sw->generation != 3) + return -EINVAL; + + cap = find_port_lc_cap(port); + if (cap < 0) + return cap; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1); + if (ret) + return ret; + + if (connect) + val |= TB_LC_LINK_REQ_XHCI_CONNECT; + else + val &= ~TB_LC_LINK_REQ_XHCI_CONNECT; + + return tb_sw_write(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1); +} + +/** + * tb_lc_xhci_connect() - Connect internal xHCI + * @port: Device router lane 0 adapter + * + * Tells LC to connect the internal xHCI to @port. Returns %0 on success + * and negative errno in case of failure. Can be called for Thunderbolt 3 + * routers only. 
+ */ +int tb_lc_xhci_connect(struct tb_port *port) +{ + int ret; + + ret = __tb_lc_xhci_connect(port, true); + if (ret) + return ret; + + tb_port_dbg(port, "xHCI connected\n"); + return 0; +} + +/** + * tb_lc_xhci_disconnect() - Disconnect internal xHCI + * @port: Device router lane 0 adapter + * + * Tells LC to disconnect the internal xHCI from @port. Can be called + * for Thunderbolt 3 routers only. + */ +void tb_lc_xhci_disconnect(struct tb_port *port) +{ + __tb_lc_xhci_connect(port, false); + tb_port_dbg(port, "xHCI disconnected\n"); +} + +static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset, + unsigned int flags) +{ + u32 ctrl; + int ret; + + /* + * Enable wake on PCIe and USB4 (wake coming from another + * router). + */ + ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, + offset + TB_LC_SX_CTRL, 1); + if (ret) + return ret; + + ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WODPC | + TB_LC_SX_CTRL_WODPD | TB_LC_SX_CTRL_WOP | TB_LC_SX_CTRL_WOU4); + + if (flags & TB_WAKE_ON_CONNECT) + ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD; + if (flags & TB_WAKE_ON_USB4) + ctrl |= TB_LC_SX_CTRL_WOU4; + if (flags & TB_WAKE_ON_PCIE) + ctrl |= TB_LC_SX_CTRL_WOP; + if (flags & TB_WAKE_ON_DP) + ctrl |= TB_LC_SX_CTRL_WODPC | TB_LC_SX_CTRL_WODPD; + + return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1); +} + +/** + * tb_lc_set_wake() - Enable/disable wake + * @sw: Switch whose wakes to configure + * @flags: Wakeup flags (%0 to disable) + * + * For each LC sets wake bits accordingly. + */ +int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags) +{ + int start, size, nlc, ret, i; + u32 desc; + + if (sw->generation < 2) + return 0; + + if (!tb_route(sw)) + return 0; + + ret = read_lc_desc(sw, &desc); + if (ret) + return ret; + + /* Figure out number of link controllers */ + nlc = desc & TB_LC_DESC_NLC_MASK; + start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT; + size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT; + + /* For each link controller set sleep bit */ + for (i = 0; i < nlc; i++) { + unsigned int offset = sw->cap_lc + start + i * size; + + ret = tb_lc_set_wake_one(sw, offset, flags); + if (ret) + return ret; + } + + return 0; +} + +/** + * tb_lc_set_sleep() - Inform LC that the switch is going to sleep + * @sw: Switch to set sleep + * + * Let the switch link controllers know that the switch is going to + * sleep. + */ +int tb_lc_set_sleep(struct tb_switch *sw) +{ + int start, size, nlc, ret, i; + u32 desc; + + if (sw->generation < 2) + return 0; + + ret = read_lc_desc(sw, &desc); + if (ret) + return ret; + + /* Figure out number of link controllers */ + nlc = desc & TB_LC_DESC_NLC_MASK; + start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT; + size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT; + + /* For each link controller set sleep bit */ + for (i = 0; i < nlc; i++) { + unsigned int offset = sw->cap_lc + start + i * size; + u32 ctrl; + + ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, + offset + TB_LC_SX_CTRL, 1); + if (ret) + return ret; + + ctrl |= TB_LC_SX_CTRL_SLP; + ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, + offset + TB_LC_SX_CTRL, 1); + if (ret) + return ret; + } + + return 0; +} + +/** + * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch + * @sw: Switch to check + * + * Checks whether conditions for lane bonding from parent to @sw are + * possible. 
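[Editorial note: a hedged sketch of how a caller might compose the wake flags consumed by tb_lc_set_wake() above. The policy shown (wake on hot plug during runtime suspend, plus wake packets coming over the USB4 link) is purely illustrative, and demo_enable_wakes() is not a real driver function.]

#include "tb.h"

static int demo_enable_wakes(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;

	if (runtime)
		flags |= TB_WAKE_ON_CONNECT;	/* plug/unplug events wake us */
	flags |= TB_WAKE_ON_USB4;		/* wake coming over the link */

	return tb_lc_set_wake(sw, flags);
}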
+ */ +bool tb_lc_lane_bonding_possible(struct tb_switch *sw) +{ + struct tb_port *up; + int cap, ret; + u32 val; + + if (sw->generation < 2) + return false; + + up = tb_upstream_port(sw); + cap = find_port_lc_cap(up); + if (cap < 0) + return false; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1); + if (ret) + return false; + + return !!(val & TB_LC_PORT_ATTR_BE); +} + +static int tb_lc_dp_sink_from_port(const struct tb_switch *sw, + struct tb_port *in) +{ + struct tb_port *port; + + /* The first DP IN port is sink 0 and second is sink 1 */ + tb_switch_for_each_port(sw, port) { + if (tb_port_is_dpin(port)) + return in != port; + } + + return -EINVAL; +} + +static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink) +{ + u32 val, alloc; + int ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + if (ret) + return ret; + + /* + * Sink is available for CM/SW to use if the allocation valie is + * either 0 or 1. + */ + if (!sink) { + alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK; + if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM) + return 0; + } else { + alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >> + TB_LC_SNK_ALLOCATION_SNK1_SHIFT; + if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM) + return 0; + } + + return -EBUSY; +} + +/** + * tb_lc_dp_sink_query() - Is DP sink available for DP IN port + * @sw: Switch whose DP sink is queried + * @in: DP IN port to check + * + * Queries through LC SNK_ALLOCATION registers whether DP sink is available + * for the given DP IN port or not. + */ +bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in) +{ + int sink; + + /* + * For older generations sink is always available as there is no + * allocation mechanism. + */ + if (sw->generation < 3) + return true; + + sink = tb_lc_dp_sink_from_port(sw, in); + if (sink < 0) + return false; + + return !tb_lc_dp_sink_available(sw, sink); +} + +/** + * tb_lc_dp_sink_alloc() - Allocate DP sink + * @sw: Switch whose DP sink is allocated + * @in: DP IN port the DP sink is allocated for + * + * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the + * resource is available and allocation is successful returns %0. In all + * other cases returs negative errno. In particular %-EBUSY is returned if + * the resource was not available. + */ +int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in) +{ + int ret, sink; + u32 val; + + if (sw->generation < 3) + return 0; + + sink = tb_lc_dp_sink_from_port(sw, in); + if (sink < 0) + return sink; + + ret = tb_lc_dp_sink_available(sw, sink); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + if (ret) + return ret; + + if (!sink) { + val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK; + val |= TB_LC_SNK_ALLOCATION_SNK0_CM; + } else { + val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK; + val |= TB_LC_SNK_ALLOCATION_SNK1_CM << + TB_LC_SNK_ALLOCATION_SNK1_SHIFT; + } + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + + if (ret) + return ret; + + tb_port_dbg(in, "sink %d allocated\n", sink); + return 0; +} + +/** + * tb_lc_dp_sink_dealloc() - De-allocate DP sink + * @sw: Switch whose DP sink is de-allocated + * @in: DP IN port whose DP sink is de-allocated + * + * De-allocate DP sink from @in using LC SNK_ALLOCATION registers. 
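[Editorial note: a hedged usage sketch of the three DP sink helpers documented above (query, alloc, dealloc), roughly as a connection manager would pair them around DP tunnel setup. demo_setup_dp_tunnel() is a placeholder, not a real function.]

#include "tb.h"

static int demo_setup_dp_tunnel(struct tb_port *in)
{
	return 0;	/* placeholder for the real tunnel setup */
}

static int demo_claim_dp_sink(struct tb_switch *sw, struct tb_port *in)
{
	int ret;

	if (!tb_lc_dp_sink_query(sw, in))
		return -EBUSY;			/* sink owned by someone else */

	ret = tb_lc_dp_sink_alloc(sw, in);
	if (ret)
		return ret;

	ret = demo_setup_dp_tunnel(in);
	if (ret)
		tb_lc_dp_sink_dealloc(sw, in);	/* release on failure */

	return ret;
}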
+ */ +int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in) +{ + int ret, sink; + u32 val; + + if (sw->generation < 3) + return 0; + + sink = tb_lc_dp_sink_from_port(sw, in); + if (sink < 0) + return sink; + + /* Needs to be owned by CM/SW */ + ret = tb_lc_dp_sink_available(sw, sink); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + if (ret) + return ret; + + if (!sink) + val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK; + else + val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK; + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + if (ret) + return ret; + + tb_port_dbg(in, "sink %d de-allocated\n", sink); + return 0; +} + +/** + * tb_lc_force_power() - Forces LC to be powered on + * @sw: Thunderbolt switch + * + * This is useful to let authentication cycle pass even without + * a Thunderbolt link present. + */ +int tb_lc_force_power(struct tb_switch *sw) +{ + u32 in = 0xffff; + + return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1); +} diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c new file mode 100644 index 0000000000..4b7bec74e8 --- /dev/null +++ b/drivers/thunderbolt/nhi.c @@ -0,0 +1,1562 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Thunderbolt driver - NHI driver + * + * The NHI (native host interface) is the pci device that allows us to send and + * receive frames from the thunderbolt bus. + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2018, Intel Corporation + */ + +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/property.h> +#include <linux/string_helpers.h> + +#include "nhi.h" +#include "nhi_regs.h" +#include "tb.h" + +#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") + +#define RING_FIRST_USABLE_HOPID 1 +/* + * Used with QUIRK_E2E to specify an unused HopID the Rx credits are + * transferred. + */ +#define RING_E2E_RESERVED_HOPID RING_FIRST_USABLE_HOPID +/* + * Minimal number of vectors when we use MSI-X. Two for control channel + * Rx/Tx and the rest four are for cross domain DMA paths. + */ +#define MSIX_MIN_VECS 6 +#define MSIX_MAX_VECS 16 + +#define NHI_MAILBOX_TIMEOUT 500 /* ms */ + +/* Host interface quirks */ +#define QUIRK_AUTO_CLEAR_INT BIT(0) +#define QUIRK_E2E BIT(1) + +static bool host_reset = true; +module_param(host_reset, bool, 0444); +MODULE_PARM_DESC(host_reset, "reset USBv2 host router (default: true)"); + +static int ring_interrupt_index(const struct tb_ring *ring) +{ + int bit = ring->hop; + if (!ring->is_tx) + bit += ring->nhi->hop_count; + return bit; +} + +static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring) +{ + if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) { + u32 val; + + val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring); + iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring); + } else { + iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring); + } +} + +static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring) +{ + if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) + ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring); + else + iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring); +} + +/* + * ring_interrupt_active() - activate/deactivate interrupts for a single ring + * + * ring->nhi->lock must be held. 
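[Editorial note: ring_interrupt_index() above packs TX and RX rings into one bit space: TX ring N is bit N and RX ring N is bit hop_count + N. A small worked example with an illustrative hop_count of 12:]

static void demo_interrupt_position(void)
{
	int hop_count = 12, hop = 3;		/* illustrative RX ring on hop 3 */
	int index = hop + hop_count;		/* 15: RX rings follow TX rings */
	int reg = REG_RING_INTERRUPT_BASE + index / 32 * 4;	/* first dword */
	int bit = index % 32;			/* bit 15 of that dword */

	(void)reg;
	(void)bit;
}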
+ */ +static void ring_interrupt_active(struct tb_ring *ring, bool active) +{ + int index = ring_interrupt_index(ring) / 32 * 4; + int reg = REG_RING_INTERRUPT_BASE + index; + int interrupt_bit = ring_interrupt_index(ring) & 31; + int mask = 1 << interrupt_bit; + u32 old, new; + + if (ring->irq > 0) { + u32 step, shift, ivr, misc; + void __iomem *ivr_base; + int auto_clear_bit; + int index; + + if (ring->is_tx) + index = ring->hop; + else + index = ring->hop + ring->nhi->hop_count; + + /* + * Intel routers support a bit that isn't part of + * the USB4 spec to ask the hardware to clear + * interrupt status bits automatically since + * we already know which interrupt was triggered. + * + * Other routers explicitly disable auto-clear + * to prevent conditions that may occur where two + * MSIX interrupts are simultaneously active and + * reading the register clears both of them. + */ + misc = ioread32(ring->nhi->iobase + REG_DMA_MISC); + if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) + auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR; + else + auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR; + if (!(misc & auto_clear_bit)) + iowrite32(misc | auto_clear_bit, + ring->nhi->iobase + REG_DMA_MISC); + + ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE; + step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS; + shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS; + ivr = ioread32(ivr_base + step); + ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift); + if (active) + ivr |= ring->vector << shift; + iowrite32(ivr, ivr_base + step); + } + + old = ioread32(ring->nhi->iobase + reg); + if (active) + new = old | mask; + else + new = old & ~mask; + + dev_dbg(&ring->nhi->pdev->dev, + "%s interrupt at register %#x bit %d (%#x -> %#x)\n", + active ? "enabling" : "disabling", reg, interrupt_bit, old, new); + + if (new == old) + dev_WARN(&ring->nhi->pdev->dev, + "interrupt for %s %d is already %s\n", + RING_TYPE(ring), ring->hop, + active ? "enabled" : "disabled"); + + if (active) + iowrite32(new, ring->nhi->iobase + reg); + else + nhi_mask_interrupt(ring->nhi, mask, index); +} + +/* + * nhi_disable_interrupts() - disable interrupts for all rings + * + * Use only during init and shutdown. + */ +static void nhi_disable_interrupts(struct tb_nhi *nhi) +{ + int i = 0; + /* disable interrupts */ + for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++) + nhi_mask_interrupt(nhi, ~0, 4 * i); + + /* clear interrupt status bits */ + for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++) + nhi_clear_interrupt(nhi, 4 * i); +} + +/* ring helper methods */ + +static void __iomem *ring_desc_base(struct tb_ring *ring) +{ + void __iomem *io = ring->nhi->iobase; + io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE; + io += ring->hop * 16; + return io; +} + +static void __iomem *ring_options_base(struct tb_ring *ring) +{ + void __iomem *io = ring->nhi->iobase; + io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE; + io += ring->hop * 32; + return io; +} + +static void ring_iowrite_cons(struct tb_ring *ring, u16 cons) +{ + /* + * The other 16-bits in the register is read-only and writes to it + * are ignored by the hardware so we can save one ioread32() by + * filling the read-only bits with zeroes. 
+ */ + iowrite32(cons, ring_desc_base(ring) + 8); +} + +static void ring_iowrite_prod(struct tb_ring *ring, u16 prod) +{ + /* See ring_iowrite_cons() above for explanation */ + iowrite32(prod << 16, ring_desc_base(ring) + 8); +} + +static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset) +{ + iowrite32(value, ring_desc_base(ring) + offset); +} + +static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset) +{ + iowrite32(value, ring_desc_base(ring) + offset); + iowrite32(value >> 32, ring_desc_base(ring) + offset + 4); +} + +static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset) +{ + iowrite32(value, ring_options_base(ring) + offset); +} + +static bool ring_full(struct tb_ring *ring) +{ + return ((ring->head + 1) % ring->size) == ring->tail; +} + +static bool ring_empty(struct tb_ring *ring) +{ + return ring->head == ring->tail; +} + +/* + * ring_write_descriptors() - post frames from ring->queue to the controller + * + * ring->lock is held. + */ +static void ring_write_descriptors(struct tb_ring *ring) +{ + struct ring_frame *frame, *n; + struct ring_desc *descriptor; + list_for_each_entry_safe(frame, n, &ring->queue, list) { + if (ring_full(ring)) + break; + list_move_tail(&frame->list, &ring->in_flight); + descriptor = &ring->descriptors[ring->head]; + descriptor->phys = frame->buffer_phy; + descriptor->time = 0; + descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT; + if (ring->is_tx) { + descriptor->length = frame->size; + descriptor->eof = frame->eof; + descriptor->sof = frame->sof; + } + ring->head = (ring->head + 1) % ring->size; + if (ring->is_tx) + ring_iowrite_prod(ring, ring->head); + else + ring_iowrite_cons(ring, ring->head); + } +} + +/* + * ring_work() - progress completed frames + * + * If the ring is shutting down then all frames are marked as canceled and + * their callbacks are invoked. + * + * Otherwise we collect all completed frame from the ring buffer, write new + * frame to the ring buffer and invoke the callbacks for the completed frames. + */ +static void ring_work(struct work_struct *work) +{ + struct tb_ring *ring = container_of(work, typeof(*ring), work); + struct ring_frame *frame; + bool canceled = false; + unsigned long flags; + LIST_HEAD(done); + + spin_lock_irqsave(&ring->lock, flags); + + if (!ring->running) { + /* Move all frames to done and mark them as canceled. */ + list_splice_tail_init(&ring->in_flight, &done); + list_splice_tail_init(&ring->queue, &done); + canceled = true; + goto invoke_callback; + } + + while (!ring_empty(ring)) { + if (!(ring->descriptors[ring->tail].flags + & RING_DESC_COMPLETED)) + break; + frame = list_first_entry(&ring->in_flight, typeof(*frame), + list); + list_move_tail(&frame->list, &done); + if (!ring->is_tx) { + frame->size = ring->descriptors[ring->tail].length; + frame->eof = ring->descriptors[ring->tail].eof; + frame->sof = ring->descriptors[ring->tail].sof; + frame->flags = ring->descriptors[ring->tail].flags; + } + ring->tail = (ring->tail + 1) % ring->size; + } + ring_write_descriptors(ring); + +invoke_callback: + /* allow callbacks to schedule new work */ + spin_unlock_irqrestore(&ring->lock, flags); + while (!list_empty(&done)) { + frame = list_first_entry(&done, typeof(*frame), list); + /* + * The callback may reenqueue or delete frame. + * Do not hold on to it. 
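[Editorial note: a hedged sketch of a client RX frame callback honoring the contract noted above: once the callback runs, the ring code holds no reference to the frame, so the callback may free it or re-post it with __tb_ring_enqueue() (defined just below). The declarations are assumed to come from <linux/thunderbolt.h>, and the demo_* name is illustrative.]

#include <linux/slab.h>
#include <linux/thunderbolt.h>

static void demo_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			     bool canceled)
{
	if (canceled) {
		kfree(frame);		/* ring is shutting down */
		return;
	}

	/* consume frame->size bytes from the RX buffer here ... */

	__tb_ring_enqueue(ring, frame);	/* re-post the buffer for reuse */
}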
+ */ + list_del_init(&frame->list); + if (frame->callback) + frame->callback(ring, frame, canceled); + } +} + +int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&ring->lock, flags); + if (ring->running) { + list_add_tail(&frame->list, &ring->queue); + ring_write_descriptors(ring); + } else { + ret = -ESHUTDOWN; + } + spin_unlock_irqrestore(&ring->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(__tb_ring_enqueue); + +/** + * tb_ring_poll() - Poll one completed frame from the ring + * @ring: Ring to poll + * + * This function can be called when @start_poll callback of the @ring + * has been called. It will read one completed frame from the ring and + * return it to the caller. Returns %NULL if there is no more completed + * frames. + */ +struct ring_frame *tb_ring_poll(struct tb_ring *ring) +{ + struct ring_frame *frame = NULL; + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + if (!ring->running) + goto unlock; + if (ring_empty(ring)) + goto unlock; + + if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) { + frame = list_first_entry(&ring->in_flight, typeof(*frame), + list); + list_del_init(&frame->list); + + if (!ring->is_tx) { + frame->size = ring->descriptors[ring->tail].length; + frame->eof = ring->descriptors[ring->tail].eof; + frame->sof = ring->descriptors[ring->tail].sof; + frame->flags = ring->descriptors[ring->tail].flags; + } + + ring->tail = (ring->tail + 1) % ring->size; + } + +unlock: + spin_unlock_irqrestore(&ring->lock, flags); + return frame; +} +EXPORT_SYMBOL_GPL(tb_ring_poll); + +static void __ring_interrupt_mask(struct tb_ring *ring, bool mask) +{ + int idx = ring_interrupt_index(ring); + int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4; + int bit = idx % 32; + u32 val; + + val = ioread32(ring->nhi->iobase + reg); + if (mask) + val &= ~BIT(bit); + else + val |= BIT(bit); + iowrite32(val, ring->nhi->iobase + reg); +} + +/* Both @nhi->lock and @ring->lock should be held */ +static void __ring_interrupt(struct tb_ring *ring) +{ + if (!ring->running) + return; + + if (ring->start_poll) { + __ring_interrupt_mask(ring, true); + ring->start_poll(ring->poll_data); + } else { + schedule_work(&ring->work); + } +} + +/** + * tb_ring_poll_complete() - Re-start interrupt for the ring + * @ring: Ring to re-start the interrupt + * + * This will re-start (unmask) the ring interrupt once the user is done + * with polling. 
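[Editorial note: a hedged sketch of the poll-mode flow these helpers implement, as an in-kernel client might use it: the start_poll callback fires with the ring interrupt masked and only defers the work, the deferred handler drains completed frames with tb_ring_poll(), and tb_ring_poll_complete() unmasks the interrupt again. struct demo_consumer and the demo_* functions are illustrative, assuming the client-facing declarations in <linux/thunderbolt.h>.]

#include <linux/thunderbolt.h>
#include <linux/workqueue.h>

struct demo_consumer {
	struct tb_ring *ring;
	struct work_struct work;
};

/* Runs in hard interrupt context with the ring interrupt masked. */
static void demo_start_poll(void *data)
{
	struct demo_consumer *c = data;

	schedule_work(&c->work);		/* defer the actual draining */
}

static void demo_poll_work(struct work_struct *work)
{
	struct demo_consumer *c = container_of(work, typeof(*c), work);
	struct ring_frame *frame;

	while ((frame = tb_ring_poll(c->ring)) != NULL) {
		/* frame->size/eof/sof describe the completed frame */
	}

	tb_ring_poll_complete(c->ring);		/* unmask the interrupt again */
}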
+ */ +void tb_ring_poll_complete(struct tb_ring *ring) +{ + unsigned long flags; + + spin_lock_irqsave(&ring->nhi->lock, flags); + spin_lock(&ring->lock); + if (ring->start_poll) + __ring_interrupt_mask(ring, false); + spin_unlock(&ring->lock); + spin_unlock_irqrestore(&ring->nhi->lock, flags); +} +EXPORT_SYMBOL_GPL(tb_ring_poll_complete); + +static void ring_clear_msix(const struct tb_ring *ring) +{ + int bit; + + if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) + return; + + bit = ring_interrupt_index(ring) & 31; + if (ring->is_tx) + iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR); + else + iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR + + 4 * (ring->nhi->hop_count / 32)); +} + +static irqreturn_t ring_msix(int irq, void *data) +{ + struct tb_ring *ring = data; + + spin_lock(&ring->nhi->lock); + ring_clear_msix(ring); + spin_lock(&ring->lock); + __ring_interrupt(ring); + spin_unlock(&ring->lock); + spin_unlock(&ring->nhi->lock); + + return IRQ_HANDLED; +} + +static int ring_request_msix(struct tb_ring *ring, bool no_suspend) +{ + struct tb_nhi *nhi = ring->nhi; + unsigned long irqflags; + int ret; + + if (!nhi->pdev->msix_enabled) + return 0; + + ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL); + if (ret < 0) + return ret; + + ring->vector = ret; + + ret = pci_irq_vector(ring->nhi->pdev, ring->vector); + if (ret < 0) + goto err_ida_remove; + + ring->irq = ret; + + irqflags = no_suspend ? IRQF_NO_SUSPEND : 0; + ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring); + if (ret) + goto err_ida_remove; + + return 0; + +err_ida_remove: + ida_simple_remove(&nhi->msix_ida, ring->vector); + + return ret; +} + +static void ring_release_msix(struct tb_ring *ring) +{ + if (ring->irq <= 0) + return; + + free_irq(ring->irq, ring); + ida_simple_remove(&ring->nhi->msix_ida, ring->vector); + ring->vector = 0; + ring->irq = 0; +} + +static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring) +{ + unsigned int start_hop = RING_FIRST_USABLE_HOPID; + int ret = 0; + + if (nhi->quirks & QUIRK_E2E) { + start_hop = RING_FIRST_USABLE_HOPID + 1; + if (ring->flags & RING_FLAG_E2E && !ring->is_tx) { + dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n", + ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID); + ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID; + } + } + + spin_lock_irq(&nhi->lock); + + if (ring->hop < 0) { + unsigned int i; + + /* + * Automatically allocate HopID from the non-reserved + * range 1 .. hop_count - 1. 
+ */ + for (i = start_hop; i < nhi->hop_count; i++) { + if (ring->is_tx) { + if (!nhi->tx_rings[i]) { + ring->hop = i; + break; + } + } else { + if (!nhi->rx_rings[i]) { + ring->hop = i; + break; + } + } + } + } + + if (ring->hop > 0 && ring->hop < start_hop) { + dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop); + ret = -EINVAL; + goto err_unlock; + } + if (ring->hop < 0 || ring->hop >= nhi->hop_count) { + dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop); + ret = -EINVAL; + goto err_unlock; + } + if (ring->is_tx && nhi->tx_rings[ring->hop]) { + dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n", + ring->hop); + ret = -EBUSY; + goto err_unlock; + } + if (!ring->is_tx && nhi->rx_rings[ring->hop]) { + dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n", + ring->hop); + ret = -EBUSY; + goto err_unlock; + } + + if (ring->is_tx) + nhi->tx_rings[ring->hop] = ring; + else + nhi->rx_rings[ring->hop] = ring; + +err_unlock: + spin_unlock_irq(&nhi->lock); + + return ret; +} + +static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size, + bool transmit, unsigned int flags, + int e2e_tx_hop, u16 sof_mask, u16 eof_mask, + void (*start_poll)(void *), + void *poll_data) +{ + struct tb_ring *ring = NULL; + + dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n", + transmit ? "TX" : "RX", hop, size); + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + return NULL; + + spin_lock_init(&ring->lock); + INIT_LIST_HEAD(&ring->queue); + INIT_LIST_HEAD(&ring->in_flight); + INIT_WORK(&ring->work, ring_work); + + ring->nhi = nhi; + ring->hop = hop; + ring->is_tx = transmit; + ring->size = size; + ring->flags = flags; + ring->e2e_tx_hop = e2e_tx_hop; + ring->sof_mask = sof_mask; + ring->eof_mask = eof_mask; + ring->head = 0; + ring->tail = 0; + ring->running = false; + ring->start_poll = start_poll; + ring->poll_data = poll_data; + + ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev, + size * sizeof(*ring->descriptors), + &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO); + if (!ring->descriptors) + goto err_free_ring; + + if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND)) + goto err_free_descs; + + if (nhi_alloc_hop(nhi, ring)) + goto err_release_msix; + + return ring; + +err_release_msix: + ring_release_msix(ring); +err_free_descs: + dma_free_coherent(&ring->nhi->pdev->dev, + ring->size * sizeof(*ring->descriptors), + ring->descriptors, ring->descriptors_dma); +err_free_ring: + kfree(ring); + + return NULL; +} + +/** + * tb_ring_alloc_tx() - Allocate DMA ring for transmit + * @nhi: Pointer to the NHI the ring is to be allocated + * @hop: HopID (ring) to allocate + * @size: Number of entries in the ring + * @flags: Flags for the ring + */ +struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, + unsigned int flags) +{ + return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, 0, NULL, NULL); +} +EXPORT_SYMBOL_GPL(tb_ring_alloc_tx); + +/** + * tb_ring_alloc_rx() - Allocate DMA ring for receive + * @nhi: Pointer to the NHI the ring is to be allocated + * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation. + * @size: Number of entries in the ring + * @flags: Flags for the ring + * @e2e_tx_hop: Transmit HopID when E2E is enabled in @flags + * @sof_mask: Mask of PDF values that start a frame + * @eof_mask: Mask of PDF values that end a frame + * @start_poll: If not %NULL the ring will call this function when an + * interrupt is triggered and masked, instead of callback + * in each Rx frame. 
+ * @poll_data: Optional data passed to @start_poll + */ +struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, + unsigned int flags, int e2e_tx_hop, + u16 sof_mask, u16 eof_mask, + void (*start_poll)(void *), void *poll_data) +{ + return tb_ring_alloc(nhi, hop, size, false, flags, e2e_tx_hop, sof_mask, eof_mask, + start_poll, poll_data); +} +EXPORT_SYMBOL_GPL(tb_ring_alloc_rx); + +/** + * tb_ring_start() - enable a ring + * @ring: Ring to start + * + * Must not be invoked in parallel with tb_ring_stop(). + */ +void tb_ring_start(struct tb_ring *ring) +{ + u16 frame_size; + u32 flags; + + spin_lock_irq(&ring->nhi->lock); + spin_lock(&ring->lock); + if (ring->nhi->going_away) + goto err; + if (ring->running) { + dev_WARN(&ring->nhi->pdev->dev, "ring already started\n"); + goto err; + } + dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n", + RING_TYPE(ring), ring->hop); + + if (ring->flags & RING_FLAG_FRAME) { + /* Means 4096 */ + frame_size = 0; + flags = RING_FLAG_ENABLE; + } else { + frame_size = TB_FRAME_SIZE; + flags = RING_FLAG_ENABLE | RING_FLAG_RAW; + } + + ring_iowrite64desc(ring, ring->descriptors_dma, 0); + if (ring->is_tx) { + ring_iowrite32desc(ring, ring->size, 12); + ring_iowrite32options(ring, 0, 4); /* time releated ? */ + ring_iowrite32options(ring, flags, 0); + } else { + u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask; + + ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12); + ring_iowrite32options(ring, sof_eof_mask, 4); + ring_iowrite32options(ring, flags, 0); + } + + /* + * Now that the ring valid bit is set we can configure E2E if + * enabled for the ring. + */ + if (ring->flags & RING_FLAG_E2E) { + if (!ring->is_tx) { + u32 hop; + + hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT; + hop &= REG_RX_OPTIONS_E2E_HOP_MASK; + flags |= hop; + + dev_dbg(&ring->nhi->pdev->dev, + "enabling E2E for %s %d with TX HopID %d\n", + RING_TYPE(ring), ring->hop, ring->e2e_tx_hop); + } else { + dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n", + RING_TYPE(ring), ring->hop); + } + + flags |= RING_FLAG_E2E_FLOW_CONTROL; + ring_iowrite32options(ring, flags, 0); + } + + ring_interrupt_active(ring, true); + ring->running = true; +err: + spin_unlock(&ring->lock); + spin_unlock_irq(&ring->nhi->lock); +} +EXPORT_SYMBOL_GPL(tb_ring_start); + +/** + * tb_ring_stop() - shutdown a ring + * @ring: Ring to stop + * + * Must not be invoked from a callback. + * + * This method will disable the ring. Further calls to + * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until ring_stop has been + * called. + * + * All enqueued frames will be canceled and their callbacks will be executed + * with frame->canceled set to true (on the callback thread). This method + * returns only after all callback invocations have finished. 
+ */ +void tb_ring_stop(struct tb_ring *ring) +{ + spin_lock_irq(&ring->nhi->lock); + spin_lock(&ring->lock); + dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n", + RING_TYPE(ring), ring->hop); + if (ring->nhi->going_away) + goto err; + if (!ring->running) { + dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n", + RING_TYPE(ring), ring->hop); + goto err; + } + ring_interrupt_active(ring, false); + + ring_iowrite32options(ring, 0, 0); + ring_iowrite64desc(ring, 0, 0); + ring_iowrite32desc(ring, 0, 8); + ring_iowrite32desc(ring, 0, 12); + ring->head = 0; + ring->tail = 0; + ring->running = false; + +err: + spin_unlock(&ring->lock); + spin_unlock_irq(&ring->nhi->lock); + + /* + * schedule ring->work to invoke callbacks on all remaining frames. + */ + schedule_work(&ring->work); + flush_work(&ring->work); +} +EXPORT_SYMBOL_GPL(tb_ring_stop); + +/* + * tb_ring_free() - free ring + * + * When this method returns all invocations of ring->callback will have + * finished. + * + * Ring must be stopped. + * + * Must NOT be called from ring_frame->callback! + */ +void tb_ring_free(struct tb_ring *ring) +{ + spin_lock_irq(&ring->nhi->lock); + /* + * Dissociate the ring from the NHI. This also ensures that + * nhi_interrupt_work cannot reschedule ring->work. + */ + if (ring->is_tx) + ring->nhi->tx_rings[ring->hop] = NULL; + else + ring->nhi->rx_rings[ring->hop] = NULL; + + if (ring->running) { + dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n", + RING_TYPE(ring), ring->hop); + } + spin_unlock_irq(&ring->nhi->lock); + + ring_release_msix(ring); + + dma_free_coherent(&ring->nhi->pdev->dev, + ring->size * sizeof(*ring->descriptors), + ring->descriptors, ring->descriptors_dma); + + ring->descriptors = NULL; + ring->descriptors_dma = 0; + + + dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring), + ring->hop); + + /* + * ring->work can no longer be scheduled (it is scheduled only + * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it + * to finish before freeing the ring. + */ + flush_work(&ring->work); + kfree(ring); +} +EXPORT_SYMBOL_GPL(tb_ring_free); + +/** + * nhi_mailbox_cmd() - Send a command through NHI mailbox + * @nhi: Pointer to the NHI structure + * @cmd: Command to send + * @data: Data to be send with the command + * + * Sends mailbox command to the firmware running on NHI. Returns %0 in + * case of success and negative errno in case of failure. + */ +int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data) +{ + ktime_t timeout; + u32 val; + + iowrite32(data, nhi->iobase + REG_INMAIL_DATA); + + val = ioread32(nhi->iobase + REG_INMAIL_CMD); + val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR); + val |= REG_INMAIL_OP_REQUEST | cmd; + iowrite32(val, nhi->iobase + REG_INMAIL_CMD); + + timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT); + do { + val = ioread32(nhi->iobase + REG_INMAIL_CMD); + if (!(val & REG_INMAIL_OP_REQUEST)) + break; + usleep_range(10, 20); + } while (ktime_before(ktime_get(), timeout)); + + if (val & REG_INMAIL_OP_REQUEST) + return -ETIMEDOUT; + if (val & REG_INMAIL_ERROR) + return -EIO; + + return 0; +} + +/** + * nhi_mailbox_mode() - Return current firmware operation mode + * @nhi: Pointer to the NHI structure + * + * The function reads current firmware operation mode using NHI mailbox + * registers and returns it to the caller. 
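[Editorial note: a hedged usage sketch of the TX side of the ring API documented above, roughly as an in-kernel client would drive it. The ring itself would be created once with tb_ring_alloc_tx() + tb_ring_start() and torn down later with tb_ring_stop() + tb_ring_free(). DMA-mapping error handling and unmapping are trimmed, and all demo_* names are illustrative; declarations are assumed to come from <linux/thunderbolt.h>.]

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/thunderbolt.h>

static void demo_tx_done(struct tb_ring *ring, struct ring_frame *frame,
			 bool canceled)
{
	/* dma_unmap_single() and buffer recycling would go here;
	 * canceled is true when the ring was stopped first */
	kfree(frame);
}

static int demo_send(struct tb_nhi *nhi, struct tb_ring *ring,
		     void *buf, size_t len)
{
	struct ring_frame *frame;

	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	frame->buffer_phy = dma_map_single(&nhi->pdev->dev, buf, len,
					   DMA_TO_DEVICE);
	frame->size = len;
	frame->callback = demo_tx_done;

	return __tb_ring_enqueue(ring, frame);	/* -ESHUTDOWN if stopped */
}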
+ */ +enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi) +{ + u32 val; + + val = ioread32(nhi->iobase + REG_OUTMAIL_CMD); + val &= REG_OUTMAIL_CMD_OPMODE_MASK; + val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT; + + return (enum nhi_fw_mode)val; +} + +static void nhi_interrupt_work(struct work_struct *work) +{ + struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work); + int value = 0; /* Suppress uninitialized usage warning. */ + int bit; + int hop = -1; + int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */ + struct tb_ring *ring; + + spin_lock_irq(&nhi->lock); + + /* + * Starting at REG_RING_NOTIFY_BASE there are three status bitfields + * (TX, RX, RX overflow). We iterate over the bits and read a new + * dwords as required. The registers are cleared on read. + */ + for (bit = 0; bit < 3 * nhi->hop_count; bit++) { + if (bit % 32 == 0) + value = ioread32(nhi->iobase + + REG_RING_NOTIFY_BASE + + 4 * (bit / 32)); + if (++hop == nhi->hop_count) { + hop = 0; + type++; + } + if ((value & (1 << (bit % 32))) == 0) + continue; + if (type == 2) { + dev_warn(&nhi->pdev->dev, + "RX overflow for ring %d\n", + hop); + continue; + } + if (type == 0) + ring = nhi->tx_rings[hop]; + else + ring = nhi->rx_rings[hop]; + if (ring == NULL) { + dev_warn(&nhi->pdev->dev, + "got interrupt for inactive %s ring %d\n", + type ? "RX" : "TX", + hop); + continue; + } + + spin_lock(&ring->lock); + __ring_interrupt(ring); + spin_unlock(&ring->lock); + } + spin_unlock_irq(&nhi->lock); +} + +static irqreturn_t nhi_msi(int irq, void *data) +{ + struct tb_nhi *nhi = data; + schedule_work(&nhi->interrupt_work); + return IRQ_HANDLED; +} + +static int __nhi_suspend_noirq(struct device *dev, bool wakeup) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct tb *tb = pci_get_drvdata(pdev); + struct tb_nhi *nhi = tb->nhi; + int ret; + + ret = tb_domain_suspend_noirq(tb); + if (ret) + return ret; + + if (nhi->ops && nhi->ops->suspend_noirq) { + ret = nhi->ops->suspend_noirq(tb->nhi, wakeup); + if (ret) + return ret; + } + + return 0; +} + +static int nhi_suspend_noirq(struct device *dev) +{ + return __nhi_suspend_noirq(dev, device_may_wakeup(dev)); +} + +static int nhi_freeze_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct tb *tb = pci_get_drvdata(pdev); + + return tb_domain_freeze_noirq(tb); +} + +static int nhi_thaw_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct tb *tb = pci_get_drvdata(pdev); + + return tb_domain_thaw_noirq(tb); +} + +static bool nhi_wake_supported(struct pci_dev *pdev) +{ + u8 val; + + /* + * If power rails are sustainable for wakeup from S4 this + * property is set by the BIOS. + */ + if (device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val)) + return !!val; + + return true; +} + +static int nhi_poweroff_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + bool wakeup; + + wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev); + return __nhi_suspend_noirq(dev, wakeup); +} + +static void nhi_enable_int_throttling(struct tb_nhi *nhi) +{ + /* Throttling is specified in 256ns increments */ + u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256); + unsigned int i; + + /* + * Configure interrupt throttling for all vectors even if we + * only use few. 
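[Editorial note: for reference, the bit-scanning loop in nhi_interrupt_work() above walks three consecutive status bitfields. With an illustrative hop_count of 12, bits 0-11 are TX rings, 12-23 are RX rings and 24-35 are RX overflow, so bit 15 means "RX ring 3". A hedged closed-form equivalent of the ++hop/type bookkeeping:]

static void demo_decode_status_bit(unsigned int bit, unsigned int hop_count,
				   unsigned int *type, unsigned int *hop)
{
	*type = bit / hop_count;	/* 0 = TX, 1 = RX, 2 = RX overflow */
	*hop = bit % hop_count;		/* ring number within that group */
}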
+ */ + for (i = 0; i < MSIX_MAX_VECS; i++) { + u32 reg = REG_INT_THROTTLING_RATE + i * 4; + iowrite32(throttle, nhi->iobase + reg); + } +} + +static int nhi_resume_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct tb *tb = pci_get_drvdata(pdev); + struct tb_nhi *nhi = tb->nhi; + int ret; + + /* + * Check that the device is still there. It may be that the user + * unplugged last device which causes the host controller to go + * away on PCs. + */ + if (!pci_device_is_present(pdev)) { + nhi->going_away = true; + } else { + if (nhi->ops && nhi->ops->resume_noirq) { + ret = nhi->ops->resume_noirq(nhi); + if (ret) + return ret; + } + nhi_enable_int_throttling(tb->nhi); + } + + return tb_domain_resume_noirq(tb); +} + +static int nhi_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct tb *tb = pci_get_drvdata(pdev); + + return tb_domain_suspend(tb); +} + +static void nhi_complete(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct tb *tb = pci_get_drvdata(pdev); + + /* + * If we were runtime suspended when system suspend started, + * schedule runtime resume now. It should bring the domain back + * to functional state. + */ + if (pm_runtime_suspended(&pdev->dev)) + pm_runtime_resume(&pdev->dev); + else + tb_domain_complete(tb); +} + +static int nhi_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct tb *tb = pci_get_drvdata(pdev); + struct tb_nhi *nhi = tb->nhi; + int ret; + + ret = tb_domain_runtime_suspend(tb); + if (ret) + return ret; + + if (nhi->ops && nhi->ops->runtime_suspend) { + ret = nhi->ops->runtime_suspend(tb->nhi); + if (ret) + return ret; + } + return 0; +} + +static int nhi_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct tb *tb = pci_get_drvdata(pdev); + struct tb_nhi *nhi = tb->nhi; + int ret; + + if (nhi->ops && nhi->ops->runtime_resume) { + ret = nhi->ops->runtime_resume(nhi); + if (ret) + return ret; + } + + nhi_enable_int_throttling(nhi); + return tb_domain_runtime_resume(tb); +} + +static void nhi_shutdown(struct tb_nhi *nhi) +{ + int i; + + dev_dbg(&nhi->pdev->dev, "shutdown\n"); + + for (i = 0; i < nhi->hop_count; i++) { + if (nhi->tx_rings[i]) + dev_WARN(&nhi->pdev->dev, + "TX ring %d is still active\n", i); + if (nhi->rx_rings[i]) + dev_WARN(&nhi->pdev->dev, + "RX ring %d is still active\n", i); + } + nhi_disable_interrupts(nhi); + /* + * We have to release the irq before calling flush_work. Otherwise an + * already executing IRQ handler could call schedule_work again. + */ + if (!nhi->pdev->msix_enabled) { + devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi); + flush_work(&nhi->interrupt_work); + } + ida_destroy(&nhi->msix_ida); + + if (nhi->ops && nhi->ops->shutdown) + nhi->ops->shutdown(nhi); +} + +static void nhi_check_quirks(struct tb_nhi *nhi) +{ + if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) { + /* + * Intel hardware supports auto clear of the interrupt + * status register right after interrupt is being + * issued. + */ + nhi->quirks |= QUIRK_AUTO_CLEAR_INT; + + switch (nhi->pdev->device) { + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: + /* + * Falcon Ridge controller needs the end-to-end + * flow control workaround to avoid losing Rx + * packets when RING_FLAG_E2E is set. 
+ */ + nhi->quirks |= QUIRK_E2E; + break; + } + } +} + +static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data) +{ + if (!pdev->external_facing || + !device_iommu_capable(&pdev->dev, IOMMU_CAP_PRE_BOOT_PROTECTION)) + return 0; + *(bool *)data = true; + return 1; /* Stop walking */ +} + +static void nhi_check_iommu(struct tb_nhi *nhi) +{ + struct pci_bus *bus = nhi->pdev->bus; + bool port_ok = false; + + /* + * Ideally what we'd do here is grab every PCI device that + * represents a tunnelling adapter for this NHI and check their + * status directly, but unfortunately USB4 seems to make it + * obnoxiously difficult to reliably make any correlation. + * + * So for now we'll have to bodge it... Hoping that the system + * is at least sane enough that an adapter is in the same PCI + * segment as its NHI, if we can find *something* on that segment + * which meets the requirements for Kernel DMA Protection, we'll + * take that to imply that firmware is aware and has (hopefully) + * done the right thing in general. We need to know that the PCI + * layer has seen the ExternalFacingPort property which will then + * inform the IOMMU layer to enforce the complete "untrusted DMA" + * flow, but also that the IOMMU driver itself can be trusted not + * to have been subverted by a pre-boot DMA attack. + */ + while (bus->parent) + bus = bus->parent; + + pci_walk_bus(bus, nhi_check_iommu_pdev, &port_ok); + + nhi->iommu_dma_protection = port_ok; + dev_dbg(&nhi->pdev->dev, "IOMMU DMA protection is %s\n", + str_enabled_disabled(port_ok)); +} + +static void nhi_reset(struct tb_nhi *nhi) +{ + ktime_t timeout; + u32 val; + + val = ioread32(nhi->iobase + REG_CAPS); + /* Reset only v2 and later routers */ + if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2) + return; + + if (!host_reset) { + dev_dbg(&nhi->pdev->dev, "skipping host router reset\n"); + return; + } + + iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET); + msleep(100); + + timeout = ktime_add_ms(ktime_get(), 500); + do { + val = ioread32(nhi->iobase + REG_RESET); + if (!(val & REG_RESET_HRR)) { + dev_warn(&nhi->pdev->dev, "host router reset successful\n"); + return; + } + usleep_range(10, 20); + } while (ktime_before(ktime_get(), timeout)); + + dev_warn(&nhi->pdev->dev, "timeout resetting host router\n"); +} + +static int nhi_init_msi(struct tb_nhi *nhi) +{ + struct pci_dev *pdev = nhi->pdev; + struct device *dev = &pdev->dev; + int res, irq, nvec; + + /* In case someone left them on. */ + nhi_disable_interrupts(nhi); + + nhi_enable_int_throttling(nhi); + + ida_init(&nhi->msix_ida); + + /* + * The NHI has 16 MSI-X vectors or a single MSI. We first try to + * get all MSI-X vectors and if we succeed, each ring will have + * one MSI-X. If for some reason that does not work out, we + * fallback to a single MSI. 
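[Editorial note: both nhi_mailbox_cmd() earlier and nhi_reset() above poll a register until a bit clears, bounded by a ktime-based deadline. A hedged, generic sketch of that polling pattern; demo_read_reg() stands in for the ioread32() of whichever register is being polled.]

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>
#include <linux/types.h>

static int demo_poll_bit_clear(u32 (*demo_read_reg)(void *ctx), void *ctx,
			       u32 bit, unsigned int timeout_ms)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_ms);

	do {
		if (!(demo_read_reg(ctx) & bit))
			return 0;		/* bit cleared in time */
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}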
+ */ + nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS, + PCI_IRQ_MSIX); + if (nvec < 0) { + nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (nvec < 0) + return nvec; + + INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work); + + irq = pci_irq_vector(nhi->pdev, 0); + if (irq < 0) + return irq; + + res = devm_request_irq(&pdev->dev, irq, nhi_msi, + IRQF_NO_SUSPEND, "thunderbolt", nhi); + if (res) + return dev_err_probe(dev, res, "request_irq failed, aborting\n"); + } + + return 0; +} + +static bool nhi_imr_valid(struct pci_dev *pdev) +{ + u8 val; + + if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val)) + return !!val; + + return true; +} + +static struct tb *nhi_select_cm(struct tb_nhi *nhi) +{ + struct tb *tb; + + /* + * USB4 case is simple. If we got control of any of the + * capabilities, we use software CM. + */ + if (tb_acpi_is_native()) + return tb_probe(nhi); + + /* + * Either firmware based CM is running (we did not get control + * from the firmware) or this is pre-USB4 PC so try first + * firmware CM and then fallback to software CM. + */ + tb = icm_probe(nhi); + if (!tb) + tb = tb_probe(nhi); + + return tb; +} + +static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct tb_nhi *nhi; + struct tb *tb; + int res; + + if (!nhi_imr_valid(pdev)) + return dev_err_probe(dev, -ENODEV, "firmware image not valid, aborting\n"); + + res = pcim_enable_device(pdev); + if (res) + return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n"); + + res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt"); + if (res) + return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n"); + + nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL); + if (!nhi) + return -ENOMEM; + + nhi->pdev = pdev; + nhi->ops = (const struct tb_nhi_ops *)id->driver_data; + /* cannot fail - table is allocated in pcim_iomap_regions */ + nhi->iobase = pcim_iomap_table(pdev)[0]; + nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff; + dev_dbg(dev, "total paths: %d\n", nhi->hop_count); + + nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, + sizeof(*nhi->tx_rings), GFP_KERNEL); + nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, + sizeof(*nhi->rx_rings), GFP_KERNEL); + if (!nhi->tx_rings || !nhi->rx_rings) + return -ENOMEM; + + nhi_check_quirks(nhi); + nhi_check_iommu(nhi); + + nhi_reset(nhi); + + res = nhi_init_msi(nhi); + if (res) + return dev_err_probe(dev, res, "cannot enable MSI, aborting\n"); + + spin_lock_init(&nhi->lock); + + res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (res) + return dev_err_probe(dev, res, "failed to set DMA mask\n"); + + pci_set_master(pdev); + + if (nhi->ops && nhi->ops->init) { + res = nhi->ops->init(nhi); + if (res) + return res; + } + + tb = nhi_select_cm(nhi); + if (!tb) + return dev_err_probe(dev, -ENODEV, + "failed to determine connection manager, aborting\n"); + + dev_dbg(dev, "NHI initialized, starting thunderbolt\n"); + + res = tb_domain_add(tb); + if (res) { + /* + * At this point the RX/TX rings might already have been + * activated. Do a proper shutdown. 
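+ * tb_domain_put() releases the domain allocated in nhi_select_cm() + * and nhi_shutdown() disables interrupts and releases the MSI + * resources.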
+ */ + tb_domain_put(tb); + nhi_shutdown(nhi); + return res; + } + pci_set_drvdata(pdev, tb); + + device_wakeup_enable(&pdev->dev); + + pm_runtime_allow(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + + return 0; +} + +static void nhi_remove(struct pci_dev *pdev) +{ + struct tb *tb = pci_get_drvdata(pdev); + struct tb_nhi *nhi = tb->nhi; + + pm_runtime_get_sync(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_forbid(&pdev->dev); + + tb_domain_remove(tb); + nhi_shutdown(nhi); +} + +/* + * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable + * the tunnels asap. A corresponding pci quirk blocks the downstream bridges + * resume_noirq until we are done. + */ +static const struct dev_pm_ops nhi_pm_ops = { + .suspend_noirq = nhi_suspend_noirq, + .resume_noirq = nhi_resume_noirq, + .freeze_noirq = nhi_freeze_noirq, /* + * we just disable hotplug, the + * pci-tunnels stay alive. + */ + .thaw_noirq = nhi_thaw_noirq, + .restore_noirq = nhi_resume_noirq, + .suspend = nhi_suspend, + .poweroff_noirq = nhi_poweroff_noirq, + .poweroff = nhi_suspend, + .complete = nhi_complete, + .runtime_suspend = nhi_runtime_suspend, + .runtime_resume = nhi_runtime_resume, +}; + +static struct pci_device_id nhi_ids[] = { + /* + * We have to specify class, the TB bridges use the same device and + * vendor (sub)id on gen 1 and gen 2 controllers. + */ + { + .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, + .subvendor = 0x2222, .subdevice = 0x1111, + }, + { + .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, + .subvendor = 0x2222, .subdevice = 0x1111, + }, + { + .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI, + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, + }, + { + .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI, + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, + }, + + /* Thunderbolt 3 */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + /* Thunderbolt 4 */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, 
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_M_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) }, + + /* Any USB4 compliant host */ + { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) }, + + { 0,} +}; + +MODULE_DEVICE_TABLE(pci, nhi_ids); +MODULE_DESCRIPTION("Thunderbolt/USB4 core driver"); +MODULE_LICENSE("GPL"); + +static struct pci_driver nhi_driver = { + .name = "thunderbolt", + .id_table = nhi_ids, + .probe = nhi_probe, + .remove = nhi_remove, + .shutdown = nhi_remove, + .driver.pm = &nhi_pm_ops, +}; + +static int __init nhi_init(void) +{ + int ret; + + ret = tb_domain_init(); + if (ret) + return ret; + ret = pci_register_driver(&nhi_driver); + if (ret) + tb_domain_exit(); + return ret; +} + +static void __exit nhi_unload(void) +{ + pci_unregister_driver(&nhi_driver); + tb_domain_exit(); +} + +rootfs_initcall(nhi_init); +module_exit(nhi_unload); diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h new file mode 100644 index 0000000000..0f029ce758 --- /dev/null +++ b/drivers/thunderbolt/nhi.h @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Thunderbolt driver - NHI driver + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2018, Intel Corporation + */ + +#ifndef DSL3510_H_ +#define DSL3510_H_ + +#include <linux/thunderbolt.h> + +enum nhi_fw_mode { + NHI_FW_SAFE_MODE, + NHI_FW_AUTH_MODE, + NHI_FW_EP_MODE, + NHI_FW_CM_MODE, +}; + +enum nhi_mailbox_cmd { + NHI_MAILBOX_SAVE_DEVS = 0x05, + NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06, + NHI_MAILBOX_DRV_UNLOADS = 0x07, + NHI_MAILBOX_DISCONNECT_PA = 0x10, + NHI_MAILBOX_DISCONNECT_PB = 0x11, + NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23, +}; + +int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data); +enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi); + +/** + * struct tb_nhi_ops - NHI specific optional operations + * @init: NHI specific initialization + * @suspend_noirq: NHI specific suspend_noirq hook + * @resume_noirq: NHI specific resume_noirq hook + * @runtime_suspend: NHI specific runtime_suspend hook + * @runtime_resume: NHI specific runtime_resume hook + * @shutdown: NHI specific shutdown + */ +struct tb_nhi_ops { + int (*init)(struct tb_nhi *nhi); + int (*suspend_noirq)(struct tb_nhi *nhi, bool wakeup); + int (*resume_noirq)(struct tb_nhi *nhi); + int (*runtime_suspend)(struct tb_nhi *nhi); + int (*runtime_resume)(struct tb_nhi *nhi); + void (*shutdown)(struct tb_nhi *nhi); +}; + +extern const struct tb_nhi_ops icl_nhi_ops; + +/* + * PCI IDs used in this driver from Win Ridge forward. There is no + * need for the PCI quirk anymore as we will use ICM also on Apple + * hardware. 
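+ * IDs for the older controllers (Light Ridge, Cactus Ridge, Falcon + * Ridge and friends) are defined in include/linux/pci_ids.h.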
+ */ +#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134 +#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137 +#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d +#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI 0x15bf +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE 0x15c0 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI 0x15d2 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE 0x15d3 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI 0x15d9 +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE 0x15da +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI 0x15dc +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI 0x15dd +#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI 0x15de +#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE 0x15e7 +#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI 0x15e8 +#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE 0x15ea +#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI 0x15eb +#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef +#define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e +#define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d +#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI 0x5781 +#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI 0x5784 +#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE 0x5786 +#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE 0x57a4 +#define PCI_DEVICE_ID_INTEL_MTL_M_NHI0 0x7eb2 +#define PCI_DEVICE_ID_INTEL_MTL_P_NHI0 0x7ec2 +#define PCI_DEVICE_ID_INTEL_MTL_P_NHI1 0x7ec3 +#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d +#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17 +#define PCI_DEVICE_ID_INTEL_TGL_NHI0 0x9a1b +#define PCI_DEVICE_ID_INTEL_TGL_NHI1 0x9a1d +#define PCI_DEVICE_ID_INTEL_TGL_H_NHI0 0x9a1f +#define PCI_DEVICE_ID_INTEL_TGL_H_NHI1 0x9a21 +#define PCI_DEVICE_ID_INTEL_RPL_NHI0 0xa73e +#define PCI_DEVICE_ID_INTEL_RPL_NHI1 0xa76d + +#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340 + +#endif diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c new file mode 100644 index 0000000000..96da07e88c --- /dev/null +++ b/drivers/thunderbolt/nhi_ops.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NHI specific operations + * + * Copyright (C) 2019, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/delay.h> +#include <linux/suspend.h> + +#include "nhi.h" +#include "nhi_regs.h" +#include "tb.h" + +/* Ice Lake specific NHI operations */ + +#define ICL_LC_MAILBOX_TIMEOUT 500 /* ms */ + +static int check_for_device(struct device *dev, void *data) +{ + return tb_is_switch(dev); +} + +static bool icl_nhi_is_device_connected(struct tb_nhi *nhi) +{ + struct tb *tb = pci_get_drvdata(nhi->pdev); + int ret; + + ret = device_for_each_child(&tb->root_switch->dev, NULL, + check_for_device); + return ret > 0; +} + +static int icl_nhi_force_power(struct tb_nhi *nhi, bool power) +{ + u32 vs_cap; + + /* + * The Thunderbolt host controller is present always in Ice Lake + * but the firmware may not be loaded and running (depending + * whether there is device connected and so on). Each time the + * controller is used we need to "Force Power" it first and wait + * for the firmware to indicate it is up and running. This "Force + * Power" is really not about actually powering on/off the + * controller so it is accessible even if "Force Power" is off. + * + * The actual power management happens inside shared ACPI power + * resources using standard ACPI methods. 
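+ * Concretely, powering on programs a DMA delay of 0x22, sets + * VS_CAP_22_FORCE_POWER and then polls VS_CAP_9_FW_READY for up to + * roughly a second (350 iterations of ~3 ms) before giving up with + * -ETIMEDOUT.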
+ */ + pci_read_config_dword(nhi->pdev, VS_CAP_22, &vs_cap); + if (power) { + vs_cap &= ~VS_CAP_22_DMA_DELAY_MASK; + vs_cap |= 0x22 << VS_CAP_22_DMA_DELAY_SHIFT; + vs_cap |= VS_CAP_22_FORCE_POWER; + } else { + vs_cap &= ~VS_CAP_22_FORCE_POWER; + } + pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap); + + if (power) { + unsigned int retries = 350; + u32 val; + + /* Wait until the firmware tells it is up and running */ + do { + pci_read_config_dword(nhi->pdev, VS_CAP_9, &val); + if (val & VS_CAP_9_FW_READY) + return 0; + usleep_range(3000, 3100); + } while (--retries); + + return -ETIMEDOUT; + } + + return 0; +} + +static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd cmd) +{ + u32 data; + + data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK; + pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID); +} + +static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout) +{ + unsigned long end; + u32 data; + + if (!timeout) + goto clear; + + end = jiffies + msecs_to_jiffies(timeout); + do { + pci_read_config_dword(nhi->pdev, VS_CAP_18, &data); + if (data & VS_CAP_18_DONE) + goto clear; + usleep_range(1000, 1100); + } while (time_before(jiffies, end)); + + return -ETIMEDOUT; + +clear: + /* Clear the valid bit */ + pci_write_config_dword(nhi->pdev, VS_CAP_19, 0); + return 0; +} + +static void icl_nhi_set_ltr(struct tb_nhi *nhi) +{ + u32 max_ltr, ltr; + + pci_read_config_dword(nhi->pdev, VS_CAP_16, &max_ltr); + max_ltr &= 0xffff; + /* Program the same value for both snoop and no-snoop */ + ltr = max_ltr << 16 | max_ltr; + pci_write_config_dword(nhi->pdev, VS_CAP_15, ltr); +} + +static int icl_nhi_suspend(struct tb_nhi *nhi) +{ + struct tb *tb = pci_get_drvdata(nhi->pdev); + int ret; + + if (icl_nhi_is_device_connected(nhi)) + return 0; + + if (tb_switch_is_icm(tb->root_switch)) { + /* + * If there is no device connected we need to perform + * both: a handshake through LC mailbox and force power + * down before entering D3. + */ + icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET); + ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT); + if (ret) + return ret; + } + + return icl_nhi_force_power(nhi, false); +} + +static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup) +{ + struct tb *tb = pci_get_drvdata(nhi->pdev); + enum icl_lc_mailbox_cmd cmd; + + if (!pm_suspend_via_firmware()) + return icl_nhi_suspend(nhi); + + if (!tb_switch_is_icm(tb->root_switch)) + return 0; + + cmd = wakeup ? 
ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE; + icl_nhi_lc_mailbox_cmd(nhi, cmd); + return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT); +} + +static int icl_nhi_resume(struct tb_nhi *nhi) +{ + int ret; + + ret = icl_nhi_force_power(nhi, true); + if (ret) + return ret; + + icl_nhi_set_ltr(nhi); + return 0; +} + +static void icl_nhi_shutdown(struct tb_nhi *nhi) +{ + icl_nhi_force_power(nhi, false); +} + +const struct tb_nhi_ops icl_nhi_ops = { + .init = icl_nhi_resume, + .suspend_noirq = icl_nhi_suspend_noirq, + .resume_noirq = icl_nhi_resume, + .runtime_suspend = icl_nhi_suspend, + .runtime_resume = icl_nhi_resume, + .shutdown = icl_nhi_shutdown, +}; diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h new file mode 100644 index 0000000000..297a3e4406 --- /dev/null +++ b/drivers/thunderbolt/nhi_regs.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Thunderbolt driver - NHI registers + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2018, Intel Corporation + */ + +#ifndef NHI_REGS_H_ +#define NHI_REGS_H_ + +#include <linux/types.h> + +enum ring_flags { + RING_FLAG_ISOCH_ENABLE = 1 << 27, /* TX only? */ + RING_FLAG_E2E_FLOW_CONTROL = 1 << 28, + RING_FLAG_PCI_NO_SNOOP = 1 << 29, + RING_FLAG_RAW = 1 << 30, /* ignore EOF/SOF mask, include checksum */ + RING_FLAG_ENABLE = 1 << 31, +}; + +/** + * struct ring_desc - TX/RX ring entry + * + * For TX set length/eof/sof. + * For RX length/eof/sof are set by the NHI. + */ +struct ring_desc { + u64 phys; + u32 length:12; + u32 eof:4; + u32 sof:4; + enum ring_desc_flags flags:12; + u32 time; /* write zero */ +} __packed; + +/* NHI registers in bar 0 */ + +/* + * 16 bytes per entry, one entry for every hop (REG_CAPS) + * 00: physical pointer to an array of struct ring_desc + * 08: ring tail (set by NHI) + * 10: ring head (index of first non posted descriptor) + * 12: descriptor count + */ +#define REG_TX_RING_BASE 0x00000 + +/* + * 16 bytes per entry, one entry for every hop (REG_CAPS) + * 00: physical pointer to an array of struct ring_desc + * 08: ring head (index of first not posted descriptor) + * 10: ring tail (set by NHI) + * 12: descriptor count + * 14: max frame sizes (anything larger than 0x100 has no effect) + */ +#define REG_RX_RING_BASE 0x08000 + +/* + * 32 bytes per entry, one entry for every hop (REG_CAPS) + * 00: enum_ring_flags + * 04: isoch time stamp ?? (write 0) + * ..: unknown + */ +#define REG_TX_OPTIONS_BASE 0x19800 + +/* + * 32 bytes per entry, one entry for every hop (REG_CAPS) + * 00: enum ring_flags + * If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to + * the corresponding TX hop id. + * 04: EOF/SOF mask (ignored for RING_FLAG_RAW rings) + * ..: unknown + */ +#define REG_RX_OPTIONS_BASE 0x29800 +#define REG_RX_OPTIONS_E2E_HOP_MASK GENMASK(22, 12) +#define REG_RX_OPTIONS_E2E_HOP_SHIFT 12 + +/* + * three bitfields: tx, rx, rx overflow + * Every bitfield contains one bit for every hop (REG_CAPS). + * New interrupts are fired only after ALL registers have been + * read (even those containing only disabled rings). + */ +#define REG_RING_NOTIFY_BASE 0x37800 +#define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32) +#define REG_RING_INT_CLEAR 0x37808 + +/* + * two bitfields: rx, tx + * Both bitfields contains one bit for every hop (REG_CAPS). To + * enable/disable interrupts set/clear the corresponding bits. 
+ */ +#define REG_RING_INTERRUPT_BASE 0x38200 +#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32) + +#define REG_RING_INTERRUPT_MASK_CLEAR_BASE 0x38208 + +#define REG_INT_THROTTLING_RATE 0x38c00 + +/* Interrupt Vector Allocation */ +#define REG_INT_VEC_ALLOC_BASE 0x38c40 +#define REG_INT_VEC_ALLOC_BITS 4 +#define REG_INT_VEC_ALLOC_MASK GENMASK(3, 0) +#define REG_INT_VEC_ALLOC_REGS (32 / REG_INT_VEC_ALLOC_BITS) + +/* The last 11 bits contain the number of hops supported by the NHI port. */ +#define REG_CAPS 0x39640 +#define REG_CAPS_VERSION_MASK GENMASK(23, 16) +#define REG_CAPS_VERSION_2 0x40 + +#define REG_DMA_MISC 0x39864 +#define REG_DMA_MISC_INT_AUTO_CLEAR BIT(2) +#define REG_DMA_MISC_DISABLE_AUTO_CLEAR BIT(17) + +#define REG_RESET 0x39898 +#define REG_RESET_HRR BIT(0) + +#define REG_INMAIL_DATA 0x39900 + +#define REG_INMAIL_CMD 0x39904 +#define REG_INMAIL_CMD_MASK GENMASK(7, 0) +#define REG_INMAIL_ERROR BIT(30) +#define REG_INMAIL_OP_REQUEST BIT(31) + +#define REG_OUTMAIL_CMD 0x3990c +#define REG_OUTMAIL_CMD_OPMODE_SHIFT 8 +#define REG_OUTMAIL_CMD_OPMODE_MASK GENMASK(11, 8) + +#define REG_FW_STS 0x39944 +#define REG_FW_STS_NVM_AUTH_DONE BIT(31) +#define REG_FW_STS_CIO_RESET_REQ BIT(30) +#define REG_FW_STS_ICM_EN_CPU BIT(2) +#define REG_FW_STS_ICM_EN_INVERT BIT(1) +#define REG_FW_STS_ICM_EN BIT(0) + +/* ICL NHI VSEC registers */ + +/* FW ready */ +#define VS_CAP_9 0xc8 +#define VS_CAP_9_FW_READY BIT(31) +/* UUID */ +#define VS_CAP_10 0xcc +#define VS_CAP_11 0xd0 +/* LTR */ +#define VS_CAP_15 0xe0 +#define VS_CAP_16 0xe4 +/* TBT2PCIe */ +#define VS_CAP_18 0xec +#define VS_CAP_18_DONE BIT(0) +/* PCIe2TBT */ +#define VS_CAP_19 0xf0 +#define VS_CAP_19_VALID BIT(0) +#define VS_CAP_19_CMD_SHIFT 1 +#define VS_CAP_19_CMD_MASK GENMASK(7, 1) +/* Force power */ +#define VS_CAP_22 0xfc +#define VS_CAP_22_FORCE_POWER BIT(1) +#define VS_CAP_22_DMA_DELAY_MASK GENMASK(31, 24) +#define VS_CAP_22_DMA_DELAY_SHIFT 24 + +/** + * enum icl_lc_mailbox_cmd - ICL specific LC mailbox commands + * @ICL_LC_GO2SX: Ask LC to enter Sx without wake + * @ICL_LC_GO2SX_NO_WAKE: Ask LC to enter Sx with wake + * @ICL_LC_PREPARE_FOR_RESET: Prepare LC for reset + */ +enum icl_lc_mailbox_cmd { + ICL_LC_GO2SX = 0x02, + ICL_LC_GO2SX_NO_WAKE = 0x03, + ICL_LC_PREPARE_FOR_RESET = 0x21, +}; + +#endif diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c new file mode 100644 index 0000000000..69fb3b0fa3 --- /dev/null +++ b/drivers/thunderbolt/nvm.c @@ -0,0 +1,634 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVM helpers + * + * Copyright (C) 2020, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/idr.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#include "tb.h" + +#define NVM_MIN_SIZE SZ_32K +#define NVM_MAX_SIZE SZ_1M +#define NVM_DATA_DWORDS 16 + +/* Intel specific NVM offsets */ +#define INTEL_NVM_DEVID 0x05 +#define INTEL_NVM_VERSION 0x08 +#define INTEL_NVM_CSS 0x10 +#define INTEL_NVM_FLASH_SIZE 0x45 + +/* ASMedia specific NVM offsets */ +#define ASMEDIA_NVM_DATE 0x1c +#define ASMEDIA_NVM_VERSION 0x28 + +static DEFINE_IDA(nvm_ida); + +/** + * struct tb_nvm_vendor_ops - Vendor specific NVM operations + * @read_version: Reads out NVM version from the flash + * @validate: Validates the NVM image before update (optional) + * @write_headers: Writes headers before the rest of the image (optional) + */ +struct tb_nvm_vendor_ops { + int (*read_version)(struct tb_nvm *nvm); + int (*validate)(struct tb_nvm *nvm); + int 
(*write_headers)(struct tb_nvm *nvm); +}; + +/** + * struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping + * @vendor: Vendor ID + * @vops: Vendor specific NVM operations + * + * Maps vendor ID to NVM vendor operations. If there is no mapping then + * NVM firmware upgrade is disabled for the device. + */ +struct tb_nvm_vendor { + u16 vendor; + const struct tb_nvm_vendor_ops *vops; +}; + +static int intel_switch_nvm_version(struct tb_nvm *nvm) +{ + struct tb_switch *sw = tb_to_switch(nvm->dev); + u32 val, nvm_size, hdr_size; + int ret; + + /* + * If the switch is in safe-mode the only accessible portion of + * the NVM is the non-active one where userspace is expected to + * write new functional NVM. + */ + if (sw->safe_mode) + return 0; + + ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val)); + if (ret) + return ret; + + hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K; + nvm_size = (SZ_1M << (val & 7)) / 8; + nvm_size = (nvm_size - hdr_size) / 2; + + ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val)); + if (ret) + return ret; + + nvm->major = (val >> 16) & 0xff; + nvm->minor = (val >> 8) & 0xff; + nvm->active_size = nvm_size; + + return 0; +} + +static int intel_switch_nvm_validate(struct tb_nvm *nvm) +{ + struct tb_switch *sw = tb_to_switch(nvm->dev); + unsigned int image_size, hdr_size; + u16 ds_size, device_id; + u8 *buf = nvm->buf; + + image_size = nvm->buf_data_size; + + /* + * FARB pointer must point inside the image and must at least + * contain parts of the digital section we will be reading here. + */ + hdr_size = (*(u32 *)buf) & 0xffffff; + if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size) + return -EINVAL; + + /* Digital section start should be aligned to 4k page */ + if (!IS_ALIGNED(hdr_size, SZ_4K)) + return -EINVAL; + + /* + * Read digital section size and check that it also fits inside + * the image. + */ + ds_size = *(u16 *)(buf + hdr_size); + if (ds_size >= image_size) + return -EINVAL; + + if (sw->safe_mode) + return 0; + + /* + * Make sure the device ID in the image matches the one + * we read from the switch config space. 
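+ * The ID is a 16-bit value at offset INTEL_NVM_DEVID (0x05) from + * the start of the digital section.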
+ */ + device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID); + if (device_id != sw->config.device_id) + return -EINVAL; + + /* Skip headers in the image */ + nvm->buf_data_start = buf + hdr_size; + nvm->buf_data_size = image_size - hdr_size; + + return 0; +} + +static int intel_switch_nvm_write_headers(struct tb_nvm *nvm) +{ + struct tb_switch *sw = tb_to_switch(nvm->dev); + + if (sw->generation < 3) { + int ret; + + /* Write CSS headers first */ + ret = dma_port_flash_write(sw->dma_port, + DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS, + DMA_PORT_CSS_MAX_SIZE); + if (ret) + return ret; + } + + return 0; +} + +static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = { + .read_version = intel_switch_nvm_version, + .validate = intel_switch_nvm_validate, + .write_headers = intel_switch_nvm_write_headers, +}; + +static int asmedia_switch_nvm_version(struct tb_nvm *nvm) +{ + struct tb_switch *sw = tb_to_switch(nvm->dev); + u32 val; + int ret; + + ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_VERSION, &val, sizeof(val)); + if (ret) + return ret; + + nvm->major = (val << 16) & 0xff0000; + nvm->major |= val & 0x00ff00; + nvm->major |= (val >> 16) & 0x0000ff; + + ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_DATE, &val, sizeof(val)); + if (ret) + return ret; + + nvm->minor = (val << 16) & 0xff0000; + nvm->minor |= val & 0x00ff00; + nvm->minor |= (val >> 16) & 0x0000ff; + + /* ASMedia NVM size is fixed to 512k */ + nvm->active_size = SZ_512K; + + return 0; +} + +static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = { + .read_version = asmedia_switch_nvm_version, +}; + +/* Router vendor NVM support table */ +static const struct tb_nvm_vendor switch_nvm_vendors[] = { + { 0x174c, &asmedia_switch_nvm_ops }, + { PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops }, + { 0x8087, &intel_switch_nvm_ops }, +}; + +static int intel_retimer_nvm_version(struct tb_nvm *nvm) +{ + struct tb_retimer *rt = tb_to_retimer(nvm->dev); + u32 val, nvm_size; + int ret; + + ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val)); + if (ret) + return ret; + + nvm->major = (val >> 16) & 0xff; + nvm->minor = (val >> 8) & 0xff; + + ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val)); + if (ret) + return ret; + + nvm_size = (SZ_1M << (val & 7)) / 8; + nvm_size = (nvm_size - SZ_16K) / 2; + nvm->active_size = nvm_size; + + return 0; +} + +static int intel_retimer_nvm_validate(struct tb_nvm *nvm) +{ + struct tb_retimer *rt = tb_to_retimer(nvm->dev); + unsigned int image_size, hdr_size; + u8 *buf = nvm->buf; + u16 ds_size, device; + + image_size = nvm->buf_data_size; + + /* + * FARB pointer must point inside the image and must at least + * contain parts of the digital section we will be reading here. + */ + hdr_size = (*(u32 *)buf) & 0xffffff; + if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size) + return -EINVAL; + + /* Digital section start should be aligned to 4k page */ + if (!IS_ALIGNED(hdr_size, SZ_4K)) + return -EINVAL; + + /* + * Read digital section size and check that it also fits inside + * the image. + */ + ds_size = *(u16 *)(buf + hdr_size); + if (ds_size >= image_size) + return -EINVAL; + + /* + * Make sure the device ID in the image matches the retimer + * hardware. 
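+ * rt->device holds the device ID that was read out from the + * retimer when it was enumerated.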
+ */ + device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID); + if (device != rt->device) + return -EINVAL; + + /* Skip headers in the image */ + nvm->buf_data_start = buf + hdr_size; + nvm->buf_data_size = image_size - hdr_size; + + return 0; +} + +static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = { + .read_version = intel_retimer_nvm_version, + .validate = intel_retimer_nvm_validate, +}; + +/* Retimer vendor NVM support table */ +static const struct tb_nvm_vendor retimer_nvm_vendors[] = { + { 0x8087, &intel_retimer_nvm_ops }, +}; + +/** + * tb_nvm_alloc() - Allocate new NVM structure + * @dev: Device owning the NVM + * + * Allocates new NVM structure with unique @id and returns it. In case + * of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the + * NVM format of the @dev is not known by the kernel. + */ +struct tb_nvm *tb_nvm_alloc(struct device *dev) +{ + const struct tb_nvm_vendor_ops *vops = NULL; + struct tb_nvm *nvm; + int ret, i; + + if (tb_is_switch(dev)) { + const struct tb_switch *sw = tb_to_switch(dev); + + for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) { + const struct tb_nvm_vendor *v = &switch_nvm_vendors[i]; + + if (v->vendor == sw->config.vendor_id) { + vops = v->vops; + break; + } + } + + if (!vops) { + tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n", + sw->config.vendor_id); + return ERR_PTR(-EOPNOTSUPP); + } + } else if (tb_is_retimer(dev)) { + const struct tb_retimer *rt = tb_to_retimer(dev); + + for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) { + const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i]; + + if (v->vendor == rt->vendor) { + vops = v->vops; + break; + } + } + + if (!vops) { + dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n", + rt->vendor); + return ERR_PTR(-EOPNOTSUPP); + } + } else { + return ERR_PTR(-EOPNOTSUPP); + } + + nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); + if (!nvm) + return ERR_PTR(-ENOMEM); + + ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL); + if (ret < 0) { + kfree(nvm); + return ERR_PTR(ret); + } + + nvm->id = ret; + nvm->dev = dev; + nvm->vops = vops; + + return nvm; +} + +/** + * tb_nvm_read_version() - Read and populate NVM version + * @nvm: NVM structure + * + * Uses vendor specific means to read out and fill in the existing + * active NVM version. Returns %0 in case of success and negative errno + * otherwise. + */ +int tb_nvm_read_version(struct tb_nvm *nvm) +{ + const struct tb_nvm_vendor_ops *vops = nvm->vops; + + if (vops && vops->read_version) + return vops->read_version(nvm); + + return -EOPNOTSUPP; +} + +/** + * tb_nvm_validate() - Validate new NVM image + * @nvm: NVM structure + * + * Runs vendor specific validation over the new NVM image and if all + * checks pass returns %0. As side effect updates @nvm->buf_data_start + * and @nvm->buf_data_size fields to match the actual data to be written + * to the NVM. + * + * If the validation does not pass then returns negative errno. + */ +int tb_nvm_validate(struct tb_nvm *nvm) +{ + const struct tb_nvm_vendor_ops *vops = nvm->vops; + unsigned int image_size; + u8 *buf = nvm->buf; + + if (!buf) + return -EINVAL; + if (!vops) + return -EOPNOTSUPP; + + /* Just do basic image size checks */ + image_size = nvm->buf_data_size; + if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE) + return -EINVAL; + + /* + * Set the default data start in the buffer. The validate method + * below can change this if needed. + */ + nvm->buf_data_start = buf; + + return vops->validate ? 
vops->validate(nvm) : 0; +} + +/** + * tb_nvm_write_headers() - Write headers before the rest of the image + * @nvm: NVM structure + * + * If the vendor NVM format requires writing headers before the rest of + * the image, this function does that. Can be called even if the device + * does not need this. + * + * Returns %0 in case of success and negative errno otherwise. + */ +int tb_nvm_write_headers(struct tb_nvm *nvm) +{ + const struct tb_nvm_vendor_ops *vops = nvm->vops; + + return vops->write_headers ? vops->write_headers(nvm) : 0; +} + +/** + * tb_nvm_add_active() - Adds active NVMem device to NVM + * @nvm: NVM structure + * @reg_read: Pointer to the function to read the NVM (passed directly to the + * NVMem device) + * + * Registers new active NVmem device for @nvm. The @reg_read is called + * directly from NVMem so it must handle possible concurrent access if + * needed. The first parameter passed to @reg_read is @nvm structure. + * Returns %0 in success and negative errno otherwise. + */ +int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read) +{ + struct nvmem_config config; + struct nvmem_device *nvmem; + + memset(&config, 0, sizeof(config)); + + config.name = "nvm_active"; + config.reg_read = reg_read; + config.read_only = true; + config.id = nvm->id; + config.stride = 4; + config.word_size = 4; + config.size = nvm->active_size; + config.dev = nvm->dev; + config.owner = THIS_MODULE; + config.priv = nvm; + + nvmem = nvmem_register(&config); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + nvm->active = nvmem; + return 0; +} + +/** + * tb_nvm_write_buf() - Write data to @nvm buffer + * @nvm: NVM structure + * @offset: Offset where to write the data + * @val: Data buffer to write + * @bytes: Number of bytes to write + * + * Helper function to cache the new NVM image before it is actually + * written to the flash. Copies @bytes from @val to @nvm->buf starting + * from @offset. + */ +int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val, + size_t bytes) +{ + if (!nvm->buf) { + nvm->buf = vmalloc(NVM_MAX_SIZE); + if (!nvm->buf) + return -ENOMEM; + } + + nvm->flushed = false; + nvm->buf_data_size = offset + bytes; + memcpy(nvm->buf + offset, val, bytes); + return 0; +} + +/** + * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM + * @nvm: NVM structure + * @reg_write: Pointer to the function to write the NVM (passed directly + * to the NVMem device) + * + * Registers new non-active NVmem device for @nvm. The @reg_write is called + * directly from NVMem so it must handle possible concurrent access if + * needed. The first parameter passed to @reg_write is @nvm structure. + * The size of the NVMem device is set to %NVM_MAX_SIZE. + * + * Returns %0 in success and negative errno otherwise. + */ +int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write) +{ + struct nvmem_config config; + struct nvmem_device *nvmem; + + memset(&config, 0, sizeof(config)); + + config.name = "nvm_non_active"; + config.reg_write = reg_write; + config.root_only = true; + config.id = nvm->id; + config.stride = 4; + config.word_size = 4; + config.size = NVM_MAX_SIZE; + config.dev = nvm->dev; + config.owner = THIS_MODULE; + config.priv = nvm; + + nvmem = nvmem_register(&config); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + nvm->non_active = nvmem; + return 0; +} + +/** + * tb_nvm_free() - Release NVM and its resources + * @nvm: NVM structure to release + * + * Releases NVM and the NVMem devices if they were registered. 
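+ * + * Passing %NULL @nvm is allowed and is a no-op.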
+ */ +void tb_nvm_free(struct tb_nvm *nvm) +{ + if (nvm) { + nvmem_unregister(nvm->non_active); + nvmem_unregister(nvm->active); + vfree(nvm->buf); + ida_simple_remove(&nvm_ida, nvm->id); + } + kfree(nvm); +} + +/** + * tb_nvm_read_data() - Read data from NVM + * @address: Start address on the flash + * @buf: Buffer where the read data is copied + * @size: Size of the buffer in bytes + * @retries: Number of retries if block read fails + * @read_block: Function that reads block from the flash + * @read_block_data: Data passsed to @read_block + * + * This is a generic function that reads data from NVM or NVM like + * device. + * + * Returns %0 on success and negative errno otherwise. + */ +int tb_nvm_read_data(unsigned int address, void *buf, size_t size, + unsigned int retries, read_block_fn read_block, + void *read_block_data) +{ + do { + unsigned int dwaddress, dwords, offset; + u8 data[NVM_DATA_DWORDS * 4]; + size_t nbytes; + int ret; + + offset = address & 3; + nbytes = min_t(size_t, size + offset, NVM_DATA_DWORDS * 4); + + dwaddress = address / 4; + dwords = ALIGN(nbytes, 4) / 4; + + ret = read_block(read_block_data, dwaddress, data, dwords); + if (ret) { + if (ret != -ENODEV && retries--) + continue; + return ret; + } + + nbytes -= offset; + memcpy(buf, data + offset, nbytes); + + size -= nbytes; + address += nbytes; + buf += nbytes; + } while (size > 0); + + return 0; +} + +/** + * tb_nvm_write_data() - Write data to NVM + * @address: Start address on the flash + * @buf: Buffer where the data is copied from + * @size: Size of the buffer in bytes + * @retries: Number of retries if the block write fails + * @write_block: Function that writes block to the flash + * @write_block_data: Data passwd to @write_block + * + * This is generic function that writes data to NVM or NVM like device. + * + * Returns %0 on success and negative errno otherwise. 
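+ * + * The data is written in chunks of at most %NVM_DATA_DWORDS dwords. + * If @write_block returns %-ETIMEDOUT the block is retried up to + * @retries times before the write fails with %-EIO.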
+ */ +int tb_nvm_write_data(unsigned int address, const void *buf, size_t size, + unsigned int retries, write_block_fn write_block, + void *write_block_data) +{ + do { + unsigned int offset, dwaddress; + u8 data[NVM_DATA_DWORDS * 4]; + size_t nbytes; + int ret; + + offset = address & 3; + nbytes = min_t(u32, size + offset, NVM_DATA_DWORDS * 4); + + memcpy(data + offset, buf, nbytes); + + dwaddress = address / 4; + ret = write_block(write_block_data, dwaddress, data, nbytes / 4); + if (ret) { + if (ret == -ETIMEDOUT) { + if (retries--) + continue; + ret = -EIO; + } + return ret; + } + + size -= nbytes; + address += nbytes; + buf += nbytes; + } while (size > 0); + + return 0; +} + +void tb_nvm_exit(void) +{ + ida_destroy(&nvm_ida); +} diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c new file mode 100644 index 0000000000..ee03fd75a4 --- /dev/null +++ b/drivers/thunderbolt/path.c @@ -0,0 +1,614 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt driver - path/tunnel functionality + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2019, Intel Corporation + */ + +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/ktime.h> + +#include "tb.h" + +static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop *regs) +{ + const struct tb_port *port = hop->in_port; + + tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n", + hop->in_hop_index, regs->out_port, regs->next_hop); + tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n", + regs->weight, regs->priority, + regs->initial_credits, regs->drop_packages); + tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n", + regs->counter_enable, regs->counter); + tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n", + regs->ingress_fc, regs->egress_fc, + regs->ingress_shared_buffer, regs->egress_shared_buffer); + tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n", + regs->unknown1, regs->unknown2, regs->unknown3); +} + +static struct tb_port *tb_path_find_dst_port(struct tb_port *src, int src_hopid, + int dst_hopid) +{ + struct tb_port *port, *out_port = NULL; + struct tb_regs_hop hop; + struct tb_switch *sw; + int i, ret, hopid; + + hopid = src_hopid; + port = src; + + for (i = 0; port && i < TB_PATH_MAX_HOPS; i++) { + sw = port->sw; + + ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hopid, 2); + if (ret) { + tb_port_warn(port, "failed to read path at %d\n", hopid); + return NULL; + } + + if (!hop.enable) + return NULL; + + out_port = &sw->ports[hop.out_port]; + hopid = hop.next_hop; + port = out_port->remote; + } + + return out_port && hopid == dst_hopid ? out_port : NULL; +} + +static int tb_path_find_src_hopid(struct tb_port *src, + const struct tb_port *dst, int dst_hopid) +{ + struct tb_port *out; + int i; + + for (i = TB_PATH_MIN_HOPID; i <= src->config.max_in_hop_id; i++) { + out = tb_path_find_dst_port(src, i, dst_hopid); + if (out == dst) + return i; + } + + return 0; +} + +/** + * tb_path_discover() - Discover a path + * @src: First input port of a path + * @src_hopid: Starting HopID of a path (%-1 if don't care) + * @dst: Expected destination port of the path (%NULL if don't care) + * @dst_hopid: HopID to the @dst (%-1 if don't care) + * @last: Last port is filled here if not %NULL + * @name: Name of the path + * @alloc_hopid: Allocate HopIDs for the ports + * + * Follows a path starting from @src and @src_hopid to the last output + * port of the path. 
Allocates HopIDs for the visited ports (if + * @alloc_hopid is true). Call tb_path_free() to release the path and + * allocated HopIDs when the path is not needed anymore. + * + * Note function discovers also incomplete paths so caller should check + * that the @dst port is the expected one. If it is not, the path can be + * cleaned up by calling tb_path_deactivate() before tb_path_free(). + * + * Return: Discovered path on success, %NULL in case of failure + */ +struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, + struct tb_port *dst, int dst_hopid, + struct tb_port **last, const char *name, + bool alloc_hopid) +{ + struct tb_port *out_port; + struct tb_regs_hop hop; + struct tb_path *path; + struct tb_switch *sw; + struct tb_port *p; + size_t num_hops; + int ret, i, h; + + if (src_hopid < 0 && dst) { + /* + * For incomplete paths the intermediate HopID can be + * different from the one used by the protocol adapter + * so in that case find a path that ends on @dst with + * matching @dst_hopid. That should give us the correct + * HopID for the @src. + */ + src_hopid = tb_path_find_src_hopid(src, dst, dst_hopid); + if (!src_hopid) + return NULL; + } + + p = src; + h = src_hopid; + num_hops = 0; + + for (i = 0; p && i < TB_PATH_MAX_HOPS; i++) { + sw = p->sw; + + ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2); + if (ret) { + tb_port_warn(p, "failed to read path at %d\n", h); + return NULL; + } + + /* If the hop is not enabled we got an incomplete path */ + if (!hop.enable) + break; + + out_port = &sw->ports[hop.out_port]; + if (last) + *last = out_port; + + h = hop.next_hop; + p = out_port->remote; + num_hops++; + } + + path = kzalloc(sizeof(*path), GFP_KERNEL); + if (!path) + return NULL; + + path->name = name; + path->tb = src->sw->tb; + path->path_length = num_hops; + path->activated = true; + path->alloc_hopid = alloc_hopid; + + path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL); + if (!path->hops) { + kfree(path); + return NULL; + } + + tb_dbg(path->tb, "discovering %s path starting from %llx:%u\n", + path->name, tb_route(src->sw), src->port); + + p = src; + h = src_hopid; + + for (i = 0; i < num_hops; i++) { + int next_hop; + + sw = p->sw; + + ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2); + if (ret) { + tb_port_warn(p, "failed to read path at %d\n", h); + goto err; + } + + if (alloc_hopid && tb_port_alloc_in_hopid(p, h, h) < 0) + goto err; + + out_port = &sw->ports[hop.out_port]; + next_hop = hop.next_hop; + + if (alloc_hopid && + tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) { + tb_port_release_in_hopid(p, h); + goto err; + } + + path->hops[i].in_port = p; + path->hops[i].in_hop_index = h; + path->hops[i].in_counter_index = -1; + path->hops[i].out_port = out_port; + path->hops[i].next_hop_index = next_hop; + + tb_dump_hop(&path->hops[i], &hop); + + h = next_hop; + p = out_port->remote; + } + + tb_dbg(path->tb, "path discovery complete\n"); + return path; + +err: + tb_port_warn(src, "failed to discover path starting at HopID %d\n", + src_hopid); + tb_path_free(path); + return NULL; +} + +/** + * tb_path_alloc() - allocate a thunderbolt path between two ports + * @tb: Domain pointer + * @src: Source port of the path + * @src_hopid: HopID used for the first ingress port in the path + * @dst: Destination port of the path + * @dst_hopid: HopID used for the last egress port in the path + * @link_nr: Preferred link if there are dual links on the path + * @name: Name of the path + * + * Creates path between two ports starting with given 
@src_hopid. Reserves + * HopIDs for each port (they can be different from @src_hopid depending on + * how many HopIDs each port already have reserved). If there are dual + * links on the path, prioritizes using @link_nr but takes into account + * that the lanes may be bonded. + * + * Return: Returns a tb_path on success or NULL on failure. + */ +struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, + struct tb_port *dst, int dst_hopid, int link_nr, + const char *name) +{ + struct tb_port *in_port, *out_port, *first_port, *last_port; + int in_hopid, out_hopid; + struct tb_path *path; + size_t num_hops; + int i, ret; + + path = kzalloc(sizeof(*path), GFP_KERNEL); + if (!path) + return NULL; + + first_port = last_port = NULL; + i = 0; + tb_for_each_port_on_path(src, dst, in_port) { + if (!first_port) + first_port = in_port; + last_port = in_port; + i++; + } + + /* Check that src and dst are reachable */ + if (first_port != src || last_port != dst) { + kfree(path); + return NULL; + } + + /* Each hop takes two ports */ + num_hops = i / 2; + + path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL); + if (!path->hops) { + kfree(path); + return NULL; + } + + path->alloc_hopid = true; + + in_hopid = src_hopid; + out_port = NULL; + + for (i = 0; i < num_hops; i++) { + in_port = tb_next_port_on_path(src, dst, out_port); + if (!in_port) + goto err; + + /* When lanes are bonded primary link must be used */ + if (!in_port->bonded && in_port->dual_link_port && + in_port->link_nr != link_nr) + in_port = in_port->dual_link_port; + + ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid); + if (ret < 0) + goto err; + in_hopid = ret; + + out_port = tb_next_port_on_path(src, dst, in_port); + if (!out_port) + goto err; + + /* + * Pick up right port when going from non-bonded to + * bonded or from bonded to non-bonded. + */ + if (out_port->dual_link_port) { + if (!in_port->bonded && out_port->bonded && + out_port->link_nr) { + /* + * Use primary link when going from + * non-bonded to bonded. + */ + out_port = out_port->dual_link_port; + } else if (!out_port->bonded && + out_port->link_nr != link_nr) { + /* + * If out port is not bonded follow + * link_nr. + */ + out_port = out_port->dual_link_port; + } + } + + if (i == num_hops - 1) + ret = tb_port_alloc_out_hopid(out_port, dst_hopid, + dst_hopid); + else + ret = tb_port_alloc_out_hopid(out_port, -1, -1); + + if (ret < 0) + goto err; + out_hopid = ret; + + path->hops[i].in_hop_index = in_hopid; + path->hops[i].in_port = in_port; + path->hops[i].in_counter_index = -1; + path->hops[i].out_port = out_port; + path->hops[i].next_hop_index = out_hopid; + + in_hopid = out_hopid; + } + + path->tb = tb; + path->path_length = num_hops; + path->name = name; + + return path; + +err: + tb_path_free(path); + return NULL; +} + +/** + * tb_path_free() - free a path + * @path: Path to free + * + * Frees a path. The path does not need to be deactivated. 
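+ * If HopIDs were allocated for the path (@alloc_hopid was set) they + * are released here as well.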
+ */ +void tb_path_free(struct tb_path *path) +{ + if (path->alloc_hopid) { + int i; + + for (i = 0; i < path->path_length; i++) { + const struct tb_path_hop *hop = &path->hops[i]; + + if (hop->in_port) + tb_port_release_in_hopid(hop->in_port, + hop->in_hop_index); + if (hop->out_port) + tb_port_release_out_hopid(hop->out_port, + hop->next_hop_index); + } + } + + kfree(path->hops); + kfree(path); +} + +static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop) +{ + int i, res; + for (i = first_hop; i < path->path_length; i++) { + res = tb_port_add_nfc_credits(path->hops[i].in_port, + -path->hops[i].nfc_credits); + if (res) + tb_port_warn(path->hops[i].in_port, + "nfc credits deallocation failed for hop %d\n", + i); + } +} + +static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index, + bool clear_fc) +{ + struct tb_regs_hop hop; + ktime_t timeout; + int ret; + + /* Disable the path */ + ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2); + if (ret) + return ret; + + /* Already disabled */ + if (!hop.enable) + return 0; + + hop.enable = 0; + + ret = tb_port_write(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2); + if (ret) + return ret; + + /* Wait until it is drained */ + timeout = ktime_add_ms(ktime_get(), 500); + do { + ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2); + if (ret) + return ret; + + if (!hop.pending) { + if (clear_fc) { + /* + * Clear flow control. Protocol adapters + * IFC and ISE bits are vendor defined + * in the USB4 spec so we clear them + * only for pre-USB4 adapters. + */ + if (!tb_switch_is_usb4(port->sw)) { + hop.ingress_fc = 0; + hop.ingress_shared_buffer = 0; + } + hop.egress_fc = 0; + hop.egress_shared_buffer = 0; + + return tb_port_write(port, &hop, TB_CFG_HOPS, + 2 * hop_index, 2); + } + + return 0; + } + + usleep_range(10, 20); + } while (ktime_before(ktime_get(), timeout)); + + return -ETIMEDOUT; +} + +static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop) +{ + int i, res; + + for (i = first_hop; i < path->path_length; i++) { + res = __tb_path_deactivate_hop(path->hops[i].in_port, + path->hops[i].in_hop_index, + path->clear_fc); + if (res && res != -ENODEV) + tb_port_warn(path->hops[i].in_port, + "hop deactivation failed for hop %d, index %d\n", + i, path->hops[i].in_hop_index); + } +} + +void tb_path_deactivate(struct tb_path *path) +{ + if (!path->activated) { + tb_WARN(path->tb, "trying to deactivate an inactive path\n"); + return; + } + tb_dbg(path->tb, + "deactivating %s path from %llx:%u to %llx:%u\n", + path->name, tb_route(path->hops[0].in_port->sw), + path->hops[0].in_port->port, + tb_route(path->hops[path->path_length - 1].out_port->sw), + path->hops[path->path_length - 1].out_port->port); + __tb_path_deactivate_hops(path, 0); + __tb_path_deallocate_nfc(path, 0); + path->activated = false; +} + +/** + * tb_path_activate() - activate a path + * @path: Path to activate + * + * Activate a path starting with the last hop and iterating backwards. The + * caller must fill path->hops before calling tb_path_activate(). + * + * Return: Returns 0 on success or an error code on failure. 
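+ * + * If writing any hop fails, the hops programmed so far are + * deactivated and the NFC credits are released before the error is + * returned.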
+ */ +int tb_path_activate(struct tb_path *path) +{ + int i, res; + enum tb_path_port out_mask, in_mask; + if (path->activated) { + tb_WARN(path->tb, "trying to activate already activated path\n"); + return -EINVAL; + } + + tb_dbg(path->tb, + "activating %s path from %llx:%u to %llx:%u\n", + path->name, tb_route(path->hops[0].in_port->sw), + path->hops[0].in_port->port, + tb_route(path->hops[path->path_length - 1].out_port->sw), + path->hops[path->path_length - 1].out_port->port); + + /* Clear counters. */ + for (i = path->path_length - 1; i >= 0; i--) { + if (path->hops[i].in_counter_index == -1) + continue; + res = tb_port_clear_counter(path->hops[i].in_port, + path->hops[i].in_counter_index); + if (res) + goto err; + } + + /* Add non flow controlled credits. */ + for (i = path->path_length - 1; i >= 0; i--) { + res = tb_port_add_nfc_credits(path->hops[i].in_port, + path->hops[i].nfc_credits); + if (res) { + __tb_path_deallocate_nfc(path, i); + goto err; + } + } + + /* Activate hops. */ + for (i = path->path_length - 1; i >= 0; i--) { + struct tb_regs_hop hop = { 0 }; + + /* If it is left active deactivate it first */ + __tb_path_deactivate_hop(path->hops[i].in_port, + path->hops[i].in_hop_index, path->clear_fc); + + /* dword 0 */ + hop.next_hop = path->hops[i].next_hop_index; + hop.out_port = path->hops[i].out_port->port; + hop.initial_credits = path->hops[i].initial_credits; + hop.unknown1 = 0; + hop.enable = 1; + + /* dword 1 */ + out_mask = (i == path->path_length - 1) ? + TB_PATH_DESTINATION : TB_PATH_INTERNAL; + in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL; + hop.weight = path->weight; + hop.unknown2 = 0; + hop.priority = path->priority; + hop.drop_packages = path->drop_packages; + hop.counter = path->hops[i].in_counter_index; + hop.counter_enable = path->hops[i].in_counter_index != -1; + hop.ingress_fc = path->ingress_fc_enable & in_mask; + hop.egress_fc = path->egress_fc_enable & out_mask; + hop.ingress_shared_buffer = path->ingress_shared_buffer + & in_mask; + hop.egress_shared_buffer = path->egress_shared_buffer + & out_mask; + hop.unknown3 = 0; + + tb_port_dbg(path->hops[i].in_port, "Writing hop %d\n", i); + tb_dump_hop(&path->hops[i], &hop); + res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS, + 2 * path->hops[i].in_hop_index, 2); + if (res) { + __tb_path_deactivate_hops(path, i); + __tb_path_deallocate_nfc(path, 0); + goto err; + } + } + path->activated = true; + tb_dbg(path->tb, "path activation complete\n"); + return 0; +err: + tb_WARN(path->tb, "path activation failed\n"); + return res; +} + +/** + * tb_path_is_invalid() - check whether any ports on the path are invalid + * @path: Path to check + * + * Return: Returns true if the path is invalid, false otherwise. + */ +bool tb_path_is_invalid(struct tb_path *path) +{ + int i = 0; + for (i = 0; i < path->path_length; i++) { + if (path->hops[i].in_port->sw->is_unplugged) + return true; + if (path->hops[i].out_port->sw->is_unplugged) + return true; + } + return false; +} + +/** + * tb_path_port_on_path() - Does the path go through certain port + * @path: Path to check + * @port: Switch to check + * + * Goes over all hops on path and checks if @port is any of them. + * Direction does not matter. 
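+ * + * Return: %true if @port is part of @path, %false otherwise.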
+ */ +bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port) +{ + int i; + + for (i = 0; i < path->path_length; i++) { + if (path->hops[i].in_port == port || + path->hops[i].out_port == port) + return true; + } + + return false; +} diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c new file mode 100644 index 0000000000..dc555cda98 --- /dev/null +++ b/drivers/thunderbolt/property.c @@ -0,0 +1,752 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt XDomain property support + * + * Copyright (C) 2017, Intel Corporation + * Authors: Michael Jamet <michael.jamet@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/uuid.h> +#include <linux/thunderbolt.h> + +struct tb_property_entry { + u32 key_hi; + u32 key_lo; + u16 length; + u8 reserved; + u8 type; + u32 value; +}; + +struct tb_property_rootdir_entry { + u32 magic; + u32 length; + struct tb_property_entry entries[]; +}; + +struct tb_property_dir_entry { + u32 uuid[4]; + struct tb_property_entry entries[]; +}; + +#define TB_PROPERTY_ROOTDIR_MAGIC 0x55584401 + +static struct tb_property_dir *__tb_property_parse_dir(const u32 *block, + size_t block_len, unsigned int dir_offset, size_t dir_len, + bool is_root); + +static inline void parse_dwdata(void *dst, const void *src, size_t dwords) +{ + be32_to_cpu_array(dst, src, dwords); +} + +static inline void format_dwdata(void *dst, const void *src, size_t dwords) +{ + cpu_to_be32_array(dst, src, dwords); +} + +static bool tb_property_entry_valid(const struct tb_property_entry *entry, + size_t block_len) +{ + switch (entry->type) { + case TB_PROPERTY_TYPE_DIRECTORY: + case TB_PROPERTY_TYPE_DATA: + case TB_PROPERTY_TYPE_TEXT: + if (entry->length > block_len) + return false; + if (entry->value + entry->length > block_len) + return false; + break; + + case TB_PROPERTY_TYPE_VALUE: + if (entry->length != 1) + return false; + break; + } + + return true; +} + +static bool tb_property_key_valid(const char *key) +{ + return key && strlen(key) <= TB_PROPERTY_KEY_SIZE; +} + +static struct tb_property * +tb_property_alloc(const char *key, enum tb_property_type type) +{ + struct tb_property *property; + + property = kzalloc(sizeof(*property), GFP_KERNEL); + if (!property) + return NULL; + + strcpy(property->key, key); + property->type = type; + INIT_LIST_HEAD(&property->list); + + return property; +} + +static struct tb_property *tb_property_parse(const u32 *block, size_t block_len, + const struct tb_property_entry *entry) +{ + char key[TB_PROPERTY_KEY_SIZE + 1]; + struct tb_property *property; + struct tb_property_dir *dir; + + if (!tb_property_entry_valid(entry, block_len)) + return NULL; + + parse_dwdata(key, entry, 2); + key[TB_PROPERTY_KEY_SIZE] = '\0'; + + property = tb_property_alloc(key, entry->type); + if (!property) + return NULL; + + property->length = entry->length; + + switch (property->type) { + case TB_PROPERTY_TYPE_DIRECTORY: + dir = __tb_property_parse_dir(block, block_len, entry->value, + entry->length, false); + if (!dir) { + kfree(property); + return NULL; + } + property->value.dir = dir; + break; + + case TB_PROPERTY_TYPE_DATA: + property->value.data = kcalloc(property->length, sizeof(u32), + GFP_KERNEL); + if (!property->value.data) { + kfree(property); + return NULL; + } + parse_dwdata(property->value.data, block + entry->value, + entry->length); + break; + + case TB_PROPERTY_TYPE_TEXT: + property->value.text = 
kcalloc(property->length, sizeof(u32), + GFP_KERNEL); + if (!property->value.text) { + kfree(property); + return NULL; + } + parse_dwdata(property->value.text, block + entry->value, + entry->length); + /* Force null termination */ + property->value.text[property->length * 4 - 1] = '\0'; + break; + + case TB_PROPERTY_TYPE_VALUE: + property->value.immediate = entry->value; + break; + + default: + property->type = TB_PROPERTY_TYPE_UNKNOWN; + break; + } + + return property; +} + +static struct tb_property_dir *__tb_property_parse_dir(const u32 *block, + size_t block_len, unsigned int dir_offset, size_t dir_len, bool is_root) +{ + const struct tb_property_entry *entries; + size_t i, content_len, nentries; + unsigned int content_offset; + struct tb_property_dir *dir; + + dir = kzalloc(sizeof(*dir), GFP_KERNEL); + if (!dir) + return NULL; + + if (is_root) { + content_offset = dir_offset + 2; + content_len = dir_len; + } else { + dir->uuid = kmemdup(&block[dir_offset], sizeof(*dir->uuid), + GFP_KERNEL); + if (!dir->uuid) { + tb_property_free_dir(dir); + return NULL; + } + content_offset = dir_offset + 4; + content_len = dir_len - 4; /* Length includes UUID */ + } + + entries = (const struct tb_property_entry *)&block[content_offset]; + nentries = content_len / (sizeof(*entries) / 4); + + INIT_LIST_HEAD(&dir->properties); + + for (i = 0; i < nentries; i++) { + struct tb_property *property; + + property = tb_property_parse(block, block_len, &entries[i]); + if (!property) { + tb_property_free_dir(dir); + return NULL; + } + + list_add_tail(&property->list, &dir->properties); + } + + return dir; +} + +/** + * tb_property_parse_dir() - Parses properties from given property block + * @block: Property block to parse + * @block_len: Number of dword elements in the property block + * + * This function parses the XDomain properties data block into format that + * can be traversed using the helper functions provided by this module. + * Upon success returns the parsed directory. In case of error returns + * %NULL. The resulting &struct tb_property_dir needs to be released by + * calling tb_property_free_dir() when not needed anymore. + * + * The @block is expected to be root directory. + */ +struct tb_property_dir *tb_property_parse_dir(const u32 *block, + size_t block_len) +{ + const struct tb_property_rootdir_entry *rootdir = + (const struct tb_property_rootdir_entry *)block; + + if (rootdir->magic != TB_PROPERTY_ROOTDIR_MAGIC) + return NULL; + if (rootdir->length > block_len) + return NULL; + + return __tb_property_parse_dir(block, block_len, 0, rootdir->length, + true); +} + +/** + * tb_property_create_dir() - Creates new property directory + * @uuid: UUID used to identify the particular directory + * + * Creates new, empty property directory. If @uuid is %NULL then the + * directory is assumed to be root directory. 
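+ *
+ * A minimal sketch (illustrative only; the key and the uuid_t variable
+ * are made up for the example):
+ *
+ *	struct tb_property_dir *root, *child;
+ *
+ *	root = tb_property_create_dir(NULL);
+ *	child = tb_property_create_dir(&child_uuid);
+ *	if (root && child)
+ *		tb_property_add_dir(root, "child", child);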
+ */ +struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid) +{ + struct tb_property_dir *dir; + + dir = kzalloc(sizeof(*dir), GFP_KERNEL); + if (!dir) + return NULL; + + INIT_LIST_HEAD(&dir->properties); + if (uuid) { + dir->uuid = kmemdup(uuid, sizeof(*dir->uuid), GFP_KERNEL); + if (!dir->uuid) { + kfree(dir); + return NULL; + } + } + + return dir; +} +EXPORT_SYMBOL_GPL(tb_property_create_dir); + +static void tb_property_free(struct tb_property *property) +{ + switch (property->type) { + case TB_PROPERTY_TYPE_DIRECTORY: + tb_property_free_dir(property->value.dir); + break; + + case TB_PROPERTY_TYPE_DATA: + kfree(property->value.data); + break; + + case TB_PROPERTY_TYPE_TEXT: + kfree(property->value.text); + break; + + default: + break; + } + + kfree(property); +} + +/** + * tb_property_free_dir() - Release memory allocated for property directory + * @dir: Directory to release + * + * This will release all the memory the directory occupies including all + * descendants. It is OK to pass %NULL @dir, then the function does + * nothing. + */ +void tb_property_free_dir(struct tb_property_dir *dir) +{ + struct tb_property *property, *tmp; + + if (!dir) + return; + + list_for_each_entry_safe(property, tmp, &dir->properties, list) { + list_del(&property->list); + tb_property_free(property); + } + kfree(dir->uuid); + kfree(dir); +} +EXPORT_SYMBOL_GPL(tb_property_free_dir); + +static size_t tb_property_dir_length(const struct tb_property_dir *dir, + bool recurse, size_t *data_len) +{ + const struct tb_property *property; + size_t len = 0; + + if (dir->uuid) + len += sizeof(*dir->uuid) / 4; + else + len += sizeof(struct tb_property_rootdir_entry) / 4; + + list_for_each_entry(property, &dir->properties, list) { + len += sizeof(struct tb_property_entry) / 4; + + switch (property->type) { + case TB_PROPERTY_TYPE_DIRECTORY: + if (recurse) { + len += tb_property_dir_length( + property->value.dir, recurse, data_len); + } + /* Reserve dword padding after each directory */ + if (data_len) + *data_len += 1; + break; + + case TB_PROPERTY_TYPE_DATA: + case TB_PROPERTY_TYPE_TEXT: + if (data_len) + *data_len += property->length; + break; + + default: + break; + } + } + + return len; +} + +static ssize_t __tb_property_format_dir(const struct tb_property_dir *dir, + u32 *block, unsigned int start_offset, size_t block_len) +{ + unsigned int data_offset, dir_end; + const struct tb_property *property; + struct tb_property_entry *entry; + size_t dir_len, data_len = 0; + int ret; + + /* + * The structure of property block looks like following. Leaf + * data/text is included right after the directory and each + * directory follows each other (even nested ones). + * + * +----------+ <-- start_offset + * | header | <-- root directory header + * +----------+ --- + * | entry 0 | -^--------------------. + * +----------+ | | + * | entry 1 | -|--------------------|--. + * +----------+ | | | + * | entry 2 | -|-----------------. | | + * +----------+ | | | | + * : : | dir_len | | | + * . . | | | | + * : : | | | | + * +----------+ | | | | + * | entry n | v | | | + * +----------+ <-- data_offset | | | + * | data 0 | <------------------|--' | + * +----------+ | | + * | data 1 | <------------------|-----' + * +----------+ | + * | 00000000 | padding | + * +----------+ <-- dir_end <------' + * | UUID | <-- directory UUID (child directory) + * +----------+ + * | entry 0 | + * +----------+ + * | entry 1 | + * +----------+ + * : : + * . . 
+ * : : + * +----------+ + * | entry n | + * +----------+ + * | data 0 | + * +----------+ + * + * We use dir_end to hold pointer to the end of the directory. It + * will increase as we add directories and each directory should be + * added starting from previous dir_end. + */ + dir_len = tb_property_dir_length(dir, false, &data_len); + data_offset = start_offset + dir_len; + dir_end = start_offset + data_len + dir_len; + + if (data_offset > dir_end) + return -EINVAL; + if (dir_end > block_len) + return -EINVAL; + + /* Write headers first */ + if (dir->uuid) { + struct tb_property_dir_entry *pe; + + pe = (struct tb_property_dir_entry *)&block[start_offset]; + memcpy(pe->uuid, dir->uuid, sizeof(pe->uuid)); + entry = pe->entries; + } else { + struct tb_property_rootdir_entry *re; + + re = (struct tb_property_rootdir_entry *)&block[start_offset]; + re->magic = TB_PROPERTY_ROOTDIR_MAGIC; + re->length = dir_len - sizeof(*re) / 4; + entry = re->entries; + } + + list_for_each_entry(property, &dir->properties, list) { + const struct tb_property_dir *child; + + format_dwdata(entry, property->key, 2); + entry->type = property->type; + + switch (property->type) { + case TB_PROPERTY_TYPE_DIRECTORY: + child = property->value.dir; + ret = __tb_property_format_dir(child, block, dir_end, + block_len); + if (ret < 0) + return ret; + entry->length = tb_property_dir_length(child, false, + NULL); + entry->value = dir_end; + dir_end = ret; + break; + + case TB_PROPERTY_TYPE_DATA: + format_dwdata(&block[data_offset], property->value.data, + property->length); + entry->length = property->length; + entry->value = data_offset; + data_offset += entry->length; + break; + + case TB_PROPERTY_TYPE_TEXT: + format_dwdata(&block[data_offset], property->value.text, + property->length); + entry->length = property->length; + entry->value = data_offset; + data_offset += entry->length; + break; + + case TB_PROPERTY_TYPE_VALUE: + entry->length = property->length; + entry->value = property->value.immediate; + break; + + default: + break; + } + + entry++; + } + + return dir_end; +} + +/** + * tb_property_format_dir() - Formats directory to the packed XDomain format + * @dir: Directory to format + * @block: Property block where the packed data is placed + * @block_len: Length of the property block + * + * This function formats the directory to the packed format that can be + * then send over the thunderbolt fabric to receiving host. Returns %0 in + * case of success and negative errno on faulure. Passing %NULL in @block + * returns number of entries the block takes. + */ +ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block, + size_t block_len) +{ + ssize_t ret; + + if (!block) { + size_t dir_len, data_len = 0; + + dir_len = tb_property_dir_length(dir, true, &data_len); + return dir_len + data_len; + } + + ret = __tb_property_format_dir(dir, block, 0, block_len); + return ret < 0 ? ret : 0; +} + +/** + * tb_property_copy_dir() - Take a deep copy of directory + * @dir: Directory to copy + * + * This function takes a deep copy of @dir and returns back the copy. In + * case of error returns %NULL. The resulting directory needs to be + * released by calling tb_property_free_dir(). 
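+ *
+ * Illustrative usage (sketch only):
+ *
+ *	struct tb_property_dir *copy = tb_property_copy_dir(dir);
+ *
+ *	if (!copy)
+ *		return -ENOMEM;
+ *	modify_the_copy(copy);
+ *	tb_property_free_dir(copy);
+ *
+ * where modify_the_copy() stands for whatever changes the caller wants
+ * to make without touching the original @dir.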
+ */ +struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir) +{ + struct tb_property *property, *p = NULL; + struct tb_property_dir *d; + + if (!dir) + return NULL; + + d = tb_property_create_dir(dir->uuid); + if (!d) + return NULL; + + list_for_each_entry(property, &dir->properties, list) { + struct tb_property *p; + + p = tb_property_alloc(property->key, property->type); + if (!p) + goto err_free; + + p->length = property->length; + + switch (property->type) { + case TB_PROPERTY_TYPE_DIRECTORY: + p->value.dir = tb_property_copy_dir(property->value.dir); + if (!p->value.dir) + goto err_free; + break; + + case TB_PROPERTY_TYPE_DATA: + p->value.data = kmemdup(property->value.data, + property->length * 4, + GFP_KERNEL); + if (!p->value.data) + goto err_free; + break; + + case TB_PROPERTY_TYPE_TEXT: + p->value.text = kzalloc(p->length * 4, GFP_KERNEL); + if (!p->value.text) + goto err_free; + strcpy(p->value.text, property->value.text); + break; + + case TB_PROPERTY_TYPE_VALUE: + p->value.immediate = property->value.immediate; + break; + + default: + break; + } + + list_add_tail(&p->list, &d->properties); + } + + return d; + +err_free: + kfree(p); + tb_property_free_dir(d); + + return NULL; +} + +/** + * tb_property_add_immediate() - Add immediate property to directory + * @parent: Directory to add the property + * @key: Key for the property + * @value: Immediate value to store with the property + */ +int tb_property_add_immediate(struct tb_property_dir *parent, const char *key, + u32 value) +{ + struct tb_property *property; + + if (!tb_property_key_valid(key)) + return -EINVAL; + + property = tb_property_alloc(key, TB_PROPERTY_TYPE_VALUE); + if (!property) + return -ENOMEM; + + property->length = 1; + property->value.immediate = value; + + list_add_tail(&property->list, &parent->properties); + return 0; +} +EXPORT_SYMBOL_GPL(tb_property_add_immediate); + +/** + * tb_property_add_data() - Adds arbitrary data property to directory + * @parent: Directory to add the property + * @key: Key for the property + * @buf: Data buffer to add + * @buflen: Number of bytes in the data buffer + * + * Function takes a copy of @buf and adds it to the directory. + */ +int tb_property_add_data(struct tb_property_dir *parent, const char *key, + const void *buf, size_t buflen) +{ + /* Need to pad to dword boundary */ + size_t size = round_up(buflen, 4); + struct tb_property *property; + + if (!tb_property_key_valid(key)) + return -EINVAL; + + property = tb_property_alloc(key, TB_PROPERTY_TYPE_DATA); + if (!property) + return -ENOMEM; + + property->length = size / 4; + property->value.data = kzalloc(size, GFP_KERNEL); + if (!property->value.data) { + kfree(property); + return -ENOMEM; + } + + memcpy(property->value.data, buf, buflen); + + list_add_tail(&property->list, &parent->properties); + return 0; +} +EXPORT_SYMBOL_GPL(tb_property_add_data); + +/** + * tb_property_add_text() - Adds string property to directory + * @parent: Directory to add the property + * @key: Key for the property + * @text: String to add + * + * Function takes a copy of @text and adds it to the directory. 
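+ *
+ * Together with the other helpers above a directory can be populated
+ * like this (sketch only; the keys and values are made up):
+ *
+ *	tb_property_add_immediate(dir, "prtcvers", 1);
+ *	tb_property_add_text(dir, "deviceid", "0001");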
+ */ +int tb_property_add_text(struct tb_property_dir *parent, const char *key, + const char *text) +{ + /* Need to pad to dword boundary */ + size_t size = round_up(strlen(text) + 1, 4); + struct tb_property *property; + + if (!tb_property_key_valid(key)) + return -EINVAL; + + property = tb_property_alloc(key, TB_PROPERTY_TYPE_TEXT); + if (!property) + return -ENOMEM; + + property->length = size / 4; + property->value.text = kzalloc(size, GFP_KERNEL); + if (!property->value.text) { + kfree(property); + return -ENOMEM; + } + + strcpy(property->value.text, text); + + list_add_tail(&property->list, &parent->properties); + return 0; +} +EXPORT_SYMBOL_GPL(tb_property_add_text); + +/** + * tb_property_add_dir() - Adds a directory to the parent directory + * @parent: Directory to add the property + * @key: Key for the property + * @dir: Directory to add + */ +int tb_property_add_dir(struct tb_property_dir *parent, const char *key, + struct tb_property_dir *dir) +{ + struct tb_property *property; + + if (!tb_property_key_valid(key)) + return -EINVAL; + + property = tb_property_alloc(key, TB_PROPERTY_TYPE_DIRECTORY); + if (!property) + return -ENOMEM; + + property->value.dir = dir; + + list_add_tail(&property->list, &parent->properties); + return 0; +} +EXPORT_SYMBOL_GPL(tb_property_add_dir); + +/** + * tb_property_remove() - Removes property from a parent directory + * @property: Property to remove + * + * Note memory for @property is released as well so it is not allowed to + * touch the object after call to this function. + */ +void tb_property_remove(struct tb_property *property) +{ + list_del(&property->list); + kfree(property); +} +EXPORT_SYMBOL_GPL(tb_property_remove); + +/** + * tb_property_find() - Find a property from a directory + * @dir: Directory where the property is searched + * @key: Key to look for + * @type: Type of the property + * + * Finds and returns property from the given directory. Does not recurse + * into sub-directories. Returns %NULL if the property was not found. 
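+ *
+ * Illustrative usage (sketch only; the key is made up):
+ *
+ *	struct tb_property *p;
+ *
+ *	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
+ *	if (p)
+ *		pr_debug("deviceid: %s\n", p->value.text);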
+ */ +struct tb_property *tb_property_find(struct tb_property_dir *dir, + const char *key, enum tb_property_type type) +{ + struct tb_property *property; + + list_for_each_entry(property, &dir->properties, list) { + if (property->type == type && !strcmp(property->key, key)) + return property; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(tb_property_find); + +/** + * tb_property_get_next() - Get next property from directory + * @dir: Directory holding properties + * @prev: Previous property in the directory (%NULL returns the first) + */ +struct tb_property *tb_property_get_next(struct tb_property_dir *dir, + struct tb_property *prev) +{ + if (prev) { + if (list_is_last(&prev->list, &dir->properties)) + return NULL; + return list_next_entry(prev, list); + } + return list_first_entry_or_null(&dir->properties, struct tb_property, + list); +} +EXPORT_SYMBOL_GPL(tb_property_get_next); diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c new file mode 100644 index 0000000000..e6bfa63b40 --- /dev/null +++ b/drivers/thunderbolt/quirks.c @@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt driver - quirks + * + * Copyright (c) 2020 Mario Limonciello <mario.limonciello@dell.com> + */ + +#include "tb.h" + +static void quirk_force_power_link(struct tb_switch *sw) +{ + sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER; + tb_sw_dbg(sw, "forcing power to link controller\n"); +} + +static void quirk_dp_credit_allocation(struct tb_switch *sw) +{ + if (sw->credit_allocation && sw->min_dp_main_credits == 56) { + sw->min_dp_main_credits = 18; + tb_sw_dbg(sw, "quirked DP main: %u\n", sw->min_dp_main_credits); + } +} + +static void quirk_clx_disable(struct tb_switch *sw) +{ + sw->quirks |= QUIRK_NO_CLX; + tb_sw_dbg(sw, "disabling CL states\n"); +} + +static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw) +{ + struct tb_port *port; + + if (tb_switch_is_icm(sw)) + return; + + tb_switch_for_each_port(sw, port) { + if (!tb_port_is_usb3_down(port)) + continue; + port->max_bw = 16376; + tb_port_dbg(port, "USB3 maximum bandwidth limited to %u Mb/s\n", + port->max_bw); + } +} + +struct tb_quirk { + u16 hw_vendor_id; + u16 hw_device_id; + u16 vendor; + u16 device; + void (*hook)(struct tb_switch *sw); +}; + +static const struct tb_quirk tb_quirks[] = { + /* Dell WD19TB supports self-authentication on unplug */ + { 0x0000, 0x0000, 0x00d4, 0xb070, quirk_force_power_link }, + { 0x0000, 0x0000, 0x00d4, 0xb071, quirk_force_power_link }, + /* + * Intel Goshen Ridge NVM 27 and before report wrong number of + * DP buffers. + */ + { 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation }, + /* + * Limit the maximum USB3 bandwidth for the following Intel USB4 + * host routers due to a hardware issue. 
+ */ + { 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI0, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI1, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI0, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI1, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_MTL_M_NHI0, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI0, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000, + quirk_usb3_maximum_bandwidth }, + /* + * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms. + */ + { 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable }, + { 0x0438, 0x0209, 0x0000, 0x0000, quirk_clx_disable }, + { 0x0438, 0x020a, 0x0000, 0x0000, quirk_clx_disable }, + { 0x0438, 0x020b, 0x0000, 0x0000, quirk_clx_disable }, +}; + +/** + * tb_check_quirks() - Check for quirks to apply + * @sw: Thunderbolt switch + * + * Apply any quirks for the Thunderbolt controller. + */ +void tb_check_quirks(struct tb_switch *sw) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tb_quirks); i++) { + const struct tb_quirk *q = &tb_quirks[i]; + + if (q->hw_vendor_id && q->hw_vendor_id != sw->config.vendor_id) + continue; + if (q->hw_device_id && q->hw_device_id != sw->config.device_id) + continue; + if (q->vendor && q->vendor != sw->vendor) + continue; + if (q->device && q->device != sw->device) + continue; + + tb_sw_dbg(sw, "running %ps\n", q->hook); + q->hook(sw); + } +} diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c new file mode 100644 index 0000000000..47becb363a --- /dev/null +++ b/drivers/thunderbolt/retimer.c @@ -0,0 +1,565 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt/USB4 retimer support. + * + * Copyright (C) 2020, Intel Corporation + * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/delay.h> +#include <linux/pm_runtime.h> +#include <linux/sched/signal.h> + +#include "sb_regs.h" +#include "tb.h" + +#define TB_MAX_RETIMER_INDEX 6 + +/** + * tb_retimer_nvm_read() - Read contents of retimer NVM + * @rt: Retimer device + * @address: NVM address (in bytes) to start reading + * @buf: Data read from NVM is stored here + * @size: Number of bytes to read + * + * Reads retimer NVM and copies the contents to @buf. Returns %0 if the + * read was successful and negative errno in case of failure. 
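+ *
+ * Illustrative usage (sketch only; the caller is assumed to handle
+ * locking and runtime PM the same way the sysfs paths in this file do):
+ *
+ *	u8 buf[64];
+ *	int ret;
+ *
+ *	ret = tb_retimer_nvm_read(rt, 0, buf, sizeof(buf));
+ *	if (ret)
+ *		return ret;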
+ */ +int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf, + size_t size) +{ + return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size); +} + +static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes) +{ + struct tb_nvm *nvm = priv; + struct tb_retimer *rt = tb_to_retimer(nvm->dev); + int ret; + + pm_runtime_get_sync(&rt->dev); + + if (!mutex_trylock(&rt->tb->lock)) { + ret = restart_syscall(); + goto out; + } + + ret = tb_retimer_nvm_read(rt, offset, val, bytes); + mutex_unlock(&rt->tb->lock); + +out: + pm_runtime_mark_last_busy(&rt->dev); + pm_runtime_put_autosuspend(&rt->dev); + + return ret; +} + +static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes) +{ + struct tb_nvm *nvm = priv; + struct tb_retimer *rt = tb_to_retimer(nvm->dev); + int ret = 0; + + if (!mutex_trylock(&rt->tb->lock)) + return restart_syscall(); + + ret = tb_nvm_write_buf(nvm, offset, val, bytes); + mutex_unlock(&rt->tb->lock); + + return ret; +} + +static int tb_retimer_nvm_add(struct tb_retimer *rt) +{ + struct tb_nvm *nvm; + int ret; + + nvm = tb_nvm_alloc(&rt->dev); + if (IS_ERR(nvm)) { + ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm); + goto err_nvm; + } + + ret = tb_nvm_read_version(nvm); + if (ret) + goto err_nvm; + + ret = tb_nvm_add_active(nvm, nvm_read); + if (ret) + goto err_nvm; + + ret = tb_nvm_add_non_active(nvm, nvm_write); + if (ret) + goto err_nvm; + + rt->nvm = nvm; + return 0; + +err_nvm: + dev_dbg(&rt->dev, "NVM upgrade disabled\n"); + if (!IS_ERR(nvm)) + tb_nvm_free(nvm); + + return ret; +} + +static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt) +{ + unsigned int image_size; + const u8 *buf; + int ret; + + ret = tb_nvm_validate(rt->nvm); + if (ret) + return ret; + + buf = rt->nvm->buf_data_start; + image_size = rt->nvm->buf_data_size; + + ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf, + image_size); + if (ret) + return ret; + + rt->nvm->flushed = true; + return 0; +} + +static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only) +{ + u32 status; + int ret; + + if (auth_only) { + ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0); + if (ret) + return ret; + } + + ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index); + if (ret) + return ret; + + usleep_range(100, 150); + + /* + * Check the status now if we still can access the retimer. It + * is expected that the below fails. + */ + ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index, + &status); + if (!ret) { + rt->auth_status = status; + return status ? -EINVAL : 0; + } + + return 0; +} + +static ssize_t device_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + + return sysfs_emit(buf, "%#x\n", rt->device); +} +static DEVICE_ATTR_RO(device); + +static ssize_t nvm_authenticate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + int ret; + + if (!mutex_trylock(&rt->tb->lock)) + return restart_syscall(); + + if (!rt->nvm) + ret = -EAGAIN; + else if (rt->no_nvm_upgrade) + ret = -EOPNOTSUPP; + else + ret = sysfs_emit(buf, "%#x\n", rt->auth_status); + + mutex_unlock(&rt->tb->lock); + + return ret; +} + +static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status) +{ + int i; + + tb_port_dbg(port, "reading NVM authentication status of retimers\n"); + + /* + * Before doing anything else, read the authentication status. 
+ * If the retimer has it set, store it for the new retimer + * device instance. + */ + for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) + usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]); +} + +static void tb_retimer_set_inbound_sbtx(struct tb_port *port) +{ + int i; + + /* + * When USB4 port is online sideband communications are + * already up. + */ + if (!usb4_port_device_is_offline(port->usb4)) + return; + + tb_port_dbg(port, "enabling sideband transactions\n"); + + for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) + usb4_port_retimer_set_inbound_sbtx(port, i); +} + +static void tb_retimer_unset_inbound_sbtx(struct tb_port *port) +{ + int i; + + /* + * When USB4 port is offline we need to keep the sideband + * communications up to make it possible to communicate with + * the connected retimers. + */ + if (usb4_port_device_is_offline(port->usb4)) + return; + + tb_port_dbg(port, "disabling sideband transactions\n"); + + for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--) + usb4_port_retimer_unset_inbound_sbtx(port, i); +} + +static ssize_t nvm_authenticate_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + int val, ret; + + pm_runtime_get_sync(&rt->dev); + + if (!mutex_trylock(&rt->tb->lock)) { + ret = restart_syscall(); + goto exit_rpm; + } + + if (!rt->nvm) { + ret = -EAGAIN; + goto exit_unlock; + } + + ret = kstrtoint(buf, 10, &val); + if (ret) + goto exit_unlock; + + /* Always clear status */ + rt->auth_status = 0; + + if (val) { + /* + * When NVM authentication starts the retimer is not + * accessible so calling tb_retimer_unset_inbound_sbtx() + * will fail and therefore we do not call it. Exception + * is when the validation fails or we only write the new + * NVM image without authentication. 
+ */ + tb_retimer_set_inbound_sbtx(rt->port); + if (val == AUTHENTICATE_ONLY) { + ret = tb_retimer_nvm_authenticate(rt, true); + } else { + if (!rt->nvm->flushed) { + if (!rt->nvm->buf) { + ret = -EINVAL; + goto exit_unlock; + } + + ret = tb_retimer_nvm_validate_and_write(rt); + if (ret || val == WRITE_ONLY) + goto exit_unlock; + } + if (val == WRITE_AND_AUTHENTICATE) + ret = tb_retimer_nvm_authenticate(rt, false); + } + } + +exit_unlock: + if (ret || val == WRITE_ONLY) + tb_retimer_unset_inbound_sbtx(rt->port); + mutex_unlock(&rt->tb->lock); +exit_rpm: + pm_runtime_mark_last_busy(&rt->dev); + pm_runtime_put_autosuspend(&rt->dev); + + if (ret) + return ret; + return count; +} +static DEVICE_ATTR_RW(nvm_authenticate); + +static ssize_t nvm_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + int ret; + + if (!mutex_trylock(&rt->tb->lock)) + return restart_syscall(); + + if (!rt->nvm) + ret = -EAGAIN; + else + ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor); + + mutex_unlock(&rt->tb->lock); + return ret; +} +static DEVICE_ATTR_RO(nvm_version); + +static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + + return sysfs_emit(buf, "%#x\n", rt->vendor); +} +static DEVICE_ATTR_RO(vendor); + +static struct attribute *retimer_attrs[] = { + &dev_attr_device.attr, + &dev_attr_nvm_authenticate.attr, + &dev_attr_nvm_version.attr, + &dev_attr_vendor.attr, + NULL +}; + +static const struct attribute_group retimer_group = { + .attrs = retimer_attrs, +}; + +static const struct attribute_group *retimer_groups[] = { + &retimer_group, + NULL +}; + +static void tb_retimer_release(struct device *dev) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + + kfree(rt); +} + +struct device_type tb_retimer_type = { + .name = "thunderbolt_retimer", + .groups = retimer_groups, + .release = tb_retimer_release, +}; + +static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status) +{ + struct tb_retimer *rt; + u32 vendor, device; + int ret; + + ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor, + sizeof(vendor)); + if (ret) { + if (ret != -ENODEV) + tb_port_warn(port, "failed read retimer VendorId: %d\n", ret); + return ret; + } + + ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device, + sizeof(device)); + if (ret) { + if (ret != -ENODEV) + tb_port_warn(port, "failed read retimer ProductId: %d\n", ret); + return ret; + } + + /* + * Check that it supports NVM operations. If not then don't add + * the device at all. 
+ */ + ret = usb4_port_retimer_nvm_sector_size(port, index); + if (ret < 0) + return ret; + + rt = kzalloc(sizeof(*rt), GFP_KERNEL); + if (!rt) + return -ENOMEM; + + rt->index = index; + rt->vendor = vendor; + rt->device = device; + rt->auth_status = auth_status; + rt->port = port; + rt->tb = port->sw->tb; + + rt->dev.parent = &port->usb4->dev; + rt->dev.bus = &tb_bus_type; + rt->dev.type = &tb_retimer_type; + dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev), + port->port, index); + + ret = device_register(&rt->dev); + if (ret) { + dev_err(&rt->dev, "failed to register retimer: %d\n", ret); + put_device(&rt->dev); + return ret; + } + + ret = tb_retimer_nvm_add(rt); + if (ret) { + dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret); + device_unregister(&rt->dev); + return ret; + } + + dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n", + rt->vendor, rt->device); + + pm_runtime_no_callbacks(&rt->dev); + pm_runtime_set_active(&rt->dev); + pm_runtime_enable(&rt->dev); + pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY); + pm_runtime_mark_last_busy(&rt->dev); + pm_runtime_use_autosuspend(&rt->dev); + + return 0; +} + +static void tb_retimer_remove(struct tb_retimer *rt) +{ + dev_info(&rt->dev, "retimer disconnected\n"); + tb_nvm_free(rt->nvm); + device_unregister(&rt->dev); +} + +struct tb_retimer_lookup { + const struct tb_port *port; + u8 index; +}; + +static int retimer_match(struct device *dev, void *data) +{ + const struct tb_retimer_lookup *lookup = data; + struct tb_retimer *rt = tb_to_retimer(dev); + + return rt && rt->port == lookup->port && rt->index == lookup->index; +} + +static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index) +{ + struct tb_retimer_lookup lookup = { .port = port, .index = index }; + struct device *dev; + + dev = device_find_child(&port->usb4->dev, &lookup, retimer_match); + if (dev) + return tb_to_retimer(dev); + + return NULL; +} + +/** + * tb_retimer_scan() - Scan for on-board retimers under port + * @port: USB4 port to scan + * @add: If true also registers found retimers + * + * Brings the sideband into a state where retimers can be accessed. + * Then Tries to enumerate on-board retimers connected to @port. Found + * retimers are registered as children of @port if @add is set. Does + * not scan for cable retimers for now. + */ +int tb_retimer_scan(struct tb_port *port, bool add) +{ + u32 status[TB_MAX_RETIMER_INDEX + 1] = {}; + int ret, i, last_idx = 0; + + /* + * Send broadcast RT to make sure retimer indices facing this + * port are set. + */ + ret = usb4_port_enumerate_retimers(port); + if (ret) + return ret; + + /* + * Immediately after sending enumerate retimers read the + * authentication status of each retimer. + */ + tb_retimer_nvm_authenticate_status(port, status); + + /* + * Enable sideband channel for each retimer. We can do this + * regardless whether there is device connected or not. + */ + tb_retimer_set_inbound_sbtx(port); + + for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) { + /* + * Last retimer is true only for the last on-board + * retimer (the one connected directly to the Type-C + * port). 
+ */ + ret = usb4_port_retimer_is_last(port, i); + if (ret > 0) + last_idx = i; + else if (ret < 0) + break; + } + + tb_retimer_unset_inbound_sbtx(port); + + if (!last_idx) + return 0; + + /* Add on-board retimers if they do not exist already */ + ret = 0; + for (i = 1; i <= last_idx; i++) { + struct tb_retimer *rt; + + rt = tb_port_find_retimer(port, i); + if (rt) { + put_device(&rt->dev); + } else if (add) { + ret = tb_retimer_add(port, i, status[i]); + if (ret && ret != -EOPNOTSUPP) + break; + } + } + + return ret; +} + +static int remove_retimer(struct device *dev, void *data) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + struct tb_port *port = data; + + if (rt && rt->port == port) + tb_retimer_remove(rt); + return 0; +} + +/** + * tb_retimer_remove_all() - Remove all retimers under port + * @port: USB4 port whose retimers to remove + * + * This removes all previously added retimers under @port. + */ +void tb_retimer_remove_all(struct tb_port *port) +{ + struct usb4_port *usb4; + + usb4 = port->usb4; + if (usb4) + device_for_each_child_reverse(&usb4->dev, port, + remove_retimer); +} diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h new file mode 100644 index 0000000000..f37a4320f1 --- /dev/null +++ b/drivers/thunderbolt/sb_regs.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * USB4 port sideband registers found on routers and retimers + * + * Copyright (C) 2020, Intel Corporation + * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> + * Rajmohan Mani <rajmohan.mani@intel.com> + */ + +#ifndef _SB_REGS +#define _SB_REGS + +#define USB4_SB_VENDOR_ID 0x00 +#define USB4_SB_PRODUCT_ID 0x01 +#define USB4_SB_OPCODE 0x08 + +enum usb4_sb_opcode { + USB4_SB_OPCODE_ERR = 0x20525245, /* "ERR " */ + USB4_SB_OPCODE_ONS = 0x444d4321, /* "!CMD" */ + USB4_SB_OPCODE_ROUTER_OFFLINE = 0x4e45534c, /* "LSEN" */ + USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45, /* "ENUM" */ + USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c, /* "LSUP" */ + USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355, /* "USUP" */ + USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c, /* "LAST" */ + USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47, /* "GNSS" */ + USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42, /* "BOPS" */ + USB4_SB_OPCODE_NVM_BLOCK_WRITE = 0x574b4c42, /* "BLKW" */ + USB4_SB_OPCODE_NVM_AUTH_WRITE = 0x48545541, /* "AUTH" */ + USB4_SB_OPCODE_NVM_READ = 0x52524641, /* "AFRR" */ + USB4_SB_OPCODE_READ_LANE_MARGINING_CAP = 0x50434452, /* "RDCP" */ + USB4_SB_OPCODE_RUN_HW_LANE_MARGINING = 0x474d4852, /* "RHMG" */ + USB4_SB_OPCODE_RUN_SW_LANE_MARGINING = 0x474d5352, /* "RSMG" */ + USB4_SB_OPCODE_READ_SW_MARGIN_ERR = 0x57534452, /* "RDSW" */ +}; + +#define USB4_SB_METADATA 0x09 +#define USB4_SB_METADATA_NVM_AUTH_WRITE_MASK GENMASK(5, 0) +#define USB4_SB_DATA 0x12 + +/* USB4_SB_OPCODE_READ_LANE_MARGINING_CAP */ +#define USB4_MARGIN_CAP_0_MODES_HW BIT(0) +#define USB4_MARGIN_CAP_0_MODES_SW BIT(1) +#define USB4_MARGIN_CAP_0_2_LANES BIT(2) +#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK GENMASK(4, 3) +#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT 3 +#define USB4_MARGIN_CAP_0_VOLTAGE_MIN 0x0 +#define USB4_MARGIN_CAP_0_VOLTAGE_HL 0x1 +#define USB4_MARGIN_CAP_0_VOLTAGE_BOTH 0x2 +#define USB4_MARGIN_CAP_0_TIME BIT(5) +#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK GENMASK(12, 6) +#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT 6 +#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK GENMASK(18, 13) +#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT 13 +#define USB4_MARGIN_CAP_1_TIME_DESTR BIT(8) +#define 
USB4_MARGIN_CAP_1_TIME_INDP_MASK GENMASK(10, 9) +#define USB4_MARGIN_CAP_1_TIME_INDP_SHIFT 9 +#define USB4_MARGIN_CAP_1_TIME_MIN 0x0 +#define USB4_MARGIN_CAP_1_TIME_LR 0x1 +#define USB4_MARGIN_CAP_1_TIME_BOTH 0x2 +#define USB4_MARGIN_CAP_1_TIME_STEPS_MASK GENMASK(15, 11) +#define USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT 11 +#define USB4_MARGIN_CAP_1_TIME_OFFSET_MASK GENMASK(20, 16) +#define USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT 16 +#define USB4_MARGIN_CAP_1_MIN_BER_MASK GENMASK(25, 21) +#define USB4_MARGIN_CAP_1_MIN_BER_SHIFT 21 +#define USB4_MARGIN_CAP_1_MAX_BER_MASK GENMASK(30, 26) +#define USB4_MARGIN_CAP_1_MAX_BER_SHIFT 26 +#define USB4_MARGIN_CAP_1_MAX_BER_SHIFT 26 + +/* USB4_SB_OPCODE_RUN_HW_LANE_MARGINING */ +#define USB4_MARGIN_HW_TIME BIT(3) +#define USB4_MARGIN_HW_RH BIT(4) +#define USB4_MARGIN_HW_BER_MASK GENMASK(9, 5) +#define USB4_MARGIN_HW_BER_SHIFT 5 + +/* Applicable to all margin values */ +#define USB4_MARGIN_HW_RES_1_MARGIN_MASK GENMASK(6, 0) +#define USB4_MARGIN_HW_RES_1_EXCEEDS BIT(7) +/* Different lane margin shifts */ +#define USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT 8 +#define USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT 16 +#define USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT 24 + +/* USB4_SB_OPCODE_RUN_SW_LANE_MARGINING */ +#define USB4_MARGIN_SW_TIME BIT(3) +#define USB4_MARGIN_SW_RH BIT(4) +#define USB4_MARGIN_SW_COUNTER_MASK GENMASK(14, 13) +#define USB4_MARGIN_SW_COUNTER_SHIFT 13 +#define USB4_MARGIN_SW_COUNTER_NOP 0x0 +#define USB4_MARGIN_SW_COUNTER_CLEAR 0x1 +#define USB4_MARGIN_SW_COUNTER_START 0x2 +#define USB4_MARGIN_SW_COUNTER_STOP 0x3 + +#endif diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c new file mode 100644 index 0000000000..509b99af50 --- /dev/null +++ b/drivers/thunderbolt/switch.c @@ -0,0 +1,3585 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt driver - switch/port utility functions + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2018, Intel Corporation + */ + +#include <linux/delay.h> +#include <linux/idr.h> +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/pm_runtime.h> +#include <linux/sched/signal.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/string_helpers.h> + +#include "tb.h" + +/* Switch NVM support */ + +struct nvm_auth_status { + struct list_head list; + uuid_t uuid; + u32 status; +}; + +/* + * Hold NVM authentication failure status per switch This information + * needs to stay around even when the switch gets power cycled so we + * keep it separately. + */ +static LIST_HEAD(nvm_auth_status_cache); +static DEFINE_MUTEX(nvm_auth_status_lock); + +static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw) +{ + struct nvm_auth_status *st; + + list_for_each_entry(st, &nvm_auth_status_cache, list) { + if (uuid_equal(&st->uuid, sw->uuid)) + return st; + } + + return NULL; +} + +static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status) +{ + struct nvm_auth_status *st; + + mutex_lock(&nvm_auth_status_lock); + st = __nvm_get_auth_status(sw); + mutex_unlock(&nvm_auth_status_lock); + + *status = st ? 
st->status : 0; +} + +static void nvm_set_auth_status(const struct tb_switch *sw, u32 status) +{ + struct nvm_auth_status *st; + + if (WARN_ON(!sw->uuid)) + return; + + mutex_lock(&nvm_auth_status_lock); + st = __nvm_get_auth_status(sw); + + if (!st) { + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto unlock; + + memcpy(&st->uuid, sw->uuid, sizeof(st->uuid)); + INIT_LIST_HEAD(&st->list); + list_add_tail(&st->list, &nvm_auth_status_cache); + } + + st->status = status; +unlock: + mutex_unlock(&nvm_auth_status_lock); +} + +static void nvm_clear_auth_status(const struct tb_switch *sw) +{ + struct nvm_auth_status *st; + + mutex_lock(&nvm_auth_status_lock); + st = __nvm_get_auth_status(sw); + if (st) { + list_del(&st->list); + kfree(st); + } + mutex_unlock(&nvm_auth_status_lock); +} + +static int nvm_validate_and_write(struct tb_switch *sw) +{ + unsigned int image_size; + const u8 *buf; + int ret; + + ret = tb_nvm_validate(sw->nvm); + if (ret) + return ret; + + ret = tb_nvm_write_headers(sw->nvm); + if (ret) + return ret; + + buf = sw->nvm->buf_data_start; + image_size = sw->nvm->buf_data_size; + + if (tb_switch_is_usb4(sw)) + ret = usb4_switch_nvm_write(sw, 0, buf, image_size); + else + ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size); + if (ret) + return ret; + + sw->nvm->flushed = true; + return 0; +} + +static int nvm_authenticate_host_dma_port(struct tb_switch *sw) +{ + int ret = 0; + + /* + * Root switch NVM upgrade requires that we disconnect the + * existing paths first (in case it is not in safe mode + * already). + */ + if (!sw->safe_mode) { + u32 status; + + ret = tb_domain_disconnect_all_paths(sw->tb); + if (ret) + return ret; + /* + * The host controller goes away pretty soon after this if + * everything goes well so getting timeout is expected. + */ + ret = dma_port_flash_update_auth(sw->dma_port); + if (!ret || ret == -ETIMEDOUT) + return 0; + + /* + * Any error from update auth operation requires power + * cycling of the host router. + */ + tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n"); + if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0) + nvm_set_auth_status(sw, status); + } + + /* + * From safe mode we can get out by just power cycling the + * switch. + */ + dma_port_power_cycle(sw->dma_port); + return ret; +} + +static int nvm_authenticate_device_dma_port(struct tb_switch *sw) +{ + int ret, retries = 10; + + ret = dma_port_flash_update_auth(sw->dma_port); + switch (ret) { + case 0: + case -ETIMEDOUT: + case -EACCES: + case -EINVAL: + /* Power cycle is required */ + break; + default: + return ret; + } + + /* + * Poll here for the authentication status. It takes some time + * for the device to respond (we get timeout for a while). Once + * we get response the device needs to be power cycled in order + * to the new NVM to be taken into use. + */ + do { + u32 status; + + ret = dma_port_flash_update_auth_status(sw->dma_port, &status); + if (ret < 0 && ret != -ETIMEDOUT) + return ret; + if (ret > 0) { + if (status) { + tb_sw_warn(sw, "failed to authenticate NVM\n"); + nvm_set_auth_status(sw, status); + } + + tb_sw_info(sw, "power cycling the switch now\n"); + dma_port_power_cycle(sw->dma_port); + return 0; + } + + msleep(500); + } while (--retries); + + return -ETIMEDOUT; +} + +static void nvm_authenticate_start_dma_port(struct tb_switch *sw) +{ + struct pci_dev *root_port; + + /* + * During host router NVM upgrade we should not allow root port to + * go into D3cold because some root ports cannot trigger PME + * itself. 
To be on the safe side keep the root port in D0 during + * the whole upgrade process. + */ + root_port = pcie_find_root_port(sw->tb->nhi->pdev); + if (root_port) + pm_runtime_get_noresume(&root_port->dev); +} + +static void nvm_authenticate_complete_dma_port(struct tb_switch *sw) +{ + struct pci_dev *root_port; + + root_port = pcie_find_root_port(sw->tb->nhi->pdev); + if (root_port) + pm_runtime_put(&root_port->dev); +} + +static inline bool nvm_readable(struct tb_switch *sw) +{ + if (tb_switch_is_usb4(sw)) { + /* + * USB4 devices must support NVM operations but it is + * optional for hosts. Therefore we query the NVM sector + * size here and if it is supported assume NVM + * operations are implemented. + */ + return usb4_switch_nvm_sector_size(sw) > 0; + } + + /* Thunderbolt 2 and 3 devices support NVM through DMA port */ + return !!sw->dma_port; +} + +static inline bool nvm_upgradeable(struct tb_switch *sw) +{ + if (sw->no_nvm_upgrade) + return false; + return nvm_readable(sw); +} + +static int nvm_authenticate(struct tb_switch *sw, bool auth_only) +{ + int ret; + + if (tb_switch_is_usb4(sw)) { + if (auth_only) { + ret = usb4_switch_nvm_set_offset(sw, 0); + if (ret) + return ret; + } + sw->nvm->authenticating = true; + return usb4_switch_nvm_authenticate(sw); + } + if (auth_only) + return -EOPNOTSUPP; + + sw->nvm->authenticating = true; + if (!tb_route(sw)) { + nvm_authenticate_start_dma_port(sw); + ret = nvm_authenticate_host_dma_port(sw); + } else { + ret = nvm_authenticate_device_dma_port(sw); + } + + return ret; +} + +/** + * tb_switch_nvm_read() - Read router NVM + * @sw: Router whose NVM to read + * @address: Start address on the NVM + * @buf: Buffer where the read data is copied + * @size: Size of the buffer in bytes + * + * Reads from router NVM and returns the requested data in @buf. Locking + * is up to the caller. Returns %0 in success and negative errno in case + * of failure. + */ +int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size) +{ + if (tb_switch_is_usb4(sw)) + return usb4_switch_nvm_read(sw, address, buf, size); + return dma_port_flash_read(sw->dma_port, address, buf, size); +} + +static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes) +{ + struct tb_nvm *nvm = priv; + struct tb_switch *sw = tb_to_switch(nvm->dev); + int ret; + + pm_runtime_get_sync(&sw->dev); + + if (!mutex_trylock(&sw->tb->lock)) { + ret = restart_syscall(); + goto out; + } + + ret = tb_switch_nvm_read(sw, offset, val, bytes); + mutex_unlock(&sw->tb->lock); + +out: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} + +static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes) +{ + struct tb_nvm *nvm = priv; + struct tb_switch *sw = tb_to_switch(nvm->dev); + int ret; + + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); + + /* + * Since writing the NVM image might require some special steps, + * for example when CSS headers are written, we cache the image + * locally here and handle the special cases when the user asks + * us to authenticate the image. + */ + ret = tb_nvm_write_buf(nvm, offset, val, bytes); + mutex_unlock(&sw->tb->lock); + + return ret; +} + +static int tb_switch_nvm_add(struct tb_switch *sw) +{ + struct tb_nvm *nvm; + int ret; + + if (!nvm_readable(sw)) + return 0; + + nvm = tb_nvm_alloc(&sw->dev); + if (IS_ERR(nvm)) { + ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 
0 : PTR_ERR(nvm); + goto err_nvm; + } + + ret = tb_nvm_read_version(nvm); + if (ret) + goto err_nvm; + + /* + * If the switch is in safe-mode the only accessible portion of + * the NVM is the non-active one where userspace is expected to + * write new functional NVM. + */ + if (!sw->safe_mode) { + ret = tb_nvm_add_active(nvm, nvm_read); + if (ret) + goto err_nvm; + } + + if (!sw->no_nvm_upgrade) { + ret = tb_nvm_add_non_active(nvm, nvm_write); + if (ret) + goto err_nvm; + } + + sw->nvm = nvm; + return 0; + +err_nvm: + tb_sw_dbg(sw, "NVM upgrade disabled\n"); + sw->no_nvm_upgrade = true; + if (!IS_ERR(nvm)) + tb_nvm_free(nvm); + + return ret; +} + +static void tb_switch_nvm_remove(struct tb_switch *sw) +{ + struct tb_nvm *nvm; + + nvm = sw->nvm; + sw->nvm = NULL; + + if (!nvm) + return; + + /* Remove authentication status in case the switch is unplugged */ + if (!nvm->authenticating) + nvm_clear_auth_status(sw); + + tb_nvm_free(nvm); +} + +/* port utility functions */ + +static const char *tb_port_type(const struct tb_regs_port_header *port) +{ + switch (port->type >> 16) { + case 0: + switch ((u8) port->type) { + case 0: + return "Inactive"; + case 1: + return "Port"; + case 2: + return "NHI"; + default: + return "unknown"; + } + case 0x2: + return "Ethernet"; + case 0x8: + return "SATA"; + case 0xe: + return "DP/HDMI"; + case 0x10: + return "PCIe"; + case 0x20: + return "USB"; + default: + return "unknown"; + } +} + +static void tb_dump_port(struct tb *tb, const struct tb_port *port) +{ + const struct tb_regs_port_header *regs = &port->config; + + tb_dbg(tb, + " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n", + regs->port_number, regs->vendor_id, regs->device_id, + regs->revision, regs->thunderbolt_version, tb_port_type(regs), + regs->type); + tb_dbg(tb, " Max hop id (in/out): %d/%d\n", + regs->max_in_hop_id, regs->max_out_hop_id); + tb_dbg(tb, " Max counters: %d\n", regs->max_counters); + tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits); + tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits, + port->ctl_credits); +} + +/** + * tb_port_state() - get connectedness state of a port + * @port: the port to check + * + * The port must have a TB_CAP_PHY (i.e. it should be a real port). + * + * Return: Returns an enum tb_port_state on success or an error code on failure. + */ +int tb_port_state(struct tb_port *port) +{ + struct tb_cap_phy phy; + int res; + if (port->cap_phy == 0) { + tb_port_WARN(port, "does not have a PHY\n"); + return -EINVAL; + } + res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2); + if (res) + return res; + return phy.state; +} + +/** + * tb_wait_for_port() - wait for a port to become ready + * @port: Port to wait + * @wait_if_unplugged: Wait also when port is unplugged + * + * Wait up to 1 second for a port to reach state TB_PORT_UP. If + * wait_if_unplugged is set then we also wait if the port is in state + * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after + * switch resume). Otherwise we only wait if a device is registered but the link + * has not yet been established. + * + * Return: Returns an error code on failure. Returns 0 if the port is not + * connected or failed to reach state TB_PORT_UP within one second. Returns 1 + * if the port is connected and in state TB_PORT_UP. 
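+ *
+ * Illustrative usage when scanning a port (sketch only):
+ *
+ *	ret = tb_wait_for_port(port, false);
+ *	if (ret <= 0)
+ *		return ret;
+ *
+ * Only when %1 is returned is there an established link behind @port
+ * worth scanning further.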
+ */ +int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged) +{ + int retries = 10; + int state; + if (!port->cap_phy) { + tb_port_WARN(port, "does not have PHY\n"); + return -EINVAL; + } + if (tb_is_upstream_port(port)) { + tb_port_WARN(port, "is the upstream port\n"); + return -EINVAL; + } + + while (retries--) { + state = tb_port_state(port); + switch (state) { + case TB_PORT_DISABLED: + tb_port_dbg(port, "is disabled (state: 0)\n"); + return 0; + + case TB_PORT_UNPLUGGED: + if (wait_if_unplugged) { + /* used during resume */ + tb_port_dbg(port, + "is unplugged (state: 7), retrying...\n"); + msleep(100); + break; + } + tb_port_dbg(port, "is unplugged (state: 7)\n"); + return 0; + + case TB_PORT_UP: + case TB_PORT_TX_CL0S: + case TB_PORT_RX_CL0S: + case TB_PORT_CL1: + case TB_PORT_CL2: + tb_port_dbg(port, "is connected, link is up (state: %d)\n", state); + return 1; + + default: + if (state < 0) + return state; + + /* + * After plug-in the state is TB_PORT_CONNECTING. Give it some + * time. + */ + tb_port_dbg(port, + "is connected, link is not up (state: %d), retrying...\n", + state); + msleep(100); + } + + } + tb_port_warn(port, + "failed to reach state TB_PORT_UP. Ignoring port...\n"); + return 0; +} + +/** + * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port + * @port: Port to add/remove NFC credits + * @credits: Credits to add/remove + * + * Change the number of NFC credits allocated to @port by @credits. To remove + * NFC credits pass a negative amount of credits. + * + * Return: Returns 0 on success or an error code on failure. + */ +int tb_port_add_nfc_credits(struct tb_port *port, int credits) +{ + u32 nfc_credits; + + if (credits == 0 || port->sw->is_unplugged) + return 0; + + /* + * USB4 restricts programming NFC buffers to lane adapters only + * so skip other ports. + */ + if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port)) + return 0; + + nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK; + if (credits < 0) + credits = max_t(int, -nfc_credits, credits); + + nfc_credits += credits; + + tb_port_dbg(port, "adding %d NFC credits to %lu", credits, + port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK); + + port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK; + port->config.nfc_credits |= nfc_credits; + + return tb_port_write(port, &port->config.nfc_credits, + TB_CFG_PORT, ADP_CS_4, 1); +} + +/** + * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER + * @port: Port whose counters to clear + * @counter: Counter index to clear + * + * Return: Returns 0 on success or an error code on failure. + */ +int tb_port_clear_counter(struct tb_port *port, int counter) +{ + u32 zero[3] = { 0, 0, 0 }; + tb_port_dbg(port, "clearing counter %d\n", counter); + return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3); +} + +/** + * tb_port_unlock() - Unlock downstream port + * @port: Port to unlock + * + * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the + * downstream router accessible for CM. 
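+ *
+ * Illustrative usage before scanning a downstream port (sketch only):
+ *
+ *	ret = tb_port_unlock(port);
+ *	if (ret)
+ *		tb_port_warn(port, "failed to unlock port\n");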
+ */ +int tb_port_unlock(struct tb_port *port) +{ + if (tb_switch_is_icm(port->sw)) + return 0; + if (!tb_port_is_null(port)) + return -EINVAL; + if (tb_switch_is_usb4(port->sw)) + return usb4_port_unlock(port); + return 0; +} + +static int __tb_port_enable(struct tb_port *port, bool enable) +{ + int ret; + u32 phy; + + if (!tb_port_is_null(port)) + return -EINVAL; + + ret = tb_port_read(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + if (enable) + phy &= ~LANE_ADP_CS_1_LD; + else + phy |= LANE_ADP_CS_1_LD; + + + ret = tb_port_write(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable)); + return 0; +} + +/** + * tb_port_enable() - Enable lane adapter + * @port: Port to enable (can be %NULL) + * + * This is used for lane 0 and 1 adapters to enable it. + */ +int tb_port_enable(struct tb_port *port) +{ + return __tb_port_enable(port, true); +} + +/** + * tb_port_disable() - Disable lane adapter + * @port: Port to disable (can be %NULL) + * + * This is used for lane 0 and 1 adapters to disable it. + */ +int tb_port_disable(struct tb_port *port) +{ + return __tb_port_enable(port, false); +} + +/* + * tb_init_port() - initialize a port + * + * This is a helper method for tb_switch_alloc. Does not check or initialize + * any downstream switches. + * + * Return: Returns 0 on success or an error code on failure. + */ +static int tb_init_port(struct tb_port *port) +{ + int res; + int cap; + + INIT_LIST_HEAD(&port->list); + + /* Control adapter does not have configuration space */ + if (!port->port) + return 0; + + res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8); + if (res) { + if (res == -ENODEV) { + tb_dbg(port->sw->tb, " Port %d: not implemented\n", + port->port); + port->disabled = true; + return 0; + } + return res; + } + + /* Port 0 is the switch itself and has no PHY. */ + if (port->config.type == TB_TYPE_PORT) { + cap = tb_port_find_cap(port, TB_PORT_CAP_PHY); + + if (cap > 0) + port->cap_phy = cap; + else + tb_port_WARN(port, "non switch port without a PHY\n"); + + cap = tb_port_find_cap(port, TB_PORT_CAP_USB4); + if (cap > 0) + port->cap_usb4 = cap; + + /* + * USB4 ports the buffers allocated for the control path + * can be read from the path config space. Legacy + * devices we use hard-coded value. + */ + if (port->cap_usb4) { + struct tb_regs_hop hop; + + if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2)) + port->ctl_credits = hop.initial_credits; + } + if (!port->ctl_credits) + port->ctl_credits = 2; + + } else { + cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP); + if (cap > 0) + port->cap_adap = cap; + } + + port->total_credits = + (port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >> + ADP_CS_4_TOTAL_BUFFERS_SHIFT; + + tb_dump_port(port->sw->tb, port); + return 0; +} + +static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid, + int max_hopid) +{ + int port_max_hopid; + struct ida *ida; + + if (in) { + port_max_hopid = port->config.max_in_hop_id; + ida = &port->in_hopids; + } else { + port_max_hopid = port->config.max_out_hop_id; + ida = &port->out_hopids; + } + + /* + * NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are + * reserved. 
+ */ + if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID) + min_hopid = TB_PATH_MIN_HOPID; + + if (max_hopid < 0 || max_hopid > port_max_hopid) + max_hopid = port_max_hopid; + + return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL); +} + +/** + * tb_port_alloc_in_hopid() - Allocate input HopID from port + * @port: Port to allocate HopID for + * @min_hopid: Minimum acceptable input HopID + * @max_hopid: Maximum acceptable input HopID + * + * Return: HopID between @min_hopid and @max_hopid or negative errno in + * case of error. + */ +int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid) +{ + return tb_port_alloc_hopid(port, true, min_hopid, max_hopid); +} + +/** + * tb_port_alloc_out_hopid() - Allocate output HopID from port + * @port: Port to allocate HopID for + * @min_hopid: Minimum acceptable output HopID + * @max_hopid: Maximum acceptable output HopID + * + * Return: HopID between @min_hopid and @max_hopid or negative errno in + * case of error. + */ +int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid) +{ + return tb_port_alloc_hopid(port, false, min_hopid, max_hopid); +} + +/** + * tb_port_release_in_hopid() - Release allocated input HopID from port + * @port: Port whose HopID to release + * @hopid: HopID to release + */ +void tb_port_release_in_hopid(struct tb_port *port, int hopid) +{ + ida_simple_remove(&port->in_hopids, hopid); +} + +/** + * tb_port_release_out_hopid() - Release allocated output HopID from port + * @port: Port whose HopID to release + * @hopid: HopID to release + */ +void tb_port_release_out_hopid(struct tb_port *port, int hopid) +{ + ida_simple_remove(&port->out_hopids, hopid); +} + +static inline bool tb_switch_is_reachable(const struct tb_switch *parent, + const struct tb_switch *sw) +{ + u64 mask = (1ULL << parent->config.depth * 8) - 1; + return (tb_route(parent) & mask) == (tb_route(sw) & mask); +} + +/** + * tb_next_port_on_path() - Return next port for given port on a path + * @start: Start port of the walk + * @end: End port of the walk + * @prev: Previous port (%NULL if this is the first) + * + * This function can be used to walk from one port to another if they + * are connected through zero or more switches. If the @prev is dual + * link port, the function follows that link and returns another end on + * that same link. + * + * If the @end port has been reached, return %NULL. + * + * Domain tb->lock must be held when this function is called. + */ +struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, + struct tb_port *prev) +{ + struct tb_port *next; + + if (!prev) + return start; + + if (prev->sw == end->sw) { + if (prev == end) + return NULL; + return end; + } + + if (tb_switch_is_reachable(prev->sw, end->sw)) { + next = tb_port_at(tb_route(end->sw), prev->sw); + /* Walk down the topology if next == prev */ + if (prev->remote && + (next == prev || next->dual_link_port == prev)) + next = prev->remote; + } else { + if (tb_is_upstream_port(prev)) { + next = prev->remote; + } else { + next = tb_upstream_port(prev->sw); + /* + * Keep the same link if prev and next are both + * dual link ports. + */ + if (next->dual_link_port && + next->link_nr != prev->link_nr) { + next = next->dual_link_port; + } + } + } + + return next != prev ? next : NULL; +} + +/** + * tb_port_get_link_speed() - Get current link speed + * @port: Port to check (USB4 or CIO) + * + * Returns link speed in Gb/s or negative errno in case of failure. 
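+ *
+ * Illustrative usage (sketch only):
+ *
+ *	int speed = tb_port_get_link_speed(port);
+ *
+ *	if (speed < 0)
+ *		return speed;
+ *	tb_port_dbg(port, "current link speed %d Gb/s\n", speed);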
+ */ +int tb_port_get_link_speed(struct tb_port *port) +{ + u32 val, speed; + int ret; + + if (!port->cap_phy) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >> + LANE_ADP_CS_1_CURRENT_SPEED_SHIFT; + + switch (speed) { + case LANE_ADP_CS_1_CURRENT_SPEED_GEN4: + return 40; + case LANE_ADP_CS_1_CURRENT_SPEED_GEN3: + return 20; + default: + return 10; + } +} + +/** + * tb_port_get_link_width() - Get current link width + * @port: Port to check (USB4 or CIO) + * + * Returns link width. Return the link width as encoded in &enum + * tb_link_width or negative errno in case of failure. + */ +int tb_port_get_link_width(struct tb_port *port) +{ + u32 val; + int ret; + + if (!port->cap_phy) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + /* Matches the values in enum tb_link_width */ + return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >> + LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT; +} + +static bool tb_port_is_width_supported(struct tb_port *port, + unsigned int width_mask) +{ + u32 phy, widths; + int ret; + + if (!port->cap_phy) + return false; + + ret = tb_port_read(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_0, 1); + if (ret) + return false; + + widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> + LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; + + return widths & width_mask; +} + +static bool is_gen4_link(struct tb_port *port) +{ + return tb_port_get_link_speed(port) > 20; +} + +/** + * tb_port_set_link_width() - Set target link width of the lane adapter + * @port: Lane adapter + * @width: Target link width + * + * Sets the target link width of the lane adapter to @width. Does not + * enable/disable lane bonding. For that call tb_port_set_lane_bonding(). + * + * Return: %0 in case of success and negative errno in case of error + */ +int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width) +{ + u32 val; + int ret; + + if (!port->cap_phy) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK; + switch (width) { + case TB_LINK_WIDTH_SINGLE: + /* Gen 4 link cannot be single */ + if (is_gen4_link(port)) + return -EOPNOTSUPP; + val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE << + LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; + break; + case TB_LINK_WIDTH_DUAL: + val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL << + LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; + break; + default: + return -EINVAL; + } + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); +} + +/** + * tb_port_set_lane_bonding() - Enable/disable lane bonding + * @port: Lane adapter + * @bonding: enable/disable bonding + * + * Enables or disables lane bonding. This should be called after target + * link width has been set (tb_port_set_link_width()). Note in most + * cases one should use tb_port_lane_bonding_enable() instead to enable + * lane bonding. 
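+ *
+ * A minimal ordering sketch, mirroring tb_port_lane_bonding_enable()
+ * below:
+ *
+ *	ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
+ *	if (!ret)
+ *		ret = tb_port_set_lane_bonding(port, true);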
+ * + * Return: %0 in case of success and negative errno in case of error + */ +static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding) +{ + u32 val; + int ret; + + if (!port->cap_phy) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + if (bonding) + val |= LANE_ADP_CS_1_LB; + else + val &= ~LANE_ADP_CS_1_LB; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); +} + +/** + * tb_port_lane_bonding_enable() - Enable bonding on port + * @port: port to enable + * + * Enable bonding by setting the link width of the port and the other + * port in case of dual link port. Does not wait for the link to + * actually reach the bonded state so caller needs to call + * tb_port_wait_for_link_width() before enabling any paths through the + * link to make sure the link is in expected state. + * + * Return: %0 in case of success and negative errno in case of error + */ +int tb_port_lane_bonding_enable(struct tb_port *port) +{ + enum tb_link_width width; + int ret; + + /* + * Enable lane bonding for both links if not already enabled by + * for example the boot firmware. + */ + width = tb_port_get_link_width(port); + if (width == TB_LINK_WIDTH_SINGLE) { + ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL); + if (ret) + goto err_lane0; + } + + width = tb_port_get_link_width(port->dual_link_port); + if (width == TB_LINK_WIDTH_SINGLE) { + ret = tb_port_set_link_width(port->dual_link_port, + TB_LINK_WIDTH_DUAL); + if (ret) + goto err_lane0; + } + + /* + * Only set bonding if the link was not already bonded. This + * avoids the lane adapter to re-enter bonding state. + */ + if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) { + ret = tb_port_set_lane_bonding(port, true); + if (ret) + goto err_lane1; + } + + /* + * When lane 0 bonding is set it will affect lane 1 too so + * update both. + */ + port->bonded = true; + port->dual_link_port->bonded = true; + + return 0; + +err_lane1: + tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE); +err_lane0: + tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE); + + return ret; +} + +/** + * tb_port_lane_bonding_disable() - Disable bonding on port + * @port: port to disable + * + * Disable bonding by setting the link width of the port and the + * other port in case of dual link port. + */ +void tb_port_lane_bonding_disable(struct tb_port *port) +{ + tb_port_set_lane_bonding(port, false); + tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE); + tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE); + port->dual_link_port->bonded = false; + port->bonded = false; +} + +/** + * tb_port_wait_for_link_width() - Wait until link reaches specific width + * @port: Port to wait for + * @width_mask: Expected link width mask + * @timeout_msec: Timeout in ms how long to wait + * + * Should be used after both ends of the link have been bonded (or + * bonding has been disabled) to wait until the link actually reaches + * the expected state. Returns %-ETIMEDOUT if the width was not reached + * within the given timeout, %0 if it did. Can be passed a mask of + * expected widths and succeeds if any of the widths is reached. 
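+ *
+ * For example, after both lane adapters have been bonded the caller
+ * can wait for any of the bonded widths, as done in
+ * tb_switch_lane_bonding_enable() below:
+ *
+ *	width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
+ *		     TB_LINK_WIDTH_ASYM_RX;
+ *	ret = tb_port_wait_for_link_width(down, width_mask, 100);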
+ */
+int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
+				int timeout_msec)
+{
+	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+	int ret;
+
+	/* Gen 4 link does not support single lane */
+	if ((width_mask & TB_LINK_WIDTH_SINGLE) && is_gen4_link(port))
+		return -EOPNOTSUPP;
+
+	do {
+		ret = tb_port_get_link_width(port);
+		if (ret < 0) {
+			/*
+			 * Sometimes we get a port locked error when
+			 * polling the lanes, so ignore it and retry.
+			 */
+			if (ret != -EACCES)
+				return ret;
+		} else if (ret & width_mask) {
+			return 0;
+		}
+
+		usleep_range(1000, 2000);
+	} while (ktime_before(ktime_get(), timeout));
+
+	return -ETIMEDOUT;
+}
+
+static int tb_port_do_update_credits(struct tb_port *port)
+{
+	u32 nfc_credits;
+	int ret;
+
+	ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
+	if (ret)
+		return ret;
+
+	if (nfc_credits != port->config.nfc_credits) {
+		u32 total;
+
+		total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
+
+		tb_port_dbg(port, "total credits changed %u -> %u\n",
+			    port->total_credits, total);
+
+		port->config.nfc_credits = nfc_credits;
+		port->total_credits = total;
+	}
+
+	return 0;
+}
+
+/**
+ * tb_port_update_credits() - Re-read port total credits
+ * @port: Port to update
+ *
+ * After the link is bonded (or bonding was disabled) the port total
+ * credits may change, so this function needs to be called to re-read
+ * the credits. Also updates the second lane adapter.
+ */
+int tb_port_update_credits(struct tb_port *port)
+{
+	int ret;
+
+	ret = tb_port_do_update_credits(port);
+	if (ret)
+		return ret;
+	return tb_port_do_update_credits(port->dual_link_port);
+}
+
+static int tb_port_start_lane_initialization(struct tb_port *port)
+{
+	int ret;
+
+	if (tb_switch_is_usb4(port->sw))
+		return 0;
+
+	ret = tb_lc_start_lane_initialization(port);
+	return ret == -EINVAL ? 0 : ret;
+}
+
+/*
+ * Returns true if the port had something (router, XDomain) connected
+ * before suspend.
+ */
+static bool tb_port_resume(struct tb_port *port)
+{
+	bool has_remote = tb_port_has_remote(port);
+
+	if (port->usb4) {
+		usb4_port_device_resume(port->usb4);
+	} else if (!has_remote) {
+		/*
+		 * For disconnected downstream lane adapters, start lane
+		 * initialization now so we detect future connects.
+		 *
+		 * For XDomain, start the lane initialization now so the
+		 * link gets re-established.
+		 *
+		 * This is only needed for non-USB4 ports.
+ */ + if (!tb_is_upstream_port(port) || port->xdomain) + tb_port_start_lane_initialization(port); + } + + return has_remote || port->xdomain; +} + +/** + * tb_port_is_enabled() - Is the adapter port enabled + * @port: Port to check + */ +bool tb_port_is_enabled(struct tb_port *port) +{ + switch (port->config.type) { + case TB_TYPE_PCIE_UP: + case TB_TYPE_PCIE_DOWN: + return tb_pci_port_is_enabled(port); + + case TB_TYPE_DP_HDMI_IN: + case TB_TYPE_DP_HDMI_OUT: + return tb_dp_port_is_enabled(port); + + case TB_TYPE_USB3_UP: + case TB_TYPE_USB3_DOWN: + return tb_usb3_port_is_enabled(port); + + default: + return false; + } +} + +/** + * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled + * @port: USB3 adapter port to check + */ +bool tb_usb3_port_is_enabled(struct tb_port *port) +{ + u32 data; + + if (tb_port_read(port, &data, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_0, 1)) + return false; + + return !!(data & ADP_USB3_CS_0_PE); +} + +/** + * tb_usb3_port_enable() - Enable USB3 adapter port + * @port: USB3 adapter port to enable + * @enable: Enable/disable the USB3 adapter + */ +int tb_usb3_port_enable(struct tb_port *port, bool enable) +{ + u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V) + : ADP_USB3_CS_0_V; + + if (!port->cap_adap) + return -ENXIO; + return tb_port_write(port, &word, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_0, 1); +} + +/** + * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled + * @port: PCIe port to check + */ +bool tb_pci_port_is_enabled(struct tb_port *port) +{ + u32 data; + + if (tb_port_read(port, &data, TB_CFG_PORT, + port->cap_adap + ADP_PCIE_CS_0, 1)) + return false; + + return !!(data & ADP_PCIE_CS_0_PE); +} + +/** + * tb_pci_port_enable() - Enable PCIe adapter port + * @port: PCIe port to enable + * @enable: Enable/disable the PCIe adapter + */ +int tb_pci_port_enable(struct tb_port *port, bool enable) +{ + u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0; + if (!port->cap_adap) + return -ENXIO; + return tb_port_write(port, &word, TB_CFG_PORT, + port->cap_adap + ADP_PCIE_CS_0, 1); +} + +/** + * tb_dp_port_hpd_is_active() - Is HPD already active + * @port: DP out port to check + * + * Checks if the DP OUT adapter port has HDP bit already set. + */ +int tb_dp_port_hpd_is_active(struct tb_port *port) +{ + u32 data; + int ret; + + ret = tb_port_read(port, &data, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); + if (ret) + return ret; + + return !!(data & ADP_DP_CS_2_HDP); +} + +/** + * tb_dp_port_hpd_clear() - Clear HPD from DP IN port + * @port: Port to clear HPD + * + * If the DP IN port has HDP set, this function can be used to clear it. + */ +int tb_dp_port_hpd_clear(struct tb_port *port) +{ + u32 data; + int ret; + + ret = tb_port_read(port, &data, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_3, 1); + if (ret) + return ret; + + data |= ADP_DP_CS_3_HDPC; + return tb_port_write(port, &data, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_3, 1); +} + +/** + * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port + * @port: DP IN/OUT port to set hops + * @video: Video Hop ID + * @aux_tx: AUX TX Hop ID + * @aux_rx: AUX RX Hop ID + * + * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4 + * router DP adapters too but does not program the values as the fields + * are read-only. 
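+ *
+ * Illustrative ordering (the HopID arguments below are placeholders
+ * chosen by the caller): program the HopIDs first and only then enable
+ * the paths with tb_dp_port_enable():
+ *
+ *	ret = tb_dp_port_set_hops(port, video_hopid, aux_tx_hopid, aux_rx_hopid);
+ *	if (!ret)
+ *		ret = tb_dp_port_enable(port, true);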
+ */
+int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
+			unsigned int aux_tx, unsigned int aux_rx)
+{
+	u32 data[2];
+	int ret;
+
+	if (tb_switch_is_usb4(port->sw))
+		return 0;
+
+	ret = tb_port_read(port, data, TB_CFG_PORT,
+			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
+	if (ret)
+		return ret;
+
+	data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
+	data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+	data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+
+	data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
+		ADP_DP_CS_0_VIDEO_HOPID_MASK;
+	data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+	data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
+		ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+
+	return tb_port_write(port, data, TB_CFG_PORT,
+			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
+}
+
+/**
+ * tb_dp_port_is_enabled() - Is DP adapter port enabled
+ * @port: DP adapter port to check
+ */
+bool tb_dp_port_is_enabled(struct tb_port *port)
+{
+	u32 data[2];
+
+	if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
+			 ARRAY_SIZE(data)))
+		return false;
+
+	return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
+}
+
+/**
+ * tb_dp_port_enable() - Enables/disables DP paths of a port
+ * @port: DP IN/OUT port
+ * @enable: Enable/disable DP path
+ *
+ * Once Hop IDs are programmed, DP paths can be enabled or disabled by
+ * calling this function.
+ */
+int tb_dp_port_enable(struct tb_port *port, bool enable)
+{
+	u32 data[2];
+	int ret;
+
+	ret = tb_port_read(port, data, TB_CFG_PORT,
+			   port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
+	if (ret)
+		return ret;
+
+	if (enable)
+		data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
+	else
+		data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
+
+	return tb_port_write(port, data, TB_CFG_PORT,
+			     port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
+}
+
+/* switch utility functions */
+
+static const char *tb_switch_generation_name(const struct tb_switch *sw)
+{
+	switch (sw->generation) {
+	case 1:
+		return "Thunderbolt 1";
+	case 2:
+		return "Thunderbolt 2";
+	case 3:
+		return "Thunderbolt 3";
+	case 4:
+		return "USB4";
+	default:
+		return "Unknown";
+	}
+}
+
+static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
+{
+	const struct tb_regs_switch_header *regs = &sw->config;
+
+	tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+	       tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
+	       regs->revision, regs->thunderbolt_version);
+	tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
+	tb_dbg(tb, " Config:\n");
+	tb_dbg(tb,
+	       " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
+	       regs->upstream_port_number, regs->depth,
+	       (((u64) regs->route_hi) << 32) | regs->route_lo,
+	       regs->enabled, regs->plug_events_delay);
+	tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
+	       regs->__unknown1, regs->__unknown4);
+}
+
+/**
+ * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
+ * @sw: Switch to reset
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */ +int tb_switch_reset(struct tb_switch *sw) +{ + struct tb_cfg_result res; + + if (sw->generation > 1) + return 0; + + tb_sw_dbg(sw, "resetting switch\n"); + + res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2, + TB_CFG_SWITCH, 2, 2); + if (res.err) + return res.err; + res = tb_cfg_reset(sw->tb->ctl, tb_route(sw)); + if (res.err > 0) + return -EIO; + return res.err; +} + +/** + * tb_switch_wait_for_bit() - Wait for specified value of bits in offset + * @sw: Router to read the offset value from + * @offset: Offset in the router config space to read from + * @bit: Bit mask in the offset to wait for + * @value: Value of the bits to wait for + * @timeout_msec: Timeout in ms how long to wait + * + * Wait till the specified bits in specified offset reach specified value. + * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached + * within the given timeout or a negative errno in case of failure. + */ +int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, + u32 value, int timeout_msec) +{ + ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); + + do { + u32 val; + int ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + if ((val & bit) == value) + return 0; + + usleep_range(50, 100); + } while (ktime_before(ktime_get(), timeout)); + + return -ETIMEDOUT; +} + +/* + * tb_plug_events_active() - enable/disable plug events on a switch + * + * Also configures a sane plug_events_delay of 255ms. + * + * Return: Returns 0 on success or an error code on failure. + */ +static int tb_plug_events_active(struct tb_switch *sw, bool active) +{ + u32 data; + int res; + + if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw)) + return 0; + + sw->config.plug_events_delay = 0xff; + res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1); + if (res) + return res; + + res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); + if (res) + return res; + + if (active) { + data = data & 0xFFFFFF83; + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: + case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: + case PCI_DEVICE_ID_INTEL_PORT_RIDGE: + break; + default: + /* + * Skip Alpine Ridge, it needs to have vendor + * specific USB hotplug event enabled for the + * internal xHCI to work. 
+ */ + if (!tb_switch_is_alpine_ridge(sw)) + data |= TB_PLUG_EVENTS_USB_DISABLE; + } + } else { + data = data | 0x7c; + } + return tb_sw_write(sw, &data, TB_CFG_SWITCH, + sw->cap_plug_events + 1, 1); +} + +static ssize_t authorized_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + + return sysfs_emit(buf, "%u\n", sw->authorized); +} + +static int disapprove_switch(struct device *dev, void *not_used) +{ + char *envp[] = { "AUTHORIZED=0", NULL }; + struct tb_switch *sw; + + sw = tb_to_switch(dev); + if (sw && sw->authorized) { + int ret; + + /* First children */ + ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch); + if (ret) + return ret; + + ret = tb_domain_disapprove_switch(sw->tb, sw); + if (ret) + return ret; + + sw->authorized = 0; + kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp); + } + + return 0; +} + +static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) +{ + char envp_string[13]; + int ret = -EINVAL; + char *envp[] = { envp_string, NULL }; + + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); + + if (!!sw->authorized == !!val) + goto unlock; + + switch (val) { + /* Disapprove switch */ + case 0: + if (tb_route(sw)) { + ret = disapprove_switch(&sw->dev, NULL); + goto unlock; + } + break; + + /* Approve switch */ + case 1: + if (sw->key) + ret = tb_domain_approve_switch_key(sw->tb, sw); + else + ret = tb_domain_approve_switch(sw->tb, sw); + break; + + /* Challenge switch */ + case 2: + if (sw->key) + ret = tb_domain_challenge_switch_key(sw->tb, sw); + break; + + default: + break; + } + + if (!ret) { + sw->authorized = val; + /* + * Notify status change to the userspace, informing the new + * value of /sys/bus/thunderbolt/devices/.../authorized. + */ + sprintf(envp_string, "AUTHORIZED=%u", sw->authorized); + kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp); + } + +unlock: + mutex_unlock(&sw->tb->lock); + return ret; +} + +static ssize_t authorized_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct tb_switch *sw = tb_to_switch(dev); + unsigned int val; + ssize_t ret; + + ret = kstrtouint(buf, 0, &val); + if (ret) + return ret; + if (val > 2) + return -EINVAL; + + pm_runtime_get_sync(&sw->dev); + ret = tb_switch_set_authorized(sw, val); + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret ? 
ret : count; +} +static DEVICE_ATTR_RW(authorized); + +static ssize_t boot_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + + return sysfs_emit(buf, "%u\n", sw->boot); +} +static DEVICE_ATTR_RO(boot); + +static ssize_t device_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + + return sysfs_emit(buf, "%#x\n", sw->device); +} +static DEVICE_ATTR_RO(device); + +static ssize_t +device_name_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + + return sysfs_emit(buf, "%s\n", sw->device_name ?: ""); +} +static DEVICE_ATTR_RO(device_name); + +static ssize_t +generation_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + + return sysfs_emit(buf, "%u\n", sw->generation); +} +static DEVICE_ATTR_RO(generation); + +static ssize_t key_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + ssize_t ret; + + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); + + if (sw->key) + ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); + else + ret = sysfs_emit(buf, "\n"); + + mutex_unlock(&sw->tb->lock); + return ret; +} + +static ssize_t key_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct tb_switch *sw = tb_to_switch(dev); + u8 key[TB_SWITCH_KEY_SIZE]; + ssize_t ret = count; + bool clear = false; + + if (!strcmp(buf, "\n")) + clear = true; + else if (hex2bin(key, buf, sizeof(key))) + return -EINVAL; + + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); + + if (sw->authorized) { + ret = -EBUSY; + } else { + kfree(sw->key); + if (clear) { + sw->key = NULL; + } else { + sw->key = kmemdup(key, sizeof(key), GFP_KERNEL); + if (!sw->key) + ret = -ENOMEM; + } + } + + mutex_unlock(&sw->tb->lock); + return ret; +} +static DEVICE_ATTR(key, 0600, key_show, key_store); + +static ssize_t speed_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + + return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed); +} + +/* + * Currently all lanes must run at the same speed but we expose here + * both directions to allow possible asymmetric links in the future. 
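+ *
+ * For example, a device behind a Gen 3 link reports "20.0 Gb/s" from
+ * both rx_speed and tx_speed, since sw->link_speed holds the per-lane
+ * speed in Gb/s.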
+ */ +static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); +static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); + +static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + unsigned int width; + + switch (sw->link_width) { + case TB_LINK_WIDTH_SINGLE: + case TB_LINK_WIDTH_ASYM_TX: + width = 1; + break; + case TB_LINK_WIDTH_DUAL: + width = 2; + break; + case TB_LINK_WIDTH_ASYM_RX: + width = 3; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + return sysfs_emit(buf, "%u\n", width); +} +static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL); + +static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + unsigned int width; + + switch (sw->link_width) { + case TB_LINK_WIDTH_SINGLE: + case TB_LINK_WIDTH_ASYM_RX: + width = 1; + break; + case TB_LINK_WIDTH_DUAL: + width = 2; + break; + case TB_LINK_WIDTH_ASYM_TX: + width = 3; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + return sysfs_emit(buf, "%u\n", width); +} +static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL); + +static ssize_t nvm_authenticate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + u32 status; + + nvm_get_auth_status(sw, &status); + return sysfs_emit(buf, "%#x\n", status); +} + +static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf, + bool disconnect) +{ + struct tb_switch *sw = tb_to_switch(dev); + int val, ret; + + pm_runtime_get_sync(&sw->dev); + + if (!mutex_trylock(&sw->tb->lock)) { + ret = restart_syscall(); + goto exit_rpm; + } + + if (sw->no_nvm_upgrade) { + ret = -EOPNOTSUPP; + goto exit_unlock; + } + + /* If NVMem devices are not yet added */ + if (!sw->nvm) { + ret = -EAGAIN; + goto exit_unlock; + } + + ret = kstrtoint(buf, 10, &val); + if (ret) + goto exit_unlock; + + /* Always clear the authentication status */ + nvm_clear_auth_status(sw); + + if (val > 0) { + if (val == AUTHENTICATE_ONLY) { + if (disconnect) + ret = -EINVAL; + else + ret = nvm_authenticate(sw, true); + } else { + if (!sw->nvm->flushed) { + if (!sw->nvm->buf) { + ret = -EINVAL; + goto exit_unlock; + } + + ret = nvm_validate_and_write(sw); + if (ret || val == WRITE_ONLY) + goto exit_unlock; + } + if (val == WRITE_AND_AUTHENTICATE) { + if (disconnect) + ret = tb_lc_force_power(sw); + else + ret = nvm_authenticate(sw, false); + } + } + } + +exit_unlock: + mutex_unlock(&sw->tb->lock); +exit_rpm: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} + +static ssize_t nvm_authenticate_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int ret = nvm_authenticate_sysfs(dev, buf, false); + if (ret) + return ret; + return count; +} +static DEVICE_ATTR_RW(nvm_authenticate); + +static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return nvm_authenticate_show(dev, attr, buf); +} + +static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int ret; + + ret = nvm_authenticate_sysfs(dev, buf, true); + return ret ? 
ret : count; +} +static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect); + +static ssize_t nvm_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + int ret; + + if (!mutex_trylock(&sw->tb->lock)) + return restart_syscall(); + + if (sw->safe_mode) + ret = -ENODATA; + else if (!sw->nvm) + ret = -EAGAIN; + else + ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); + + mutex_unlock(&sw->tb->lock); + + return ret; +} +static DEVICE_ATTR_RO(nvm_version); + +static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + + return sysfs_emit(buf, "%#x\n", sw->vendor); +} +static DEVICE_ATTR_RO(vendor); + +static ssize_t +vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + + return sysfs_emit(buf, "%s\n", sw->vendor_name ?: ""); +} +static DEVICE_ATTR_RO(vendor_name); + +static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + + return sysfs_emit(buf, "%pUb\n", sw->uuid); +} +static DEVICE_ATTR_RO(unique_id); + +static struct attribute *switch_attrs[] = { + &dev_attr_authorized.attr, + &dev_attr_boot.attr, + &dev_attr_device.attr, + &dev_attr_device_name.attr, + &dev_attr_generation.attr, + &dev_attr_key.attr, + &dev_attr_nvm_authenticate.attr, + &dev_attr_nvm_authenticate_on_disconnect.attr, + &dev_attr_nvm_version.attr, + &dev_attr_rx_speed.attr, + &dev_attr_rx_lanes.attr, + &dev_attr_tx_speed.attr, + &dev_attr_tx_lanes.attr, + &dev_attr_vendor.attr, + &dev_attr_vendor_name.attr, + &dev_attr_unique_id.attr, + NULL, +}; + +static umode_t switch_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct tb_switch *sw = tb_to_switch(dev); + + if (attr == &dev_attr_authorized.attr) { + if (sw->tb->security_level == TB_SECURITY_NOPCIE || + sw->tb->security_level == TB_SECURITY_DPONLY) + return 0; + } else if (attr == &dev_attr_device.attr) { + if (!sw->device) + return 0; + } else if (attr == &dev_attr_device_name.attr) { + if (!sw->device_name) + return 0; + } else if (attr == &dev_attr_vendor.attr) { + if (!sw->vendor) + return 0; + } else if (attr == &dev_attr_vendor_name.attr) { + if (!sw->vendor_name) + return 0; + } else if (attr == &dev_attr_key.attr) { + if (tb_route(sw) && + sw->tb->security_level == TB_SECURITY_SECURE && + sw->security_level == TB_SECURITY_SECURE) + return attr->mode; + return 0; + } else if (attr == &dev_attr_rx_speed.attr || + attr == &dev_attr_rx_lanes.attr || + attr == &dev_attr_tx_speed.attr || + attr == &dev_attr_tx_lanes.attr) { + if (tb_route(sw)) + return attr->mode; + return 0; + } else if (attr == &dev_attr_nvm_authenticate.attr) { + if (nvm_upgradeable(sw)) + return attr->mode; + return 0; + } else if (attr == &dev_attr_nvm_version.attr) { + if (nvm_readable(sw)) + return attr->mode; + return 0; + } else if (attr == &dev_attr_boot.attr) { + if (tb_route(sw)) + return attr->mode; + return 0; + } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) { + if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER) + return attr->mode; + return 0; + } + + return sw->safe_mode ? 
0 : attr->mode; +} + +static const struct attribute_group switch_group = { + .is_visible = switch_attr_is_visible, + .attrs = switch_attrs, +}; + +static const struct attribute_group *switch_groups[] = { + &switch_group, + NULL, +}; + +static void tb_switch_release(struct device *dev) +{ + struct tb_switch *sw = tb_to_switch(dev); + struct tb_port *port; + + dma_port_free(sw->dma_port); + + tb_switch_for_each_port(sw, port) { + ida_destroy(&port->in_hopids); + ida_destroy(&port->out_hopids); + } + + kfree(sw->uuid); + kfree(sw->device_name); + kfree(sw->vendor_name); + kfree(sw->ports); + kfree(sw->drom); + kfree(sw->key); + kfree(sw); +} + +static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env) +{ + const struct tb_switch *sw = tb_to_switch(dev); + const char *type; + + if (tb_switch_is_usb4(sw)) { + if (add_uevent_var(env, "USB4_VERSION=%u.0", + usb4_switch_version(sw))) + return -ENOMEM; + } + + if (!tb_route(sw)) { + type = "host"; + } else { + const struct tb_port *port; + bool hub = false; + + /* Device is hub if it has any downstream ports */ + tb_switch_for_each_port(sw, port) { + if (!port->disabled && !tb_is_upstream_port(port) && + tb_port_is_null(port)) { + hub = true; + break; + } + } + + type = hub ? "hub" : "device"; + } + + if (add_uevent_var(env, "USB4_TYPE=%s", type)) + return -ENOMEM; + return 0; +} + +/* + * Currently only need to provide the callbacks. Everything else is handled + * in the connection manager. + */ +static int __maybe_unused tb_switch_runtime_suspend(struct device *dev) +{ + struct tb_switch *sw = tb_to_switch(dev); + const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; + + if (cm_ops->runtime_suspend_switch) + return cm_ops->runtime_suspend_switch(sw); + + return 0; +} + +static int __maybe_unused tb_switch_runtime_resume(struct device *dev) +{ + struct tb_switch *sw = tb_to_switch(dev); + const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; + + if (cm_ops->runtime_resume_switch) + return cm_ops->runtime_resume_switch(sw); + return 0; +} + +static const struct dev_pm_ops tb_switch_pm_ops = { + SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume, + NULL) +}; + +struct device_type tb_switch_type = { + .name = "thunderbolt_device", + .release = tb_switch_release, + .uevent = tb_switch_uevent, + .pm = &tb_switch_pm_ops, +}; + +static int tb_switch_get_generation(struct tb_switch *sw) +{ + if (tb_switch_is_usb4(sw)) + return 4; + + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: + case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: + case PCI_DEVICE_ID_INTEL_LIGHT_PEAK: + case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: + case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: + case PCI_DEVICE_ID_INTEL_PORT_RIDGE: + case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE: + return 1; + + case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: + return 2; + + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: + case PCI_DEVICE_ID_INTEL_ICL_NHI0: + case PCI_DEVICE_ID_INTEL_ICL_NHI1: + return 3; 
+ } + } + + /* + * For unknown switches assume generation to be 1 to be on the + * safe side. + */ + tb_sw_warn(sw, "unsupported switch device id %#x\n", + sw->config.device_id); + return 1; +} + +static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) +{ + int max_depth; + + if (tb_switch_is_usb4(sw) || + (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch))) + max_depth = USB4_SWITCH_MAX_DEPTH; + else + max_depth = TB_SWITCH_MAX_DEPTH; + + return depth > max_depth; +} + +/** + * tb_switch_alloc() - allocate a switch + * @tb: Pointer to the owning domain + * @parent: Parent device for this switch + * @route: Route string for this switch + * + * Allocates and initializes a switch. Will not upload configuration to + * the switch. For that you need to call tb_switch_configure() + * separately. The returned switch should be released by calling + * tb_switch_put(). + * + * Return: Pointer to the allocated switch or ERR_PTR() in case of + * failure. + */ +struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, + u64 route) +{ + struct tb_switch *sw; + int upstream_port; + int i, ret, depth; + + /* Unlock the downstream port so we can access the switch below */ + if (route) { + struct tb_switch *parent_sw = tb_to_switch(parent); + struct tb_port *down; + + down = tb_port_at(route, parent_sw); + tb_port_unlock(down); + } + + depth = tb_route_length(route); + + upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); + if (upstream_port < 0) + return ERR_PTR(upstream_port); + + sw = kzalloc(sizeof(*sw), GFP_KERNEL); + if (!sw) + return ERR_PTR(-ENOMEM); + + sw->tb = tb; + ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5); + if (ret) + goto err_free_sw_ports; + + sw->generation = tb_switch_get_generation(sw); + + tb_dbg(tb, "current switch config:\n"); + tb_dump_switch(tb, sw); + + /* configure switch */ + sw->config.upstream_port_number = upstream_port; + sw->config.depth = depth; + sw->config.route_hi = upper_32_bits(route); + sw->config.route_lo = lower_32_bits(route); + sw->config.enabled = 0; + + /* Make sure we do not exceed maximum topology limit */ + if (tb_switch_exceeds_max_depth(sw, depth)) { + ret = -EADDRNOTAVAIL; + goto err_free_sw_ports; + } + + /* initialize ports */ + sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), + GFP_KERNEL); + if (!sw->ports) { + ret = -ENOMEM; + goto err_free_sw_ports; + } + + for (i = 0; i <= sw->config.max_port_number; i++) { + /* minimum setup for tb_find_cap and tb_drom_read to work */ + sw->ports[i].sw = sw; + sw->ports[i].port = i; + + /* Control port does not need HopID allocation */ + if (i) { + ida_init(&sw->ports[i].in_hopids); + ida_init(&sw->ports[i].out_hopids); + } + } + + ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); + if (ret > 0) + sw->cap_plug_events = ret; + + ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2); + if (ret > 0) + sw->cap_vsec_tmu = ret; + + ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); + if (ret > 0) + sw->cap_lc = ret; + + ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP); + if (ret > 0) + sw->cap_lp = ret; + + /* Root switch is always authorized */ + if (!route) + sw->authorized = true; + + device_initialize(&sw->dev); + sw->dev.parent = parent; + sw->dev.bus = &tb_bus_type; + sw->dev.type = &tb_switch_type; + sw->dev.groups = switch_groups; + dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); + + return sw; + +err_free_sw_ports: + kfree(sw->ports); + kfree(sw); + + return ERR_PTR(ret); +} + 
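+
+/*
+ * Illustrative lifecycle of a router as driven by the connection
+ * managers (error handling and locking omitted; @parent and @route
+ * come from the caller):
+ *
+ *	sw = tb_switch_alloc(tb, parent, route);
+ *	if (IS_ERR(sw))
+ *		return PTR_ERR(sw);
+ *	if (!tb_switch_configure(sw))
+ *		tb_switch_add(sw);
+ *	...
+ *	tb_switch_remove(sw);
+ */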
+/** + * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode + * @tb: Pointer to the owning domain + * @parent: Parent device for this switch + * @route: Route string for this switch + * + * This creates a switch in safe mode. This means the switch pretty much + * lacks all capabilities except DMA configuration port before it is + * flashed with a valid NVM firmware. + * + * The returned switch must be released by calling tb_switch_put(). + * + * Return: Pointer to the allocated switch or ERR_PTR() in case of failure + */ +struct tb_switch * +tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) +{ + struct tb_switch *sw; + + sw = kzalloc(sizeof(*sw), GFP_KERNEL); + if (!sw) + return ERR_PTR(-ENOMEM); + + sw->tb = tb; + sw->config.depth = tb_route_length(route); + sw->config.route_hi = upper_32_bits(route); + sw->config.route_lo = lower_32_bits(route); + sw->safe_mode = true; + + device_initialize(&sw->dev); + sw->dev.parent = parent; + sw->dev.bus = &tb_bus_type; + sw->dev.type = &tb_switch_type; + sw->dev.groups = switch_groups; + dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); + + return sw; +} + +/** + * tb_switch_configure() - Uploads configuration to the switch + * @sw: Switch to configure + * + * Call this function before the switch is added to the system. It will + * upload configuration to the switch and makes it available for the + * connection manager to use. Can be called to the switch again after + * resume from low power states to re-initialize it. + * + * Return: %0 in case of success and negative errno in case of failure + */ +int tb_switch_configure(struct tb_switch *sw) +{ + struct tb *tb = sw->tb; + u64 route; + int ret; + + route = tb_route(sw); + + tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", + sw->config.enabled ? "restoring" : "initializing", route, + tb_route_length(route), sw->config.upstream_port_number); + + sw->config.enabled = 1; + + if (tb_switch_is_usb4(sw)) { + /* + * For USB4 devices, we need to program the CM version + * accordingly so that it knows to expose all the + * additional capabilities. Program it according to USB4 + * version to avoid changing existing (v1) routers behaviour. + */ + if (usb4_switch_version(sw) < 2) + sw->config.cmuv = ROUTER_CS_4_CMUV_V1; + else + sw->config.cmuv = ROUTER_CS_4_CMUV_V2; + sw->config.plug_events_delay = 0xa; + + /* Enumerate the switch */ + ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, + ROUTER_CS_1, 4); + if (ret) + return ret; + + ret = usb4_switch_setup(sw); + } else { + if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) + tb_sw_warn(sw, "unknown switch vendor id %#x\n", + sw->config.vendor_id); + + if (!sw->cap_plug_events) { + tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); + return -ENODEV; + } + + /* Enumerate the switch */ + ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, + ROUTER_CS_1, 3); + } + if (ret) + return ret; + + return tb_plug_events_active(sw, true); +} + +/** + * tb_switch_configuration_valid() - Set the tunneling configuration to be valid + * @sw: Router to configure + * + * Needs to be called before any tunnels can be setup through the + * router. Can be called to any router. + * + * Returns %0 in success and negative errno otherwise. 
+ */ +int tb_switch_configuration_valid(struct tb_switch *sw) +{ + if (tb_switch_is_usb4(sw)) + return usb4_switch_configuration_valid(sw); + return 0; +} + +static int tb_switch_set_uuid(struct tb_switch *sw) +{ + bool uid = false; + u32 uuid[4]; + int ret; + + if (sw->uuid) + return 0; + + if (tb_switch_is_usb4(sw)) { + ret = usb4_switch_read_uid(sw, &sw->uid); + if (ret) + return ret; + uid = true; + } else { + /* + * The newer controllers include fused UUID as part of + * link controller specific registers + */ + ret = tb_lc_read_uuid(sw, uuid); + if (ret) { + if (ret != -EINVAL) + return ret; + uid = true; + } + } + + if (uid) { + /* + * ICM generates UUID based on UID and fills the upper + * two words with ones. This is not strictly following + * UUID format but we want to be compatible with it so + * we do the same here. + */ + uuid[0] = sw->uid & 0xffffffff; + uuid[1] = (sw->uid >> 32) & 0xffffffff; + uuid[2] = 0xffffffff; + uuid[3] = 0xffffffff; + } + + sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); + if (!sw->uuid) + return -ENOMEM; + return 0; +} + +static int tb_switch_add_dma_port(struct tb_switch *sw) +{ + u32 status; + int ret; + + switch (sw->generation) { + case 2: + /* Only root switch can be upgraded */ + if (tb_route(sw)) + return 0; + + fallthrough; + case 3: + case 4: + ret = tb_switch_set_uuid(sw); + if (ret) + return ret; + break; + + default: + /* + * DMA port is the only thing available when the switch + * is in safe mode. + */ + if (!sw->safe_mode) + return 0; + break; + } + + if (sw->no_nvm_upgrade) + return 0; + + if (tb_switch_is_usb4(sw)) { + ret = usb4_switch_nvm_authenticate_status(sw, &status); + if (ret) + return ret; + + if (status) { + tb_sw_info(sw, "switch flash authentication failed\n"); + nvm_set_auth_status(sw, status); + } + + return 0; + } + + /* Root switch DMA port requires running firmware */ + if (!tb_route(sw) && !tb_switch_is_icm(sw)) + return 0; + + sw->dma_port = dma_port_alloc(sw); + if (!sw->dma_port) + return 0; + + /* + * If there is status already set then authentication failed + * when the dma_port_flash_update_auth() returned. Power cycling + * is not needed (it was done already) so only thing we do here + * is to unblock runtime PM of the root port. + */ + nvm_get_auth_status(sw, &status); + if (status) { + if (!tb_route(sw)) + nvm_authenticate_complete_dma_port(sw); + return 0; + } + + /* + * Check status of the previous flash authentication. If there + * is one we need to power cycle the switch in any case to make + * it functional again. + */ + ret = dma_port_flash_update_auth_status(sw->dma_port, &status); + if (ret <= 0) + return ret; + + /* Now we can allow root port to suspend again */ + if (!tb_route(sw)) + nvm_authenticate_complete_dma_port(sw); + + if (status) { + tb_sw_info(sw, "switch flash authentication failed\n"); + nvm_set_auth_status(sw, status); + } + + tb_sw_info(sw, "power cycling the switch now\n"); + dma_port_power_cycle(sw->dma_port); + + /* + * We return error here which causes the switch adding failure. + * It should appear back after power cycle is complete. 
+ */ + return -ESHUTDOWN; +} + +static void tb_switch_default_link_ports(struct tb_switch *sw) +{ + int i; + + for (i = 1; i <= sw->config.max_port_number; i++) { + struct tb_port *port = &sw->ports[i]; + struct tb_port *subordinate; + + if (!tb_port_is_null(port)) + continue; + + /* Check for the subordinate port */ + if (i == sw->config.max_port_number || + !tb_port_is_null(&sw->ports[i + 1])) + continue; + + /* Link them if not already done so (by DROM) */ + subordinate = &sw->ports[i + 1]; + if (!port->dual_link_port && !subordinate->dual_link_port) { + port->link_nr = 0; + port->dual_link_port = subordinate; + subordinate->link_nr = 1; + subordinate->dual_link_port = port; + + tb_sw_dbg(sw, "linked ports %d <-> %d\n", + port->port, subordinate->port); + } + } +} + +static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) +{ + const struct tb_port *up = tb_upstream_port(sw); + + if (!up->dual_link_port || !up->dual_link_port->remote) + return false; + + if (tb_switch_is_usb4(sw)) + return usb4_switch_lane_bonding_possible(sw); + return tb_lc_lane_bonding_possible(sw); +} + +static int tb_switch_update_link_attributes(struct tb_switch *sw) +{ + struct tb_port *up; + bool change = false; + int ret; + + if (!tb_route(sw) || tb_switch_is_icm(sw)) + return 0; + + up = tb_upstream_port(sw); + + ret = tb_port_get_link_speed(up); + if (ret < 0) + return ret; + if (sw->link_speed != ret) + change = true; + sw->link_speed = ret; + + ret = tb_port_get_link_width(up); + if (ret < 0) + return ret; + if (sw->link_width != ret) + change = true; + sw->link_width = ret; + + /* Notify userspace that there is possible link attribute change */ + if (device_is_registered(&sw->dev) && change) + kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); + + return 0; +} + +/** + * tb_switch_lane_bonding_enable() - Enable lane bonding + * @sw: Switch to enable lane bonding + * + * Connection manager can call this function to enable lane bonding of a + * switch. If conditions are correct and both switches support the feature, + * lanes are bonded. It is safe to call this to any switch. + */ +int tb_switch_lane_bonding_enable(struct tb_switch *sw) +{ + struct tb_port *up, *down; + u64 route = tb_route(sw); + unsigned int width_mask; + int ret; + + if (!route) + return 0; + + if (!tb_switch_lane_bonding_possible(sw)) + return 0; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + + if (!tb_port_is_width_supported(up, TB_LINK_WIDTH_DUAL) || + !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL)) + return 0; + + /* + * Both lanes need to be in CL0. Here we assume lane 0 already be in + * CL0 and check just for lane 1. 
+ */ + if (tb_wait_for_port(down->dual_link_port, false) <= 0) + return -ENOTCONN; + + ret = tb_port_lane_bonding_enable(up); + if (ret) { + tb_port_warn(up, "failed to enable lane bonding\n"); + return ret; + } + + ret = tb_port_lane_bonding_enable(down); + if (ret) { + tb_port_warn(down, "failed to enable lane bonding\n"); + tb_port_lane_bonding_disable(up); + return ret; + } + + /* Any of the widths are all bonded */ + width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX | + TB_LINK_WIDTH_ASYM_RX; + + ret = tb_port_wait_for_link_width(down, width_mask, 100); + if (ret) { + tb_port_warn(down, "timeout enabling lane bonding\n"); + return ret; + } + + tb_port_update_credits(down); + tb_port_update_credits(up); + tb_switch_update_link_attributes(sw); + + tb_sw_dbg(sw, "lane bonding enabled\n"); + return ret; +} + +/** + * tb_switch_lane_bonding_disable() - Disable lane bonding + * @sw: Switch whose lane bonding to disable + * + * Disables lane bonding between @sw and parent. This can be called even + * if lanes were not bonded originally. + */ +void tb_switch_lane_bonding_disable(struct tb_switch *sw) +{ + struct tb_port *up, *down; + int ret; + + if (!tb_route(sw)) + return; + + up = tb_upstream_port(sw); + if (!up->bonded) + return; + + down = tb_switch_downstream_port(sw); + + tb_port_lane_bonding_disable(up); + tb_port_lane_bonding_disable(down); + + /* + * It is fine if we get other errors as the router might have + * been unplugged. + */ + ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100); + if (ret == -ETIMEDOUT) + tb_sw_warn(sw, "timeout disabling lane bonding\n"); + + tb_port_update_credits(down); + tb_port_update_credits(up); + tb_switch_update_link_attributes(sw); + + tb_sw_dbg(sw, "lane bonding disabled\n"); +} + +/** + * tb_switch_configure_link() - Set link configured + * @sw: Switch whose link is configured + * + * Sets the link upstream from @sw configured (from both ends) so that + * it will not be disconnected when the domain exits sleep. Can be + * called for any switch. + * + * It is recommended that this is called after lane bonding is enabled. + * + * Returns %0 on success and negative errno in case of error. + */ +int tb_switch_configure_link(struct tb_switch *sw) +{ + struct tb_port *up, *down; + int ret; + + if (!tb_route(sw) || tb_switch_is_icm(sw)) + return 0; + + up = tb_upstream_port(sw); + if (tb_switch_is_usb4(up->sw)) + ret = usb4_port_configure(up); + else + ret = tb_lc_configure_port(up); + if (ret) + return ret; + + down = up->remote; + if (tb_switch_is_usb4(down->sw)) + return usb4_port_configure(down); + return tb_lc_configure_port(down); +} + +/** + * tb_switch_unconfigure_link() - Unconfigure link + * @sw: Switch whose link is unconfigured + * + * Sets the link unconfigured so the @sw will be disconnected if the + * domain exists sleep. 
+ */ +void tb_switch_unconfigure_link(struct tb_switch *sw) +{ + struct tb_port *up, *down; + + if (sw->is_unplugged) + return; + if (!tb_route(sw) || tb_switch_is_icm(sw)) + return; + + up = tb_upstream_port(sw); + if (tb_switch_is_usb4(up->sw)) + usb4_port_unconfigure(up); + else + tb_lc_unconfigure_port(up); + + down = up->remote; + if (tb_switch_is_usb4(down->sw)) + usb4_port_unconfigure(down); + else + tb_lc_unconfigure_port(down); +} + +static void tb_switch_credits_init(struct tb_switch *sw) +{ + if (tb_switch_is_icm(sw)) + return; + if (!tb_switch_is_usb4(sw)) + return; + if (usb4_switch_credits_init(sw)) + tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n"); +} + +static int tb_switch_port_hotplug_enable(struct tb_switch *sw) +{ + struct tb_port *port; + + if (tb_switch_is_icm(sw)) + return 0; + + tb_switch_for_each_port(sw, port) { + int res; + + if (!port->cap_usb4) + continue; + + res = usb4_port_hotplug_enable(port); + if (res) + return res; + } + return 0; +} + +/** + * tb_switch_add() - Add a switch to the domain + * @sw: Switch to add + * + * This is the last step in adding switch to the domain. It will read + * identification information from DROM and initializes ports so that + * they can be used to connect other switches. The switch will be + * exposed to the userspace when this function successfully returns. To + * remove and release the switch, call tb_switch_remove(). + * + * Return: %0 in case of success and negative errno in case of failure + */ +int tb_switch_add(struct tb_switch *sw) +{ + int i, ret; + + /* + * Initialize DMA control port now before we read DROM. Recent + * host controllers have more complete DROM on NVM that includes + * vendor and model identification strings which we then expose + * to the userspace. NVM can be accessed through DMA + * configuration based mailbox. 
+ */ + ret = tb_switch_add_dma_port(sw); + if (ret) { + dev_err(&sw->dev, "failed to add DMA port\n"); + return ret; + } + + if (!sw->safe_mode) { + tb_switch_credits_init(sw); + + /* read drom */ + ret = tb_drom_read(sw); + if (ret) + dev_warn(&sw->dev, "reading DROM failed: %d\n", ret); + tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); + + ret = tb_switch_set_uuid(sw); + if (ret) { + dev_err(&sw->dev, "failed to set UUID\n"); + return ret; + } + + for (i = 0; i <= sw->config.max_port_number; i++) { + if (sw->ports[i].disabled) { + tb_port_dbg(&sw->ports[i], "disabled by eeprom\n"); + continue; + } + ret = tb_init_port(&sw->ports[i]); + if (ret) { + dev_err(&sw->dev, "failed to initialize port %d\n", i); + return ret; + } + } + + tb_check_quirks(sw); + + tb_switch_default_link_ports(sw); + + ret = tb_switch_update_link_attributes(sw); + if (ret) + return ret; + + ret = tb_switch_clx_init(sw); + if (ret) + return ret; + + ret = tb_switch_tmu_init(sw); + if (ret) + return ret; + } + + ret = tb_switch_port_hotplug_enable(sw); + if (ret) + return ret; + + ret = device_add(&sw->dev); + if (ret) { + dev_err(&sw->dev, "failed to add device: %d\n", ret); + return ret; + } + + if (tb_route(sw)) { + dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n", + sw->vendor, sw->device); + if (sw->vendor_name && sw->device_name) + dev_info(&sw->dev, "%s %s\n", sw->vendor_name, + sw->device_name); + } + + ret = usb4_switch_add_ports(sw); + if (ret) { + dev_err(&sw->dev, "failed to add USB4 ports\n"); + goto err_del; + } + + ret = tb_switch_nvm_add(sw); + if (ret) { + dev_err(&sw->dev, "failed to add NVM devices\n"); + goto err_ports; + } + + /* + * Thunderbolt routers do not generate wakeups themselves but + * they forward wakeups from tunneled protocols, so enable it + * here. + */ + device_init_wakeup(&sw->dev, true); + + pm_runtime_set_active(&sw->dev); + if (sw->rpm) { + pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&sw->dev); + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_enable(&sw->dev); + pm_request_autosuspend(&sw->dev); + } + + tb_switch_debugfs_init(sw); + return 0; + +err_ports: + usb4_switch_remove_ports(sw); +err_del: + device_del(&sw->dev); + + return ret; +} + +/** + * tb_switch_remove() - Remove and release a switch + * @sw: Switch to remove + * + * This will remove the switch from the domain and release it after last + * reference count drops to zero. If there are switches connected below + * this switch, they will be removed as well. 
+ */ +void tb_switch_remove(struct tb_switch *sw) +{ + struct tb_port *port; + + tb_switch_debugfs_remove(sw); + + if (sw->rpm) { + pm_runtime_get_sync(&sw->dev); + pm_runtime_disable(&sw->dev); + } + + /* port 0 is the switch itself and never has a remote */ + tb_switch_for_each_port(sw, port) { + if (tb_port_has_remote(port)) { + tb_switch_remove(port->remote->sw); + port->remote = NULL; + } else if (port->xdomain) { + tb_xdomain_remove(port->xdomain); + port->xdomain = NULL; + } + + /* Remove any downstream retimers */ + tb_retimer_remove_all(port); + } + + if (!sw->is_unplugged) + tb_plug_events_active(sw, false); + + tb_switch_nvm_remove(sw); + usb4_switch_remove_ports(sw); + + if (tb_route(sw)) + dev_info(&sw->dev, "device disconnected\n"); + device_unregister(&sw->dev); +} + +/** + * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches + * @sw: Router to mark unplugged + */ +void tb_sw_set_unplugged(struct tb_switch *sw) +{ + struct tb_port *port; + + if (sw == sw->tb->root_switch) { + tb_sw_WARN(sw, "cannot unplug root switch\n"); + return; + } + if (sw->is_unplugged) { + tb_sw_WARN(sw, "is_unplugged already set\n"); + return; + } + sw->is_unplugged = true; + tb_switch_for_each_port(sw, port) { + if (tb_port_has_remote(port)) + tb_sw_set_unplugged(port->remote->sw); + else if (port->xdomain) + port->xdomain->is_unplugged = true; + } +} + +static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) +{ + if (flags) + tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags); + else + tb_sw_dbg(sw, "disabling wakeup\n"); + + if (tb_switch_is_usb4(sw)) + return usb4_switch_set_wake(sw, flags); + return tb_lc_set_wake(sw, flags); +} + +int tb_switch_resume(struct tb_switch *sw) +{ + struct tb_port *port; + int err; + + tb_sw_dbg(sw, "resuming switch\n"); + + /* + * Check for UID of the connected switches except for root + * switch which we assume cannot be removed. + */ + if (tb_route(sw)) { + u64 uid; + + /* + * Check first that we can still read the switch config + * space. It may be that there is now another domain + * connected. + */ + err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw)); + if (err < 0) { + tb_sw_info(sw, "switch not present anymore\n"); + return err; + } + + /* We don't have any way to confirm this was the same device */ + if (!sw->uid) + return -ENODEV; + + if (tb_switch_is_usb4(sw)) + err = usb4_switch_read_uid(sw, &uid); + else + err = tb_drom_read_uid_only(sw, &uid); + if (err) { + tb_sw_warn(sw, "uid read failed\n"); + return err; + } + if (sw->uid != uid) { + tb_sw_info(sw, + "changed while suspended (uid %#llx -> %#llx)\n", + sw->uid, uid); + return -ENODEV; + } + } + + err = tb_switch_configure(sw); + if (err) + return err; + + /* Disable wakes */ + tb_switch_set_wake(sw, 0); + + err = tb_switch_tmu_init(sw); + if (err) + return err; + + /* check for surviving downstream switches */ + tb_switch_for_each_port(sw, port) { + if (!tb_port_is_null(port)) + continue; + + if (!tb_port_resume(port)) + continue; + + if (tb_wait_for_port(port, true) <= 0) { + tb_port_warn(port, + "lost during suspend, disconnecting\n"); + if (tb_port_has_remote(port)) + tb_sw_set_unplugged(port->remote->sw); + else if (port->xdomain) + port->xdomain->is_unplugged = true; + } else { + /* + * Always unlock the port so the downstream + * switch/domain is accessible. 
+ */ + if (tb_port_unlock(port)) + tb_port_warn(port, "failed to unlock port\n"); + if (port->remote && tb_switch_resume(port->remote->sw)) { + tb_port_warn(port, + "lost during suspend, disconnecting\n"); + tb_sw_set_unplugged(port->remote->sw); + } + } + } + return 0; +} + +/** + * tb_switch_suspend() - Put a switch to sleep + * @sw: Switch to suspend + * @runtime: Is this runtime suspend or system sleep + * + * Suspends router and all its children. Enables wakes according to + * value of @runtime and then sets sleep bit for the router. If @sw is + * host router the domain is ready to go to sleep once this function + * returns. + */ +void tb_switch_suspend(struct tb_switch *sw, bool runtime) +{ + unsigned int flags = 0; + struct tb_port *port; + int err; + + tb_sw_dbg(sw, "suspending switch\n"); + + /* + * Actually only needed for Titan Ridge but for simplicity can be + * done for USB4 device too as CLx is re-enabled at resume. + */ + tb_switch_clx_disable(sw); + + err = tb_plug_events_active(sw, false); + if (err) + return; + + tb_switch_for_each_port(sw, port) { + if (tb_port_has_remote(port)) + tb_switch_suspend(port->remote->sw, runtime); + } + + if (runtime) { + /* Trigger wake when something is plugged in/out */ + flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT; + flags |= TB_WAKE_ON_USB4; + flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP; + } else if (device_may_wakeup(&sw->dev)) { + flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE; + } + + tb_switch_set_wake(sw, flags); + + if (tb_switch_is_usb4(sw)) + usb4_switch_set_sleep(sw); + else + tb_lc_set_sleep(sw); +} + +/** + * tb_switch_query_dp_resource() - Query availability of DP resource + * @sw: Switch whose DP resource is queried + * @in: DP IN port + * + * Queries availability of DP resource for DP tunneling using switch + * specific means. Returns %true if resource is available. + */ +bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + if (tb_switch_is_usb4(sw)) + return usb4_switch_query_dp_resource(sw, in); + return tb_lc_dp_sink_query(sw, in); +} + +/** + * tb_switch_alloc_dp_resource() - Allocate available DP resource + * @sw: Switch whose DP resource is allocated + * @in: DP IN port + * + * Allocates DP resource for DP tunneling. The resource must be + * available for this to succeed (see tb_switch_query_dp_resource()). + * Returns %0 in success and negative errno otherwise. + */ +int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + int ret; + + if (tb_switch_is_usb4(sw)) + ret = usb4_switch_alloc_dp_resource(sw, in); + else + ret = tb_lc_dp_sink_alloc(sw, in); + + if (ret) + tb_sw_warn(sw, "failed to allocate DP resource for port %d\n", + in->port); + else + tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port); + + return ret; +} + +/** + * tb_switch_dealloc_dp_resource() - De-allocate DP resource + * @sw: Switch whose DP resource is de-allocated + * @in: DP IN port + * + * De-allocates DP resource that was previously allocated for DP + * tunneling. 
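+ *
+ * Together with tb_switch_query_dp_resource() and
+ * tb_switch_alloc_dp_resource() this forms the DP resource life cycle,
+ * roughly (illustrative only, @in is the DP IN adapter):
+ *
+ *	if (tb_switch_query_dp_resource(sw, in) &&
+ *	    !tb_switch_alloc_dp_resource(sw, in)) {
+ *		... set up and later tear down the DP tunnel ...
+ *		tb_switch_dealloc_dp_resource(sw, in);
+ *	}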
+ */ +void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + int ret; + + if (tb_switch_is_usb4(sw)) + ret = usb4_switch_dealloc_dp_resource(sw, in); + else + ret = tb_lc_dp_sink_dealloc(sw, in); + + if (ret) + tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", + in->port); + else + tb_sw_dbg(sw, "released DP resource for port %d\n", in->port); +} + +struct tb_sw_lookup { + struct tb *tb; + u8 link; + u8 depth; + const uuid_t *uuid; + u64 route; +}; + +static int tb_switch_match(struct device *dev, const void *data) +{ + struct tb_switch *sw = tb_to_switch(dev); + const struct tb_sw_lookup *lookup = data; + + if (!sw) + return 0; + if (sw->tb != lookup->tb) + return 0; + + if (lookup->uuid) + return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid)); + + if (lookup->route) { + return sw->config.route_lo == lower_32_bits(lookup->route) && + sw->config.route_hi == upper_32_bits(lookup->route); + } + + /* Root switch is matched only by depth */ + if (!lookup->depth) + return !sw->depth; + + return sw->link == lookup->link && sw->depth == lookup->depth; +} + +/** + * tb_switch_find_by_link_depth() - Find switch by link and depth + * @tb: Domain the switch belongs + * @link: Link number the switch is connected + * @depth: Depth of the switch in link + * + * Returned switch has reference count increased so the caller needs to + * call tb_switch_put() when done with the switch. + */ +struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth) +{ + struct tb_sw_lookup lookup; + struct device *dev; + + memset(&lookup, 0, sizeof(lookup)); + lookup.tb = tb; + lookup.link = link; + lookup.depth = depth; + + dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); + if (dev) + return tb_to_switch(dev); + + return NULL; +} + +/** + * tb_switch_find_by_uuid() - Find switch by UUID + * @tb: Domain the switch belongs + * @uuid: UUID to look for + * + * Returned switch has reference count increased so the caller needs to + * call tb_switch_put() when done with the switch. + */ +struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid) +{ + struct tb_sw_lookup lookup; + struct device *dev; + + memset(&lookup, 0, sizeof(lookup)); + lookup.tb = tb; + lookup.uuid = uuid; + + dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); + if (dev) + return tb_to_switch(dev); + + return NULL; +} + +/** + * tb_switch_find_by_route() - Find switch by route string + * @tb: Domain the switch belongs + * @route: Route string to look for + * + * Returned switch has reference count increased so the caller needs to + * call tb_switch_put() when done with the switch. 
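+ * Returns %NULL if no matching switch is found.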
+ */ +struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) +{ + struct tb_sw_lookup lookup; + struct device *dev; + + if (!route) + return tb_switch_get(tb->root_switch); + + memset(&lookup, 0, sizeof(lookup)); + lookup.tb = tb; + lookup.route = route; + + dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match); + if (dev) + return tb_to_switch(dev); + + return NULL; +} + +/** + * tb_switch_find_port() - return the first port of @type on @sw or NULL + * @sw: Switch to find the port from + * @type: Port type to look for + */ +struct tb_port *tb_switch_find_port(struct tb_switch *sw, + enum tb_port_type type) +{ + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + if (port->config.type == type) + return port; + } + + return NULL; +} + +/* + * Can be used for read/write a specified PCIe bridge for any Thunderbolt 3 + * device. For now used only for Titan Ridge. + */ +static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge, + unsigned int pcie_offset, u32 value) +{ + u32 offset, command, val; + int ret; + + if (sw->generation != 3) + return -EOPNOTSUPP; + + offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA; + ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK; + command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT); + command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK; + command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL + << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT; + command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK; + + offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD; + + ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + ret = tb_switch_wait_for_bit(sw, offset, + TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK) + return -ETIMEDOUT; + + return 0; +} + +/** + * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state + * @sw: Router to enable PCIe L1 + * + * For Titan Ridge switch to enter CLx state, its PCIe bridges shall enable + * entry to PCIe L1 state. Shall be called after the upstream PCIe tunnel + * was configured. Due to Intel platforms limitation, shall be called only + * for first hop switch. + */ +int tb_switch_pcie_l1_enable(struct tb_switch *sw) +{ + struct tb_switch *parent = tb_switch_parent(sw); + int ret; + + if (!tb_route(sw)) + return 0; + + if (!tb_switch_is_titan_ridge(sw)) + return 0; + + /* Enable PCIe L1 enable only for first hop router (depth = 1) */ + if (tb_route(parent)) + return 0; + + /* Write to downstream PCIe bridge #5 aka Dn4 */ + ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1); + if (ret) + return ret; + + /* Write to Upstream PCIe bridge #0 aka Up0 */ + return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1); +} + +/** + * tb_switch_xhci_connect() - Connect internal xHCI + * @sw: Router whose xHCI to connect + * + * Can be called to any router. For Alpine Ridge and Titan Ridge + * performs special flows that bring the xHCI functional for any device + * connected to the type-C port. Call only after PCIe tunnel has been + * established. The function only does the connect if not done already + * so can be called several times for the same router. 
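+ * Returns %0 on success and also when there is nothing to do for the router.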
+ */ +int tb_switch_xhci_connect(struct tb_switch *sw) +{ + struct tb_port *port1, *port3; + int ret; + + if (sw->generation != 3) + return 0; + + port1 = &sw->ports[1]; + port3 = &sw->ports[3]; + + if (tb_switch_is_alpine_ridge(sw)) { + bool usb_port1, usb_port3, xhci_port1, xhci_port3; + + usb_port1 = tb_lc_is_usb_plugged(port1); + usb_port3 = tb_lc_is_usb_plugged(port3); + xhci_port1 = tb_lc_is_xhci_connected(port1); + xhci_port3 = tb_lc_is_xhci_connected(port3); + + /* Figure out correct USB port to connect */ + if (usb_port1 && !xhci_port1) { + ret = tb_lc_xhci_connect(port1); + if (ret) + return ret; + } + if (usb_port3 && !xhci_port3) + return tb_lc_xhci_connect(port3); + } else if (tb_switch_is_titan_ridge(sw)) { + ret = tb_lc_xhci_connect(port1); + if (ret) + return ret; + return tb_lc_xhci_connect(port3); + } + + return 0; +} + +/** + * tb_switch_xhci_disconnect() - Disconnect internal xHCI + * @sw: Router whose xHCI to disconnect + * + * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both + * ports. + */ +void tb_switch_xhci_disconnect(struct tb_switch *sw) +{ + if (sw->generation == 3) { + struct tb_port *port1 = &sw->ports[1]; + struct tb_port *port3 = &sw->ports[3]; + + tb_lc_xhci_disconnect(port1); + tb_port_dbg(port1, "disconnected xHCI\n"); + tb_lc_xhci_disconnect(port3); + tb_port_dbg(port3, "disconnected xHCI\n"); + } +} diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c new file mode 100644 index 0000000000..27bd6ca6f9 --- /dev/null +++ b/drivers/thunderbolt/tb.c @@ -0,0 +1,2467 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt driver - bus logic (NHI independent) + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2019, Intel Corporation + */ + +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/delay.h> +#include <linux/pm_runtime.h> +#include <linux/platform_data/x86/apple.h> + +#include "tb.h" +#include "tb_regs.h" +#include "tunnel.h" + +#define TB_TIMEOUT 100 /* ms */ +#define MAX_GROUPS 7 /* max Group_ID is 7 */ + +/** + * struct tb_cm - Simple Thunderbolt connection manager + * @tunnel_list: List of active tunnels + * @dp_resources: List of available DP resources for DP tunneling + * @hotplug_active: tb_handle_hotplug will stop progressing plug + * events and exit if this is not set (it needs to + * acquire the lock one more time). Used to drain wq + * after cfg has been paused. + * @remove_work: Work used to remove any unplugged routers after + * runtime resume + * @groups: Bandwidth groups used in this domain. 
+ */ +struct tb_cm { + struct list_head tunnel_list; + struct list_head dp_resources; + bool hotplug_active; + struct delayed_work remove_work; + struct tb_bandwidth_group groups[MAX_GROUPS]; +}; + +static inline struct tb *tcm_to_tb(struct tb_cm *tcm) +{ + return ((void *)tcm - sizeof(struct tb)); +} + +struct tb_hotplug_event { + struct work_struct work; + struct tb *tb; + u64 route; + u8 port; + bool unplug; +}; + +static void tb_init_bandwidth_groups(struct tb_cm *tcm) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { + struct tb_bandwidth_group *group = &tcm->groups[i]; + + group->tb = tcm_to_tb(tcm); + group->index = i + 1; + INIT_LIST_HEAD(&group->ports); + } +} + +static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group, + struct tb_port *in) +{ + if (!group || WARN_ON(in->group)) + return; + + in->group = group; + list_add_tail(&in->group_list, &group->ports); + + tb_port_dbg(in, "attached to bandwidth group %d\n", group->index); +} + +static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { + struct tb_bandwidth_group *group = &tcm->groups[i]; + + if (list_empty(&group->ports)) + return group; + } + + return NULL; +} + +static struct tb_bandwidth_group * +tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in, + struct tb_port *out) +{ + struct tb_bandwidth_group *group; + struct tb_tunnel *tunnel; + + /* + * Find all DP tunnels that go through all the same USB4 links + * as this one. Because we always setup tunnels the same way we + * can just check for the routers at both ends of the tunnels + * and if they are the same we have a match. + */ + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + if (!tb_tunnel_is_dp(tunnel)) + continue; + + if (tunnel->src_port->sw == in->sw && + tunnel->dst_port->sw == out->sw) { + group = tunnel->src_port->group; + if (group) { + tb_bandwidth_group_attach_port(group, in); + return group; + } + } + } + + /* Pick up next available group then */ + group = tb_find_free_bandwidth_group(tcm); + if (group) + tb_bandwidth_group_attach_port(group, in); + else + tb_port_warn(in, "no available bandwidth groups\n"); + + return group; +} + +static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in, + struct tb_port *out) +{ + if (usb4_dp_port_bandwidth_mode_enabled(in)) { + int index, i; + + index = usb4_dp_port_group_id(in); + for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { + if (tcm->groups[i].index == index) { + tb_bandwidth_group_attach_port(&tcm->groups[i], in); + return; + } + } + } + + tb_attach_bandwidth_group(tcm, in, out); +} + +static void tb_detach_bandwidth_group(struct tb_port *in) +{ + struct tb_bandwidth_group *group = in->group; + + if (group) { + in->group = NULL; + list_del_init(&in->group_list); + + tb_port_dbg(in, "detached from bandwidth group %d\n", group->index); + } +} + +static void tb_handle_hotplug(struct work_struct *work); + +static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) +{ + struct tb_hotplug_event *ev; + + ev = kmalloc(sizeof(*ev), GFP_KERNEL); + if (!ev) + return; + + ev->tb = tb; + ev->route = route; + ev->port = port; + ev->unplug = unplug; + INIT_WORK(&ev->work, tb_handle_hotplug); + queue_work(tb->wq, &ev->work); +} + +/* enumeration & hot plug handling */ + +static void tb_add_dp_resources(struct tb_switch *sw) +{ + struct tb_cm *tcm = tb_priv(sw->tb); + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + if 
(!tb_port_is_dpin(port)) + continue; + + if (!tb_switch_query_dp_resource(sw, port)) + continue; + + list_add_tail(&port->list, &tcm->dp_resources); + tb_port_dbg(port, "DP IN resource available\n"); + } +} + +static void tb_remove_dp_resources(struct tb_switch *sw) +{ + struct tb_cm *tcm = tb_priv(sw->tb); + struct tb_port *port, *tmp; + + /* Clear children resources first */ + tb_switch_for_each_port(sw, port) { + if (tb_port_has_remote(port)) + tb_remove_dp_resources(port->remote->sw); + } + + list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) { + if (port->sw == sw) { + tb_port_dbg(port, "DP OUT resource unavailable\n"); + list_del_init(&port->list); + } + } +} + +static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_port *p; + + list_for_each_entry(p, &tcm->dp_resources, list) { + if (p == port) + return; + } + + tb_port_dbg(port, "DP %s resource available discovered\n", + tb_port_is_dpin(port) ? "IN" : "OUT"); + list_add_tail(&port->list, &tcm->dp_resources); +} + +static void tb_discover_dp_resources(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + if (tb_tunnel_is_dp(tunnel)) + tb_discover_dp_resource(tb, tunnel->dst_port); + } +} + +/* Enables CL states up to host router */ +static int tb_enable_clx(struct tb_switch *sw) +{ + struct tb_cm *tcm = tb_priv(sw->tb); + unsigned int clx = TB_CL0S | TB_CL1; + const struct tb_tunnel *tunnel; + int ret; + + /* + * Currently only enable CLx for the first link. This is enough + * to allow the CPU to save energy at least on Intel hardware + * and makes it slightly simpler to implement. We may change + * this in the future to cover the whole topology if it turns + * out to be beneficial. + */ + while (sw && sw->config.depth > 1) + sw = tb_switch_parent(sw); + + if (!sw) + return 0; + + if (sw->config.depth != 1) + return 0; + + /* + * If we are re-enabling then check if there is an active DMA + * tunnel and in that case bail out. + */ + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + if (tb_tunnel_is_dma(tunnel)) { + if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw))) + return 0; + } + } + + /* + * Initially try with CL2. If that's not supported by the + * topology try with CL0s and CL1 and then give up. + */ + ret = tb_switch_clx_enable(sw, clx | TB_CL2); + if (ret == -EOPNOTSUPP) + ret = tb_switch_clx_enable(sw, clx); + return ret == -EOPNOTSUPP ? 
0 : ret; +} + +/* Disables CL states up to the host router */ +static void tb_disable_clx(struct tb_switch *sw) +{ + do { + if (tb_switch_clx_disable(sw) < 0) + tb_sw_warn(sw, "failed to disable CL states\n"); + sw = tb_switch_parent(sw); + } while (sw); +} + +static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data) +{ + struct tb_switch *sw; + + sw = tb_to_switch(dev); + if (!sw) + return 0; + + if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) { + enum tb_switch_tmu_mode mode; + int ret; + + if (tb_switch_clx_is_enabled(sw, TB_CL1)) + mode = TB_SWITCH_TMU_MODE_HIFI_UNI; + else + mode = TB_SWITCH_TMU_MODE_HIFI_BI; + + ret = tb_switch_tmu_configure(sw, mode); + if (ret) + return ret; + + return tb_switch_tmu_enable(sw); + } + + return 0; +} + +static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel) +{ + struct tb_switch *sw; + + if (!tunnel) + return; + + /* + * Once the first DP tunnel is established we change the TMU + * accuracy of first depth child routers (and the host router) + * to the highest. This is needed for the DP tunneling to work + * but also allows CL0s. + * + * If both routers are v2 then we don't need to do anything as + * they are using enhanced TMU mode that allows all CLx. + */ + sw = tunnel->tb->root_switch; + device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy); +} + +static int tb_enable_tmu(struct tb_switch *sw) +{ + int ret; + + /* + * If both routers at the end of the link are v2 we simply + * enable the enhanced uni-directional mode. That covers all + * the CL states. For v1 and before we need to use the normal + * rate to allow CL1 (when supported). Otherwise we keep the TMU + * running at the highest accuracy. + */ + ret = tb_switch_tmu_configure(sw, + TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI); + if (ret == -EOPNOTSUPP) { + if (tb_switch_clx_is_enabled(sw, TB_CL1)) + ret = tb_switch_tmu_configure(sw, + TB_SWITCH_TMU_MODE_LOWRES); + else + ret = tb_switch_tmu_configure(sw, + TB_SWITCH_TMU_MODE_HIFI_BI); + } + if (ret) + return ret; + + /* If it is already enabled in correct mode, don't touch it */ + if (tb_switch_tmu_is_enabled(sw)) + return 0; + + ret = tb_switch_tmu_disable(sw); + if (ret) + return ret; + + ret = tb_switch_tmu_post_time(sw); + if (ret) + return ret; + + return tb_switch_tmu_enable(sw); +} + +static void tb_switch_discover_tunnels(struct tb_switch *sw, + struct list_head *list, + bool alloc_hopids) +{ + struct tb *tb = sw->tb; + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + struct tb_tunnel *tunnel = NULL; + + switch (port->config.type) { + case TB_TYPE_DP_HDMI_IN: + tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids); + tb_increase_tmu_accuracy(tunnel); + break; + + case TB_TYPE_PCIE_DOWN: + tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids); + break; + + case TB_TYPE_USB3_DOWN: + tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids); + break; + + default: + break; + } + + if (tunnel) + list_add_tail(&tunnel->list, list); + } + + tb_switch_for_each_port(sw, port) { + if (tb_port_has_remote(port)) { + tb_switch_discover_tunnels(port->remote->sw, list, + alloc_hopids); + } + } +} + +static void tb_discover_tunnels(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + + tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true); + + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + if (tb_tunnel_is_pci(tunnel)) { + struct tb_switch *parent = tunnel->dst_port->sw; + + while (parent != tunnel->src_port->sw) { +
parent->boot = true; + parent = tb_switch_parent(parent); + } + } else if (tb_tunnel_is_dp(tunnel)) { + struct tb_port *in = tunnel->src_port; + struct tb_port *out = tunnel->dst_port; + + /* Keep the domain from powering down */ + pm_runtime_get_sync(&in->sw->dev); + pm_runtime_get_sync(&out->sw->dev); + + tb_discover_bandwidth_group(tcm, in, out); + } + } +} + +static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd) +{ + if (tb_switch_is_usb4(port->sw)) + return usb4_port_configure_xdomain(port, xd); + return tb_lc_configure_xdomain(port); +} + +static void tb_port_unconfigure_xdomain(struct tb_port *port) +{ + if (tb_switch_is_usb4(port->sw)) + usb4_port_unconfigure_xdomain(port); + else + tb_lc_unconfigure_xdomain(port); + + tb_port_enable(port->dual_link_port); +} + +static void tb_scan_xdomain(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + struct tb *tb = sw->tb; + struct tb_xdomain *xd; + u64 route; + + if (!tb_is_xdomain_enabled()) + return; + + route = tb_downstream_route(port); + xd = tb_xdomain_find_by_route(tb, route); + if (xd) { + tb_xdomain_put(xd); + return; + } + + xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, + NULL); + if (xd) { + tb_port_at(route, sw)->xdomain = xd; + tb_port_configure_xdomain(port, xd); + tb_xdomain_add(xd); + } +} + +/** + * tb_find_unused_port() - return the first inactive port on @sw + * @sw: Switch to find the port on + * @type: Port type to look for + */ +static struct tb_port *tb_find_unused_port(struct tb_switch *sw, + enum tb_port_type type) +{ + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + if (tb_is_upstream_port(port)) + continue; + if (port->config.type != type) + continue; + if (!port->cap_adap) + continue; + if (tb_port_is_enabled(port)) + continue; + return port; + } + return NULL; +} + +static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, + const struct tb_port *port) +{ + struct tb_port *down; + + down = usb4_switch_map_usb3_down(sw, port); + if (down && !tb_usb3_port_is_enabled(down)) + return down; + return NULL; +} + +static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, + struct tb_port *src_port, + struct tb_port *dst_port) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + if (tunnel->type == type && + ((src_port && src_port == tunnel->src_port) || + (dst_port && dst_port == tunnel->dst_port))) { + return tunnel; + } + } + + return NULL; +} + +static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, + struct tb_port *src_port, + struct tb_port *dst_port) +{ + struct tb_port *port, *usb3_down; + struct tb_switch *sw; + + /* Pick the router that is deepest in the topology */ + if (dst_port->sw->config.depth > src_port->sw->config.depth) + sw = dst_port->sw; + else + sw = src_port->sw; + + /* Can't be the host router */ + if (sw == tb->root_switch) + return NULL; + + /* Find the downstream USB4 port that leads to this router */ + port = tb_port_at(tb_route(sw), tb->root_switch); + /* Find the corresponding host router USB3 downstream port */ + usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port); + if (!usb3_down) + return NULL; + + return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); +} + +static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, + struct tb_port *dst_port, int *available_up, int *available_down) +{ + int usb3_consumed_up, usb3_consumed_down, ret; + struct tb_cm *tcm = tb_priv(tb); + 
struct tb_tunnel *tunnel; + struct tb_port *port; + + tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n", + tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw), + dst_port->port); + + tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); + if (tunnel && tunnel->src_port != src_port && + tunnel->dst_port != dst_port) { + ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up, + &usb3_consumed_down); + if (ret) + return ret; + } else { + usb3_consumed_up = 0; + usb3_consumed_down = 0; + } + + /* Maximum possible bandwidth asymmetric Gen 4 link is 120 Gb/s */ + *available_up = *available_down = 120000; + + /* Find the minimum available bandwidth over all links */ + tb_for_each_port_on_path(src_port, dst_port, port) { + int link_speed, link_width, up_bw, down_bw; + + if (!tb_port_is_null(port)) + continue; + + if (tb_is_upstream_port(port)) { + link_speed = port->sw->link_speed; + /* + * sw->link_width is from upstream perspective + * so we use the opposite for downstream of the + * host router. + */ + if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { + up_bw = link_speed * 3 * 1000; + down_bw = link_speed * 1 * 1000; + } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { + up_bw = link_speed * 1 * 1000; + down_bw = link_speed * 3 * 1000; + } else { + up_bw = link_speed * port->sw->link_width * 1000; + down_bw = up_bw; + } + } else { + link_speed = tb_port_get_link_speed(port); + if (link_speed < 0) + return link_speed; + + link_width = tb_port_get_link_width(port); + if (link_width < 0) + return link_width; + + if (link_width == TB_LINK_WIDTH_ASYM_TX) { + up_bw = link_speed * 1 * 1000; + down_bw = link_speed * 3 * 1000; + } else if (link_width == TB_LINK_WIDTH_ASYM_RX) { + up_bw = link_speed * 3 * 1000; + down_bw = link_speed * 1 * 1000; + } else { + up_bw = link_speed * link_width * 1000; + down_bw = up_bw; + } + } + + /* Leave 10% guard band */ + up_bw -= up_bw / 10; + down_bw -= down_bw / 10; + + tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw, + down_bw); + + /* + * Find all DP tunnels that cross the port and reduce + * their consumed bandwidth from the available. + */ + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + int dp_consumed_up, dp_consumed_down; + + if (tb_tunnel_is_invalid(tunnel)) + continue; + + if (!tb_tunnel_is_dp(tunnel)) + continue; + + if (!tb_tunnel_port_on_path(tunnel, port)) + continue; + + /* + * Ignore the DP tunnel between src_port and + * dst_port because it is the same tunnel and we + * may be re-calculating estimated bandwidth. + */ + if (tunnel->src_port == src_port && + tunnel->dst_port == dst_port) + continue; + + ret = tb_tunnel_consumed_bandwidth(tunnel, + &dp_consumed_up, + &dp_consumed_down); + if (ret) + return ret; + + up_bw -= dp_consumed_up; + down_bw -= dp_consumed_down; + } + + /* + * If USB3 is tunneled from the host router down to the + * branch leading to port we need to take USB3 consumed + * bandwidth into account regardless whether it actually + * crosses the port. 
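+ * (usb3_consumed_up/down were determined from the first hop USB3 tunnel above.)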
+ */ + up_bw -= usb3_consumed_up; + down_bw -= usb3_consumed_down; + + if (up_bw < *available_up) + *available_up = up_bw; + if (down_bw < *available_down) + *available_down = down_bw; + } + + if (*available_up < 0) + *available_up = 0; + if (*available_down < 0) + *available_down = 0; + + return 0; +} + +static int tb_release_unused_usb3_bandwidth(struct tb *tb, + struct tb_port *src_port, + struct tb_port *dst_port) +{ + struct tb_tunnel *tunnel; + + tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); + return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0; +} + +static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port, + struct tb_port *dst_port) +{ + int ret, available_up, available_down; + struct tb_tunnel *tunnel; + + tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); + if (!tunnel) + return; + + tb_dbg(tb, "reclaiming unused bandwidth for USB3\n"); + + /* + * Calculate available bandwidth for the first hop USB3 tunnel. + * That determines the whole USB3 bandwidth for this branch. + */ + ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port, + &available_up, &available_down); + if (ret) { + tb_warn(tb, "failed to calculate available bandwidth\n"); + return; + } + + tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n", + available_up, available_down); + + tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down); +} + +static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) +{ + struct tb_switch *parent = tb_switch_parent(sw); + int ret, available_up, available_down; + struct tb_port *up, *down, *port; + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + + if (!tb_acpi_may_tunnel_usb3()) { + tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n"); + return 0; + } + + up = tb_switch_find_port(sw, TB_TYPE_USB3_UP); + if (!up) + return 0; + + if (!sw->link_usb4) + return 0; + + /* + * Look up available down port. Since we are chaining it should + * be found right above this switch. + */ + port = tb_switch_downstream_port(sw); + down = tb_find_usb3_down(parent, port); + if (!down) + return 0; + + if (tb_route(parent)) { + struct tb_port *parent_up; + /* + * Check first that the parent switch has its upstream USB3 + * port enabled. Otherwise the chain is not complete and + * there is no point setting up a new tunnel. 
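+ * USB3 tunnels are chained hop by hop so the parent must have its own tunnel up before this one can carry any traffic.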
+ */ + parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP); + if (!parent_up || !tb_port_is_enabled(parent_up)) + return 0; + + /* Make all unused bandwidth available for the new tunnel */ + ret = tb_release_unused_usb3_bandwidth(tb, down, up); + if (ret) + return ret; + } + + ret = tb_available_bandwidth(tb, down, up, &available_up, + &available_down); + if (ret) + goto err_reclaim; + + tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n", + available_up, available_down); + + tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up, + available_down); + if (!tunnel) { + ret = -ENOMEM; + goto err_reclaim; + } + + if (tb_tunnel_activate(tunnel)) { + tb_port_info(up, + "USB3 tunnel activation failed, aborting\n"); + ret = -EIO; + goto err_free; + } + + list_add_tail(&tunnel->list, &tcm->tunnel_list); + if (tb_route(parent)) + tb_reclaim_usb3_bandwidth(tb, down, up); + + return 0; + +err_free: + tb_tunnel_free(tunnel); +err_reclaim: + if (tb_route(parent)) + tb_reclaim_usb3_bandwidth(tb, down, up); + + return ret; +} + +static int tb_create_usb3_tunnels(struct tb_switch *sw) +{ + struct tb_port *port; + int ret; + + if (!tb_acpi_may_tunnel_usb3()) + return 0; + + if (tb_route(sw)) { + ret = tb_tunnel_usb3(sw->tb, sw); + if (ret) + return ret; + } + + tb_switch_for_each_port(sw, port) { + if (!tb_port_has_remote(port)) + continue; + ret = tb_create_usb3_tunnels(port->remote->sw); + if (ret) + return ret; + } + + return 0; +} + +static void tb_scan_port(struct tb_port *port); + +/* + * tb_scan_switch() - scan for and initialize downstream switches + */ +static void tb_scan_switch(struct tb_switch *sw) +{ + struct tb_port *port; + + pm_runtime_get_sync(&sw->dev); + + tb_switch_for_each_port(sw, port) + tb_scan_port(port); + + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); +} + +/* + * tb_scan_port() - check for and initialize switches below port + */ +static void tb_scan_port(struct tb_port *port) +{ + struct tb_cm *tcm = tb_priv(port->sw->tb); + struct tb_port *upstream_port; + bool discovery = false; + struct tb_switch *sw; + + if (tb_is_upstream_port(port)) + return; + + if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 && + !tb_dp_port_is_enabled(port)) { + tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n"); + tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, + false); + return; + } + + if (port->config.type != TB_TYPE_PORT) + return; + if (port->dual_link_port && port->link_nr) + return; /* + * Downstream switch is reachable through two ports. + * Only scan on the primary port (link_nr == 0). + */ + + if (port->usb4) + pm_runtime_get_sync(&port->usb4->dev); + + if (tb_wait_for_port(port, false) <= 0) + goto out_rpm_put; + if (port->remote) { + tb_port_dbg(port, "port already has a remote\n"); + goto out_rpm_put; + } + + tb_retimer_scan(port, true); + + sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, + tb_downstream_route(port)); + if (IS_ERR(sw)) { + /* + * If there is an error accessing the connected switch + * it may be connected to another domain. Also we allow + * the other domain to be connected to a max depth switch. + */ + if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL) + tb_scan_xdomain(port); + goto out_rpm_put; + } + + if (tb_switch_configure(sw)) { + tb_switch_put(sw); + goto out_rpm_put; + } + + /* + * If there was previously another domain connected remove it + * first. 
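+ * The port now leads to a router in this domain so the stale XDomain is torn down before the new switch is added.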
+ */ + if (port->xdomain) { + tb_xdomain_remove(port->xdomain); + tb_port_unconfigure_xdomain(port); + port->xdomain = NULL; + } + + /* + * Do not send uevents until we have discovered all existing + * tunnels and know which switches were authorized already by + * the boot firmware. + */ + if (!tcm->hotplug_active) { + dev_set_uevent_suppress(&sw->dev, true); + discovery = true; + } + + /* + * At the moment Thunderbolt 2 and beyond (devices with LC) we + * can support runtime PM. + */ + sw->rpm = sw->generation > 1; + + if (tb_switch_add(sw)) { + tb_switch_put(sw); + goto out_rpm_put; + } + + /* Link the switches using both links if available */ + upstream_port = tb_upstream_port(sw); + port->remote = upstream_port; + upstream_port->remote = port; + if (port->dual_link_port && upstream_port->dual_link_port) { + port->dual_link_port->remote = upstream_port->dual_link_port; + upstream_port->dual_link_port->remote = port->dual_link_port; + } + + /* Enable lane bonding if supported */ + tb_switch_lane_bonding_enable(sw); + /* Set the link configured */ + tb_switch_configure_link(sw); + /* + * CL0s and CL1 are enabled and supported together. + * Silently ignore CLx enabling in case CLx is not supported. + */ + if (discovery) + tb_sw_dbg(sw, "discovery, not touching CL states\n"); + else if (tb_enable_clx(sw)) + tb_sw_warn(sw, "failed to enable CL states\n"); + + if (tb_enable_tmu(sw)) + tb_sw_warn(sw, "failed to enable TMU\n"); + + /* + * Configuration valid needs to be set after the TMU has been + * enabled for the upstream port of the router so we do it here. + */ + tb_switch_configuration_valid(sw); + + /* Scan upstream retimers */ + tb_retimer_scan(upstream_port, true); + + /* + * Create USB 3.x tunnels only when the switch is plugged to the + * domain. This is because we scan the domain also during discovery + * and want to discover existing USB 3.x tunnels before we create + * any new. + */ + if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) + tb_sw_warn(sw, "USB3 tunnel creation failed\n"); + + tb_add_dp_resources(sw); + tb_scan_switch(sw); + +out_rpm_put: + if (port->usb4) { + pm_runtime_mark_last_busy(&port->usb4->dev); + pm_runtime_put_autosuspend(&port->usb4->dev); + } +} + +static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) +{ + struct tb_port *src_port, *dst_port; + struct tb *tb; + + if (!tunnel) + return; + + tb_tunnel_deactivate(tunnel); + list_del(&tunnel->list); + + tb = tunnel->tb; + src_port = tunnel->src_port; + dst_port = tunnel->dst_port; + + switch (tunnel->type) { + case TB_TUNNEL_DP: + tb_detach_bandwidth_group(src_port); + /* + * In case of DP tunnel make sure the DP IN resource is + * deallocated properly. + */ + tb_switch_dealloc_dp_resource(src_port->sw, src_port); + /* Now we can allow the domain to runtime suspend again */ + pm_runtime_mark_last_busy(&dst_port->sw->dev); + pm_runtime_put_autosuspend(&dst_port->sw->dev); + pm_runtime_mark_last_busy(&src_port->sw->dev); + pm_runtime_put_autosuspend(&src_port->sw->dev); + fallthrough; + + case TB_TUNNEL_USB3: + tb_reclaim_usb3_bandwidth(tb, src_port, dst_port); + break; + + default: + /* + * PCIe and DMA tunnels do not consume guaranteed + * bandwidth. 
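+ * so there is no bandwidth to reclaim when they are torn down.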
+ */ + break; + } + + tb_tunnel_free(tunnel); +} + +/* + * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away + */ +static void tb_free_invalid_tunnels(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + struct tb_tunnel *n; + + list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { + if (tb_tunnel_is_invalid(tunnel)) + tb_deactivate_and_free_tunnel(tunnel); + } +} + +/* + * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches + */ +static void tb_free_unplugged_children(struct tb_switch *sw) +{ + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + if (!tb_port_has_remote(port)) + continue; + + if (port->remote->sw->is_unplugged) { + tb_retimer_remove_all(port); + tb_remove_dp_resources(port->remote->sw); + tb_switch_unconfigure_link(port->remote->sw); + tb_switch_lane_bonding_disable(port->remote->sw); + tb_switch_remove(port->remote->sw); + port->remote = NULL; + if (port->dual_link_port) + port->dual_link_port->remote = NULL; + } else { + tb_free_unplugged_children(port->remote->sw); + } + } +} + +static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, + const struct tb_port *port) +{ + struct tb_port *down = NULL; + + /* + * To keep plugging devices consistently in the same PCIe + * hierarchy, do mapping here for switch downstream PCIe ports. + */ + if (tb_switch_is_usb4(sw)) { + down = usb4_switch_map_pcie_down(sw, port); + } else if (!tb_route(sw)) { + int phy_port = tb_phy_port_from_link(port->port); + int index; + + /* + * Hard-coded Thunderbolt port to PCIe down port mapping + * per controller. + */ + if (tb_switch_is_cactus_ridge(sw) || + tb_switch_is_alpine_ridge(sw)) + index = !phy_port ? 6 : 7; + else if (tb_switch_is_falcon_ridge(sw)) + index = !phy_port ? 6 : 8; + else if (tb_switch_is_titan_ridge(sw)) + index = !phy_port ? 8 : 9; + else + goto out; + + /* Validate the hard-coding */ + if (WARN_ON(index > sw->config.max_port_number)) + goto out; + + down = &sw->ports[index]; + } + + if (down) { + if (WARN_ON(!tb_port_is_pcie_down(down))) + goto out; + if (tb_pci_port_is_enabled(down)) + goto out; + + return down; + } + +out: + return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); +} + +static void +tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group) +{ + struct tb_tunnel *first_tunnel; + struct tb *tb = group->tb; + struct tb_port *in; + int ret; + + tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n", + group->index); + + first_tunnel = NULL; + list_for_each_entry(in, &group->ports, group_list) { + int estimated_bw, estimated_up, estimated_down; + struct tb_tunnel *tunnel; + struct tb_port *out; + + if (!usb4_dp_port_bandwidth_mode_enabled(in)) + continue; + + tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); + if (WARN_ON(!tunnel)) + break; + + if (!first_tunnel) { + /* + * Since USB3 bandwidth is shared by all DP + * tunnels under the host router USB4 port, even + * if they do not begin from the host router, we + * can release USB3 bandwidth just once and not + * for each tunnel separately. 
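+ * The bandwidth is reclaimed back after the loop once all ports in the group have been updated.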
+ */ + first_tunnel = tunnel; + ret = tb_release_unused_usb3_bandwidth(tb, + first_tunnel->src_port, first_tunnel->dst_port); + if (ret) { + tb_port_warn(in, + "failed to release unused bandwidth\n"); + break; + } + } + + out = tunnel->dst_port; + ret = tb_available_bandwidth(tb, in, out, &estimated_up, + &estimated_down); + if (ret) { + tb_port_warn(in, + "failed to re-calculate estimated bandwidth\n"); + break; + } + + /* + * Estimated bandwidth includes: + * - already allocated bandwidth for the DP tunnel + * - available bandwidth along the path + * - bandwidth allocated for USB 3.x but not used. + */ + tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n", + estimated_up, estimated_down); + + if (in->sw->config.depth < out->sw->config.depth) + estimated_bw = estimated_down; + else + estimated_bw = estimated_up; + + if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw)) + tb_port_warn(in, "failed to update estimated bandwidth\n"); + } + + if (first_tunnel) + tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port, + first_tunnel->dst_port); + + tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index); +} + +static void tb_recalc_estimated_bandwidth(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + int i; + + tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n"); + + for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) { + struct tb_bandwidth_group *group = &tcm->groups[i]; + + if (!list_empty(&group->ports)) + tb_recalc_estimated_bandwidth_for_group(group); + } + + tb_dbg(tb, "bandwidth re-calculation done\n"); +} + +static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) +{ + struct tb_port *host_port, *port; + struct tb_cm *tcm = tb_priv(tb); + + host_port = tb_route(in->sw) ? + tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; + + list_for_each_entry(port, &tcm->dp_resources, list) { + if (!tb_port_is_dpout(port)) + continue; + + if (tb_port_is_enabled(port)) { + tb_port_dbg(port, "DP OUT in use\n"); + continue; + } + + tb_port_dbg(port, "DP OUT available\n"); + + /* + * Keep the DP tunnel under the topology starting from + * the same host router downstream port. + */ + if (host_port && tb_route(port->sw)) { + struct tb_port *p; + + p = tb_port_at(tb_route(port->sw), tb->root_switch); + if (p != host_port) + continue; + } + + return port; + } + + return NULL; +} + +static void tb_tunnel_dp(struct tb *tb) +{ + int available_up, available_down, ret, link_nr; + struct tb_cm *tcm = tb_priv(tb); + struct tb_port *port, *in, *out; + struct tb_tunnel *tunnel; + + if (!tb_acpi_may_tunnel_dp()) { + tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n"); + return; + } + + /* + * Find pair of inactive DP IN and DP OUT adapters and then + * establish a DP tunnel between them. + */ + tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); + + in = NULL; + out = NULL; + list_for_each_entry(port, &tcm->dp_resources, list) { + if (!tb_port_is_dpin(port)) + continue; + + if (tb_port_is_enabled(port)) { + tb_port_dbg(port, "DP IN in use\n"); + continue; + } + + tb_port_dbg(port, "DP IN available\n"); + + out = tb_find_dp_out(tb, port); + if (out) { + in = port; + break; + } + } + + if (!in) { + tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); + return; + } + if (!out) { + tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n"); + return; + } + + /* + * This is only applicable to links that are not bonded (so + * when Thunderbolt 1 hardware is involved somewhere in the + * topology). 
For these try to share the DP bandwidth between + * the two lanes. + */ + link_nr = 1; + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + if (tb_tunnel_is_dp(tunnel)) { + link_nr = 0; + break; + } + } + + /* + * DP stream needs the domain to be active so runtime resume + * both ends of the tunnel. + * + * This should bring the routers in the middle active as well + * and keeps the domain from runtime suspending while the DP + * tunnel is active. + */ + pm_runtime_get_sync(&in->sw->dev); + pm_runtime_get_sync(&out->sw->dev); + + if (tb_switch_alloc_dp_resource(in->sw, in)) { + tb_port_dbg(in, "no resource available for DP IN, not tunneling\n"); + goto err_rpm_put; + } + + if (!tb_attach_bandwidth_group(tcm, in, out)) + goto err_dealloc_dp; + + /* Make all unused USB3 bandwidth available for the new DP tunnel */ + ret = tb_release_unused_usb3_bandwidth(tb, in, out); + if (ret) { + tb_warn(tb, "failed to release unused bandwidth\n"); + goto err_detach_group; + } + + ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down); + if (ret) + goto err_reclaim_usb; + + tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n", + available_up, available_down); + + tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up, + available_down); + if (!tunnel) { + tb_port_dbg(out, "could not allocate DP tunnel\n"); + goto err_reclaim_usb; + } + + if (tb_tunnel_activate(tunnel)) { + tb_port_info(out, "DP tunnel activation failed, aborting\n"); + goto err_free; + } + + list_add_tail(&tunnel->list, &tcm->tunnel_list); + tb_reclaim_usb3_bandwidth(tb, in, out); + + /* Update the domain with the new bandwidth estimation */ + tb_recalc_estimated_bandwidth(tb); + + /* + * In case of DP tunnel exists, change host router's 1st children + * TMU mode to HiFi for CL0s to work. + */ + tb_increase_tmu_accuracy(tunnel); + return; + +err_free: + tb_tunnel_free(tunnel); +err_reclaim_usb: + tb_reclaim_usb3_bandwidth(tb, in, out); +err_detach_group: + tb_detach_bandwidth_group(in); +err_dealloc_dp: + tb_switch_dealloc_dp_resource(in->sw, in); +err_rpm_put: + pm_runtime_mark_last_busy(&out->sw->dev); + pm_runtime_put_autosuspend(&out->sw->dev); + pm_runtime_mark_last_busy(&in->sw->dev); + pm_runtime_put_autosuspend(&in->sw->dev); +} + +static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) +{ + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + + if (tb_port_is_dpin(port)) { + tb_port_dbg(port, "DP IN resource unavailable\n"); + in = port; + out = NULL; + } else { + tb_port_dbg(port, "DP OUT resource unavailable\n"); + in = NULL; + out = port; + } + + tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out); + tb_deactivate_and_free_tunnel(tunnel); + list_del_init(&port->list); + + /* + * See if there is another DP OUT port that can be used for + * to create another tunnel. + */ + tb_recalc_estimated_bandwidth(tb); + tb_tunnel_dp(tb); +} + +static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_port *p; + + if (tb_port_is_enabled(port)) + return; + + list_for_each_entry(p, &tcm->dp_resources, list) { + if (p == port) + return; + } + + tb_port_dbg(port, "DP %s resource available\n", + tb_port_is_dpin(port) ? 
"IN" : "OUT"); + list_add_tail(&port->list, &tcm->dp_resources); + + /* Look for suitable DP IN <-> DP OUT pairs now */ + tb_tunnel_dp(tb); +} + +static void tb_disconnect_and_release_dp(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel, *n; + + /* + * Tear down all DP tunnels and release their resources. They + * will be re-established after resume based on plug events. + */ + list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) { + if (tb_tunnel_is_dp(tunnel)) + tb_deactivate_and_free_tunnel(tunnel); + } + + while (!list_empty(&tcm->dp_resources)) { + struct tb_port *port; + + port = list_first_entry(&tcm->dp_resources, + struct tb_port, list); + list_del_init(&port->list); + } +} + +static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw) +{ + struct tb_tunnel *tunnel; + struct tb_port *up; + + up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); + if (WARN_ON(!up)) + return -ENODEV; + + tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up); + if (WARN_ON(!tunnel)) + return -ENODEV; + + tb_switch_xhci_disconnect(sw); + + tb_tunnel_deactivate(tunnel); + list_del(&tunnel->list); + tb_tunnel_free(tunnel); + return 0; +} + +static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) +{ + struct tb_port *up, *down, *port; + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + + up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); + if (!up) + return 0; + + /* + * Look up available down port. Since we are chaining it should + * be found right above this switch. + */ + port = tb_switch_downstream_port(sw); + down = tb_find_pcie_down(tb_switch_parent(sw), port); + if (!down) + return 0; + + tunnel = tb_tunnel_alloc_pci(tb, up, down); + if (!tunnel) + return -ENOMEM; + + if (tb_tunnel_activate(tunnel)) { + tb_port_info(up, + "PCIe tunnel activation failed, aborting\n"); + tb_tunnel_free(tunnel); + return -EIO; + } + + /* + * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it + * here. + */ + if (tb_switch_pcie_l1_enable(sw)) + tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n"); + + if (tb_switch_xhci_connect(sw)) + tb_sw_warn(sw, "failed to connect xHCI\n"); + + list_add_tail(&tunnel->list, &tcm->tunnel_list); + return 0; +} + +static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_port *nhi_port, *dst_port; + struct tb_tunnel *tunnel; + struct tb_switch *sw; + int ret; + + sw = tb_to_switch(xd->dev.parent); + dst_port = tb_port_at(xd->route, sw); + nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); + + mutex_lock(&tb->lock); + + /* + * When tunneling DMA paths the link should not enter CL states + * so disable them now. 
+ */ + tb_disable_clx(sw); + + tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path, + transmit_ring, receive_path, receive_ring); + if (!tunnel) { + ret = -ENOMEM; + goto err_clx; + } + + if (tb_tunnel_activate(tunnel)) { + tb_port_info(nhi_port, + "DMA tunnel activation failed, aborting\n"); + ret = -EIO; + goto err_free; + } + + list_add_tail(&tunnel->list, &tcm->tunnel_list); + mutex_unlock(&tb->lock); + return 0; + +err_free: + tb_tunnel_free(tunnel); +err_clx: + tb_enable_clx(sw); + mutex_unlock(&tb->lock); + + return ret; +} + +static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_port *nhi_port, *dst_port; + struct tb_tunnel *tunnel, *n; + struct tb_switch *sw; + + sw = tb_to_switch(xd->dev.parent); + dst_port = tb_port_at(xd->route, sw); + nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); + + list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { + if (!tb_tunnel_is_dma(tunnel)) + continue; + if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port) + continue; + + if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring, + receive_path, receive_ring)) + tb_deactivate_and_free_tunnel(tunnel); + } + + /* + * Try to re-enable CL states now, it is OK if this fails + * because we may still have another DMA tunnel active through + * the same host router USB4 downstream port. + */ + tb_enable_clx(sw); +} + +static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) +{ + if (!xd->is_unplugged) { + mutex_lock(&tb->lock); + __tb_disconnect_xdomain_paths(tb, xd, transmit_path, + transmit_ring, receive_path, + receive_ring); + mutex_unlock(&tb->lock); + } + return 0; +} + +/* hotplug handling */ + +/* + * tb_handle_hotplug() - handle hotplug event + * + * Executes on tb->wq. 
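+ * The event structure is freed at the end of the handler.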
+ */ +static void tb_handle_hotplug(struct work_struct *work) +{ + struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); + struct tb *tb = ev->tb; + struct tb_cm *tcm = tb_priv(tb); + struct tb_switch *sw; + struct tb_port *port; + + /* Bring the domain back from sleep if it was suspended */ + pm_runtime_get_sync(&tb->dev); + + mutex_lock(&tb->lock); + if (!tcm->hotplug_active) + goto out; /* during init, suspend or shutdown */ + + sw = tb_switch_find_by_route(tb, ev->route); + if (!sw) { + tb_warn(tb, + "hotplug event from non existent switch %llx:%x (unplug: %d)\n", + ev->route, ev->port, ev->unplug); + goto out; + } + if (ev->port > sw->config.max_port_number) { + tb_warn(tb, + "hotplug event from non existent port %llx:%x (unplug: %d)\n", + ev->route, ev->port, ev->unplug); + goto put_sw; + } + port = &sw->ports[ev->port]; + if (tb_is_upstream_port(port)) { + tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n", + ev->route, ev->port, ev->unplug); + goto put_sw; + } + + pm_runtime_get_sync(&sw->dev); + + if (ev->unplug) { + tb_retimer_remove_all(port); + + if (tb_port_has_remote(port)) { + tb_port_dbg(port, "switch unplugged\n"); + tb_sw_set_unplugged(port->remote->sw); + tb_free_invalid_tunnels(tb); + tb_remove_dp_resources(port->remote->sw); + tb_switch_tmu_disable(port->remote->sw); + tb_switch_unconfigure_link(port->remote->sw); + tb_switch_lane_bonding_disable(port->remote->sw); + tb_switch_remove(port->remote->sw); + port->remote = NULL; + if (port->dual_link_port) + port->dual_link_port->remote = NULL; + /* Maybe we can create another DP tunnel */ + tb_recalc_estimated_bandwidth(tb); + tb_tunnel_dp(tb); + } else if (port->xdomain) { + struct tb_xdomain *xd = tb_xdomain_get(port->xdomain); + + tb_port_dbg(port, "xdomain unplugged\n"); + /* + * Service drivers are unbound during + * tb_xdomain_remove() so setting XDomain as + * unplugged here prevents deadlock if they call + * tb_xdomain_disable_paths(). We will tear down + * all the tunnels below. 
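+ * (see the __tb_disconnect_xdomain_paths() call below).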
+ */ + xd->is_unplugged = true; + tb_xdomain_remove(xd); + port->xdomain = NULL; + __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1); + tb_xdomain_put(xd); + tb_port_unconfigure_xdomain(port); + } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { + tb_dp_resource_unavailable(tb, port); + } else if (!port->port) { + tb_sw_dbg(sw, "xHCI disconnect request\n"); + tb_switch_xhci_disconnect(sw); + } else { + tb_port_dbg(port, + "got unplug event for disconnected port, ignoring\n"); + } + } else if (port->remote) { + tb_port_dbg(port, "got plug event for connected port, ignoring\n"); + } else if (!port->port && sw->authorized) { + tb_sw_dbg(sw, "xHCI connect request\n"); + tb_switch_xhci_connect(sw); + } else { + if (tb_port_is_null(port)) { + tb_port_dbg(port, "hotplug: scanning\n"); + tb_scan_port(port); + if (!port->remote) + tb_port_dbg(port, "hotplug: no switch found\n"); + } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { + tb_dp_resource_available(tb, port); + } + } + + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + +put_sw: + tb_switch_put(sw); +out: + mutex_unlock(&tb->lock); + + pm_runtime_mark_last_busy(&tb->dev); + pm_runtime_put_autosuspend(&tb->dev); + + kfree(ev); +} + +static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up, + int *requested_down) +{ + int allocated_up, allocated_down, available_up, available_down, ret; + int requested_up_corrected, requested_down_corrected, granularity; + int max_up, max_down, max_up_rounded, max_down_rounded; + struct tb *tb = tunnel->tb; + struct tb_port *in, *out; + + ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down); + if (ret) + return ret; + + in = tunnel->src_port; + out = tunnel->dst_port; + + tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n", + allocated_up, allocated_down); + + /* + * If we get rounded up request from graphics side, say HBR2 x 4 + * that is 17500 instead of 17280 (this is because of the + * granularity), we allow it too. Here the graphics has already + * negotiated with the DPRX the maximum possible rates (which is + * 17280 in this case). + * + * Since the link cannot go higher than 17280 we use that in our + * calculations but the DP IN adapter Allocated BW write must be + * the same value (17500) otherwise the adapter will mark it as + * failed for graphics. + */ + ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down); + if (ret) + return ret; + + ret = usb4_dp_port_granularity(in); + if (ret < 0) + return ret; + granularity = ret; + + max_up_rounded = roundup(max_up, granularity); + max_down_rounded = roundup(max_down, granularity); + + /* + * This will "fix" the request down to the maximum supported + * rate * lanes if it is at the maximum rounded up level. 
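+ * A negative request means that direction is not used and is clamped to zero for the checks below.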
+ */ + requested_up_corrected = *requested_up; + if (requested_up_corrected == max_up_rounded) + requested_up_corrected = max_up; + else if (requested_up_corrected < 0) + requested_up_corrected = 0; + requested_down_corrected = *requested_down; + if (requested_down_corrected == max_down_rounded) + requested_down_corrected = max_down; + else if (requested_down_corrected < 0) + requested_down_corrected = 0; + + tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n", + requested_up_corrected, requested_down_corrected); + + if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) || + (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) { + tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n", + requested_up_corrected, requested_down_corrected, + max_up_rounded, max_down_rounded); + return -ENOBUFS; + } + + if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) || + (*requested_down >= 0 && requested_down_corrected <= allocated_down)) { + /* + * If requested bandwidth is less or equal than what is + * currently allocated to that tunnel we simply change + * the reservation of the tunnel. Since all the tunnels + * going out from the same USB4 port are in the same + * group the released bandwidth will be taken into + * account for the other tunnels automatically below. + */ + return tb_tunnel_alloc_bandwidth(tunnel, requested_up, + requested_down); + } + + /* + * More bandwidth is requested. Release all the potential + * bandwidth from USB3 first. + */ + ret = tb_release_unused_usb3_bandwidth(tb, in, out); + if (ret) + return ret; + + /* + * Then go over all tunnels that cross the same USB4 ports (they + * are also in the same group but we use the same function here + * that we use with the normal bandwidth allocation). 
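+ * Note tb_available_bandwidth() skips the tunnel between in and out itself so the numbers returned include the bandwidth currently allocated to it.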
+ */ + ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down); + if (ret) + goto reclaim; + + tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n", + available_up, available_down); + + if ((*requested_up >= 0 && available_up >= requested_up_corrected) || + (*requested_down >= 0 && available_down >= requested_down_corrected)) { + ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up, + requested_down); + } else { + ret = -ENOBUFS; + } + +reclaim: + tb_reclaim_usb3_bandwidth(tb, in, out); + return ret; +} + +static void tb_handle_dp_bandwidth_request(struct work_struct *work) +{ + struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); + int requested_bw, requested_up, requested_down, ret; + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + struct tb *tb = ev->tb; + struct tb_cm *tcm = tb_priv(tb); + struct tb_switch *sw; + + pm_runtime_get_sync(&tb->dev); + + mutex_lock(&tb->lock); + if (!tcm->hotplug_active) + goto unlock; + + sw = tb_switch_find_by_route(tb, ev->route); + if (!sw) { + tb_warn(tb, "bandwidth request from non-existent router %llx\n", + ev->route); + goto unlock; + } + + in = &sw->ports[ev->port]; + if (!tb_port_is_dpin(in)) { + tb_port_warn(in, "bandwidth request to non-DP IN adapter\n"); + goto put_sw; + } + + tb_port_dbg(in, "handling bandwidth allocation request\n"); + + if (!usb4_dp_port_bandwidth_mode_enabled(in)) { + tb_port_warn(in, "bandwidth allocation mode not enabled\n"); + goto put_sw; + } + + ret = usb4_dp_port_requested_bandwidth(in); + if (ret < 0) { + if (ret == -ENODATA) + tb_port_dbg(in, "no bandwidth request active\n"); + else + tb_port_warn(in, "failed to read requested bandwidth\n"); + goto put_sw; + } + requested_bw = ret; + + tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw); + + tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL); + if (!tunnel) { + tb_port_warn(in, "failed to find tunnel\n"); + goto put_sw; + } + + out = tunnel->dst_port; + + if (in->sw->config.depth < out->sw->config.depth) { + requested_up = -1; + requested_down = requested_bw; + } else { + requested_up = requested_bw; + requested_down = -1; + } + + ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down); + if (ret) { + if (ret == -ENOBUFS) + tb_port_warn(in, "not enough bandwidth available\n"); + else + tb_port_warn(in, "failed to change bandwidth allocation\n"); + } else { + tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n", + requested_up, requested_down); + + /* Update other clients about the allocation change */ + tb_recalc_estimated_bandwidth(tb); + } + +put_sw: + tb_switch_put(sw); +unlock: + mutex_unlock(&tb->lock); + + pm_runtime_mark_last_busy(&tb->dev); + pm_runtime_put_autosuspend(&tb->dev); + + kfree(ev); +} + +static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port) +{ + struct tb_hotplug_event *ev; + + ev = kmalloc(sizeof(*ev), GFP_KERNEL); + if (!ev) + return; + + ev->tb = tb; + ev->route = route; + ev->port = port; + INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request); + queue_work(tb->wq, &ev->work); +} + +static void tb_handle_notification(struct tb *tb, u64 route, + const struct cfg_error_pkg *error) +{ + + switch (error->error) { + case TB_CFG_ERROR_PCIE_WAKE: + case TB_CFG_ERROR_DP_CON_CHANGE: + case TB_CFG_ERROR_DPTX_DISCOVERY: + if (tb_cfg_ack_notification(tb->ctl, route, error)) + tb_warn(tb, "could not ack notification on %llx\n", + route); + break; + + case TB_CFG_ERROR_DP_BW: + if (tb_cfg_ack_notification(tb->ctl, route, error)) + tb_warn(tb, 
"could not ack notification on %llx\n", + route); + tb_queue_dp_bandwidth_request(tb, route, error->port); + break; + + default: + /* Ignore for now */ + break; + } +} + +/* + * tb_schedule_hotplug_handler() - callback function for the control channel + * + * Delegates to tb_handle_hotplug. + */ +static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, + const void *buf, size_t size) +{ + const struct cfg_event_pkg *pkg = buf; + u64 route = tb_cfg_get_route(&pkg->header); + + switch (type) { + case TB_CFG_PKG_ERROR: + tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf); + return; + case TB_CFG_PKG_EVENT: + break; + default: + tb_warn(tb, "unexpected event %#x, ignoring\n", type); + return; + } + + if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) { + tb_warn(tb, "could not ack plug event on %llx:%x\n", route, + pkg->port); + } + + tb_queue_hotplug(tb, route, pkg->port, pkg->unplug); +} + +static void tb_stop(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + struct tb_tunnel *n; + + cancel_delayed_work(&tcm->remove_work); + /* tunnels are only present after everything has been initialized */ + list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { + /* + * DMA tunnels require the driver to be functional so we + * tear them down. Other protocol tunnels can be left + * intact. + */ + if (tb_tunnel_is_dma(tunnel)) + tb_tunnel_deactivate(tunnel); + tb_tunnel_free(tunnel); + } + tb_switch_remove(tb->root_switch); + tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ +} + +static int tb_scan_finalize_switch(struct device *dev, void *data) +{ + if (tb_is_switch(dev)) { + struct tb_switch *sw = tb_to_switch(dev); + + /* + * If we found that the switch was already setup by the + * boot firmware, mark it as authorized now before we + * send uevent to userspace. + */ + if (sw->boot) + sw->authorized = 1; + + dev_set_uevent_suppress(dev, false); + kobject_uevent(&dev->kobj, KOBJ_ADD); + device_for_each_child(dev, NULL, tb_scan_finalize_switch); + } + + return 0; +} + +static int tb_start(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + int ret; + + tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0); + if (IS_ERR(tb->root_switch)) + return PTR_ERR(tb->root_switch); + + /* + * ICM firmware upgrade needs running firmware and in native + * mode that is not available so disable firmware upgrade of the + * root switch. + * + * However, USB4 routers support NVM firmware upgrade if they + * implement the necessary router operations. + */ + tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch); + /* All USB4 routers support runtime PM */ + tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch); + + ret = tb_switch_configure(tb->root_switch); + if (ret) { + tb_switch_put(tb->root_switch); + return ret; + } + + /* Announce the switch to the world */ + ret = tb_switch_add(tb->root_switch); + if (ret) { + tb_switch_put(tb->root_switch); + return ret; + } + + /* + * To support highest CLx state, we set host router's TMU to + * Normal mode. + */ + tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES); + /* Enable TMU if it is off */ + tb_switch_tmu_enable(tb->root_switch); + /* Full scan to discover devices added before the driver was loaded. 
*/ + tb_scan_switch(tb->root_switch); + /* Find out tunnels created by the boot firmware */ + tb_discover_tunnels(tb); + /* Add DP resources from the DP tunnels created by the boot firmware */ + tb_discover_dp_resources(tb); + /* + * If the boot firmware did not create USB 3.x tunnels create them + * now for the whole topology. + */ + tb_create_usb3_tunnels(tb->root_switch); + /* Add DP IN resources for the root switch */ + tb_add_dp_resources(tb->root_switch); + /* Make the discovered switches available to the userspace */ + device_for_each_child(&tb->root_switch->dev, NULL, + tb_scan_finalize_switch); + + /* Allow tb_handle_hotplug to progress events */ + tcm->hotplug_active = true; + return 0; +} + +static int tb_suspend_noirq(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + + tb_dbg(tb, "suspending...\n"); + tb_disconnect_and_release_dp(tb); + tb_switch_suspend(tb->root_switch, false); + tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ + tb_dbg(tb, "suspend finished\n"); + + return 0; +} + +static void tb_restore_children(struct tb_switch *sw) +{ + struct tb_port *port; + + /* No need to restore if the router is already unplugged */ + if (sw->is_unplugged) + return; + + if (tb_enable_clx(sw)) + tb_sw_warn(sw, "failed to re-enable CL states\n"); + + if (tb_enable_tmu(sw)) + tb_sw_warn(sw, "failed to restore TMU configuration\n"); + + tb_switch_configuration_valid(sw); + + tb_switch_for_each_port(sw, port) { + if (!tb_port_has_remote(port) && !port->xdomain) + continue; + + if (port->remote) { + tb_switch_lane_bonding_enable(port->remote->sw); + tb_switch_configure_link(port->remote->sw); + + tb_restore_children(port->remote->sw); + } else if (port->xdomain) { + tb_port_configure_xdomain(port, port->xdomain); + } + } +} + +static int tb_resume_noirq(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel, *n; + unsigned int usb3_delay = 0; + LIST_HEAD(tunnels); + + tb_dbg(tb, "resuming...\n"); + + /* remove any pci devices the firmware might have setup */ + tb_switch_reset(tb->root_switch); + + tb_switch_resume(tb->root_switch); + tb_free_invalid_tunnels(tb); + tb_free_unplugged_children(tb->root_switch); + tb_restore_children(tb->root_switch); + + /* + * If we get here from suspend to disk the boot firmware or the + * restore kernel might have created tunnels of its own. Since + * we cannot be sure they are usable for us we find and tear + * them down. + */ + tb_switch_discover_tunnels(tb->root_switch, &tunnels, false); + list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) { + if (tb_tunnel_is_usb3(tunnel)) + usb3_delay = 500; + tb_tunnel_deactivate(tunnel); + tb_tunnel_free(tunnel); + } + + /* Re-create our tunnels now */ + list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { + /* USB3 requires delay before it can be re-activated */ + if (tb_tunnel_is_usb3(tunnel)) { + msleep(usb3_delay); + /* Only need to do it once */ + usb3_delay = 0; + } + tb_tunnel_restart(tunnel); + } + if (!list_empty(&tcm->tunnel_list)) { + /* + * the pcie links need some time to get going. + * 100ms works for me... 
+ */ + tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n"); + msleep(100); + } + /* Allow tb_handle_hotplug to progress events */ + tcm->hotplug_active = true; + tb_dbg(tb, "resume finished\n"); + + return 0; +} + +static int tb_free_unplugged_xdomains(struct tb_switch *sw) +{ + struct tb_port *port; + int ret = 0; + + tb_switch_for_each_port(sw, port) { + if (tb_is_upstream_port(port)) + continue; + if (port->xdomain && port->xdomain->is_unplugged) { + tb_retimer_remove_all(port); + tb_xdomain_remove(port->xdomain); + tb_port_unconfigure_xdomain(port); + port->xdomain = NULL; + ret++; + } else if (port->remote) { + ret += tb_free_unplugged_xdomains(port->remote->sw); + } + } + + return ret; +} + +static int tb_freeze_noirq(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + + tcm->hotplug_active = false; + return 0; +} + +static int tb_thaw_noirq(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + + tcm->hotplug_active = true; + return 0; +} + +static void tb_complete(struct tb *tb) +{ + /* + * Release any unplugged XDomains and if there is a case where + * another domain is swapped in place of unplugged XDomain we + * need to run another rescan. + */ + mutex_lock(&tb->lock); + if (tb_free_unplugged_xdomains(tb->root_switch)) + tb_scan_switch(tb->root_switch); + mutex_unlock(&tb->lock); +} + +static int tb_runtime_suspend(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + + mutex_lock(&tb->lock); + tb_switch_suspend(tb->root_switch, true); + tcm->hotplug_active = false; + mutex_unlock(&tb->lock); + + return 0; +} + +static void tb_remove_work(struct work_struct *work) +{ + struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work); + struct tb *tb = tcm_to_tb(tcm); + + mutex_lock(&tb->lock); + if (tb->root_switch) { + tb_free_unplugged_children(tb->root_switch); + tb_free_unplugged_xdomains(tb->root_switch); + } + mutex_unlock(&tb->lock); +} + +static int tb_runtime_resume(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel, *n; + + mutex_lock(&tb->lock); + tb_switch_resume(tb->root_switch); + tb_free_invalid_tunnels(tb); + tb_restore_children(tb->root_switch); + list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) + tb_tunnel_restart(tunnel); + tcm->hotplug_active = true; + mutex_unlock(&tb->lock); + + /* + * Schedule cleanup of any unplugged devices. Run this in a + * separate thread to avoid possible deadlock if the device + * removal runtime resumes the unplugged device. + */ + queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50)); + return 0; +} + +static const struct tb_cm_ops tb_cm_ops = { + .start = tb_start, + .stop = tb_stop, + .suspend_noirq = tb_suspend_noirq, + .resume_noirq = tb_resume_noirq, + .freeze_noirq = tb_freeze_noirq, + .thaw_noirq = tb_thaw_noirq, + .complete = tb_complete, + .runtime_suspend = tb_runtime_suspend, + .runtime_resume = tb_runtime_resume, + .handle_event = tb_handle_event, + .disapprove_switch = tb_disconnect_pci, + .approve_switch = tb_tunnel_pci, + .approve_xdomain_paths = tb_approve_xdomain_paths, + .disconnect_xdomain_paths = tb_disconnect_xdomain_paths, +}; + +/* + * During suspend the Thunderbolt controller is reset and all PCIe + * tunnels are lost. The NHI driver will try to reestablish all tunnels + * during resume. This adds device links between the tunneled PCIe + * downstream ports and the NHI so that the device core will make sure + * NHI is resumed first before the rest. 
+ */ +static bool tb_apple_add_links(struct tb_nhi *nhi) +{ + struct pci_dev *upstream, *pdev; + bool ret; + + if (!x86_apple_machine) + return false; + + switch (nhi->pdev->device) { + case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: + case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: + break; + default: + return false; + } + + upstream = pci_upstream_bridge(nhi->pdev); + while (upstream) { + if (!pci_is_pcie(upstream)) + return false; + if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM) + break; + upstream = pci_upstream_bridge(upstream); + } + + if (!upstream) + return false; + + /* + * For each hotplug downstream port, add a device link + * back to the NHI so that PCIe tunnels can be re-established after + * sleep. + */ + ret = false; + for_each_pci_bridge(pdev, upstream->subordinate) { + const struct device_link *link; + + if (!pci_is_pcie(pdev)) + continue; + if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM || + !pdev->is_hotplug_bridge) + continue; + + link = device_link_add(&pdev->dev, &nhi->pdev->dev, + DL_FLAG_AUTOREMOVE_SUPPLIER | + DL_FLAG_PM_RUNTIME); + if (link) { + dev_dbg(&nhi->pdev->dev, "created link from %s\n", + dev_name(&pdev->dev)); + ret = true; + } else { + dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", + dev_name(&pdev->dev)); + } + } + + return ret; +} + +struct tb *tb_probe(struct tb_nhi *nhi) +{ + struct tb_cm *tcm; + struct tb *tb; + + tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm)); + if (!tb) + return NULL; + + if (tb_acpi_may_tunnel_pcie()) + tb->security_level = TB_SECURITY_USER; + else + tb->security_level = TB_SECURITY_NOPCIE; + + tb->cm_ops = &tb_cm_ops; + + tcm = tb_priv(tb); + INIT_LIST_HEAD(&tcm->tunnel_list); + INIT_LIST_HEAD(&tcm->dp_resources); + INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work); + tb_init_bandwidth_groups(tcm); + + tb_dbg(tb, "using software connection manager\n"); + + /* + * Device links are needed to make sure we establish tunnels + * before the PCIe/USB stack is resumed so complain here if we + * find them missing.
+ */ + if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi)) + tb_warn(tb, "device links to tunneled native ports are missing!\n"); + + return tb; +} diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h new file mode 100644 index 0000000000..d2a55ad2fd --- /dev/null +++ b/drivers/thunderbolt/tb.h @@ -0,0 +1,1383 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Thunderbolt driver - bus logic (NHI independent) + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2018, Intel Corporation + */ + +#ifndef TB_H_ +#define TB_H_ + +#include <linux/nvmem-provider.h> +#include <linux/pci.h> +#include <linux/thunderbolt.h> +#include <linux/uuid.h> +#include <linux/bitfield.h> + +#include "tb_regs.h" +#include "ctl.h" +#include "dma_port.h" + +/* Keep link controller awake during update */ +#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0) +/* Disable CLx if not supported */ +#define QUIRK_NO_CLX BIT(1) + +/** + * struct tb_nvm - Structure holding NVM information + * @dev: Owner of the NVM + * @major: Major version number of the active NVM portion + * @minor: Minor version number of the active NVM portion + * @id: Identifier used with both NVM portions + * @active: Active portion NVMem device + * @active_size: Size in bytes of the active NVM + * @non_active: Non-active portion NVMem device + * @buf: Buffer where the NVM image is stored before it is written to + * the actual NVM flash device + * @buf_data_start: Where the actual image starts after skipping + * possible headers + * @buf_data_size: Number of bytes actually consumed by the new NVM + * image + * @authenticating: The device is authenticating the new NVM + * @flushed: The image has been flushed to the storage area + * @vops: Router vendor specific NVM operations (optional) + * + * The user of this structure needs to handle serialization of possible + * concurrent access. + */ +struct tb_nvm { + struct device *dev; + u32 major; + u32 minor; + int id; + struct nvmem_device *active; + size_t active_size; + struct nvmem_device *non_active; + void *buf; + void *buf_data_start; + size_t buf_data_size; + bool authenticating; + bool flushed; + const struct tb_nvm_vendor_ops *vops; +}; + +enum tb_nvm_write_ops { + WRITE_AND_AUTHENTICATE = 1, + WRITE_ONLY = 2, + AUTHENTICATE_ONLY = 3, +}; + +#define TB_SWITCH_KEY_SIZE 32 +#define TB_SWITCH_MAX_DEPTH 6 +#define USB4_SWITCH_MAX_DEPTH 5 + +/** + * enum tb_switch_tmu_mode - TMU mode + * @TB_SWITCH_TMU_MODE_OFF: TMU is off + * @TB_SWITCH_TMU_MODE_LOWRES: Uni-directional, normal mode + * @TB_SWITCH_TMU_MODE_HIFI_UNI: Uni-directional, HiFi mode + * @TB_SWITCH_TMU_MODE_HIFI_BI: Bi-directional, HiFi mode + * @TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: Enhanced Uni-directional, MedRes mode + * + * Ordering is based on TMU accuracy level (highest last). + */ +enum tb_switch_tmu_mode { + TB_SWITCH_TMU_MODE_OFF, + TB_SWITCH_TMU_MODE_LOWRES, + TB_SWITCH_TMU_MODE_HIFI_UNI, + TB_SWITCH_TMU_MODE_HIFI_BI, + TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI, +}; + +/** + * struct tb_switch_tmu - Structure holding router TMU configuration + * @cap: Offset to the TMU capability (%0 if not found) + * @has_ucap: Does the switch support uni-directional mode + * @mode: TMU mode related to the upstream router. Reflects the HW + * setting. Don't care for host router. + * @mode_request: TMU mode requested to set. Related to upstream router. + * Don't care for host router. 
+ */ +struct tb_switch_tmu { + int cap; + bool has_ucap; + enum tb_switch_tmu_mode mode; + enum tb_switch_tmu_mode mode_request; +}; + +/** + * struct tb_switch - a thunderbolt switch + * @dev: Device for the switch + * @config: Switch configuration + * @ports: Ports in this switch + * @dma_port: If the switch has a port supporting DMA configuration based + * mailbox this will hold the pointer to that (%NULL + * otherwise). If set it also means the switch has + * upgradeable NVM. + * @tmu: The switch TMU configuration + * @tb: Pointer to the domain the switch belongs to + * @uid: Unique ID of the switch + * @uuid: UUID of the switch (or %NULL if not supported) + * @vendor: Vendor ID of the switch + * @device: Device ID of the switch + * @vendor_name: Name of the vendor (or %NULL if not known) + * @device_name: Name of the device (or %NULL if not known) + * @link_speed: Speed of the link in Gb/s + * @link_width: Width of the upstream facing link + * @link_usb4: Upstream link is USB4 + * @generation: Switch Thunderbolt generation + * @cap_plug_events: Offset to the plug events capability (%0 if not found) + * @cap_vsec_tmu: Offset to the TMU vendor specific capability (%0 if not found) + * @cap_lc: Offset to the link controller capability (%0 if not found) + * @cap_lp: Offset to the low power (CLx for TBT) capability (%0 if not found) + * @is_unplugged: The switch is going away + * @drom: DROM of the switch (%NULL if not found) + * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise) + * @no_nvm_upgrade: Prevent NVM upgrade of this switch + * @safe_mode: The switch is in safe-mode + * @boot: Whether the switch was already authorized on boot or not + * @rpm: The switch supports runtime PM + * @authorized: Whether the switch is authorized by user or policy + * @security_level: Switch supported security level + * @debugfs_dir: Pointer to the debugfs structure + * @key: Contains the key used to challenge the device or %NULL if not + * supported. Size of the key is %TB_SWITCH_KEY_SIZE. + * @connection_id: Connection ID used with ICM messaging + * @connection_key: Connection key used with ICM messaging + * @link: Root switch link this switch is connected to (ICM only) + * @depth: Depth in the chain this switch is connected at (ICM only) + * @rpm_complete: Completion used to wait for runtime resume to + * complete (ICM only) + * @quirks: Quirks used for this Thunderbolt switch + * @credit_allocation: Are the below buffer allocation parameters valid + * @max_usb3_credits: Router preferred number of buffers for USB 3.x + * @min_dp_aux_credits: Router preferred minimum number of buffers for DP AUX + * @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN + * @max_pcie_credits: Router preferred number of buffers for PCIe + * @max_dma_credits: Router preferred number of buffers for DMA/P2P + * @clx: CLx states on the upstream link of the router + * + * When the switch is being added to or removed from the domain (other + * switches) you need to have the domain lock held. + * + * In USB4 terminology this structure represents a router. + * + * Note @link_width is not the same as whether the link is bonded or not. + * For Gen 4 links the link is also bonded when it is asymmetric. The + * correct way to find out whether the link is bonded or not is to look + * at the @bonded field of the upstream port.
+ */ +struct tb_switch { + struct device dev; + struct tb_regs_switch_header config; + struct tb_port *ports; + struct tb_dma_port *dma_port; + struct tb_switch_tmu tmu; + struct tb *tb; + u64 uid; + uuid_t *uuid; + u16 vendor; + u16 device; + const char *vendor_name; + const char *device_name; + unsigned int link_speed; + enum tb_link_width link_width; + bool link_usb4; + unsigned int generation; + int cap_plug_events; + int cap_vsec_tmu; + int cap_lc; + int cap_lp; + bool is_unplugged; + u8 *drom; + struct tb_nvm *nvm; + bool no_nvm_upgrade; + bool safe_mode; + bool boot; + bool rpm; + unsigned int authorized; + enum tb_security_level security_level; + struct dentry *debugfs_dir; + u8 *key; + u8 connection_id; + u8 connection_key; + u8 link; + u8 depth; + struct completion rpm_complete; + unsigned long quirks; + bool credit_allocation; + unsigned int max_usb3_credits; + unsigned int min_dp_aux_credits; + unsigned int min_dp_main_credits; + unsigned int max_pcie_credits; + unsigned int max_dma_credits; + unsigned int clx; +}; + +/** + * struct tb_bandwidth_group - Bandwidth management group + * @tb: Pointer to the domain the group belongs to + * @index: Index of the group (aka Group_ID). Valid values %1-%7 + * @ports: DP IN adapters belonging to this group are linked here + * + * Any tunnel that requires isochronous bandwidth (that's DP for now) is + * attached to a bandwidth group. All tunnels going through the same + * USB4 links share the same group and can dynamically distribute the + * bandwidth within the group. + */ +struct tb_bandwidth_group { + struct tb *tb; + int index; + struct list_head ports; +}; + +/** + * struct tb_port - a thunderbolt port, part of a tb_switch + * @config: Cached port configuration read from registers + * @sw: Switch the port belongs to + * @remote: Remote port (%NULL if not connected) + * @xdomain: Remote host (%NULL if not connected) + * @cap_phy: Offset, zero if not found + * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present) + * @cap_adap: Offset of the adapter specific capability (%0 if not present) + * @cap_usb4: Offset to the USB4 port capability (%0 if not present) + * @usb4: Pointer to the USB4 port structure (only if @cap_usb4 is != %0) + * @port: Port number on switch + * @disabled: Disabled by eeprom or enabled but not implemented + * @bonded: true if the port is bonded (two lanes combined as one) + * @dual_link_port: If the switch is connected using two ports, points + * to the other port. + * @link_nr: Is this primary or secondary port on the dual_link. + * @in_hopids: Currently allocated input HopIDs + * @out_hopids: Currently allocated output HopIDs + * @list: Used to link ports to DP resources list + * @total_credits: Total number of buffers available for this port + * @ctl_credits: Buffers reserved for control path + * @dma_credits: Number of credits allocated for DMA tunneling for all + * DMA paths through this port. + * @group: Bandwidth allocation group the adapter is assigned to. Only + * used for DP IN adapters for now. + * @group_list: The adapter is linked to the group's list of ports through this + * @max_bw: Maximum possible bandwidth through this adapter if set to + * non-zero. + * + * In USB4 terminology this structure represents an adapter (protocol or + * lane adapter). 
+ */ +struct tb_port { + struct tb_regs_port_header config; + struct tb_switch *sw; + struct tb_port *remote; + struct tb_xdomain *xdomain; + int cap_phy; + int cap_tmu; + int cap_adap; + int cap_usb4; + struct usb4_port *usb4; + u8 port; + bool disabled; + bool bonded; + struct tb_port *dual_link_port; + u8 link_nr:1; + struct ida in_hopids; + struct ida out_hopids; + struct list_head list; + unsigned int total_credits; + unsigned int ctl_credits; + unsigned int dma_credits; + struct tb_bandwidth_group *group; + struct list_head group_list; + unsigned int max_bw; +}; + +/** + * struct usb4_port - USB4 port device + * @dev: Device for the port + * @port: Pointer to the lane 0 adapter + * @can_offline: Does the port have the necessary platform support to move + * it into offline mode and back + * @offline: The port is currently in offline mode + * @margining: Pointer to margining structure if enabled + */ +struct usb4_port { + struct device dev; + struct tb_port *port; + bool can_offline; + bool offline; +#ifdef CONFIG_USB4_DEBUGFS_MARGINING + struct tb_margining *margining; +#endif +}; + +/** + * struct tb_retimer - Thunderbolt retimer + * @dev: Device for the retimer + * @tb: Pointer to the domain the retimer belongs to + * @index: Retimer index facing the router USB4 port + * @vendor: Vendor ID of the retimer + * @device: Device ID of the retimer + * @port: Pointer to the lane 0 adapter + * @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise) + * @no_nvm_upgrade: Prevent NVM upgrade of this retimer + * @auth_status: Status of last NVM authentication + */ +struct tb_retimer { + struct device dev; + struct tb *tb; + u8 index; + u32 vendor; + u32 device; + struct tb_port *port; + struct tb_nvm *nvm; + bool no_nvm_upgrade; + u32 auth_status; +}; + +/** + * struct tb_path_hop - routing information for a tb_path + * @in_port: Ingress port of a switch + * @out_port: Egress port of a switch where the packet is routed out + * (must be on the same switch as @in_port) + * @in_hop_index: HopID where the path configuration entry is placed in + * the path config space of @in_port. + * @in_counter_index: Used counter index (not used in the driver + * currently, %-1 to disable) + * @next_hop_index: HopID of the packet when it is routed out from @out_port + * @initial_credits: Number of initial flow control credits allocated for + * the path + * @nfc_credits: Number of non-flow controlled buffers allocated for the + * @in_port. + * + * Hop configuration is always done on the IN port of a switch. + * in_port and out_port have to be on the same switch. Packets arriving on + * in_port with "hop" = in_hop_index will get routed out through out_port. The + * next hop to take (on out_port->remote) is determined by + * next_hop_index. When routing a packet to another switch (out->remote is + * set) the @next_hop_index must match the @in_hop_index of that next + * hop to make routing possible. + * + * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in + * port.
+ */ +struct tb_path_hop { + struct tb_port *in_port; + struct tb_port *out_port; + int in_hop_index; + int in_counter_index; + int next_hop_index; + unsigned int initial_credits; + unsigned int nfc_credits; +}; + +/** + * enum tb_path_port - path options mask + * @TB_PATH_NONE: Do not activate on any hop on path + * @TB_PATH_SOURCE: Activate on the first hop (out of src) + * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last) + * @TB_PATH_DESTINATION: Activate on the last hop (into dst) + * @TB_PATH_ALL: Activate on all hops on the path + */ +enum tb_path_port { + TB_PATH_NONE = 0, + TB_PATH_SOURCE = 1, + TB_PATH_INTERNAL = 2, + TB_PATH_DESTINATION = 4, + TB_PATH_ALL = 7, +}; + +/** + * struct tb_path - a unidirectional path between two ports + * @tb: Pointer to the domain structure + * @name: Name of the path (used for debugging) + * @ingress_shared_buffer: Shared buffering used for ingress ports on the path + * @egress_shared_buffer: Shared buffering used for egress ports on the path + * @ingress_fc_enable: Flow control for ingress ports on the path + * @egress_fc_enable: Flow control for egress ports on the path + * @priority: Priority group of the path + * @weight: Weight of the path inside the priority group + * @drop_packages: Drop packages from queue tail or head + * @activated: Is the path active + * @clear_fc: Clear all flow control from the path config space entries + * when deactivating this path + * @hops: Path hops + * @path_length: How many hops the path uses + * @alloc_hopid: Does this path consume port HopID + * + * A path consists of a number of hops (see &struct tb_path_hop). To + * establish a PCIe tunnel two paths have to be created between the two + * PCIe ports. + */ +struct tb_path { + struct tb *tb; + const char *name; + enum tb_path_port ingress_shared_buffer; + enum tb_path_port egress_shared_buffer; + enum tb_path_port ingress_fc_enable; + enum tb_path_port egress_fc_enable; + + unsigned int priority:3; + int weight:4; + bool drop_packages; + bool activated; + bool clear_fc; + struct tb_path_hop *hops; + int path_length; + bool alloc_hopid; +}; + +/* HopIDs 0-7 are reserved by the Thunderbolt protocol */ +#define TB_PATH_MIN_HOPID 8 +/* + * Support paths from the farthest (depth 6) router to the host and back + * to the same level (not necessarily to the same router). + */ +#define TB_PATH_MAX_HOPS (7 * 2) + +/* Possible wake types */ +#define TB_WAKE_ON_CONNECT BIT(0) +#define TB_WAKE_ON_DISCONNECT BIT(1) +#define TB_WAKE_ON_USB4 BIT(2) +#define TB_WAKE_ON_USB3 BIT(3) +#define TB_WAKE_ON_PCIE BIT(4) +#define TB_WAKE_ON_DP BIT(5) + +/* CL states */ +#define TB_CL0S BIT(0) +#define TB_CL1 BIT(1) +#define TB_CL2 BIT(2) + +/** + * struct tb_cm_ops - Connection manager specific operations vector + * @driver_ready: Called right after the control channel is started. Used by + * ICM to send the driver ready message to the firmware.
+ * @start: Starts the domain + * @stop: Stops the domain + * @suspend_noirq: Connection manager specific suspend_noirq + * @resume_noirq: Connection manager specific resume_noirq + * @suspend: Connection manager specific suspend + * @freeze_noirq: Connection manager specific freeze_noirq + * @thaw_noirq: Connection manager specific thaw_noirq + * @complete: Connection manager specific complete + * @runtime_suspend: Connection manager specific runtime_suspend + * @runtime_resume: Connection manager specific runtime_resume + * @runtime_suspend_switch: Runtime suspend a switch + * @runtime_resume_switch: Runtime resume a switch + * @handle_event: Handle thunderbolt event + * @get_boot_acl: Get boot ACL list + * @set_boot_acl: Set boot ACL list + * @disapprove_switch: Disapprove switch (disconnect PCIe tunnel) + * @approve_switch: Approve switch + * @add_switch_key: Add key to switch + * @challenge_switch_key: Challenge switch using key + * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update + * @approve_xdomain_paths: Approve (establish) XDomain DMA paths + * @disconnect_xdomain_paths: Disconnect XDomain DMA paths + * @usb4_switch_op: Optional proxy for USB4 router operations. If set + * this will be called whenever a USB4 router operation is + * performed. If this returns %-EOPNOTSUPP then the + * native USB4 router operation is called. + * @usb4_switch_nvm_authenticate_status: Optional callback the CM + * implementation can use to return + * the status of the USB4 NVM_AUTH + * router operation. + */ +struct tb_cm_ops { + int (*driver_ready)(struct tb *tb); + int (*start)(struct tb *tb); + void (*stop)(struct tb *tb); + int (*suspend_noirq)(struct tb *tb); + int (*resume_noirq)(struct tb *tb); + int (*suspend)(struct tb *tb); + int (*freeze_noirq)(struct tb *tb); + int (*thaw_noirq)(struct tb *tb); + void (*complete)(struct tb *tb); + int (*runtime_suspend)(struct tb *tb); + int (*runtime_resume)(struct tb *tb); + int (*runtime_suspend_switch)(struct tb_switch *sw); + int (*runtime_resume_switch)(struct tb_switch *sw); + void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type, + const void *buf, size_t size); + int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids); + int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids); + int (*disapprove_switch)(struct tb *tb, struct tb_switch *sw); + int (*approve_switch)(struct tb *tb, struct tb_switch *sw); + int (*add_switch_key)(struct tb *tb, struct tb_switch *sw); + int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw, + const u8 *challenge, u8 *response); + int (*disconnect_pcie_paths)(struct tb *tb); + int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring); + int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring); + int (*usb4_switch_op)(struct tb_switch *sw, u16 opcode, u32 *metadata, + u8 *status, const void *tx_data, size_t tx_data_len, + void *rx_data, size_t rx_data_len); + int (*usb4_switch_nvm_authenticate_status)(struct tb_switch *sw, + u32 *status); +}; + +static inline void *tb_priv(struct tb *tb) +{ + return (void *)tb->privdata; +} + +#define TB_AUTOSUSPEND_DELAY 15000 /* ms */ + +/* helper functions & macros */ + +/** + * tb_upstream_port() - return the upstream port of a switch + * + * Every switch has an upstream port (for the root switch it is the NHI).
+ * + * During switch alloc/init tb_upstream_port()->remote may be NULL, even for + * non root switches (on the NHI port remote is always NULL). + * + * Return: Returns the upstream port of the switch. + */ +static inline struct tb_port *tb_upstream_port(struct tb_switch *sw) +{ + return &sw->ports[sw->config.upstream_port_number]; +} + +/** + * tb_is_upstream_port() - Is the port upstream facing + * @port: Port to check + * + * Returns true if @port is upstream facing port. In case of dual link + * ports both return true. + */ +static inline bool tb_is_upstream_port(const struct tb_port *port) +{ + const struct tb_port *upstream_port = tb_upstream_port(port->sw); + return port == upstream_port || port->dual_link_port == upstream_port; +} + +static inline u64 tb_route(const struct tb_switch *sw) +{ + return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo; +} + +static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw) +{ + u8 port; + + port = route >> (sw->config.depth * 8); + if (WARN_ON(port > sw->config.max_port_number)) + return NULL; + return &sw->ports[port]; +} + +/** + * tb_port_has_remote() - Does the port have switch connected downstream + * @port: Port to check + * + * Returns true only when the port is primary port and has remote set. + */ +static inline bool tb_port_has_remote(const struct tb_port *port) +{ + if (tb_is_upstream_port(port)) + return false; + if (!port->remote) + return false; + if (port->dual_link_port && port->link_nr) + return false; + + return true; +} + +static inline bool tb_port_is_null(const struct tb_port *port) +{ + return port && port->port && port->config.type == TB_TYPE_PORT; +} + +static inline bool tb_port_is_nhi(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_NHI; +} + +static inline bool tb_port_is_pcie_down(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_PCIE_DOWN; +} + +static inline bool tb_port_is_pcie_up(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_PCIE_UP; +} + +static inline bool tb_port_is_dpin(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_DP_HDMI_IN; +} + +static inline bool tb_port_is_dpout(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_DP_HDMI_OUT; +} + +static inline bool tb_port_is_usb3_down(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_USB3_DOWN; +} + +static inline bool tb_port_is_usb3_up(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_USB3_UP; +} + +static inline int tb_sw_read(struct tb_switch *sw, void *buffer, + enum tb_cfg_space space, u32 offset, u32 length) +{ + if (sw->is_unplugged) + return -ENODEV; + return tb_cfg_read(sw->tb->ctl, + buffer, + tb_route(sw), + 0, + space, + offset, + length); +} + +static inline int tb_sw_write(struct tb_switch *sw, const void *buffer, + enum tb_cfg_space space, u32 offset, u32 length) +{ + if (sw->is_unplugged) + return -ENODEV; + return tb_cfg_write(sw->tb->ctl, + buffer, + tb_route(sw), + 0, + space, + offset, + length); +} + +static inline int tb_port_read(struct tb_port *port, void *buffer, + enum tb_cfg_space space, u32 offset, u32 length) +{ + if (port->sw->is_unplugged) + return -ENODEV; + return tb_cfg_read(port->sw->tb->ctl, + buffer, + tb_route(port->sw), + port->port, + space, + offset, + length); +} + +static inline int tb_port_write(struct tb_port *port, const void *buffer, + enum tb_cfg_space space, u32 offset, u32 length) +{ + if 
(port->sw->is_unplugged) + return -ENODEV; + return tb_cfg_write(port->sw->tb->ctl, + buffer, + tb_route(port->sw), + port->port, + space, + offset, + length); +} + +#define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg) +#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg) +#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg) +#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg) +#define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg) + +#define __TB_SW_PRINT(level, sw, fmt, arg...) \ + do { \ + const struct tb_switch *__sw = (sw); \ + level(__sw->tb, "%llx: " fmt, \ + tb_route(__sw), ## arg); \ + } while (0) +#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg) +#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg) +#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg) +#define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg) + +#define __TB_PORT_PRINT(level, _port, fmt, arg...) \ + do { \ + const struct tb_port *__port = (_port); \ + level(__port->sw->tb, "%llx:%u: " fmt, \ + tb_route(__port->sw), __port->port, ## arg); \ + } while (0) +#define tb_port_WARN(port, fmt, arg...) \ + __TB_PORT_PRINT(tb_WARN, port, fmt, ##arg) +#define tb_port_warn(port, fmt, arg...) \ + __TB_PORT_PRINT(tb_warn, port, fmt, ##arg) +#define tb_port_info(port, fmt, arg...) \ + __TB_PORT_PRINT(tb_info, port, fmt, ##arg) +#define tb_port_dbg(port, fmt, arg...) \ + __TB_PORT_PRINT(tb_dbg, port, fmt, ##arg) + +struct tb *icm_probe(struct tb_nhi *nhi); +struct tb *tb_probe(struct tb_nhi *nhi); + +extern struct device_type tb_domain_type; +extern struct device_type tb_retimer_type; +extern struct device_type tb_switch_type; +extern struct device_type usb4_port_device_type; + +int tb_domain_init(void); +void tb_domain_exit(void); +int tb_xdomain_init(void); +void tb_xdomain_exit(void); + +struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize); +int tb_domain_add(struct tb *tb); +void tb_domain_remove(struct tb *tb); +int tb_domain_suspend_noirq(struct tb *tb); +int tb_domain_resume_noirq(struct tb *tb); +int tb_domain_suspend(struct tb *tb); +int tb_domain_freeze_noirq(struct tb *tb); +int tb_domain_thaw_noirq(struct tb *tb); +void tb_domain_complete(struct tb *tb); +int tb_domain_runtime_suspend(struct tb *tb); +int tb_domain_runtime_resume(struct tb *tb); +int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw); +int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw); +int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw); +int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw); +int tb_domain_disconnect_pcie_paths(struct tb *tb); +int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring); +int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring); +int tb_domain_disconnect_all_paths(struct tb *tb); + +static inline struct tb *tb_domain_get(struct tb *tb) +{ + if (tb) + get_device(&tb->dev); + return tb; +} + +static inline void tb_domain_put(struct tb *tb) +{ + put_device(&tb->dev); +} + +struct tb_nvm *tb_nvm_alloc(struct device *dev); +int tb_nvm_read_version(struct tb_nvm *nvm); +int tb_nvm_validate(struct tb_nvm *nvm); +int 
tb_nvm_write_headers(struct tb_nvm *nvm); +int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read); +int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val, + size_t bytes); +int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write); +void tb_nvm_free(struct tb_nvm *nvm); +void tb_nvm_exit(void); + +typedef int (*read_block_fn)(void *, unsigned int, void *, size_t); +typedef int (*write_block_fn)(void *, unsigned int, const void *, size_t); + +int tb_nvm_read_data(unsigned int address, void *buf, size_t size, + unsigned int retries, read_block_fn read_block, + void *read_block_data); +int tb_nvm_write_data(unsigned int address, const void *buf, size_t size, + unsigned int retries, write_block_fn write_next_block, + void *write_block_data); + +int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size); +struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, + u64 route); +struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb, + struct device *parent, u64 route); +int tb_switch_configure(struct tb_switch *sw); +int tb_switch_configuration_valid(struct tb_switch *sw); +int tb_switch_add(struct tb_switch *sw); +void tb_switch_remove(struct tb_switch *sw); +void tb_switch_suspend(struct tb_switch *sw, bool runtime); +int tb_switch_resume(struct tb_switch *sw); +int tb_switch_reset(struct tb_switch *sw); +int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, + u32 value, int timeout_msec); +void tb_sw_set_unplugged(struct tb_switch *sw); +struct tb_port *tb_switch_find_port(struct tb_switch *sw, + enum tb_port_type type); +struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, + u8 depth); +struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid); +struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route); + +/** + * tb_switch_for_each_port() - Iterate over each switch port + * @sw: Switch whose ports to iterate + * @p: Port used as iterator + * + * Iterates over each switch port skipping the control port (port %0). + */ +#define tb_switch_for_each_port(sw, p) \ + for ((p) = &(sw)->ports[1]; \ + (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++) + +static inline struct tb_switch *tb_switch_get(struct tb_switch *sw) +{ + if (sw) + get_device(&sw->dev); + return sw; +} + +static inline void tb_switch_put(struct tb_switch *sw) +{ + put_device(&sw->dev); +} + +static inline bool tb_is_switch(const struct device *dev) +{ + return dev->type == &tb_switch_type; +} + +static inline struct tb_switch *tb_to_switch(const struct device *dev) +{ + if (tb_is_switch(dev)) + return container_of(dev, struct tb_switch, dev); + return NULL; +} + +static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw) +{ + return tb_to_switch(sw->dev.parent); +} + +/** + * tb_switch_downstream_port() - Return downstream facing port of parent router + * @sw: Device router pointer + * + * Only call for device routers. Returns the downstream facing port of + * the parent router. 
+ */ +static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw) +{ + if (WARN_ON(!tb_route(sw))) + return NULL; + return tb_port_at(tb_route(sw), tb_switch_parent(sw)); +} + +static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw) +{ + return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && + sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE; +} + +static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw) +{ + return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && + sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE; +} + +static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw) +{ + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: + case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: + return true; + } + } + return false; +} + +static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw) +{ + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: + return true; + } + } + return false; +} + +static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw) +{ + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: + return true; + } + } + return false; +} + +static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw) +{ + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: + return true; + } + } + return false; +} + +static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw) +{ + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_TGL_NHI0: + case PCI_DEVICE_ID_INTEL_TGL_NHI1: + case PCI_DEVICE_ID_INTEL_TGL_H_NHI0: + case PCI_DEVICE_ID_INTEL_TGL_H_NHI1: + return true; + } + } + return false; +} + +/** + * tb_switch_is_icm() - Is the switch handled by ICM firmware + * @sw: Switch to check + * + * In case there is a need to differentiate whether ICM firmware or SW CM + * is handling @sw this function can be called. It is valid to call this + * after tb_switch_alloc() and tb_switch_configure() has been called + * (latter only for SW CM case). 
+ */ +static inline bool tb_switch_is_icm(const struct tb_switch *sw) +{ + return !sw->config.enabled; +} + +int tb_switch_lane_bonding_enable(struct tb_switch *sw); +void tb_switch_lane_bonding_disable(struct tb_switch *sw); +int tb_switch_configure_link(struct tb_switch *sw); +void tb_switch_unconfigure_link(struct tb_switch *sw); + +bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); +int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); +void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); + +int tb_switch_tmu_init(struct tb_switch *sw); +int tb_switch_tmu_post_time(struct tb_switch *sw); +int tb_switch_tmu_disable(struct tb_switch *sw); +int tb_switch_tmu_enable(struct tb_switch *sw); +int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode); + +/** + * tb_switch_tmu_is_configured() - Is given TMU mode configured + * @sw: Router whose mode to check + * @mode: Mode to check + * + * Checks if given router TMU mode is configured to @mode. Note the + * router TMU might not be enabled to this mode. + */ +static inline bool tb_switch_tmu_is_configured(const struct tb_switch *sw, + enum tb_switch_tmu_mode mode) +{ + return sw->tmu.mode_request == mode; +} + +/** + * tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled + * @sw: Router whose TMU mode to check + * + * Return true if hardware TMU configuration matches the requested + * configuration (and is not %TB_SWITCH_TMU_MODE_OFF). + */ +static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw) +{ + return sw->tmu.mode != TB_SWITCH_TMU_MODE_OFF && + sw->tmu.mode == sw->tmu.mode_request; +} + +bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx); + +int tb_switch_clx_init(struct tb_switch *sw); +bool tb_switch_clx_is_supported(const struct tb_switch *sw); +int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx); +int tb_switch_clx_disable(struct tb_switch *sw); + +/** + * tb_switch_clx_is_enabled() - Checks if the CLx is enabled + * @sw: Router to check for the CLx + * @clx: The CLx states to check for + * + * Checks if the specified CLx is enabled on the router upstream link. + * Returns true if any of the given states is enabled. + * + * Not applicable for a host router. 
+ */ +static inline bool tb_switch_clx_is_enabled(const struct tb_switch *sw, + unsigned int clx) +{ + return sw->clx & clx; +} + +int tb_switch_pcie_l1_enable(struct tb_switch *sw); + +int tb_switch_xhci_connect(struct tb_switch *sw); +void tb_switch_xhci_disconnect(struct tb_switch *sw); + +int tb_port_state(struct tb_port *port); +int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); +int tb_port_add_nfc_credits(struct tb_port *port, int credits); +int tb_port_clear_counter(struct tb_port *port, int counter); +int tb_port_unlock(struct tb_port *port); +int tb_port_enable(struct tb_port *port); +int tb_port_disable(struct tb_port *port); +int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid); +void tb_port_release_in_hopid(struct tb_port *port, int hopid); +int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid); +void tb_port_release_out_hopid(struct tb_port *port, int hopid); +struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, + struct tb_port *prev); + +static inline bool tb_port_use_credit_allocation(const struct tb_port *port) +{ + return tb_port_is_null(port) && port->sw->credit_allocation; +} + +/** + * tb_for_each_port_on_path() - Iterate over each port on path + * @src: Source port + * @dst: Destination port + * @p: Port used as iterator + * + * Walks over each port on path from @src to @dst. + */ +#define tb_for_each_port_on_path(src, dst, p) \ + for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \ + (p) = tb_next_port_on_path((src), (dst), (p))) + +int tb_port_get_link_speed(struct tb_port *port); +int tb_port_get_link_width(struct tb_port *port); +int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width); +int tb_port_lane_bonding_enable(struct tb_port *port); +void tb_port_lane_bonding_disable(struct tb_port *port); +int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask, + int timeout_msec); +int tb_port_update_credits(struct tb_port *port); + +int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); +int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap); +int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset); +int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap); +int tb_port_next_cap(struct tb_port *port, unsigned int offset); +bool tb_port_is_enabled(struct tb_port *port); + +bool tb_usb3_port_is_enabled(struct tb_port *port); +int tb_usb3_port_enable(struct tb_port *port, bool enable); + +bool tb_pci_port_is_enabled(struct tb_port *port); +int tb_pci_port_enable(struct tb_port *port, bool enable); + +int tb_dp_port_hpd_is_active(struct tb_port *port); +int tb_dp_port_hpd_clear(struct tb_port *port); +int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, + unsigned int aux_tx, unsigned int aux_rx); +bool tb_dp_port_is_enabled(struct tb_port *port); +int tb_dp_port_enable(struct tb_port *port, bool enable); + +struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, + struct tb_port *dst, int dst_hopid, + struct tb_port **last, const char *name, + bool alloc_hopid); +struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, + struct tb_port *dst, int dst_hopid, int link_nr, + const char *name); +void tb_path_free(struct tb_path *path); +int tb_path_activate(struct tb_path *path); +void tb_path_deactivate(struct tb_path *path); +bool tb_path_is_invalid(struct tb_path *path); +bool tb_path_port_on_path(const struct tb_path *path, + const 
struct tb_port *port); + +/** + * tb_path_for_each_hop() - Iterate over each hop on path + * @path: Path whose hops to iterate + * @hop: Hop used as iterator + * + * Iterates over each hop on path. + */ +#define tb_path_for_each_hop(path, hop) \ + for ((hop) = &(path)->hops[0]; \ + (hop) <= &(path)->hops[(path)->path_length - 1]; (hop)++) + +int tb_drom_read(struct tb_switch *sw); +int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); + +int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid); +int tb_lc_configure_port(struct tb_port *port); +void tb_lc_unconfigure_port(struct tb_port *port); +int tb_lc_configure_xdomain(struct tb_port *port); +void tb_lc_unconfigure_xdomain(struct tb_port *port); +int tb_lc_start_lane_initialization(struct tb_port *port); +bool tb_lc_is_clx_supported(struct tb_port *port); +bool tb_lc_is_usb_plugged(struct tb_port *port); +bool tb_lc_is_xhci_connected(struct tb_port *port); +int tb_lc_xhci_connect(struct tb_port *port); +void tb_lc_xhci_disconnect(struct tb_port *port); +int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags); +int tb_lc_set_sleep(struct tb_switch *sw); +bool tb_lc_lane_bonding_possible(struct tb_switch *sw); +bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in); +int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in); +int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in); +int tb_lc_force_power(struct tb_switch *sw); + +static inline int tb_route_length(u64 route) +{ + return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT; +} + +/** + * tb_downstream_route() - get route to downstream switch + * + * Port must not be the upstream port (otherwise a loop is created). + * + * Return: Returns a route to the switch behind @port. + */ +static inline u64 tb_downstream_route(struct tb_port *port) +{ + return tb_route(port->sw) + | ((u64) port->port << (port->sw->config.depth * 8)); +} + +bool tb_is_xdomain_enabled(void); +bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type, + const void *buf, size_t size); +struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent, + u64 route, const uuid_t *local_uuid, + const uuid_t *remote_uuid); +void tb_xdomain_add(struct tb_xdomain *xd); +void tb_xdomain_remove(struct tb_xdomain *xd); +struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link, + u8 depth); + +static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd) +{ + return tb_to_switch(xd->dev.parent); +} + +/** + * tb_xdomain_downstream_port() - Return downstream facing port of parent router + * @xd: Xdomain pointer + * + * Returns the downstream port the XDomain is connected to. + */ +static inline struct tb_port *tb_xdomain_downstream_port(struct tb_xdomain *xd) +{ + return tb_port_at(xd->route, tb_xdomain_parent(xd)); +} + +int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf, + size_t size); +int tb_retimer_scan(struct tb_port *port, bool add); +void tb_retimer_remove_all(struct tb_port *port); + +static inline bool tb_is_retimer(const struct device *dev) +{ + return dev->type == &tb_retimer_type; +} + +static inline struct tb_retimer *tb_to_retimer(struct device *dev) +{ + if (tb_is_retimer(dev)) + return container_of(dev, struct tb_retimer, dev); + return NULL; +} + +/** + * usb4_switch_version() - Returns USB4 version of the router + * @sw: Router to check + * + * Returns major version of USB4 router (%1 for v1, %2 for v2 and so + * on). Can be called to pre-USB4 router too and in that case returns %0. 
+ */ +static inline unsigned int usb4_switch_version(const struct tb_switch *sw) +{ + return FIELD_GET(USB4_VERSION_MAJOR_MASK, sw->config.thunderbolt_version); +} + +/** + * tb_switch_is_usb4() - Is the switch USB4 compliant + * @sw: Switch to check + * + * Returns true if the @sw is USB4 compliant router, false otherwise. + */ +static inline bool tb_switch_is_usb4(const struct tb_switch *sw) +{ + return usb4_switch_version(sw) > 0; +} + +int usb4_switch_setup(struct tb_switch *sw); +int usb4_switch_configuration_valid(struct tb_switch *sw); +int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid); +int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size); +bool usb4_switch_lane_bonding_possible(struct tb_switch *sw); +int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags); +int usb4_switch_set_sleep(struct tb_switch *sw); +int usb4_switch_nvm_sector_size(struct tb_switch *sw); +int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size); +int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address); +int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, + const void *buf, size_t size); +int usb4_switch_nvm_authenticate(struct tb_switch *sw); +int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status); +int usb4_switch_credits_init(struct tb_switch *sw); +bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); +int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); +int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); +struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, + const struct tb_port *port); +struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, + const struct tb_port *port); +int usb4_switch_add_ports(struct tb_switch *sw); +void usb4_switch_remove_ports(struct tb_switch *sw); + +int usb4_port_unlock(struct tb_port *port); +int usb4_port_hotplug_enable(struct tb_port *port); +int usb4_port_configure(struct tb_port *port); +void usb4_port_unconfigure(struct tb_port *port); +int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd); +void usb4_port_unconfigure_xdomain(struct tb_port *port); +int usb4_port_router_offline(struct tb_port *port); +int usb4_port_router_online(struct tb_port *port); +int usb4_port_enumerate_retimers(struct tb_port *port); +bool usb4_port_clx_supported(struct tb_port *port); +int usb4_port_margining_caps(struct tb_port *port, u32 *caps); +int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes, + unsigned int ber_level, bool timing, bool right_high, + u32 *results); +int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing, + bool right_high, u32 counter); +int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors); + +int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index); +int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index); +int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf, + u8 size); +int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg, + const void *buf, u8 size); +int usb4_port_retimer_is_last(struct tb_port *port, u8 index); +int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index); +int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index, + unsigned int address); +int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, + unsigned int address, const void *buf, 
+ size_t size); +int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index); +int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index, + u32 *status); +int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index, + unsigned int address, void *buf, size_t size); + +int usb4_usb3_port_max_link_rate(struct tb_port *port); +int usb4_usb3_port_actual_link_rate(struct tb_port *port); +int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw, + int *downstream_bw); +int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw, + int *downstream_bw); +int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw, + int *downstream_bw); + +int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id); +bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port); +bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port); +int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port, + bool supported); +int usb4_dp_port_group_id(struct tb_port *port); +int usb4_dp_port_set_group_id(struct tb_port *port, int group_id); +int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes); +int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes); +int usb4_dp_port_granularity(struct tb_port *port); +int usb4_dp_port_set_granularity(struct tb_port *port, int granularity); +int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw); +int usb4_dp_port_allocated_bandwidth(struct tb_port *port); +int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw); +int usb4_dp_port_requested_bandwidth(struct tb_port *port); + +int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable); + +static inline bool tb_is_usb4_port_device(const struct device *dev) +{ + return dev->type == &usb4_port_device_type; +} + +static inline struct usb4_port *tb_to_usb4_port_device(struct device *dev) +{ + if (tb_is_usb4_port_device(dev)) + return container_of(dev, struct usb4_port, dev); + return NULL; +} + +struct usb4_port *usb4_port_device_add(struct tb_port *port); +void usb4_port_device_remove(struct usb4_port *usb4); +int usb4_port_device_resume(struct usb4_port *usb4); + +static inline bool usb4_port_device_is_offline(const struct usb4_port *usb4) +{ + return usb4->offline; +} + +void tb_check_quirks(struct tb_switch *sw); + +#ifdef CONFIG_ACPI +bool tb_acpi_add_links(struct tb_nhi *nhi); + +bool tb_acpi_is_native(void); +bool tb_acpi_may_tunnel_usb3(void); +bool tb_acpi_may_tunnel_dp(void); +bool tb_acpi_may_tunnel_pcie(void); +bool tb_acpi_is_xdomain_allowed(void); + +int tb_acpi_init(void); +void tb_acpi_exit(void); +int tb_acpi_power_on_retimers(struct tb_port *port); +int tb_acpi_power_off_retimers(struct tb_port *port); +#else +static inline bool tb_acpi_add_links(struct tb_nhi *nhi) { return false; } + +static inline bool tb_acpi_is_native(void) { return true; } +static inline bool tb_acpi_may_tunnel_usb3(void) { return true; } +static inline bool tb_acpi_may_tunnel_dp(void) { return true; } +static inline bool tb_acpi_may_tunnel_pcie(void) { return true; } +static inline bool tb_acpi_is_xdomain_allowed(void) { return true; } + +static inline int tb_acpi_init(void) { return 0; } +static inline void tb_acpi_exit(void) { } +static inline int tb_acpi_power_on_retimers(struct tb_port *port) { return 0; } +static inline int tb_acpi_power_off_retimers(struct tb_port *port) { return 0; } +#endif + +#ifdef CONFIG_DEBUG_FS +void tb_debugfs_init(void); +void tb_debugfs_exit(void); 
+void tb_switch_debugfs_init(struct tb_switch *sw); +void tb_switch_debugfs_remove(struct tb_switch *sw); +void tb_xdomain_debugfs_init(struct tb_xdomain *xd); +void tb_xdomain_debugfs_remove(struct tb_xdomain *xd); +void tb_service_debugfs_init(struct tb_service *svc); +void tb_service_debugfs_remove(struct tb_service *svc); +#else +static inline void tb_debugfs_init(void) { } +static inline void tb_debugfs_exit(void) { } +static inline void tb_switch_debugfs_init(struct tb_switch *sw) { } +static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { } +static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { } +static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { } +static inline void tb_service_debugfs_init(struct tb_service *svc) { } +static inline void tb_service_debugfs_remove(struct tb_service *svc) { } +#endif + +#endif diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h new file mode 100644 index 0000000000..cd750e4b34 --- /dev/null +++ b/drivers/thunderbolt/tb_msgs.h @@ -0,0 +1,661 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Thunderbolt control channel messages + * + * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2017, Intel Corporation + */ + +#ifndef _TB_MSGS +#define _TB_MSGS + +#include <linux/types.h> +#include <linux/uuid.h> + +enum tb_cfg_space { + TB_CFG_HOPS = 0, + TB_CFG_PORT = 1, + TB_CFG_SWITCH = 2, + TB_CFG_COUNTERS = 3, +}; + +enum tb_cfg_error { + TB_CFG_ERROR_PORT_NOT_CONNECTED = 0, + TB_CFG_ERROR_LINK_ERROR = 1, + TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2, + TB_CFG_ERROR_NO_SUCH_PORT = 4, + TB_CFG_ERROR_ACK_PLUG_EVENT = 7, /* send as reply to TB_CFG_PKG_EVENT */ + TB_CFG_ERROR_LOOP = 8, + TB_CFG_ERROR_HEC_ERROR_DETECTED = 12, + TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13, + TB_CFG_ERROR_LOCK = 15, + TB_CFG_ERROR_DP_BW = 32, + TB_CFG_ERROR_ROP_CMPLT = 33, + TB_CFG_ERROR_POP_CMPLT = 34, + TB_CFG_ERROR_PCIE_WAKE = 35, + TB_CFG_ERROR_DP_CON_CHANGE = 36, + TB_CFG_ERROR_DPTX_DISCOVERY = 37, + TB_CFG_ERROR_LINK_RECOVERY = 38, + TB_CFG_ERROR_ASYM_LINK = 39, +}; + +/* common header */ +struct tb_cfg_header { + u32 route_hi:22; + u32 unknown:10; /* highest order bit is set on replies */ + u32 route_lo; +} __packed; + +/* additional header for read/write packets */ +struct tb_cfg_address { + u32 offset:13; /* in dwords */ + u32 length:6; /* in dwords */ + u32 port:6; + enum tb_cfg_space space:2; + u32 seq:2; /* sequence number */ + u32 zero:3; +} __packed; + +/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */ +struct cfg_read_pkg { + struct tb_cfg_header header; + struct tb_cfg_address addr; +} __packed; + +/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */ +struct cfg_write_pkg { + struct tb_cfg_header header; + struct tb_cfg_address addr; + u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */ +} __packed; + +/* TB_CFG_PKG_ERROR */ +struct cfg_error_pkg { + struct tb_cfg_header header; + enum tb_cfg_error error:8; + u32 port:6; + u32 reserved:16; + u32 pg:2; +} __packed; + +struct cfg_ack_pkg { + struct tb_cfg_header header; +}; + +#define TB_CFG_ERROR_PG_HOT_PLUG 0x2 +#define TB_CFG_ERROR_PG_HOT_UNPLUG 0x3 + +/* TB_CFG_PKG_EVENT */ +struct cfg_event_pkg { + struct tb_cfg_header header; + u32 port:6; + u32 zero:25; + bool unplug:1; +} __packed; + +/* TB_CFG_PKG_RESET */ +struct cfg_reset_pkg { + struct tb_cfg_header header; +} __packed; + +/* TB_CFG_PKG_PREPARE_TO_SLEEP */ +struct cfg_pts_pkg { + struct tb_cfg_header header; + u32 data; +} __packed; + +/* ICM messages 
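+ *
+ * The ICM is the firmware based connection manager running on the
+ * Thunderbolt controller. These request/response pairs are exchanged
+ * with it over the control channel; every message starts with
+ * struct icm_pkg_header whose code field holds one of the opcodes
+ * below, and replies signal failure by setting ICM_FLAGS_ERROR.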
*/ + +enum icm_pkg_code { + ICM_GET_TOPOLOGY = 0x1, + ICM_DRIVER_READY = 0x3, + ICM_APPROVE_DEVICE = 0x4, + ICM_CHALLENGE_DEVICE = 0x5, + ICM_ADD_DEVICE_KEY = 0x6, + ICM_GET_ROUTE = 0xa, + ICM_APPROVE_XDOMAIN = 0x10, + ICM_DISCONNECT_XDOMAIN = 0x11, + ICM_PREBOOT_ACL = 0x18, + ICM_USB4_SWITCH_OP = 0x20, +}; + +enum icm_event_code { + ICM_EVENT_DEVICE_CONNECTED = 0x3, + ICM_EVENT_DEVICE_DISCONNECTED = 0x4, + ICM_EVENT_XDOMAIN_CONNECTED = 0x6, + ICM_EVENT_XDOMAIN_DISCONNECTED = 0x7, + ICM_EVENT_RTD3_VETO = 0xa, +}; + +struct icm_pkg_header { + u8 code; + u8 flags; + u8 packet_id; + u8 total_packets; +}; + +#define ICM_FLAGS_ERROR BIT(0) +#define ICM_FLAGS_NO_KEY BIT(1) +#define ICM_FLAGS_SLEVEL_SHIFT 3 +#define ICM_FLAGS_SLEVEL_MASK GENMASK(4, 3) +#define ICM_FLAGS_DUAL_LANE BIT(5) +#define ICM_FLAGS_SPEED_GEN3 BIT(7) +#define ICM_FLAGS_WRITE BIT(7) + +struct icm_pkg_driver_ready { + struct icm_pkg_header hdr; +}; + +/* Falcon Ridge only messages */ + +struct icm_fr_pkg_driver_ready_response { + struct icm_pkg_header hdr; + u8 romver; + u8 ramver; + u16 security_level; +}; + +#define ICM_FR_SLEVEL_MASK 0xf + +/* Falcon Ridge & Alpine Ridge common messages */ + +struct icm_fr_pkg_get_topology { + struct icm_pkg_header hdr; +}; + +#define ICM_GET_TOPOLOGY_PACKETS 14 + +struct icm_fr_pkg_get_topology_response { + struct icm_pkg_header hdr; + u32 route_lo; + u32 route_hi; + u8 first_data; + u8 second_data; + u8 drom_i2c_address_index; + u8 switch_index; + u32 reserved[2]; + u32 ports[16]; + u32 port_hop_info[16]; +}; + +#define ICM_SWITCH_USED BIT(0) +#define ICM_SWITCH_UPSTREAM_PORT_MASK GENMASK(7, 1) +#define ICM_SWITCH_UPSTREAM_PORT_SHIFT 1 + +#define ICM_PORT_TYPE_MASK GENMASK(23, 0) +#define ICM_PORT_INDEX_SHIFT 24 +#define ICM_PORT_INDEX_MASK GENMASK(31, 24) + +struct icm_fr_event_device_connected { + struct icm_pkg_header hdr; + uuid_t ep_uuid; + u8 connection_key; + u8 connection_id; + u16 link_info; + u32 ep_name[55]; +}; + +#define ICM_LINK_INFO_LINK_MASK 0x7 +#define ICM_LINK_INFO_DEPTH_SHIFT 4 +#define ICM_LINK_INFO_DEPTH_MASK GENMASK(7, 4) +#define ICM_LINK_INFO_APPROVED BIT(8) +#define ICM_LINK_INFO_REJECTED BIT(9) +#define ICM_LINK_INFO_BOOT BIT(10) + +struct icm_fr_pkg_approve_device { + struct icm_pkg_header hdr; + uuid_t ep_uuid; + u8 connection_key; + u8 connection_id; + u16 reserved; +}; + +struct icm_fr_event_device_disconnected { + struct icm_pkg_header hdr; + u16 reserved; + u16 link_info; +}; + +struct icm_fr_event_xdomain_connected { + struct icm_pkg_header hdr; + u16 reserved; + u16 link_info; + uuid_t remote_uuid; + uuid_t local_uuid; + u32 local_route_hi; + u32 local_route_lo; + u32 remote_route_hi; + u32 remote_route_lo; +}; + +struct icm_fr_event_xdomain_disconnected { + struct icm_pkg_header hdr; + u16 reserved; + u16 link_info; + uuid_t remote_uuid; +}; + +struct icm_fr_pkg_add_device_key { + struct icm_pkg_header hdr; + uuid_t ep_uuid; + u8 connection_key; + u8 connection_id; + u16 reserved; + u32 key[8]; +}; + +struct icm_fr_pkg_add_device_key_response { + struct icm_pkg_header hdr; + uuid_t ep_uuid; + u8 connection_key; + u8 connection_id; + u16 reserved; +}; + +struct icm_fr_pkg_challenge_device { + struct icm_pkg_header hdr; + uuid_t ep_uuid; + u8 connection_key; + u8 connection_id; + u16 reserved; + u32 challenge[8]; +}; + +struct icm_fr_pkg_challenge_device_response { + struct icm_pkg_header hdr; + uuid_t ep_uuid; + u8 connection_key; + u8 connection_id; + u16 reserved; + u32 challenge[8]; + u32 response[8]; +}; + +struct icm_fr_pkg_approve_xdomain { + 
struct icm_pkg_header hdr; + u16 reserved; + u16 link_info; + uuid_t remote_uuid; + u16 transmit_path; + u16 transmit_ring; + u16 receive_path; + u16 receive_ring; +}; + +struct icm_fr_pkg_approve_xdomain_response { + struct icm_pkg_header hdr; + u16 reserved; + u16 link_info; + uuid_t remote_uuid; + u16 transmit_path; + u16 transmit_ring; + u16 receive_path; + u16 receive_ring; +}; + +/* Alpine Ridge only messages */ + +struct icm_ar_pkg_driver_ready_response { + struct icm_pkg_header hdr; + u8 romver; + u8 ramver; + u16 info; +}; + +#define ICM_AR_FLAGS_RTD3 BIT(6) + +#define ICM_AR_INFO_SLEVEL_MASK GENMASK(3, 0) +#define ICM_AR_INFO_BOOT_ACL_SHIFT 7 +#define ICM_AR_INFO_BOOT_ACL_MASK GENMASK(11, 7) +#define ICM_AR_INFO_BOOT_ACL_SUPPORTED BIT(13) + +struct icm_ar_pkg_get_route { + struct icm_pkg_header hdr; + u16 reserved; + u16 link_info; +}; + +struct icm_ar_pkg_get_route_response { + struct icm_pkg_header hdr; + u16 reserved; + u16 link_info; + u32 route_hi; + u32 route_lo; +}; + +struct icm_ar_boot_acl_entry { + u32 uuid_lo; + u32 uuid_hi; +}; + +#define ICM_AR_PREBOOT_ACL_ENTRIES 16 + +struct icm_ar_pkg_preboot_acl { + struct icm_pkg_header hdr; + struct icm_ar_boot_acl_entry acl[ICM_AR_PREBOOT_ACL_ENTRIES]; +}; + +struct icm_ar_pkg_preboot_acl_response { + struct icm_pkg_header hdr; + struct icm_ar_boot_acl_entry acl[ICM_AR_PREBOOT_ACL_ENTRIES]; +}; + +/* Titan Ridge messages */ + +struct icm_tr_pkg_driver_ready_response { + struct icm_pkg_header hdr; + u16 reserved1; + u16 info; + u32 nvm_version; + u16 device_id; + u16 reserved2; +}; + +#define ICM_TR_FLAGS_RTD3 BIT(6) + +#define ICM_TR_INFO_SLEVEL_MASK GENMASK(2, 0) +#define ICM_TR_INFO_PROTO_VERSION_MASK GENMASK(6, 4) +#define ICM_TR_INFO_PROTO_VERSION_SHIFT 4 +#define ICM_TR_INFO_BOOT_ACL_SHIFT 7 +#define ICM_TR_INFO_BOOT_ACL_MASK GENMASK(12, 7) + +struct icm_tr_event_device_connected { + struct icm_pkg_header hdr; + uuid_t ep_uuid; + u32 route_hi; + u32 route_lo; + u8 connection_id; + u8 reserved; + u16 link_info; + u32 ep_name[55]; +}; + +struct icm_tr_event_device_disconnected { + struct icm_pkg_header hdr; + u32 route_hi; + u32 route_lo; +}; + +struct icm_tr_event_xdomain_connected { + struct icm_pkg_header hdr; + u16 reserved; + u16 link_info; + uuid_t remote_uuid; + uuid_t local_uuid; + u32 local_route_hi; + u32 local_route_lo; + u32 remote_route_hi; + u32 remote_route_lo; +}; + +struct icm_tr_event_xdomain_disconnected { + struct icm_pkg_header hdr; + u32 route_hi; + u32 route_lo; + uuid_t remote_uuid; +}; + +struct icm_tr_pkg_approve_device { + struct icm_pkg_header hdr; + uuid_t ep_uuid; + u32 route_hi; + u32 route_lo; + u8 connection_id; + u8 reserved1[3]; +}; + +struct icm_tr_pkg_add_device_key { + struct icm_pkg_header hdr; + uuid_t ep_uuid; + u32 route_hi; + u32 route_lo; + u8 connection_id; + u8 reserved[3]; + u32 key[8]; +}; + +struct icm_tr_pkg_challenge_device { + struct icm_pkg_header hdr; + uuid_t ep_uuid; + u32 route_hi; + u32 route_lo; + u8 connection_id; + u8 reserved[3]; + u32 challenge[8]; +}; + +struct icm_tr_pkg_approve_xdomain { + struct icm_pkg_header hdr; + u32 route_hi; + u32 route_lo; + uuid_t remote_uuid; + u16 transmit_path; + u16 transmit_ring; + u16 receive_path; + u16 receive_ring; +}; + +struct icm_tr_pkg_disconnect_xdomain { + struct icm_pkg_header hdr; + u8 stage; + u8 reserved[3]; + u32 route_hi; + u32 route_lo; + uuid_t remote_uuid; +}; + +struct icm_tr_pkg_challenge_device_response { + struct icm_pkg_header hdr; + uuid_t ep_uuid; + u32 route_hi; + u32 route_lo; + u8 connection_id; + u8 
reserved[3]; + u32 challenge[8]; + u32 response[8]; +}; + +struct icm_tr_pkg_add_device_key_response { + struct icm_pkg_header hdr; + uuid_t ep_uuid; + u32 route_hi; + u32 route_lo; + u8 connection_id; + u8 reserved[3]; +}; + +struct icm_tr_pkg_approve_xdomain_response { + struct icm_pkg_header hdr; + u32 route_hi; + u32 route_lo; + uuid_t remote_uuid; + u16 transmit_path; + u16 transmit_ring; + u16 receive_path; + u16 receive_ring; +}; + +struct icm_tr_pkg_disconnect_xdomain_response { + struct icm_pkg_header hdr; + u8 stage; + u8 reserved[3]; + u32 route_hi; + u32 route_lo; + uuid_t remote_uuid; +}; + +/* Ice Lake messages */ + +struct icm_icl_event_rtd3_veto { + struct icm_pkg_header hdr; + u32 veto_reason; +}; + +/* USB4 ICM messages */ + +struct icm_usb4_switch_op { + struct icm_pkg_header hdr; + u32 route_hi; + u32 route_lo; + u32 metadata; + u16 opcode; + u16 data_len_valid; + u32 data[16]; +}; + +#define ICM_USB4_SWITCH_DATA_LEN_MASK GENMASK(3, 0) +#define ICM_USB4_SWITCH_DATA_VALID BIT(4) + +struct icm_usb4_switch_op_response { + struct icm_pkg_header hdr; + u32 route_hi; + u32 route_lo; + u32 metadata; + u16 opcode; + u16 status; + u32 data[16]; +}; + +/* XDomain messages */ + +struct tb_xdomain_header { + u32 route_hi; + u32 route_lo; + u32 length_sn; +}; + +#define TB_XDOMAIN_LENGTH_MASK GENMASK(5, 0) +#define TB_XDOMAIN_SN_MASK GENMASK(28, 27) +#define TB_XDOMAIN_SN_SHIFT 27 + +enum tb_xdp_type { + UUID_REQUEST_OLD = 1, + UUID_RESPONSE = 2, + PROPERTIES_REQUEST, + PROPERTIES_RESPONSE, + PROPERTIES_CHANGED_REQUEST, + PROPERTIES_CHANGED_RESPONSE, + ERROR_RESPONSE, + UUID_REQUEST = 12, + LINK_STATE_STATUS_REQUEST = 15, + LINK_STATE_STATUS_RESPONSE, + LINK_STATE_CHANGE_REQUEST, + LINK_STATE_CHANGE_RESPONSE, +}; + +struct tb_xdp_header { + struct tb_xdomain_header xd_hdr; + uuid_t uuid; + u32 type; +}; + +struct tb_xdp_error_response { + struct tb_xdp_header hdr; + u32 error; +}; + +struct tb_xdp_link_state_status { + struct tb_xdp_header hdr; +}; + +struct tb_xdp_link_state_status_response { + union { + struct tb_xdp_error_response err; + struct { + struct tb_xdp_header hdr; + u32 status; + u8 slw; + u8 tlw; + u8 sls; + u8 tls; + }; + }; +}; + +struct tb_xdp_link_state_change { + struct tb_xdp_header hdr; + u8 tlw; + u8 tls; + u16 reserved; +}; + +struct tb_xdp_link_state_change_response { + union { + struct tb_xdp_error_response err; + struct { + struct tb_xdp_header hdr; + u32 status; + }; + }; +}; + +struct tb_xdp_uuid { + struct tb_xdp_header hdr; +}; + +struct tb_xdp_uuid_response { + union { + struct tb_xdp_error_response err; + struct { + struct tb_xdp_header hdr; + uuid_t src_uuid; + u32 src_route_hi; + u32 src_route_lo; + }; + }; +}; + +struct tb_xdp_properties { + struct tb_xdp_header hdr; + uuid_t src_uuid; + uuid_t dst_uuid; + u16 offset; + u16 reserved; +}; + +struct tb_xdp_properties_response { + union { + struct tb_xdp_error_response err; + struct { + struct tb_xdp_header hdr; + uuid_t src_uuid; + uuid_t dst_uuid; + u16 offset; + u16 data_length; + u32 generation; + u32 data[]; + }; + }; +}; + +/* + * Max length of data array single XDomain property response is allowed + * to carry. 
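+ *
+ * A control channel frame is at most 256 bytes. Subtracting the 4 byte
+ * checksum and the fixed part of struct tb_xdp_properties_response
+ * leaves the space available for the data[] array, and dividing by 4
+ * converts that to dwords.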
+ */ +#define TB_XDP_PROPERTIES_MAX_DATA_LENGTH \ + (((256 - 4 - sizeof(struct tb_xdp_properties_response))) / 4) + +/* Maximum size of the total property block in dwords we allow */ +#define TB_XDP_PROPERTIES_MAX_LENGTH 500 + +struct tb_xdp_properties_changed { + struct tb_xdp_header hdr; + uuid_t src_uuid; +}; + +struct tb_xdp_properties_changed_response { + union { + struct tb_xdp_error_response err; + struct tb_xdp_header hdr; + }; +}; + +enum tb_xdp_error { + ERROR_SUCCESS, + ERROR_UNKNOWN_PACKET, + ERROR_UNKNOWN_DOMAIN, + ERROR_NOT_SUPPORTED, + ERROR_NOT_READY, +}; + +#endif diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h new file mode 100644 index 0000000000..cf9f237087 --- /dev/null +++ b/drivers/thunderbolt/tb_regs.h @@ -0,0 +1,608 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Thunderbolt driver - Port/Switch config area registers + * + * Every thunderbolt device consists (logically) of a switch with multiple + * ports. Every port contains up to four config regions (HOPS, PORT, SWITCH, + * COUNTERS) which are used to configure the device. + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2018, Intel Corporation + */ + +#ifndef _TB_REGS +#define _TB_REGS + +#include <linux/types.h> + + +#define TB_ROUTE_SHIFT 8 /* number of bits in a port entry of a route */ + + +/* + * TODO: should be 63? But we do not know how to receive frames larger than 256 + * bytes at the frame level. (header + checksum = 16, 60*4 = 240) + */ +#define TB_MAX_CONFIG_RW_LENGTH 60 + +enum tb_switch_cap { + TB_SWITCH_CAP_TMU = 0x03, + TB_SWITCH_CAP_VSE = 0x05, +}; + +enum tb_switch_vse_cap { + TB_VSE_CAP_PLUG_EVENTS = 0x01, /* also EEPROM */ + TB_VSE_CAP_TIME2 = 0x03, + TB_VSE_CAP_CP_LP = 0x04, + TB_VSE_CAP_LINK_CONTROLLER = 0x06, /* also IECS */ +}; + +enum tb_port_cap { + TB_PORT_CAP_PHY = 0x01, + TB_PORT_CAP_POWER = 0x02, + TB_PORT_CAP_TIME1 = 0x03, + TB_PORT_CAP_ADAP = 0x04, + TB_PORT_CAP_VSE = 0x05, + TB_PORT_CAP_USB4 = 0x06, +}; + +enum tb_port_state { + TB_PORT_DISABLED = 0, /* tb_cap_phy.disable == 1 */ + TB_PORT_CONNECTING = 1, /* retry */ + TB_PORT_UP = 2, + TB_PORT_TX_CL0S = 3, + TB_PORT_RX_CL0S = 4, + TB_PORT_CL1 = 5, + TB_PORT_CL2 = 6, + TB_PORT_UNPLUGGED = 7, +}; + +/* capability headers */ + +struct tb_cap_basic { + u8 next; + /* enum tb_cap cap:8; prevent "narrower than values of its type" */ + u8 cap; /* if cap == 0x05 then we have a extended capability */ +} __packed; + +/** + * struct tb_cap_extended_short - Switch extended short capability + * @next: Pointer to the next capability. If @next and @length are zero + * then we have a long cap. 
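+ *	(in that case the capability continues as a &struct tb_cap_extended_long)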
+ * @cap: Base capability ID (see &enum tb_switch_cap) + * @vsec_id: Vendor specific capability ID (see &enum switch_vse_cap) + * @length: Length of this capability + */ +struct tb_cap_extended_short { + u8 next; + u8 cap; + u8 vsec_id; + u8 length; +} __packed; + +/** + * struct tb_cap_extended_long - Switch extended long capability + * @zero1: This field should be zero + * @cap: Base capability ID (see &enum tb_switch_cap) + * @vsec_id: Vendor specific capability ID (see &enum switch_vse_cap) + * @zero2: This field should be zero + * @next: Pointer to the next capability + * @length: Length of this capability + */ +struct tb_cap_extended_long { + u8 zero1; + u8 cap; + u8 vsec_id; + u8 zero2; + u16 next; + u16 length; +} __packed; + +/** + * struct tb_cap_any - Structure capable of hold every capability + * @basic: Basic capability + * @extended_short: Vendor specific capability + * @extended_long: Vendor specific extended capability + */ +struct tb_cap_any { + union { + struct tb_cap_basic basic; + struct tb_cap_extended_short extended_short; + struct tb_cap_extended_long extended_long; + }; +} __packed; + +/* capabilities */ + +struct tb_cap_link_controller { + struct tb_cap_extended_long cap_header; + u32 count:4; /* number of link controllers */ + u32 unknown1:4; + u32 base_offset:8; /* + * offset (into this capability) of the configuration + * area of the first link controller + */ + u32 length:12; /* link controller configuration area length */ + u32 unknown2:4; /* TODO check that length is correct */ +} __packed; + +struct tb_cap_phy { + struct tb_cap_basic cap_header; + u32 unknown1:16; + u32 unknown2:14; + bool disable:1; + u32 unknown3:11; + enum tb_port_state state:4; + u32 unknown4:2; +} __packed; + +struct tb_eeprom_ctl { + bool fl_sk:1; /* send pulse to transfer one bit */ + bool fl_cs:1; /* set to 0 before access */ + bool fl_di:1; /* to eeprom */ + bool fl_do:1; /* from eeprom */ + bool bit_banging_enable:1; /* set to 1 before access */ + bool not_present:1; /* should be 0 */ + bool unknown1:1; + bool present:1; /* should be 1 */ + u32 unknown2:24; +} __packed; + +struct tb_cap_plug_events { + struct tb_cap_extended_short cap_header; + u32 __unknown1:2; /* VSC_CS_1 */ + u32 plug_events:5; /* VSC_CS_1 */ + u32 __unknown2:25; /* VSC_CS_1 */ + u32 vsc_cs_2; + u32 vsc_cs_3; + struct tb_eeprom_ctl eeprom_ctl; + u32 __unknown5[7]; /* VSC_CS_5 -> VSC_CS_11 */ + u32 drom_offset; /* VSC_CS_12: 32 bit register, but eeprom addresses are 16 bit */ +} __packed; + +/* device headers */ + +/* Present on port 0 in TB_CFG_SWITCH at address zero. */ +struct tb_regs_switch_header { + /* DWORD 0 */ + u16 vendor_id; + u16 device_id; + /* DWORD 1 */ + u32 first_cap_offset:8; + u32 upstream_port_number:6; + u32 max_port_number:6; + u32 depth:3; + u32 __unknown1:1; + u32 revision:8; + /* DWORD 2 */ + u32 route_lo; + /* DWORD 3 */ + u32 route_hi:31; + bool enabled:1; + /* DWORD 4 */ + u32 plug_events_delay:8; /* + * RW, pause between plug events in + * milliseconds. Writing 0x00 is interpreted + * as 255ms. 
+ */ + u32 cmuv:8; + u32 __unknown4:8; + u32 thunderbolt_version:8; +} __packed; + +/* Used with the router thunderbolt_version */ +#define USB4_VERSION_MAJOR_MASK GENMASK(7, 5) + +#define ROUTER_CS_1 0x01 +#define ROUTER_CS_4 0x04 +/* Used with the router cmuv field */ +#define ROUTER_CS_4_CMUV_V1 0x10 +#define ROUTER_CS_4_CMUV_V2 0x20 +#define ROUTER_CS_5 0x05 +#define ROUTER_CS_5_SLP BIT(0) +#define ROUTER_CS_5_WOP BIT(1) +#define ROUTER_CS_5_WOU BIT(2) +#define ROUTER_CS_5_WOD BIT(3) +#define ROUTER_CS_5_C3S BIT(23) +#define ROUTER_CS_5_PTO BIT(24) +#define ROUTER_CS_5_UTO BIT(25) +#define ROUTER_CS_5_HCO BIT(26) +#define ROUTER_CS_5_CV BIT(31) +#define ROUTER_CS_6 0x06 +#define ROUTER_CS_6_SLPR BIT(0) +#define ROUTER_CS_6_TNS BIT(1) +#define ROUTER_CS_6_WOPS BIT(2) +#define ROUTER_CS_6_WOUS BIT(3) +#define ROUTER_CS_6_HCI BIT(18) +#define ROUTER_CS_6_CR BIT(25) +#define ROUTER_CS_7 0x07 +#define ROUTER_CS_9 0x09 +#define ROUTER_CS_25 0x19 +#define ROUTER_CS_26 0x1a +#define ROUTER_CS_26_OPCODE_MASK GENMASK(15, 0) +#define ROUTER_CS_26_STATUS_MASK GENMASK(29, 24) +#define ROUTER_CS_26_STATUS_SHIFT 24 +#define ROUTER_CS_26_ONS BIT(30) +#define ROUTER_CS_26_OV BIT(31) + +/* USB4 router operations opcodes */ +enum usb4_switch_op { + USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10, + USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11, + USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12, + USB4_SWITCH_OP_NVM_WRITE = 0x20, + USB4_SWITCH_OP_NVM_AUTH = 0x21, + USB4_SWITCH_OP_NVM_READ = 0x22, + USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23, + USB4_SWITCH_OP_DROM_READ = 0x24, + USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25, + USB4_SWITCH_OP_BUFFER_ALLOC = 0x33, +}; + +/* Router TMU configuration */ +#define TMU_RTR_CS_0 0x00 +#define TMU_RTR_CS_0_FREQ_WIND_MASK GENMASK(26, 16) +#define TMU_RTR_CS_0_TD BIT(27) +#define TMU_RTR_CS_0_UCAP BIT(30) +#define TMU_RTR_CS_1 0x01 +#define TMU_RTR_CS_1_LOCAL_TIME_NS_MASK GENMASK(31, 16) +#define TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT 16 +#define TMU_RTR_CS_2 0x02 +#define TMU_RTR_CS_3 0x03 +#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0) +#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16) +#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16 +#define TMU_RTR_CS_15 0x0f +#define TMU_RTR_CS_15_FREQ_AVG_MASK GENMASK(5, 0) +#define TMU_RTR_CS_15_DELAY_AVG_MASK GENMASK(11, 6) +#define TMU_RTR_CS_15_OFFSET_AVG_MASK GENMASK(17, 12) +#define TMU_RTR_CS_15_ERROR_AVG_MASK GENMASK(23, 18) +#define TMU_RTR_CS_18 0x12 +#define TMU_RTR_CS_18_DELTA_AVG_CONST_MASK GENMASK(23, 16) +#define TMU_RTR_CS_22 0x16 +#define TMU_RTR_CS_24 0x18 +#define TMU_RTR_CS_25 0x19 + +enum tb_port_type { + TB_TYPE_INACTIVE = 0x000000, + TB_TYPE_PORT = 0x000001, + TB_TYPE_NHI = 0x000002, + /* TB_TYPE_ETHERNET = 0x020000, lower order bits are not known */ + /* TB_TYPE_SATA = 0x080000, lower order bits are not known */ + TB_TYPE_DP_HDMI_IN = 0x0e0101, + TB_TYPE_DP_HDMI_OUT = 0x0e0102, + TB_TYPE_PCIE_DOWN = 0x100101, + TB_TYPE_PCIE_UP = 0x100102, + TB_TYPE_USB3_DOWN = 0x200101, + TB_TYPE_USB3_UP = 0x200102, +}; + +/* Present on every port in TB_CF_PORT at address zero. 
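+ * (i.e. dwords 0 - 7 of the TB_CFG_PORT config space of each adapter)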
*/ +struct tb_regs_port_header { + /* DWORD 0 */ + u16 vendor_id; + u16 device_id; + /* DWORD 1 */ + u32 first_cap_offset:8; + u32 max_counters:11; + u32 counters_support:1; + u32 __unknown1:4; + u32 revision:8; + /* DWORD 2 */ + enum tb_port_type type:24; + u32 thunderbolt_version:8; + /* DWORD 3 */ + u32 __unknown2:20; + u32 port_number:6; + u32 __unknown3:6; + /* DWORD 4 */ + u32 nfc_credits; + /* DWORD 5 */ + u32 max_in_hop_id:11; + u32 max_out_hop_id:11; + u32 __unknown4:10; + /* DWORD 6 */ + u32 __unknown5; + /* DWORD 7 */ + u32 __unknown6; + +} __packed; + +/* Basic adapter configuration registers */ +#define ADP_CS_4 0x04 +#define ADP_CS_4_NFC_BUFFERS_MASK GENMASK(9, 0) +#define ADP_CS_4_TOTAL_BUFFERS_MASK GENMASK(29, 20) +#define ADP_CS_4_TOTAL_BUFFERS_SHIFT 20 +#define ADP_CS_4_LCK BIT(31) +#define ADP_CS_5 0x05 +#define ADP_CS_5_LCA_MASK GENMASK(28, 22) +#define ADP_CS_5_LCA_SHIFT 22 +#define ADP_CS_5_DHP BIT(31) + +/* TMU adapter registers */ +#define TMU_ADP_CS_3 0x03 +#define TMU_ADP_CS_3_UDM BIT(29) +#define TMU_ADP_CS_6 0x06 +#define TMU_ADP_CS_6_DTS BIT(1) +#define TMU_ADP_CS_8 0x08 +#define TMU_ADP_CS_8_REPL_TIMEOUT_MASK GENMASK(14, 0) +#define TMU_ADP_CS_8_EUDM BIT(15) +#define TMU_ADP_CS_8_REPL_THRESHOLD_MASK GENMASK(25, 16) +#define TMU_ADP_CS_9 0x09 +#define TMU_ADP_CS_9_REPL_N_MASK GENMASK(7, 0) +#define TMU_ADP_CS_9_DIRSWITCH_N_MASK GENMASK(15, 8) +#define TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK GENMASK(31, 16) + +/* Lane adapter registers */ +#define LANE_ADP_CS_0 0x00 +#define LANE_ADP_CS_0_SUPPORTED_SPEED_MASK GENMASK(19, 16) +#define LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT 16 +#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20) +#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20 +#define LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL 0x2 +#define LANE_ADP_CS_0_CL0S_SUPPORT BIT(26) +#define LANE_ADP_CS_0_CL1_SUPPORT BIT(27) +#define LANE_ADP_CS_0_CL2_SUPPORT BIT(28) +#define LANE_ADP_CS_1 0x01 +#define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0) +#define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc +#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4) +#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4 +#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1 +#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3 +#define LANE_ADP_CS_1_CL0S_ENABLE BIT(10) +#define LANE_ADP_CS_1_CL1_ENABLE BIT(11) +#define LANE_ADP_CS_1_CL2_ENABLE BIT(12) +#define LANE_ADP_CS_1_LD BIT(14) +#define LANE_ADP_CS_1_LB BIT(15) +#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16) +#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16 +#define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8 +#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4 +#define LANE_ADP_CS_1_CURRENT_SPEED_GEN4 0x2 +#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20) +#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20 +#define LANE_ADP_CS_1_PMS BIT(30) + +/* USB4 port registers */ +#define PORT_CS_1 0x01 +#define PORT_CS_1_LENGTH_SHIFT 8 +#define PORT_CS_1_TARGET_MASK GENMASK(18, 16) +#define PORT_CS_1_TARGET_SHIFT 16 +#define PORT_CS_1_RETIMER_INDEX_SHIFT 20 +#define PORT_CS_1_WNR_WRITE BIT(24) +#define PORT_CS_1_NR BIT(25) +#define PORT_CS_1_RC BIT(26) +#define PORT_CS_1_PND BIT(31) +#define PORT_CS_2 0x02 +#define PORT_CS_18 0x12 +#define PORT_CS_18_BE BIT(8) +#define PORT_CS_18_TCM BIT(9) +#define PORT_CS_18_CPS BIT(10) +#define PORT_CS_18_WOCS BIT(16) +#define PORT_CS_18_WODS BIT(17) +#define PORT_CS_18_WOU4S BIT(18) +#define PORT_CS_19 0x13 +#define PORT_CS_19_PC BIT(3) +#define PORT_CS_19_PID BIT(4) +#define PORT_CS_19_WOC BIT(16) +#define PORT_CS_19_WOD BIT(17) +#define 
PORT_CS_19_WOU4 BIT(18) + +/* Display Port adapter registers */ +#define ADP_DP_CS_0 0x00 +#define ADP_DP_CS_0_VIDEO_HOPID_MASK GENMASK(26, 16) +#define ADP_DP_CS_0_VIDEO_HOPID_SHIFT 16 +#define ADP_DP_CS_0_AE BIT(30) +#define ADP_DP_CS_0_VE BIT(31) +#define ADP_DP_CS_1_AUX_TX_HOPID_MASK GENMASK(10, 0) +#define ADP_DP_CS_1_AUX_RX_HOPID_MASK GENMASK(21, 11) +#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11 +#define ADP_DP_CS_2 0x02 +#define ADP_DP_CS_2_NRD_MLC_MASK GENMASK(2, 0) +#define ADP_DP_CS_2_HDP BIT(6) +#define ADP_DP_CS_2_NRD_MLR_MASK GENMASK(9, 7) +#define ADP_DP_CS_2_NRD_MLR_SHIFT 7 +#define ADP_DP_CS_2_CA BIT(10) +#define ADP_DP_CS_2_GR_MASK GENMASK(12, 11) +#define ADP_DP_CS_2_GR_SHIFT 11 +#define ADP_DP_CS_2_GR_0_25G 0x0 +#define ADP_DP_CS_2_GR_0_5G 0x1 +#define ADP_DP_CS_2_GR_1G 0x2 +#define ADP_DP_CS_2_GROUP_ID_MASK GENMASK(15, 13) +#define ADP_DP_CS_2_GROUP_ID_SHIFT 13 +#define ADP_DP_CS_2_CM_ID_MASK GENMASK(19, 16) +#define ADP_DP_CS_2_CM_ID_SHIFT 16 +#define ADP_DP_CS_2_CMMS BIT(20) +#define ADP_DP_CS_2_ESTIMATED_BW_MASK GENMASK(31, 24) +#define ADP_DP_CS_2_ESTIMATED_BW_SHIFT 24 +#define ADP_DP_CS_3 0x03 +#define ADP_DP_CS_3_HDPC BIT(9) +#define DP_LOCAL_CAP 0x04 +#define DP_REMOTE_CAP 0x05 +/* For DP IN adapter */ +#define DP_STATUS 0x06 +#define DP_STATUS_ALLOCATED_BW_MASK GENMASK(31, 24) +#define DP_STATUS_ALLOCATED_BW_SHIFT 24 +/* For DP OUT adapter */ +#define DP_STATUS_CTRL 0x06 +#define DP_STATUS_CTRL_CMHS BIT(25) +#define DP_STATUS_CTRL_UF BIT(26) +#define DP_COMMON_CAP 0x07 +/* Only if DP IN supports BW allocation mode */ +#define ADP_DP_CS_8 0x08 +#define ADP_DP_CS_8_REQUESTED_BW_MASK GENMASK(7, 0) +#define ADP_DP_CS_8_DPME BIT(30) +#define ADP_DP_CS_8_DR BIT(31) + +/* + * DP_COMMON_CAP offsets work also for DP_LOCAL_CAP and DP_REMOTE_CAP + * with exception of DPRX done. 
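+ *
+ * For example, a capability dword whose rate field decodes to
+ * DP_COMMON_CAP_RATE_HBR2 and whose lane field decodes to
+ * DP_COMMON_CAP_4_LANES describes a 5.4 Gb/s x4 main link.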
+ */ +#define DP_COMMON_CAP_RATE_MASK GENMASK(11, 8) +#define DP_COMMON_CAP_RATE_SHIFT 8 +#define DP_COMMON_CAP_RATE_RBR 0x0 +#define DP_COMMON_CAP_RATE_HBR 0x1 +#define DP_COMMON_CAP_RATE_HBR2 0x2 +#define DP_COMMON_CAP_RATE_HBR3 0x3 +#define DP_COMMON_CAP_LANES_MASK GENMASK(14, 12) +#define DP_COMMON_CAP_LANES_SHIFT 12 +#define DP_COMMON_CAP_1_LANE 0x0 +#define DP_COMMON_CAP_2_LANES 0x1 +#define DP_COMMON_CAP_4_LANES 0x2 +#define DP_COMMON_CAP_UHBR10 BIT(17) +#define DP_COMMON_CAP_UHBR20 BIT(18) +#define DP_COMMON_CAP_UHBR13_5 BIT(19) +#define DP_COMMON_CAP_LTTPR_NS BIT(27) +#define DP_COMMON_CAP_BW_MODE BIT(28) +#define DP_COMMON_CAP_DPRX_DONE BIT(31) +/* Only present if DP IN supports BW allocation mode */ +#define ADP_DP_CS_8 0x08 +#define ADP_DP_CS_8_DPME BIT(30) +#define ADP_DP_CS_8_DR BIT(31) + +/* PCIe adapter registers */ +#define ADP_PCIE_CS_0 0x00 +#define ADP_PCIE_CS_0_PE BIT(31) +#define ADP_PCIE_CS_1 0x01 +#define ADP_PCIE_CS_1_EE BIT(0) + +/* USB adapter registers */ +#define ADP_USB3_CS_0 0x00 +#define ADP_USB3_CS_0_V BIT(30) +#define ADP_USB3_CS_0_PE BIT(31) +#define ADP_USB3_CS_1 0x01 +#define ADP_USB3_CS_1_CUBW_MASK GENMASK(11, 0) +#define ADP_USB3_CS_1_CDBW_MASK GENMASK(23, 12) +#define ADP_USB3_CS_1_CDBW_SHIFT 12 +#define ADP_USB3_CS_1_HCA BIT(31) +#define ADP_USB3_CS_2 0x02 +#define ADP_USB3_CS_2_AUBW_MASK GENMASK(11, 0) +#define ADP_USB3_CS_2_ADBW_MASK GENMASK(23, 12) +#define ADP_USB3_CS_2_ADBW_SHIFT 12 +#define ADP_USB3_CS_2_CMR BIT(31) +#define ADP_USB3_CS_3 0x03 +#define ADP_USB3_CS_3_SCALE_MASK GENMASK(5, 0) +#define ADP_USB3_CS_4 0x04 +#define ADP_USB3_CS_4_ALR_MASK GENMASK(6, 0) +#define ADP_USB3_CS_4_ALR_20G 0x1 +#define ADP_USB3_CS_4_ULV BIT(7) +#define ADP_USB3_CS_4_MSLR_MASK GENMASK(18, 12) +#define ADP_USB3_CS_4_MSLR_SHIFT 12 +#define ADP_USB3_CS_4_MSLR_20G 0x1 + +/* Hop register from TB_CFG_HOPS. 8 byte per entry. */ +struct tb_regs_hop { + /* DWORD 0 */ + u32 next_hop:11; /* + * hop to take after sending the packet through + * out_port (on the incoming port of the next switch) + */ + u32 out_port:6; /* next port of the path (on the same switch) */ + u32 initial_credits:8; + u32 unknown1:6; /* set to zero */ + bool enable:1; + + /* DWORD 1 */ + u32 weight:4; + u32 unknown2:4; /* set to zero */ + u32 priority:3; + bool drop_packages:1; + u32 counter:11; /* index into TB_CFG_COUNTERS on this port */ + bool counter_enable:1; + bool ingress_fc:1; + bool egress_fc:1; + bool ingress_shared_buffer:1; + bool egress_shared_buffer:1; + bool pending:1; + u32 unknown3:3; /* set to zero */ +} __packed; + +/* TMU Thunderbolt 3 registers */ +#define TB_TIME_VSEC_3_CS_9 0x9 +#define TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK GENMASK(17, 16) +#define TB_TIME_VSEC_3_CS_26 0x1a +#define TB_TIME_VSEC_3_CS_26_TD BIT(22) + +/* + * Used for Titan Ridge only. Bits are part of the same register: TMU_ADP_CS_6 + * (see above) as in USB4 spec, but these specific bits used for Titan Ridge + * only and reserved in USB4 spec. 
+ */ +#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK GENMASK(3, 2) +#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 BIT(2) +#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2 BIT(3) + +/* Plug Events registers */ +#define TB_PLUG_EVENTS_USB_DISABLE BIT(2) +#define TB_PLUG_EVENTS_CS_1_LANE_DISABLE BIT(3) +#define TB_PLUG_EVENTS_CS_1_DPOUT_DISABLE BIT(4) +#define TB_PLUG_EVENTS_CS_1_LOW_DPIN_DISABLE BIT(5) +#define TB_PLUG_EVENTS_CS_1_HIGH_DPIN_DISABLE BIT(6) + +#define TB_PLUG_EVENTS_PCIE_WR_DATA 0x1b +#define TB_PLUG_EVENTS_PCIE_CMD 0x1c +#define TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK GENMASK(9, 0) +#define TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT 10 +#define TB_PLUG_EVENTS_PCIE_CMD_BR_MASK GENMASK(17, 10) +#define TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK BIT(21) +#define TB_PLUG_EVENTS_PCIE_CMD_WR 0x1 +#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT 22 +#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_MASK GENMASK(24, 22) +#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL 0x2 +#define TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK BIT(30) +#define TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK BIT(31) +#define TB_PLUG_EVENTS_PCIE_CMD_RD_DATA 0x1d + +/* CP Low Power registers */ +#define TB_LOW_PWR_C1_CL1 0x1 +#define TB_LOW_PWR_C1_CL1_OBJ_MASK GENMASK(4, 1) +#define TB_LOW_PWR_C1_CL2_OBJ_MASK GENMASK(4, 1) +#define TB_LOW_PWR_C1_PORT_A_MASK GENMASK(2, 1) +#define TB_LOW_PWR_C0_PORT_B_MASK GENMASK(4, 3) +#define TB_LOW_PWR_C3_CL1 0x3 + +/* Common link controller registers */ +#define TB_LC_DESC 0x02 +#define TB_LC_DESC_NLC_MASK GENMASK(3, 0) +#define TB_LC_DESC_SIZE_SHIFT 8 +#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8) +#define TB_LC_DESC_PORT_SIZE_SHIFT 16 +#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16) +#define TB_LC_FUSE 0x03 +#define TB_LC_SNK_ALLOCATION 0x10 +#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0) +#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1 +#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4 +#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4) +#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1 +#define TB_LC_POWER 0x740 + +/* Link controller registers */ +#define TB_LC_CS_42 0x2a +#define TB_LC_CS_42_USB_PLUGGED BIT(31) + +#define TB_LC_PORT_ATTR 0x8d +#define TB_LC_PORT_ATTR_BE BIT(12) + +#define TB_LC_SX_CTRL 0x96 +#define TB_LC_SX_CTRL_WOC BIT(1) +#define TB_LC_SX_CTRL_WOD BIT(2) +#define TB_LC_SX_CTRL_WODPC BIT(3) +#define TB_LC_SX_CTRL_WODPD BIT(4) +#define TB_LC_SX_CTRL_WOU4 BIT(5) +#define TB_LC_SX_CTRL_WOP BIT(6) +#define TB_LC_SX_CTRL_L1C BIT(16) +#define TB_LC_SX_CTRL_L1D BIT(17) +#define TB_LC_SX_CTRL_L2C BIT(20) +#define TB_LC_SX_CTRL_L2D BIT(21) +#define TB_LC_SX_CTRL_SLI BIT(29) +#define TB_LC_SX_CTRL_UPSTREAM BIT(30) +#define TB_LC_SX_CTRL_SLP BIT(31) +#define TB_LC_LINK_ATTR 0x97 +#define TB_LC_LINK_ATTR_CPS BIT(18) + +#define TB_LC_LINK_REQ 0xad +#define TB_LC_LINK_REQ_XHCI_CONNECT BIT(31) + +#endif diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c new file mode 100644 index 0000000000..9475c6698c --- /dev/null +++ b/drivers/thunderbolt/test.c @@ -0,0 +1,2903 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit tests + * + * Copyright (C) 2020, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <kunit/test.h> +#include <linux/idr.h> + +#include "tb.h" +#include "tunnel.h" + +static int __ida_init(struct kunit_resource *res, void *context) +{ + struct ida *ida = context; + + ida_init(ida); + res->data = ida; + return 0; +} + +static void __ida_destroy(struct kunit_resource *res) +{ + struct ida *ida = res->data; + + ida_destroy(ida); +} + +static void kunit_ida_init(struct kunit 
*test, struct ida *ida) +{ + kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida); +} + +static struct tb_switch *alloc_switch(struct kunit *test, u64 route, + u8 upstream_port, u8 max_port_number) +{ + struct tb_switch *sw; + size_t size; + int i; + + sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL); + if (!sw) + return NULL; + + sw->config.upstream_port_number = upstream_port; + sw->config.depth = tb_route_length(route); + sw->config.route_hi = upper_32_bits(route); + sw->config.route_lo = lower_32_bits(route); + sw->config.enabled = 0; + sw->config.max_port_number = max_port_number; + + size = (sw->config.max_port_number + 1) * sizeof(*sw->ports); + sw->ports = kunit_kzalloc(test, size, GFP_KERNEL); + if (!sw->ports) + return NULL; + + for (i = 0; i <= sw->config.max_port_number; i++) { + sw->ports[i].sw = sw; + sw->ports[i].port = i; + sw->ports[i].config.port_number = i; + if (i) { + kunit_ida_init(test, &sw->ports[i].in_hopids); + kunit_ida_init(test, &sw->ports[i].out_hopids); + } + } + + return sw; +} + +static struct tb_switch *alloc_host(struct kunit *test) +{ + struct tb_switch *sw; + + sw = alloc_switch(test, 0, 7, 13); + if (!sw) + return NULL; + + sw->config.vendor_id = 0x8086; + sw->config.device_id = 0x9a1b; + + sw->ports[0].config.type = TB_TYPE_PORT; + sw->ports[0].config.max_in_hop_id = 7; + sw->ports[0].config.max_out_hop_id = 7; + + sw->ports[1].config.type = TB_TYPE_PORT; + sw->ports[1].config.max_in_hop_id = 19; + sw->ports[1].config.max_out_hop_id = 19; + sw->ports[1].total_credits = 60; + sw->ports[1].ctl_credits = 2; + sw->ports[1].dual_link_port = &sw->ports[2]; + + sw->ports[2].config.type = TB_TYPE_PORT; + sw->ports[2].config.max_in_hop_id = 19; + sw->ports[2].config.max_out_hop_id = 19; + sw->ports[2].total_credits = 60; + sw->ports[2].ctl_credits = 2; + sw->ports[2].dual_link_port = &sw->ports[1]; + sw->ports[2].link_nr = 1; + + sw->ports[3].config.type = TB_TYPE_PORT; + sw->ports[3].config.max_in_hop_id = 19; + sw->ports[3].config.max_out_hop_id = 19; + sw->ports[3].total_credits = 60; + sw->ports[3].ctl_credits = 2; + sw->ports[3].dual_link_port = &sw->ports[4]; + + sw->ports[4].config.type = TB_TYPE_PORT; + sw->ports[4].config.max_in_hop_id = 19; + sw->ports[4].config.max_out_hop_id = 19; + sw->ports[4].total_credits = 60; + sw->ports[4].ctl_credits = 2; + sw->ports[4].dual_link_port = &sw->ports[3]; + sw->ports[4].link_nr = 1; + + sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN; + sw->ports[5].config.max_in_hop_id = 9; + sw->ports[5].config.max_out_hop_id = 9; + sw->ports[5].cap_adap = -1; + + sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN; + sw->ports[6].config.max_in_hop_id = 9; + sw->ports[6].config.max_out_hop_id = 9; + sw->ports[6].cap_adap = -1; + + sw->ports[7].config.type = TB_TYPE_NHI; + sw->ports[7].config.max_in_hop_id = 11; + sw->ports[7].config.max_out_hop_id = 11; + sw->ports[7].config.nfc_credits = 0x41800000; + + sw->ports[8].config.type = TB_TYPE_PCIE_DOWN; + sw->ports[8].config.max_in_hop_id = 8; + sw->ports[8].config.max_out_hop_id = 8; + + sw->ports[9].config.type = TB_TYPE_PCIE_DOWN; + sw->ports[9].config.max_in_hop_id = 8; + sw->ports[9].config.max_out_hop_id = 8; + + sw->ports[10].disabled = true; + sw->ports[11].disabled = true; + + sw->ports[12].config.type = TB_TYPE_USB3_DOWN; + sw->ports[12].config.max_in_hop_id = 8; + sw->ports[12].config.max_out_hop_id = 8; + + sw->ports[13].config.type = TB_TYPE_USB3_DOWN; + sw->ports[13].config.max_in_hop_id = 8; + sw->ports[13].config.max_out_hop_id = 8; + + return sw; +} + 
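+/*
+ * Same mock host router as alloc_host() but marked as USB4 (generation 4)
+ * with credit allocation parameters filled in, so that the USB4 credit
+ * allocation paths are exercised as well.
+ */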
+static struct tb_switch *alloc_host_usb4(struct kunit *test) +{ + struct tb_switch *sw; + + sw = alloc_host(test); + if (!sw) + return NULL; + + sw->generation = 4; + sw->credit_allocation = true; + sw->max_usb3_credits = 32; + sw->min_dp_aux_credits = 1; + sw->min_dp_main_credits = 0; + sw->max_pcie_credits = 64; + sw->max_dma_credits = 14; + + return sw; +} + +static struct tb_switch *alloc_host_br(struct kunit *test) +{ + struct tb_switch *sw; + + sw = alloc_host_usb4(test); + if (!sw) + return NULL; + + sw->ports[10].config.type = TB_TYPE_DP_HDMI_IN; + sw->ports[10].config.max_in_hop_id = 9; + sw->ports[10].config.max_out_hop_id = 9; + sw->ports[10].cap_adap = -1; + sw->ports[10].disabled = false; + + return sw; +} + +static struct tb_switch *alloc_dev_default(struct kunit *test, + struct tb_switch *parent, + u64 route, bool bonded) +{ + struct tb_port *port, *upstream_port; + struct tb_switch *sw; + + sw = alloc_switch(test, route, 1, 19); + if (!sw) + return NULL; + + sw->config.vendor_id = 0x8086; + sw->config.device_id = 0x15ef; + + sw->ports[0].config.type = TB_TYPE_PORT; + sw->ports[0].config.max_in_hop_id = 8; + sw->ports[0].config.max_out_hop_id = 8; + + sw->ports[1].config.type = TB_TYPE_PORT; + sw->ports[1].config.max_in_hop_id = 19; + sw->ports[1].config.max_out_hop_id = 19; + sw->ports[1].total_credits = 60; + sw->ports[1].ctl_credits = 2; + sw->ports[1].dual_link_port = &sw->ports[2]; + + sw->ports[2].config.type = TB_TYPE_PORT; + sw->ports[2].config.max_in_hop_id = 19; + sw->ports[2].config.max_out_hop_id = 19; + sw->ports[2].total_credits = 60; + sw->ports[2].ctl_credits = 2; + sw->ports[2].dual_link_port = &sw->ports[1]; + sw->ports[2].link_nr = 1; + + sw->ports[3].config.type = TB_TYPE_PORT; + sw->ports[3].config.max_in_hop_id = 19; + sw->ports[3].config.max_out_hop_id = 19; + sw->ports[3].total_credits = 60; + sw->ports[3].ctl_credits = 2; + sw->ports[3].dual_link_port = &sw->ports[4]; + + sw->ports[4].config.type = TB_TYPE_PORT; + sw->ports[4].config.max_in_hop_id = 19; + sw->ports[4].config.max_out_hop_id = 19; + sw->ports[4].total_credits = 60; + sw->ports[4].ctl_credits = 2; + sw->ports[4].dual_link_port = &sw->ports[3]; + sw->ports[4].link_nr = 1; + + sw->ports[5].config.type = TB_TYPE_PORT; + sw->ports[5].config.max_in_hop_id = 19; + sw->ports[5].config.max_out_hop_id = 19; + sw->ports[5].total_credits = 60; + sw->ports[5].ctl_credits = 2; + sw->ports[5].dual_link_port = &sw->ports[6]; + + sw->ports[6].config.type = TB_TYPE_PORT; + sw->ports[6].config.max_in_hop_id = 19; + sw->ports[6].config.max_out_hop_id = 19; + sw->ports[6].total_credits = 60; + sw->ports[6].ctl_credits = 2; + sw->ports[6].dual_link_port = &sw->ports[5]; + sw->ports[6].link_nr = 1; + + sw->ports[7].config.type = TB_TYPE_PORT; + sw->ports[7].config.max_in_hop_id = 19; + sw->ports[7].config.max_out_hop_id = 19; + sw->ports[7].total_credits = 60; + sw->ports[7].ctl_credits = 2; + sw->ports[7].dual_link_port = &sw->ports[8]; + + sw->ports[8].config.type = TB_TYPE_PORT; + sw->ports[8].config.max_in_hop_id = 19; + sw->ports[8].config.max_out_hop_id = 19; + sw->ports[8].total_credits = 60; + sw->ports[8].ctl_credits = 2; + sw->ports[8].dual_link_port = &sw->ports[7]; + sw->ports[8].link_nr = 1; + + sw->ports[9].config.type = TB_TYPE_PCIE_UP; + sw->ports[9].config.max_in_hop_id = 8; + sw->ports[9].config.max_out_hop_id = 8; + + sw->ports[10].config.type = TB_TYPE_PCIE_DOWN; + sw->ports[10].config.max_in_hop_id = 8; + sw->ports[10].config.max_out_hop_id = 8; + + sw->ports[11].config.type = 
TB_TYPE_PCIE_DOWN; + sw->ports[11].config.max_in_hop_id = 8; + sw->ports[11].config.max_out_hop_id = 8; + + sw->ports[12].config.type = TB_TYPE_PCIE_DOWN; + sw->ports[12].config.max_in_hop_id = 8; + sw->ports[12].config.max_out_hop_id = 8; + + sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT; + sw->ports[13].config.max_in_hop_id = 9; + sw->ports[13].config.max_out_hop_id = 9; + sw->ports[13].cap_adap = -1; + + sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT; + sw->ports[14].config.max_in_hop_id = 9; + sw->ports[14].config.max_out_hop_id = 9; + sw->ports[14].cap_adap = -1; + + sw->ports[15].disabled = true; + + sw->ports[16].config.type = TB_TYPE_USB3_UP; + sw->ports[16].config.max_in_hop_id = 8; + sw->ports[16].config.max_out_hop_id = 8; + + sw->ports[17].config.type = TB_TYPE_USB3_DOWN; + sw->ports[17].config.max_in_hop_id = 8; + sw->ports[17].config.max_out_hop_id = 8; + + sw->ports[18].config.type = TB_TYPE_USB3_DOWN; + sw->ports[18].config.max_in_hop_id = 8; + sw->ports[18].config.max_out_hop_id = 8; + + sw->ports[19].config.type = TB_TYPE_USB3_DOWN; + sw->ports[19].config.max_in_hop_id = 8; + sw->ports[19].config.max_out_hop_id = 8; + + if (!parent) + return sw; + + /* Link them */ + upstream_port = tb_upstream_port(sw); + port = tb_port_at(route, parent); + port->remote = upstream_port; + upstream_port->remote = port; + if (port->dual_link_port && upstream_port->dual_link_port) { + port->dual_link_port->remote = upstream_port->dual_link_port; + upstream_port->dual_link_port->remote = port->dual_link_port; + + if (bonded) { + /* Bonding is used */ + port->bonded = true; + port->total_credits *= 2; + port->dual_link_port->bonded = true; + port->dual_link_port->total_credits = 0; + upstream_port->bonded = true; + upstream_port->total_credits *= 2; + upstream_port->dual_link_port->bonded = true; + upstream_port->dual_link_port->total_credits = 0; + } + } + + return sw; +} + +static struct tb_switch *alloc_dev_with_dpin(struct kunit *test, + struct tb_switch *parent, + u64 route, bool bonded) +{ + struct tb_switch *sw; + + sw = alloc_dev_default(test, parent, route, bonded); + if (!sw) + return NULL; + + sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN; + sw->ports[13].config.max_in_hop_id = 9; + sw->ports[13].config.max_out_hop_id = 9; + + sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN; + sw->ports[14].config.max_in_hop_id = 9; + sw->ports[14].config.max_out_hop_id = 9; + + return sw; +} + +static struct tb_switch *alloc_dev_without_dp(struct kunit *test, + struct tb_switch *parent, + u64 route, bool bonded) +{ + struct tb_switch *sw; + int i; + + sw = alloc_dev_default(test, parent, route, bonded); + if (!sw) + return NULL; + /* + * Device with: + * 2x USB4 Adapters (adapters 1,2 and 3,4), + * 1x PCIe Upstream (adapter 9), + * 1x PCIe Downstream (adapter 10), + * 1x USB3 Upstream (adapter 16), + * 1x USB3 Downstream (adapter 17) + */ + for (i = 5; i <= 8; i++) + sw->ports[i].disabled = true; + + for (i = 11; i <= 14; i++) + sw->ports[i].disabled = true; + + sw->ports[13].cap_adap = 0; + sw->ports[14].cap_adap = 0; + + for (i = 18; i <= 19; i++) + sw->ports[i].disabled = true; + + sw->generation = 4; + sw->credit_allocation = true; + sw->max_usb3_credits = 109; + sw->min_dp_aux_credits = 0; + sw->min_dp_main_credits = 0; + sw->max_pcie_credits = 30; + sw->max_dma_credits = 1; + + return sw; +} + +static struct tb_switch *alloc_dev_usb4(struct kunit *test, + struct tb_switch *parent, + u64 route, bool bonded) +{ + struct tb_switch *sw; + + sw = alloc_dev_default(test, parent, route, bonded); 
+ if (!sw) + return NULL; + + sw->generation = 4; + sw->credit_allocation = true; + sw->max_usb3_credits = 14; + sw->min_dp_aux_credits = 1; + sw->min_dp_main_credits = 18; + sw->max_pcie_credits = 32; + sw->max_dma_credits = 14; + + return sw; +} + +static void tb_test_path_basic(struct kunit *test) +{ + struct tb_port *src_port, *dst_port, *p; + struct tb_switch *host; + + host = alloc_host(test); + + src_port = &host->ports[5]; + dst_port = src_port; + + p = tb_next_port_on_path(src_port, dst_port, NULL); + KUNIT_EXPECT_PTR_EQ(test, p, dst_port); + + p = tb_next_port_on_path(src_port, dst_port, p); + KUNIT_EXPECT_TRUE(test, !p); +} + +static void tb_test_path_not_connected_walk(struct kunit *test) +{ + struct tb_port *src_port, *dst_port, *p; + struct tb_switch *host, *dev; + + host = alloc_host(test); + /* No connection between host and dev */ + dev = alloc_dev_default(test, NULL, 3, true); + + src_port = &host->ports[12]; + dst_port = &dev->ports[16]; + + p = tb_next_port_on_path(src_port, dst_port, NULL); + KUNIT_EXPECT_PTR_EQ(test, p, src_port); + + p = tb_next_port_on_path(src_port, dst_port, p); + KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]); + + p = tb_next_port_on_path(src_port, dst_port, p); + KUNIT_EXPECT_TRUE(test, !p); + + /* Other direction */ + + p = tb_next_port_on_path(dst_port, src_port, NULL); + KUNIT_EXPECT_PTR_EQ(test, p, dst_port); + + p = tb_next_port_on_path(dst_port, src_port, p); + KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]); + + p = tb_next_port_on_path(dst_port, src_port, p); + KUNIT_EXPECT_TRUE(test, !p); +} + +struct port_expectation { + u64 route; + u8 port; + enum tb_port_type type; +}; + +static void tb_test_path_single_hop_walk(struct kunit *test) +{ + /* + * Walks from Host PCIe downstream port to Device #1 PCIe + * upstream port. + * + * [Host] + * 1 | + * 1 | + * [Device] + */ + static const struct port_expectation test_data[] = { + { .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN }, + { .route = 0x0, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP }, + }; + struct tb_port *src_port, *dst_port, *p; + struct tb_switch *host, *dev; + int i; + + host = alloc_host(test); + dev = alloc_dev_default(test, host, 1, true); + + src_port = &host->ports[8]; + dst_port = &dev->ports[9]; + + /* Walk both directions */ + + i = 0; + tb_for_each_port_on_path(src_port, dst_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i++; + } + + KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data)); + + i = ARRAY_SIZE(test_data) - 1; + tb_for_each_port_on_path(dst_port, src_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i--; + } + + KUNIT_EXPECT_EQ(test, i, -1); +} + +static void tb_test_path_daisy_chain_walk(struct kunit *test) +{ + /* + * Walks from Host DP IN to Device #2 DP OUT. 
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 3 / + * 1 / + * [Device #2] + */ + static const struct port_expectation test_data[] = { + { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN }, + { .route = 0x0, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT }, + }; + struct tb_port *src_port, *dst_port, *p; + struct tb_switch *host, *dev1, *dev2; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x301, true); + + src_port = &host->ports[5]; + dst_port = &dev2->ports[13]; + + /* Walk both directions */ + + i = 0; + tb_for_each_port_on_path(src_port, dst_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i++; + } + + KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data)); + + i = ARRAY_SIZE(test_data) - 1; + tb_for_each_port_on_path(dst_port, src_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i--; + } + + KUNIT_EXPECT_EQ(test, i, -1); +} + +static void tb_test_path_simple_tree_walk(struct kunit *test) +{ + /* + * Walks from Host DP IN to Device #3 DP OUT. + * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 3 / | 5 \ 7 + * 1 / | \ 1 + * [Device #2] | [Device #4] + * | 1 + * [Device #3] + */ + static const struct port_expectation test_data[] = { + { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN }, + { .route = 0x0, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 5, .type = TB_TYPE_PORT }, + { .route = 0x501, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT }, + }; + struct tb_port *src_port, *dst_port, *p; + struct tb_switch *host, *dev1, *dev3; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + alloc_dev_default(test, dev1, 0x301, true); + dev3 = alloc_dev_default(test, dev1, 0x501, true); + alloc_dev_default(test, dev1, 0x701, true); + + src_port = &host->ports[5]; + dst_port = &dev3->ports[13]; + + /* Walk both directions */ + + i = 0; + tb_for_each_port_on_path(src_port, dst_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i++; + } + + KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data)); + + i = ARRAY_SIZE(test_data) - 1; + tb_for_each_port_on_path(dst_port, src_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i--; + } + + KUNIT_EXPECT_EQ(test, i, -1); +} + +static void tb_test_path_complex_tree_walk(struct kunit *test) +{ + /* + * Walks from Device #3 DP IN to Device #9 DP OUT. 
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 3 / | 5 \ 7 + * 1 / | \ 1 + * [Device #2] | [Device #5] + * 5 | | 1 \ 7 + * 1 | [Device #4] \ 1 + * [Device #3] [Device #6] + * 3 / + * 1 / + * [Device #7] + * 3 / | 5 + * 1 / | + * [Device #8] | 1 + * [Device #9] + */ + static const struct port_expectation test_data[] = { + { .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN }, + { .route = 0x50301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x301, .port = 5, .type = TB_TYPE_PORT }, + { .route = 0x301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 7, .type = TB_TYPE_PORT }, + { .route = 0x701, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x701, .port = 7, .type = TB_TYPE_PORT }, + { .route = 0x70701, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x70701, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT }, + { .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT }, + }; + struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9; + struct tb_port *src_port, *dst_port, *p; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x301, true); + dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true); + alloc_dev_default(test, dev1, 0x501, true); + dev5 = alloc_dev_default(test, dev1, 0x701, true); + dev6 = alloc_dev_default(test, dev5, 0x70701, true); + dev7 = alloc_dev_default(test, dev6, 0x3070701, true); + alloc_dev_default(test, dev7, 0x303070701, true); + dev9 = alloc_dev_default(test, dev7, 0x503070701, true); + + src_port = &dev3->ports[13]; + dst_port = &dev9->ports[14]; + + /* Walk both directions */ + + i = 0; + tb_for_each_port_on_path(src_port, dst_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i++; + } + + KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data)); + + i = ARRAY_SIZE(test_data) - 1; + tb_for_each_port_on_path(dst_port, src_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i--; + } + + KUNIT_EXPECT_EQ(test, i, -1); +} + +static void tb_test_path_max_length_walk(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6; + struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12; + struct tb_port *src_port, *dst_port, *p; + int i; + + /* + * Walks from Device #6 DP IN to Device #12 DP OUT. 
+ * + * [Host] + * 1 / \ 3 + * 1 / \ 1 + * [Device #1] [Device #7] + * 3 | | 3 + * 1 | | 1 + * [Device #2] [Device #8] + * 3 | | 3 + * 1 | | 1 + * [Device #3] [Device #9] + * 3 | | 3 + * 1 | | 1 + * [Device #4] [Device #10] + * 3 | | 3 + * 1 | | 1 + * [Device #5] [Device #11] + * 3 | | 3 + * 1 | | 1 + * [Device #6] [Device #12] + */ + static const struct port_expectation test_data[] = { + { .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN }, + { .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x30301, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x30301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x301, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x0, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x0, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x3, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x3, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x303, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x303, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x30303, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x30303, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT }, + }; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x301, true); + dev3 = alloc_dev_default(test, dev2, 0x30301, true); + dev4 = alloc_dev_default(test, dev3, 0x3030301, true); + dev5 = alloc_dev_default(test, dev4, 0x303030301, true); + dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true); + dev7 = alloc_dev_default(test, host, 0x3, true); + dev8 = alloc_dev_default(test, dev7, 0x303, true); + dev9 = alloc_dev_default(test, dev8, 0x30303, true); + dev10 = alloc_dev_default(test, dev9, 0x3030303, true); + dev11 = alloc_dev_default(test, dev10, 0x303030303, true); + dev12 = alloc_dev_default(test, dev11, 0x30303030303, true); + + src_port = &dev6->ports[13]; + dst_port = &dev12->ports[13]; + + /* Walk both directions */ + + i = 0; + tb_for_each_port_on_path(src_port, dst_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i++; + } + + KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data)); + + i = ARRAY_SIZE(test_data) - 1; + tb_for_each_port_on_path(dst_port, src_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i--; + } + + KUNIT_EXPECT_EQ(test, i, -1); +} + +static void tb_test_path_not_connected(struct kunit *test) +{ + struct 
tb_switch *host, *dev1, *dev2; + struct tb_port *down, *up; + struct tb_path *path; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x3, false); + /* Not connected to anything */ + dev2 = alloc_dev_default(test, NULL, 0x303, false); + + down = &dev1->ports[10]; + up = &dev2->ports[9]; + + path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down"); + KUNIT_ASSERT_NULL(test, path); + path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down"); + KUNIT_ASSERT_NULL(test, path); +} + +struct hop_expectation { + u64 route; + u8 in_port; + enum tb_port_type in_type; + u8 out_port; + enum tb_port_type out_type; +}; + +static void tb_test_path_not_bonded_lane0(struct kunit *test) +{ + /* + * PCIe path from host to device using lane 0. + * + * [Host] + * 3 |: 4 + * 1 |: 2 + * [Device] + */ + static const struct hop_expectation test_data[] = { + { + .route = 0x0, + .in_port = 9, + .in_type = TB_TYPE_PCIE_DOWN, + .out_port = 3, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x3, + .in_port = 1, + .in_type = TB_TYPE_PORT, + .out_port = 9, + .out_type = TB_TYPE_PCIE_UP, + }, + }; + struct tb_switch *host, *dev; + struct tb_port *down, *up; + struct tb_path *path; + int i; + + host = alloc_host(test); + dev = alloc_dev_default(test, host, 0x3, false); + + down = &host->ports[9]; + up = &dev->ports[9]; + + path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down"); + KUNIT_ASSERT_NOT_NULL(test, path); + KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data)); + for (i = 0; i < ARRAY_SIZE(test_data); i++) { + const struct tb_port *in_port, *out_port; + + in_port = path->hops[i].in_port; + out_port = path->hops[i].out_port; + + KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type, + test_data[i].in_type); + KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type, + test_data[i].out_type); + } + tb_path_free(path); +} + +static void tb_test_path_not_bonded_lane1(struct kunit *test) +{ + /* + * DP Video path from host to device using lane 1. Paths like + * these are only used with Thunderbolt 1 devices where lane + * bonding is not possible. USB4 specifically does not allow + * paths like this (you either use lane 0 where lane 1 is + * disabled or both lanes are bonded). 
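+ * Since the link is not bonded and lane 1 is used, the expectations
+ * below go through the lane 1 adapter (port 2) on both ends of the
+ * link.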
+ * + * [Host] + * 1 :| 2 + * 1 :| 2 + * [Device] + */ + static const struct hop_expectation test_data[] = { + { + .route = 0x0, + .in_port = 5, + .in_type = TB_TYPE_DP_HDMI_IN, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x1, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 13, + .out_type = TB_TYPE_DP_HDMI_OUT, + }, + }; + struct tb_switch *host, *dev; + struct tb_port *in, *out; + struct tb_path *path; + int i; + + host = alloc_host(test); + dev = alloc_dev_default(test, host, 0x1, false); + + in = &host->ports[5]; + out = &dev->ports[13]; + + path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video"); + KUNIT_ASSERT_NOT_NULL(test, path); + KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data)); + for (i = 0; i < ARRAY_SIZE(test_data); i++) { + const struct tb_port *in_port, *out_port; + + in_port = path->hops[i].in_port; + out_port = path->hops[i].out_port; + + KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type, + test_data[i].in_type); + KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type, + test_data[i].out_type); + } + tb_path_free(path); +} + +static void tb_test_path_not_bonded_lane1_chain(struct kunit *test) +{ + /* + * DP Video path from host to device 3 using lane 1. + * + * [Host] + * 1 :| 2 + * 1 :| 2 + * [Device #1] + * 7 :| 8 + * 1 :| 2 + * [Device #2] + * 5 :| 6 + * 1 :| 2 + * [Device #3] + */ + static const struct hop_expectation test_data[] = { + { + .route = 0x0, + .in_port = 5, + .in_type = TB_TYPE_DP_HDMI_IN, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x1, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 8, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x701, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 6, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x50701, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 13, + .out_type = TB_TYPE_DP_HDMI_OUT, + }, + }; + struct tb_switch *host, *dev1, *dev2, *dev3; + struct tb_port *in, *out; + struct tb_path *path; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, false); + dev2 = alloc_dev_default(test, dev1, 0x701, false); + dev3 = alloc_dev_default(test, dev2, 0x50701, false); + + in = &host->ports[5]; + out = &dev3->ports[13]; + + path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video"); + KUNIT_ASSERT_NOT_NULL(test, path); + KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data)); + for (i = 0; i < ARRAY_SIZE(test_data); i++) { + const struct tb_port *in_port, *out_port; + + in_port = path->hops[i].in_port; + out_port = path->hops[i].out_port; + + KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type, + test_data[i].in_type); + KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type, + test_data[i].out_type); + } + tb_path_free(path); +} + +static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test) +{ + /* + * DP Video path from device 3 to host using lane 1. 
+ * + * [Host] + * 1 :| 2 + * 1 :| 2 + * [Device #1] + * 7 :| 8 + * 1 :| 2 + * [Device #2] + * 5 :| 6 + * 1 :| 2 + * [Device #3] + */ + static const struct hop_expectation test_data[] = { + { + .route = 0x50701, + .in_port = 13, + .in_type = TB_TYPE_DP_HDMI_IN, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x701, + .in_port = 6, + .in_type = TB_TYPE_PORT, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x1, + .in_port = 8, + .in_type = TB_TYPE_PORT, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x0, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 5, + .out_type = TB_TYPE_DP_HDMI_IN, + }, + }; + struct tb_switch *host, *dev1, *dev2, *dev3; + struct tb_port *in, *out; + struct tb_path *path; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, false); + dev2 = alloc_dev_default(test, dev1, 0x701, false); + dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false); + + in = &dev3->ports[13]; + out = &host->ports[5]; + + path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video"); + KUNIT_ASSERT_NOT_NULL(test, path); + KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data)); + for (i = 0; i < ARRAY_SIZE(test_data); i++) { + const struct tb_port *in_port, *out_port; + + in_port = path->hops[i].in_port; + out_port = path->hops[i].out_port; + + KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type, + test_data[i].in_type); + KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type, + test_data[i].out_type); + } + tb_path_free(path); +} + +static void tb_test_path_mixed_chain(struct kunit *test) +{ + /* + * DP Video path from host to device 4 where first and last link + * is bonded. 
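+ * On the bonded links the expectations below use ports 1 and 3 (the
+ * lane 0 adapters) whereas the unbonded hops stay on the lane 1
+ * adapters (ports 2, 6 and 8).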
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 7 :| 8 + * 1 :| 2 + * [Device #2] + * 5 :| 6 + * 1 :| 2 + * [Device #3] + * 3 | + * 1 | + * [Device #4] + */ + static const struct hop_expectation test_data[] = { + { + .route = 0x0, + .in_port = 5, + .in_type = TB_TYPE_DP_HDMI_IN, + .out_port = 1, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x1, + .in_port = 1, + .in_type = TB_TYPE_PORT, + .out_port = 8, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x701, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 6, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x50701, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 3, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x3050701, + .in_port = 1, + .in_type = TB_TYPE_PORT, + .out_port = 13, + .out_type = TB_TYPE_DP_HDMI_OUT, + }, + }; + struct tb_switch *host, *dev1, *dev2, *dev3, *dev4; + struct tb_port *in, *out; + struct tb_path *path; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x701, false); + dev3 = alloc_dev_default(test, dev2, 0x50701, false); + dev4 = alloc_dev_default(test, dev3, 0x3050701, true); + + in = &host->ports[5]; + out = &dev4->ports[13]; + + path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video"); + KUNIT_ASSERT_NOT_NULL(test, path); + KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data)); + for (i = 0; i < ARRAY_SIZE(test_data); i++) { + const struct tb_port *in_port, *out_port; + + in_port = path->hops[i].in_port; + out_port = path->hops[i].out_port; + + KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type, + test_data[i].in_type); + KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type, + test_data[i].out_type); + } + tb_path_free(path); +} + +static void tb_test_path_mixed_chain_reverse(struct kunit *test) +{ + /* + * DP Video path from device 4 to host where first and last link + * is bonded. 
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 7 :| 8 + * 1 :| 2 + * [Device #2] + * 5 :| 6 + * 1 :| 2 + * [Device #3] + * 3 | + * 1 | + * [Device #4] + */ + static const struct hop_expectation test_data[] = { + { + .route = 0x3050701, + .in_port = 13, + .in_type = TB_TYPE_DP_HDMI_OUT, + .out_port = 1, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x50701, + .in_port = 3, + .in_type = TB_TYPE_PORT, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x701, + .in_port = 6, + .in_type = TB_TYPE_PORT, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x1, + .in_port = 8, + .in_type = TB_TYPE_PORT, + .out_port = 1, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x0, + .in_port = 1, + .in_type = TB_TYPE_PORT, + .out_port = 5, + .out_type = TB_TYPE_DP_HDMI_IN, + }, + }; + struct tb_switch *host, *dev1, *dev2, *dev3, *dev4; + struct tb_port *in, *out; + struct tb_path *path; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x701, false); + dev3 = alloc_dev_default(test, dev2, 0x50701, false); + dev4 = alloc_dev_default(test, dev3, 0x3050701, true); + + in = &dev4->ports[13]; + out = &host->ports[5]; + + path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video"); + KUNIT_ASSERT_NOT_NULL(test, path); + KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data)); + for (i = 0; i < ARRAY_SIZE(test_data); i++) { + const struct tb_port *in_port, *out_port; + + in_port = path->hops[i].in_port; + out_port = path->hops[i].out_port; + + KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type, + test_data[i].in_type); + KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type, + test_data[i].out_type); + } + tb_path_free(path); +} + +static void tb_test_tunnel_pcie(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2; + struct tb_tunnel *tunnel1, *tunnel2; + struct tb_port *down, *up; + + /* + * Create PCIe tunnel between host and two devices. 
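+ * A PCIe tunnel consists of two paths, one per direction, so the
+ * checks below expect npaths == 2 with a downstream and an upstream
+ * path of two hops each.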
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 5 | + * 1 | + * [Device #2] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x501, true); + + down = &host->ports[8]; + up = &dev1->ports[9]; + tunnel1 = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_NOT_NULL(test, tunnel1); + KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up); + KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2); + KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up); + KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down); + + down = &dev1->ports[10]; + up = &dev2->ports[9]; + tunnel2 = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_NOT_NULL(test, tunnel2); + KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up); + KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2); + KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up); + KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down); + + tb_tunnel_free(tunnel2); + tb_tunnel_free(tunnel1); +} + +static void tb_test_tunnel_dp(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + + /* + * Create DP tunnel between Host and Device + * + * [Host] + * 1 | + * 1 | + * [Device] + */ + host = alloc_host(test); + dev = alloc_dev_default(test, host, 0x3, true); + + in = &host->ports[5]; + out = &dev->ports[13]; + + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 3); + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in); + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dp_chain(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev4; + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + + /* + * Create DP tunnel from Host DP IN to Device #4 DP OUT. 
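+ * A DP tunnel carries three paths (Video and the two AUX paths) and
+ * here each of them is expected to be three hops long (Host,
+ * Device #1, Device #4).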
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 3 / | 5 \ 7 + * 1 / | \ 1 + * [Device #2] | [Device #4] + * | 1 + * [Device #3] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + alloc_dev_default(test, dev1, 0x301, true); + alloc_dev_default(test, dev1, 0x501, true); + dev4 = alloc_dev_default(test, dev1, 0x701, true); + + in = &host->ports[5]; + out = &dev4->ports[14]; + + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 3); + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in); + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dp_tree(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2, *dev3, *dev5; + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + + /* + * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT. + * + * [Host] + * 3 | + * 1 | + * [Device #1] + * 3 / | 5 \ 7 + * 1 / | \ 1 + * [Device #2] | [Device #4] + * | 1 + * [Device #3] + * | 5 + * | 1 + * [Device #5] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x3, true); + dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true); + dev3 = alloc_dev_default(test, dev1, 0x503, true); + alloc_dev_default(test, dev1, 0x703, true); + dev5 = alloc_dev_default(test, dev3, 0x50503, true); + + in = &dev2->ports[13]; + out = &dev5->ports[13]; + + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 3); + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in); + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dp_max_length(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6; + struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12; + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + + /* + * Creates DP tunnel from Device #6 to Device #12. 
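+ * The tunnel crosses the host router in the middle so each path is
+ * 13 hops long; the checks below look at the first, the middle
+ * (host) and the last hop of every path.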
+ * + * [Host] + * 1 / \ 3 + * 1 / \ 1 + * [Device #1] [Device #7] + * 3 | | 3 + * 1 | | 1 + * [Device #2] [Device #8] + * 3 | | 3 + * 1 | | 1 + * [Device #3] [Device #9] + * 3 | | 3 + * 1 | | 1 + * [Device #4] [Device #10] + * 3 | | 3 + * 1 | | 1 + * [Device #5] [Device #11] + * 3 | | 3 + * 1 | | 1 + * [Device #6] [Device #12] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x301, true); + dev3 = alloc_dev_default(test, dev2, 0x30301, true); + dev4 = alloc_dev_default(test, dev3, 0x3030301, true); + dev5 = alloc_dev_default(test, dev4, 0x303030301, true); + dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true); + dev7 = alloc_dev_default(test, host, 0x3, true); + dev8 = alloc_dev_default(test, dev7, 0x303, true); + dev9 = alloc_dev_default(test, dev8, 0x30303, true); + dev10 = alloc_dev_default(test, dev9, 0x3030303, true); + dev11 = alloc_dev_default(test, dev10, 0x303030303, true); + dev12 = alloc_dev_default(test, dev11, 0x30303030303, true); + + in = &dev6->ports[13]; + out = &dev12->ports[13]; + + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 3); + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13); + /* First hop */ + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in); + /* Middle */ + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port, + &host->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port, + &host->ports[3]); + /* Last */ + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port, + &host->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port, + &host->ports[3]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port, + &host->ports[3]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port, + &host->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in); + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_3dp(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5; + struct tb_port *in1, *in2, *in3, *out1, *out2, *out3; + struct tb_tunnel *tunnel1, *tunnel2, *tunnel3; + + /* + * Create 3 DP tunnels from Host to Devices #2, #5 and #4. 
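+ * The host is allocated with alloc_host_br() which, judging from the
+ * use of port 10 as the third DP IN below, provides an additional DP
+ * IN adapter on top of ports 5 and 6.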
+ * + * [Host] + * 3 | + * 1 | + * [Device #1] + * 3 / | 5 \ 7 + * 1 / | \ 1 + * [Device #2] | [Device #4] + * | 1 + * [Device #3] + * | 5 + * | 1 + * [Device #5] + */ + host = alloc_host_br(test); + dev1 = alloc_dev_default(test, host, 0x3, true); + dev2 = alloc_dev_default(test, dev1, 0x303, true); + dev3 = alloc_dev_default(test, dev1, 0x503, true); + dev4 = alloc_dev_default(test, dev1, 0x703, true); + dev5 = alloc_dev_default(test, dev3, 0x50503, true); + + in1 = &host->ports[5]; + in2 = &host->ports[6]; + in3 = &host->ports[10]; + + out1 = &dev2->ports[13]; + out2 = &dev5->ports[13]; + out3 = &dev4->ports[14]; + + tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0); + KUNIT_ASSERT_TRUE(test, tunnel1 != NULL); + KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, out1); + KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3); + KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3); + + tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0); + KUNIT_ASSERT_TRUE(test, tunnel2 != NULL); + KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, out2); + KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3); + KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4); + + tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0); + KUNIT_ASSERT_TRUE(test, tunnel3 != NULL); + KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP); + KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3); + KUNIT_EXPECT_PTR_EQ(test, tunnel3->dst_port, out3); + KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3); + KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3); + + tb_tunnel_free(tunnel2); + tb_tunnel_free(tunnel1); +} + +static void tb_test_tunnel_usb3(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2; + struct tb_tunnel *tunnel1, *tunnel2; + struct tb_port *down, *up; + + /* + * Create USB3 tunnel between host and two devices. 
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * \ 7 + * \ 1 + * [Device #2] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x701, true); + + down = &host->ports[12]; + up = &dev1->ports[16]; + tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel1); + KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up); + KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2); + KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up); + KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down); + + down = &dev1->ports[17]; + up = &dev2->ports[16]; + tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel2); + KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up); + KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2); + KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up); + KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down); + + tb_tunnel_free(tunnel2); + tb_tunnel_free(tunnel1); +} + +static void tb_test_tunnel_port_on_path(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5; + struct tb_port *in, *out, *port; + struct tb_tunnel *dp_tunnel; + + /* + * [Host] + * 3 | + * 1 | + * [Device #1] + * 3 / | 5 \ 7 + * 1 / | \ 1 + * [Device #2] | [Device #4] + * | 1 + * [Device #3] + * | 5 + * | 1 + * [Device #5] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x3, true); + dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true); + dev3 = alloc_dev_default(test, dev1, 0x503, true); + dev4 = alloc_dev_default(test, dev1, 0x703, true); + dev5 = alloc_dev_default(test, dev3, 0x50503, true); + + in = &dev2->ports[13]; + out = &dev5->ports[13]; + + dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, dp_tunnel); + + KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in)); + KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out)); + + port = &host->ports[8]; + KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &host->ports[3]; + KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev1->ports[1]; + KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev1->ports[3]; + KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev1->ports[5]; + KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev1->ports[7]; + KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev3->ports[1]; + KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev5->ports[1]; + KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev4->ports[1]; + 
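/* Device #4 is not part of the DP tunnel path */ +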
KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + tb_tunnel_free(dp_tunnel); +} + +static void tb_test_tunnel_dma(struct kunit *test) +{ + struct tb_port *nhi, *port; + struct tb_tunnel *tunnel; + struct tb_switch *host; + + /* + * Create DMA tunnel from NHI to port 1 and back. + * + * [Host 1] + * 1 ^ In HopID 1 -> Out HopID 8 + * | + * v In HopID 8 -> Out HopID 1 + * ............ Domain border + * | + * [Host 2] + */ + host = alloc_host(test); + nhi = &host->ports[7]; + port = &host->ports[1]; + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 2); + /* RX path */ + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1); + /* TX path */ + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi); + KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port); + KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8); + + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dma_rx(struct kunit *test) +{ + struct tb_port *nhi, *port; + struct tb_tunnel *tunnel; + struct tb_switch *host; + + /* + * Create DMA RX tunnel from port 1 to NHI. + * + * [Host 1] + * 1 ^ + * | + * | In HopID 15 -> Out HopID 2 + * ............ Domain border + * | + * [Host 2] + */ + host = alloc_host(test); + nhi = &host->ports[7]; + port = &host->ports[1]; + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 1); + /* RX path */ + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2); + + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dma_tx(struct kunit *test) +{ + struct tb_port *nhi, *port; + struct tb_tunnel *tunnel; + struct tb_switch *host; + + /* + * Create DMA TX tunnel from NHI to port 1. + * + * [Host 1] + * 1 | In HopID 2 -> Out HopID 15 + * | + * v + * ............ 
Domain border + * | + * [Host 2] + */ + host = alloc_host(test); + nhi = &host->ports[7]; + port = &host->ports[1]; + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 1); + /* TX path */ + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15); + + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dma_chain(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2; + struct tb_port *nhi, *port; + struct tb_tunnel *tunnel; + + /* + * Create DMA tunnel from NHI to Device #2 port 3 and back. + * + * [Host 1] + * 1 ^ In HopID 1 -> Out HopID x + * | + * 1 | In HopID x -> Out HopID 1 + * [Device #1] + * 7 \ + * 1 \ + * [Device #2] + * 3 | In HopID x -> Out HopID 8 + * | + * v In HopID 8 -> Out HopID x + * ............ Domain border + * | + * [Host 2] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x701, true); + + nhi = &host->ports[7]; + port = &dev2->ports[3]; + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 2); + /* RX path */ + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, + &dev2->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port, + &dev1->ports[7]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, + &dev1->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port, + &host->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1); + /* TX path */ + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi); + KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port, + &dev1->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, + &dev1->ports[7]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port, + &dev2->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port); + KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8); + + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dma_match(struct kunit *test) +{ + struct tb_port *nhi, *port; + struct tb_tunnel *tunnel; + struct tb_switch *host; + + host = alloc_host(test); + nhi = &host->ports[7]; + port = &host->ports[1]; + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1)); + KUNIT_ASSERT_FALSE(test, 
tb_tunnel_match_dma(tunnel, 8, 1, 15, 1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1)); + + tb_tunnel_free(tunnel); + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1)); + + tb_tunnel_free(tunnel); + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1)); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *up, *down; + struct tb_tunnel *tunnel; + struct tb_path *path; + + host = alloc_host(test); + dev = alloc_dev_default(test, host, 0x1, false); + + down = &host->ports[8]; + up = &dev->ports[9]; + tunnel = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2); + + path = tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U); + + path = tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_legacy_bonded(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *up, *down; + struct tb_tunnel *tunnel; + struct tb_path *path; + + host = alloc_host(test); + dev = alloc_dev_default(test, host, 0x1, true); + + down = &host->ports[8]; + up = &dev->ports[9]; + tunnel = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2); + + path = tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, 
path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); + + path = tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_pcie(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *up, *down; + struct tb_tunnel *tunnel; + struct tb_path *path; + + host = alloc_host_usb4(test); + dev = alloc_dev_usb4(test, host, 0x1, true); + + down = &host->ports[8]; + up = &dev->ports[9]; + tunnel = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2); + + path = tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); + + path = tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_without_dp(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *up, *down; + struct tb_tunnel *tunnel; + struct tb_path *path; + + host = alloc_host_usb4(test); + dev = alloc_dev_without_dp(test, host, 0x1, true); + + /* + * The device has no DP therefore baMinDPmain = baMinDPaux = 0 + * + * Create PCIe path with buffers less than baMaxPCIe. 
+ * + * For a device with buffers configurations: + * baMaxUSB3 = 109 + * baMinDPaux = 0 + * baMinDPmain = 0 + * baMaxPCIe = 30 + * baMaxHI = 1 + * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118 + * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3) + * = Max(6, Min(30, 9) = 9 + */ + down = &host->ports[8]; + up = &dev->ports[9]; + tunnel = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_TRUE(test, tunnel != NULL); + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2); + + /* PCIe downstream path */ + path = tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U); + + /* PCIe upstream path */ + path = tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_dp(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + struct tb_path *path; + + host = alloc_host_usb4(test); + dev = alloc_dev_usb4(test, host, 0x1, true); + + in = &host->ports[5]; + out = &dev->ports[14]; + + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3); + + /* Video (main) path */ + path = tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U); + + /* AUX TX */ + path = tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + /* AUX RX */ + path = tunnel->paths[2]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_usb3(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *up, *down; + struct tb_tunnel *tunnel; + struct tb_path *path; + + host = alloc_host_usb4(test); + dev = alloc_dev_usb4(test, host, 0x1, true); + + down = &host->ports[12]; + up = &dev->ports[16]; + tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2); + + path = tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + path = tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, 
path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_dma(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *nhi, *port; + struct tb_tunnel *tunnel; + struct tb_path *path; + + host = alloc_host_usb4(test); + dev = alloc_dev_usb4(test, host, 0x1, true); + + nhi = &host->ports[7]; + port = &dev->ports[3]; + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2); + + /* DMA RX */ + path = tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + /* DMA TX */ + path = tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_dma_multiple(struct kunit *test) +{ + struct tb_tunnel *tunnel1, *tunnel2, *tunnel3; + struct tb_switch *host, *dev; + struct tb_port *nhi, *port; + struct tb_path *path; + + host = alloc_host_usb4(test); + dev = alloc_dev_usb4(test, host, 0x1, true); + + nhi = &host->ports[7]; + port = &dev->ports[3]; + + /* + * Create three DMA tunnels through the same ports. With the + * default buffers we should be able to create two and the last + * one fails. + * + * For default host we have following buffers for DMA: + * + * 120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20 + * + * For device we have following: + * + * 120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34 + * + * spare = 14 + 1 = 15 + * + * So on host the first tunnel gets 14 and the second gets the + * remaining 1 and then we run out of buffers. 
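+ * This is what the initial_credits expectations below check: the
+ * first tunnel gets 14 buffers, the second gets 1, the third
+ * allocation fails, and it only succeeds again once the first
+ * tunnel has been freed.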
+ */ + tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1); + KUNIT_ASSERT_NOT_NULL(test, tunnel1); + KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2); + + path = tunnel1->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + path = tunnel1->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2); + KUNIT_ASSERT_NOT_NULL(test, tunnel2); + KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2); + + path = tunnel2->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + path = tunnel2->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3); + KUNIT_ASSERT_NULL(test, tunnel3); + + /* + * Release the first DMA tunnel. That should make 14 buffers + * available for the next tunnel. + */ + tb_tunnel_free(tunnel1); + + tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3); + KUNIT_ASSERT_NOT_NULL(test, tunnel3); + + path = tunnel3->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + path = tunnel3->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + tb_tunnel_free(tunnel3); + tb_tunnel_free(tunnel2); +} + +static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test, + struct tb_switch *host, struct tb_switch *dev) +{ + struct tb_port *up, *down; + struct tb_tunnel *pcie_tunnel; + struct tb_path *path; + + down = &host->ports[8]; + up = &dev->ports[9]; + pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel); + KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2); + + path = pcie_tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); + + path = pcie_tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 
0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U); + + return pcie_tunnel; +} + +static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test, + struct tb_switch *host, struct tb_switch *dev) +{ + struct tb_port *in, *out; + struct tb_tunnel *dp_tunnel1; + struct tb_path *path; + + in = &host->ports[5]; + out = &dev->ports[13]; + dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1); + KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3); + + path = dp_tunnel1->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U); + + path = dp_tunnel1->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + path = dp_tunnel1->paths[2]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + return dp_tunnel1; +} + +static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test, + struct tb_switch *host, struct tb_switch *dev) +{ + struct tb_port *in, *out; + struct tb_tunnel *dp_tunnel2; + struct tb_path *path; + + in = &host->ports[6]; + out = &dev->ports[14]; + dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2); + KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3); + + path = dp_tunnel2->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U); + + path = dp_tunnel2->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + path = dp_tunnel2->paths[2]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + return dp_tunnel2; +} + +static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test, + struct tb_switch *host, struct tb_switch *dev) +{ + struct tb_port *up, *down; + struct tb_tunnel *usb3_tunnel; + struct tb_path *path; + + down = &host->ports[12]; + up = &dev->ports[16]; + usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel); + KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2); + + path = usb3_tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, 
path->hops[1].initial_credits, 14U); + + path = usb3_tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); + + return usb3_tunnel; +} + +static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test, + struct tb_switch *host, struct tb_switch *dev) +{ + struct tb_port *nhi, *port; + struct tb_tunnel *dma_tunnel1; + struct tb_path *path; + + nhi = &host->ports[7]; + port = &dev->ports[3]; + dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1); + KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1); + KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2); + + path = dma_tunnel1->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + path = dma_tunnel1->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + return dma_tunnel1; +} + +static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test, + struct tb_switch *host, struct tb_switch *dev) +{ + struct tb_port *nhi, *port; + struct tb_tunnel *dma_tunnel2; + struct tb_path *path; + + nhi = &host->ports[7]; + port = &dev->ports[3]; + dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2); + KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2); + KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2); + + path = dma_tunnel2->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + path = dma_tunnel2->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + return dma_tunnel2; +} + +static void tb_test_credit_alloc_all(struct kunit *test) +{ + struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel; + struct tb_tunnel *dma_tunnel1, *dma_tunnel2; + struct tb_switch *host, *dev; + + /* + * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to + * device. Expectation is that all these can be established with + * the default credit allocation found in Intel hardware. 
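+ * Each TB_TEST_*_TUNNEL() helper called below allocates one tunnel
+ * and verifies its per-hop credits; the tunnels are then released
+ * in reverse order of creation.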
+ */ + + host = alloc_host_usb4(test); + dev = alloc_dev_usb4(test, host, 0x1, true); + + pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev); + dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev); + dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev); + usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev); + dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev); + dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev); + + tb_tunnel_free(dma_tunnel2); + tb_tunnel_free(dma_tunnel1); + tb_tunnel_free(usb3_tunnel); + tb_tunnel_free(dp_tunnel2); + tb_tunnel_free(dp_tunnel1); + tb_tunnel_free(pcie_tunnel); +} + +static const u32 root_directory[] = { + 0x55584401, /* "UXD" v1 */ + 0x00000018, /* Root directory length */ + 0x76656e64, /* "vend" */ + 0x6f726964, /* "orid" */ + 0x76000001, /* "v" R 1 */ + 0x00000a27, /* Immediate value, ! Vendor ID */ + 0x76656e64, /* "vend" */ + 0x6f726964, /* "orid" */ + 0x74000003, /* "t" R 3 */ + 0x0000001a, /* Text leaf offset, (“Apple Inc.”) */ + 0x64657669, /* "devi" */ + 0x63656964, /* "ceid" */ + 0x76000001, /* "v" R 1 */ + 0x0000000a, /* Immediate value, ! Device ID */ + 0x64657669, /* "devi" */ + 0x63656964, /* "ceid" */ + 0x74000003, /* "t" R 3 */ + 0x0000001d, /* Text leaf offset, (“Macintosh”) */ + 0x64657669, /* "devi" */ + 0x63657276, /* "cerv" */ + 0x76000001, /* "v" R 1 */ + 0x80000100, /* Immediate value, Device Revision */ + 0x6e657477, /* "netw" */ + 0x6f726b00, /* "ork" */ + 0x44000014, /* "D" R 20 */ + 0x00000021, /* Directory data offset, (Network Directory) */ + 0x4170706c, /* "Appl" */ + 0x6520496e, /* "e In" */ + 0x632e0000, /* "c." ! */ + 0x4d616369, /* "Maci" */ + 0x6e746f73, /* "ntos" */ + 0x68000000, /* "h" */ + 0x00000000, /* padding */ + 0xca8961c6, /* Directory UUID, Network Directory */ + 0x9541ce1c, /* Directory UUID, Network Directory */ + 0x5949b8bd, /* Directory UUID, Network Directory */ + 0x4f5a5f2e, /* Directory UUID, Network Directory */ + 0x70727463, /* "prtc" */ + 0x69640000, /* "id" */ + 0x76000001, /* "v" R 1 */ + 0x00000001, /* Immediate value, Network Protocol ID */ + 0x70727463, /* "prtc" */ + 0x76657273, /* "vers" */ + 0x76000001, /* "v" R 1 */ + 0x00000001, /* Immediate value, Network Protocol Version */ + 0x70727463, /* "prtc" */ + 0x72657673, /* "revs" */ + 0x76000001, /* "v" R 1 */ + 0x00000001, /* Immediate value, Network Protocol Revision */ + 0x70727463, /* "prtc" */ + 0x73746e73, /* "stns" */ + 0x76000001, /* "v" R 1 */ + 0x00000000, /* Immediate value, Network Protocol Settings */ +}; + +static const uuid_t network_dir_uuid = + UUID_INIT(0xc66189ca, 0x1cce, 0x4195, + 0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f); + +static void tb_test_property_parse(struct kunit *test) +{ + struct tb_property_dir *dir, *network_dir; + struct tb_property *p; + + dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory)); + KUNIT_ASSERT_NOT_NULL(test, dir); + + p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT); + KUNIT_ASSERT_NULL(test, p); + + p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc."); + + p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27); + + p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh"); + + p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE); + KUNIT_ASSERT_NOT_NULL(test, p); + 
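+	/* Expected immediate matches the "deviceid" value entry (0x0000000a) in root_directory above */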
KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa); + + p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY); + KUNIT_ASSERT_NULL(test, p); + + p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY); + KUNIT_ASSERT_NOT_NULL(test, p); + + network_dir = p->value.dir; + KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid)); + + p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1); + + p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1); + + p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1); + + p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0); + + p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE); + KUNIT_EXPECT_TRUE(test, !p); + p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT); + KUNIT_EXPECT_TRUE(test, !p); + + tb_property_free_dir(dir); +} + +static void tb_test_property_format(struct kunit *test) +{ + struct tb_property_dir *dir; + ssize_t block_len; + u32 *block; + int ret, i; + + dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory)); + KUNIT_ASSERT_NOT_NULL(test, dir); + + ret = tb_property_format_dir(dir, NULL, 0); + KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory)); + + block_len = ret; + + block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, block); + + ret = tb_property_format_dir(dir, block, block_len); + KUNIT_EXPECT_EQ(test, ret, 0); + + for (i = 0; i < ARRAY_SIZE(root_directory); i++) + KUNIT_EXPECT_EQ(test, root_directory[i], block[i]); + + tb_property_free_dir(dir); +} + +static void compare_dirs(struct kunit *test, struct tb_property_dir *d1, + struct tb_property_dir *d2) +{ + struct tb_property *p1, *p2, *tmp; + int n1, n2, i; + + if (d1->uuid) { + KUNIT_ASSERT_NOT_NULL(test, d2->uuid); + KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid)); + } else { + KUNIT_ASSERT_NULL(test, d2->uuid); + } + + n1 = 0; + tb_property_for_each(d1, tmp) + n1++; + KUNIT_ASSERT_NE(test, n1, 0); + + n2 = 0; + tb_property_for_each(d2, tmp) + n2++; + KUNIT_ASSERT_NE(test, n2, 0); + + KUNIT_ASSERT_EQ(test, n1, n2); + + p1 = NULL; + p2 = NULL; + for (i = 0; i < n1; i++) { + p1 = tb_property_get_next(d1, p1); + KUNIT_ASSERT_NOT_NULL(test, p1); + p2 = tb_property_get_next(d2, p2); + KUNIT_ASSERT_NOT_NULL(test, p2); + + KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]); + KUNIT_ASSERT_EQ(test, p1->type, p2->type); + KUNIT_ASSERT_EQ(test, p1->length, p2->length); + + switch (p1->type) { + case TB_PROPERTY_TYPE_DIRECTORY: + KUNIT_ASSERT_NOT_NULL(test, p1->value.dir); + KUNIT_ASSERT_NOT_NULL(test, p2->value.dir); + compare_dirs(test, p1->value.dir, p2->value.dir); + break; + + case TB_PROPERTY_TYPE_DATA: + KUNIT_ASSERT_NOT_NULL(test, p1->value.data); + KUNIT_ASSERT_NOT_NULL(test, p2->value.data); + KUNIT_ASSERT_TRUE(test, + !memcmp(p1->value.data, p2->value.data, + p1->length * 4) + ); + break; + + case TB_PROPERTY_TYPE_TEXT: + KUNIT_ASSERT_NOT_NULL(test, p1->value.text); + KUNIT_ASSERT_NOT_NULL(test, p2->value.text); + KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text); + break; + + case TB_PROPERTY_TYPE_VALUE: + KUNIT_ASSERT_EQ(test, 
p1->value.immediate, + p2->value.immediate); + break; + default: + KUNIT_FAIL(test, "unexpected property type"); + break; + } + } +} + +static void tb_test_property_copy(struct kunit *test) +{ + struct tb_property_dir *src, *dst; + u32 *block; + int ret, i; + + src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory)); + KUNIT_ASSERT_NOT_NULL(test, src); + + dst = tb_property_copy_dir(src); + KUNIT_ASSERT_NOT_NULL(test, dst); + + /* Compare the structures */ + compare_dirs(test, src, dst); + + /* Compare the resulting property block */ + ret = tb_property_format_dir(dst, NULL, 0); + KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory)); + + block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, block); + + ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory)); + KUNIT_EXPECT_TRUE(test, !ret); + + for (i = 0; i < ARRAY_SIZE(root_directory); i++) + KUNIT_EXPECT_EQ(test, root_directory[i], block[i]); + + tb_property_free_dir(dst); + tb_property_free_dir(src); +} + +static struct kunit_case tb_test_cases[] = { + KUNIT_CASE(tb_test_path_basic), + KUNIT_CASE(tb_test_path_not_connected_walk), + KUNIT_CASE(tb_test_path_single_hop_walk), + KUNIT_CASE(tb_test_path_daisy_chain_walk), + KUNIT_CASE(tb_test_path_simple_tree_walk), + KUNIT_CASE(tb_test_path_complex_tree_walk), + KUNIT_CASE(tb_test_path_max_length_walk), + KUNIT_CASE(tb_test_path_not_connected), + KUNIT_CASE(tb_test_path_not_bonded_lane0), + KUNIT_CASE(tb_test_path_not_bonded_lane1), + KUNIT_CASE(tb_test_path_not_bonded_lane1_chain), + KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse), + KUNIT_CASE(tb_test_path_mixed_chain), + KUNIT_CASE(tb_test_path_mixed_chain_reverse), + KUNIT_CASE(tb_test_tunnel_pcie), + KUNIT_CASE(tb_test_tunnel_dp), + KUNIT_CASE(tb_test_tunnel_dp_chain), + KUNIT_CASE(tb_test_tunnel_dp_tree), + KUNIT_CASE(tb_test_tunnel_dp_max_length), + KUNIT_CASE(tb_test_tunnel_3dp), + KUNIT_CASE(tb_test_tunnel_port_on_path), + KUNIT_CASE(tb_test_tunnel_usb3), + KUNIT_CASE(tb_test_tunnel_dma), + KUNIT_CASE(tb_test_tunnel_dma_rx), + KUNIT_CASE(tb_test_tunnel_dma_tx), + KUNIT_CASE(tb_test_tunnel_dma_chain), + KUNIT_CASE(tb_test_tunnel_dma_match), + KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded), + KUNIT_CASE(tb_test_credit_alloc_legacy_bonded), + KUNIT_CASE(tb_test_credit_alloc_pcie), + KUNIT_CASE(tb_test_credit_alloc_without_dp), + KUNIT_CASE(tb_test_credit_alloc_dp), + KUNIT_CASE(tb_test_credit_alloc_usb3), + KUNIT_CASE(tb_test_credit_alloc_dma), + KUNIT_CASE(tb_test_credit_alloc_dma_multiple), + KUNIT_CASE(tb_test_credit_alloc_all), + KUNIT_CASE(tb_test_property_parse), + KUNIT_CASE(tb_test_property_format), + KUNIT_CASE(tb_test_property_copy), + { } +}; + +static struct kunit_suite tb_test_suite = { + .name = "thunderbolt", + .test_cases = tb_test_cases, +}; + +kunit_test_suite(tb_test_suite); diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c new file mode 100644 index 0000000000..11f2aec2a5 --- /dev/null +++ b/drivers/thunderbolt/tmu.c @@ -0,0 +1,1062 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt Time Management Unit (TMU) support + * + * Copyright (C) 2019, Intel Corporation + * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> + * Rajmohan Mani <rajmohan.mani@intel.com> + */ + +#include <linux/delay.h> + +#include "tb.h" + +static const unsigned int tmu_rates[] = { + [TB_SWITCH_TMU_MODE_OFF] = 0, + [TB_SWITCH_TMU_MODE_LOWRES] = 1000, + [TB_SWITCH_TMU_MODE_HIFI_UNI] = 16, + [TB_SWITCH_TMU_MODE_HIFI_BI] = 16, + 
[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16, +}; + +static const struct { + unsigned int freq_meas_window; + unsigned int avg_const; + unsigned int delta_avg_const; + unsigned int repl_timeout; + unsigned int repl_threshold; + unsigned int repl_n; + unsigned int dirswitch_n; +} tmu_params[] = { + [TB_SWITCH_TMU_MODE_OFF] = { }, + [TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, }, + [TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, }, + [TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, }, + [TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = { + 800, 4, 0, 3125, 25, 128, 255, + }, +}; + +static const char *tmu_mode_name(enum tb_switch_tmu_mode mode) +{ + switch (mode) { + case TB_SWITCH_TMU_MODE_OFF: + return "off"; + case TB_SWITCH_TMU_MODE_LOWRES: + return "uni-directional, LowRes"; + case TB_SWITCH_TMU_MODE_HIFI_UNI: + return "uni-directional, HiFi"; + case TB_SWITCH_TMU_MODE_HIFI_BI: + return "bi-directional, HiFi"; + case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: + return "enhanced uni-directional, MedRes"; + default: + return "unknown"; + } +} + +static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw) +{ + return usb4_switch_version(sw) > 1; +} + +static int tb_switch_set_tmu_mode_params(struct tb_switch *sw, + enum tb_switch_tmu_mode mode) +{ + u32 freq, avg, val; + int ret; + + freq = tmu_params[mode].freq_meas_window; + avg = tmu_params[mode].avg_const; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_0, 1); + if (ret) + return ret; + + val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK; + val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq); + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_0, 1); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_15, 1); + if (ret) + return ret; + + val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK & + ~TMU_RTR_CS_15_DELAY_AVG_MASK & + ~TMU_RTR_CS_15_OFFSET_AVG_MASK & + ~TMU_RTR_CS_15_ERROR_AVG_MASK; + val |= FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) | + FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) | + FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) | + FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg); + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_15, 1); + if (ret) + return ret; + + if (tb_switch_tmu_enhanced_is_supported(sw)) { + u32 delta_avg = tmu_params[mode].delta_avg_const; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_18, 1); + if (ret) + return ret; + + val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK; + val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg); + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_18, 1); + } + + return ret; +} + +static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw) +{ + int ret; + u32 val; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_0, 1); + if (ret) + return false; + + return !!(val & TMU_RTR_CS_0_UCAP); +} + +static int tb_switch_tmu_rate_read(struct tb_switch *sw) +{ + int ret; + u32 val; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_3, 1); + if (ret) + return ret; + + val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT; + return val; +} + +static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate) +{ + int ret; + u32 val; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_3, 1); + if (ret) + return ret; + + val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK; + val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT; + + return tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + 
TMU_RTR_CS_3, 1); +} + +static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask, + u32 value) +{ + u32 data; + int ret; + + ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1); + if (ret) + return ret; + + data &= ~mask; + data |= value; + + return tb_port_write(port, &data, TB_CFG_PORT, + port->cap_tmu + offset, 1); +} + +static int tb_port_tmu_set_unidirectional(struct tb_port *port, + bool unidirectional) +{ + u32 val; + + if (!port->sw->tmu.has_ucap) + return 0; + + val = unidirectional ? TMU_ADP_CS_3_UDM : 0; + return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val); +} + +static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port) +{ + return tb_port_tmu_set_unidirectional(port, false); +} + +static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port) +{ + return tb_port_tmu_set_unidirectional(port, true); +} + +static bool tb_port_tmu_is_unidirectional(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_tmu + TMU_ADP_CS_3, 1); + if (ret) + return false; + + return val & TMU_ADP_CS_3_UDM; +} + +static bool tb_port_tmu_is_enhanced(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_tmu + TMU_ADP_CS_8, 1); + if (ret) + return false; + + return val & TMU_ADP_CS_8_EUDM; +} + +/* Can be called to non-v2 lane adapters too */ +static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable) +{ + int ret; + u32 val; + + if (!tb_switch_tmu_enhanced_is_supported(port->sw)) + return 0; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_tmu + TMU_ADP_CS_8, 1); + if (ret) + return ret; + + if (enable) + val |= TMU_ADP_CS_8_EUDM; + else + val &= ~TMU_ADP_CS_8_EUDM; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_tmu + TMU_ADP_CS_8, 1); +} + +static int tb_port_set_tmu_mode_params(struct tb_port *port, + enum tb_switch_tmu_mode mode) +{ + u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val; + int ret; + + repl_timeout = tmu_params[mode].repl_timeout; + repl_threshold = tmu_params[mode].repl_threshold; + repl_n = tmu_params[mode].repl_n; + dirswitch_n = tmu_params[mode].dirswitch_n; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_tmu + TMU_ADP_CS_8, 1); + if (ret) + return ret; + + val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK; + val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK; + val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout); + val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold); + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_tmu + TMU_ADP_CS_8, 1); + if (ret) + return ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_tmu + TMU_ADP_CS_9, 1); + if (ret) + return ret; + + val &= ~TMU_ADP_CS_9_REPL_N_MASK; + val &= ~TMU_ADP_CS_9_DIRSWITCH_N_MASK; + val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n); + val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n); + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_tmu + TMU_ADP_CS_9, 1); +} + +/* Can be called to non-v2 lane adapters too */ +static int tb_port_tmu_rate_write(struct tb_port *port, int rate) +{ + int ret; + u32 val; + + if (!tb_switch_tmu_enhanced_is_supported(port->sw)) + return 0; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_tmu + TMU_ADP_CS_9, 1); + if (ret) + return ret; + + val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK; + val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate); + + return tb_port_write(port, &val, TB_CFG_PORT, 
+ port->cap_tmu + TMU_ADP_CS_9, 1); +} + +static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync) +{ + u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0; + + return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val); +} + +static int tb_port_tmu_time_sync_disable(struct tb_port *port) +{ + return tb_port_tmu_time_sync(port, true); +} + +static int tb_port_tmu_time_sync_enable(struct tb_port *port) +{ + return tb_port_tmu_time_sync(port, false); +} + +static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set) +{ + u32 val, offset, bit; + int ret; + + if (tb_switch_is_usb4(sw)) { + offset = sw->tmu.cap + TMU_RTR_CS_0; + bit = TMU_RTR_CS_0_TD; + } else { + offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26; + bit = TB_TIME_VSEC_3_CS_26_TD; + } + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + if (set) + val |= bit; + else + val &= ~bit; + + return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1); +} + +static int tmu_mode_init(struct tb_switch *sw) +{ + bool enhanced, ucap; + int ret, rate; + + ucap = tb_switch_tmu_ucap_is_supported(sw); + if (ucap) + tb_sw_dbg(sw, "TMU: supports uni-directional mode\n"); + enhanced = tb_switch_tmu_enhanced_is_supported(sw); + if (enhanced) + tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n"); + + ret = tb_switch_tmu_rate_read(sw); + if (ret < 0) + return ret; + rate = ret; + + /* Off by default */ + sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF; + + if (tb_route(sw)) { + struct tb_port *up = tb_upstream_port(sw); + + if (enhanced && tb_port_tmu_is_enhanced(up)) { + sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI; + } else if (ucap && tb_port_tmu_is_unidirectional(up)) { + if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate) + sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES; + else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate) + sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI; + } else if (rate) { + sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI; + } + } else if (rate) { + sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI; + } + + /* Update the initial request to match the current mode */ + sw->tmu.mode_request = sw->tmu.mode; + sw->tmu.has_ucap = ucap; + + return 0; +} + +/** + * tb_switch_tmu_init() - Initialize switch TMU structures + * @sw: Switch to initialized + * + * This function must be called before other TMU related functions to + * makes the internal structures are filled in correctly. Does not + * change any hardware configuration. + */ +int tb_switch_tmu_init(struct tb_switch *sw) +{ + struct tb_port *port; + int ret; + + if (tb_switch_is_icm(sw)) + return 0; + + ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU); + if (ret > 0) + sw->tmu.cap = ret; + + tb_switch_for_each_port(sw, port) { + int cap; + + cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1); + if (cap > 0) + port->cap_tmu = cap; + } + + ret = tmu_mode_init(sw); + if (ret) + return ret; + + tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode)); + return 0; +} + +/** + * tb_switch_tmu_post_time() - Update switch local time + * @sw: Switch whose time to update + * + * Updates switch local time using time posting procedure. 
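+ *
+ * The grandmaster time is read from the root router, written to the Post
+ * Local Time registers of @sw and then the posting is triggered through
+ * the Post Time registers.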
+ */ +int tb_switch_tmu_post_time(struct tb_switch *sw) +{ + unsigned int post_time_high_offset, post_time_high = 0; + unsigned int post_local_time_offset, post_time_offset; + struct tb_switch *root_switch = sw->tb->root_switch; + u64 hi, mid, lo, local_time, post_time; + int i, ret, retries = 100; + u32 gm_local_time[3]; + + if (!tb_route(sw)) + return 0; + + if (!tb_switch_is_usb4(sw)) + return 0; + + /* Need to be able to read the grand master time */ + if (!root_switch->tmu.cap) + return 0; + + ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH, + root_switch->tmu.cap + TMU_RTR_CS_1, + ARRAY_SIZE(gm_local_time)); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(gm_local_time); i++) + tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i, + gm_local_time[i]); + + /* Convert to nanoseconds (drop fractional part) */ + hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK; + mid = gm_local_time[1]; + lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >> + TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT; + local_time = hi << 48 | mid << 16 | lo; + + /* Tell the switch that time sync is disrupted for a while */ + ret = tb_switch_tmu_set_time_disruption(sw, true); + if (ret) + return ret; + + post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22; + post_time_offset = sw->tmu.cap + TMU_RTR_CS_24; + post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25; + + /* + * Write the Grandmaster time to the Post Local Time registers + * of the new switch. + */ + ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH, + post_local_time_offset, 2); + if (ret) + goto out; + + /* + * Have the new switch update its local time by: + * 1) writing 0x1 to the Post Time Low register and 0xffffffff to + * Post Time High register. + * 2) write 0 to Post Time High register and then wait for + * the completion of the post_time register becomes 0. + * This means the time has been converged properly. + */ + post_time = 0xffffffff00000001ULL; + + ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2); + if (ret) + goto out; + + ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH, + post_time_high_offset, 1); + if (ret) + goto out; + + do { + usleep_range(5, 10); + ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH, + post_time_offset, 2); + if (ret) + goto out; + } while (--retries && post_time); + + if (!retries) { + ret = -ETIMEDOUT; + goto out; + } + + tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time); + +out: + tb_switch_tmu_set_time_disruption(sw, false); + return ret; +} + +static int disable_enhanced(struct tb_port *up, struct tb_port *down) +{ + int ret; + + /* + * Router may already been disconnected so ignore errors on the + * upstream port. + */ + tb_port_tmu_rate_write(up, 0); + tb_port_tmu_enhanced_enable(up, false); + + ret = tb_port_tmu_rate_write(down, 0); + if (ret) + return ret; + return tb_port_tmu_enhanced_enable(down, false); +} + +/** + * tb_switch_tmu_disable() - Disable TMU of a switch + * @sw: Switch whose TMU to disable + * + * Turns off TMU of @sw if it is enabled. If not enabled does nothing. + */ +int tb_switch_tmu_disable(struct tb_switch *sw) +{ + /* Already disabled? */ + if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) + return 0; + + if (tb_route(sw)) { + struct tb_port *down, *up; + int ret; + + down = tb_switch_downstream_port(sw); + up = tb_upstream_port(sw); + /* + * In case of uni-directional time sync, TMU handshake is + * initiated by upstream router. In case of bi-directional + * time sync, TMU handshake is initiated by downstream router. 
+ * We change downstream router's rate to off for both uni/bidir + * cases although it is needed only for the bi-directional mode. + * We avoid changing upstream router's mode since it might + * have another downstream router plugged, that is set to + * uni-directional mode and we don't want to change it's TMU + * mode. + */ + ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]); + if (ret) + return ret; + + tb_port_tmu_time_sync_disable(up); + ret = tb_port_tmu_time_sync_disable(down); + if (ret) + return ret; + + switch (sw->tmu.mode) { + case TB_SWITCH_TMU_MODE_LOWRES: + case TB_SWITCH_TMU_MODE_HIFI_UNI: + /* The switch may be unplugged so ignore any errors */ + tb_port_tmu_unidirectional_disable(up); + ret = tb_port_tmu_unidirectional_disable(down); + if (ret) + return ret; + break; + + case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: + ret = disable_enhanced(up, down); + if (ret) + return ret; + break; + + default: + break; + } + } else { + tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]); + } + + sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF; + + tb_sw_dbg(sw, "TMU: disabled\n"); + return 0; +} + +/* Called only when there is failure enabling requested mode */ +static void tb_switch_tmu_off(struct tb_switch *sw) +{ + unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF]; + struct tb_port *down, *up; + + down = tb_switch_downstream_port(sw); + up = tb_upstream_port(sw); + /* + * In case of any failure in one of the steps when setting + * bi-directional or uni-directional TMU mode, get back to the TMU + * configurations in off mode. In case of additional failures in + * the functions below, ignore them since the caller shall already + * report a failure. + */ + tb_port_tmu_time_sync_disable(down); + tb_port_tmu_time_sync_disable(up); + + switch (sw->tmu.mode_request) { + case TB_SWITCH_TMU_MODE_LOWRES: + case TB_SWITCH_TMU_MODE_HIFI_UNI: + tb_switch_tmu_rate_write(tb_switch_parent(sw), rate); + break; + case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: + disable_enhanced(up, down); + break; + default: + break; + } + + /* Always set the rate to 0 */ + tb_switch_tmu_rate_write(sw, rate); + + tb_switch_set_tmu_mode_params(sw, sw->tmu.mode); + tb_port_tmu_unidirectional_disable(down); + tb_port_tmu_unidirectional_disable(up); +} + +/* + * This function is called when the previous TMU mode was + * TB_SWITCH_TMU_MODE_OFF. 
+ */ +static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw) +{ + struct tb_port *up, *down; + int ret; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + + ret = tb_port_tmu_unidirectional_disable(up); + if (ret) + return ret; + + ret = tb_port_tmu_unidirectional_disable(down); + if (ret) + goto out; + + ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]); + if (ret) + goto out; + + ret = tb_port_tmu_time_sync_enable(up); + if (ret) + goto out; + + ret = tb_port_tmu_time_sync_enable(down); + if (ret) + goto out; + + return 0; + +out: + tb_switch_tmu_off(sw); + return ret; +} + +/* Only needed for Titan Ridge */ +static int tb_switch_tmu_disable_objections(struct tb_switch *sw) +{ + struct tb_port *up = tb_upstream_port(sw); + u32 val; + int ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1); + if (ret) + return ret; + + val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK; + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1); + if (ret) + return ret; + + return tb_port_tmu_write(up, TMU_ADP_CS_6, + TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK, + TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 | + TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2); +} + +/* + * This function is called when the previous TMU mode was + * TB_SWITCH_TMU_MODE_OFF. + */ +static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw) +{ + struct tb_port *up, *down; + int ret; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), + tmu_rates[sw->tmu.mode_request]); + if (ret) + return ret; + + ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request); + if (ret) + return ret; + + ret = tb_port_tmu_unidirectional_enable(up); + if (ret) + goto out; + + ret = tb_port_tmu_time_sync_enable(up); + if (ret) + goto out; + + ret = tb_port_tmu_unidirectional_enable(down); + if (ret) + goto out; + + ret = tb_port_tmu_time_sync_enable(down); + if (ret) + goto out; + + return 0; + +out: + tb_switch_tmu_off(sw); + return ret; +} + +/* + * This function is called when the previous TMU mode was + * TB_SWITCH_TMU_RATE_OFF. + */ +static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw) +{ + unsigned int rate = tmu_rates[sw->tmu.mode_request]; + struct tb_port *up, *down; + int ret; + + /* Router specific parameters first */ + ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request); + if (ret) + return ret; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + + ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request); + if (ret) + goto out; + + ret = tb_port_tmu_rate_write(up, rate); + if (ret) + goto out; + + ret = tb_port_tmu_enhanced_enable(up, true); + if (ret) + goto out; + + ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request); + if (ret) + goto out; + + ret = tb_port_tmu_rate_write(down, rate); + if (ret) + goto out; + + ret = tb_port_tmu_enhanced_enable(down, true); + if (ret) + goto out; + + return 0; + +out: + tb_switch_tmu_off(sw); + return ret; +} + +static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw) +{ + unsigned int rate = tmu_rates[sw->tmu.mode]; + struct tb_port *down, *up; + + down = tb_switch_downstream_port(sw); + up = tb_upstream_port(sw); + /* + * In case of any failure in one of the steps when change mode, + * get back to the TMU configurations in previous mode. + * In case of additional failures in the functions below, + * ignore them since the caller shall already report a failure. 
+ */ + switch (sw->tmu.mode) { + case TB_SWITCH_TMU_MODE_LOWRES: + case TB_SWITCH_TMU_MODE_HIFI_UNI: + tb_port_tmu_set_unidirectional(down, true); + tb_switch_tmu_rate_write(tb_switch_parent(sw), rate); + break; + + case TB_SWITCH_TMU_MODE_HIFI_BI: + tb_port_tmu_set_unidirectional(down, false); + tb_switch_tmu_rate_write(sw, rate); + break; + + default: + break; + } + + tb_switch_set_tmu_mode_params(sw, sw->tmu.mode); + + switch (sw->tmu.mode) { + case TB_SWITCH_TMU_MODE_LOWRES: + case TB_SWITCH_TMU_MODE_HIFI_UNI: + tb_port_tmu_set_unidirectional(up, true); + break; + + case TB_SWITCH_TMU_MODE_HIFI_BI: + tb_port_tmu_set_unidirectional(up, false); + break; + + default: + break; + } +} + +static int tb_switch_tmu_change_mode(struct tb_switch *sw) +{ + unsigned int rate = tmu_rates[sw->tmu.mode_request]; + struct tb_port *up, *down; + int ret; + + up = tb_upstream_port(sw); + down = tb_switch_downstream_port(sw); + + /* Program the upstream router downstream facing lane adapter */ + switch (sw->tmu.mode_request) { + case TB_SWITCH_TMU_MODE_LOWRES: + case TB_SWITCH_TMU_MODE_HIFI_UNI: + ret = tb_port_tmu_set_unidirectional(down, true); + if (ret) + goto out; + ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate); + if (ret) + goto out; + break; + + case TB_SWITCH_TMU_MODE_HIFI_BI: + ret = tb_port_tmu_set_unidirectional(down, false); + if (ret) + goto out; + ret = tb_switch_tmu_rate_write(sw, rate); + if (ret) + goto out; + break; + + default: + /* Not allowed to change modes from other than above */ + return -EINVAL; + } + + ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request); + if (ret) + return ret; + + /* Program the new mode and the downstream router lane adapter */ + switch (sw->tmu.mode_request) { + case TB_SWITCH_TMU_MODE_LOWRES: + case TB_SWITCH_TMU_MODE_HIFI_UNI: + ret = tb_port_tmu_set_unidirectional(up, true); + if (ret) + goto out; + break; + + case TB_SWITCH_TMU_MODE_HIFI_BI: + ret = tb_port_tmu_set_unidirectional(up, false); + if (ret) + goto out; + break; + + default: + /* Not allowed to change modes from other than above */ + return -EINVAL; + } + + ret = tb_port_tmu_time_sync_enable(down); + if (ret) + goto out; + + ret = tb_port_tmu_time_sync_enable(up); + if (ret) + goto out; + + return 0; + +out: + tb_switch_tmu_change_mode_prev(sw); + return ret; +} + +/** + * tb_switch_tmu_enable() - Enable TMU on a router + * @sw: Router whose TMU to enable + * + * Enables TMU of a router to be in uni-directional Normal/HiFi or + * bi-directional HiFi mode. Calling tb_switch_tmu_configure() is + * required before calling this function. + */ +int tb_switch_tmu_enable(struct tb_switch *sw) +{ + int ret; + + if (tb_switch_tmu_is_enabled(sw)) + return 0; + + if (tb_switch_is_titan_ridge(sw) && + (sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES || + sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) { + ret = tb_switch_tmu_disable_objections(sw); + if (ret) + return ret; + } + + ret = tb_switch_tmu_set_time_disruption(sw, true); + if (ret) + return ret; + + if (tb_route(sw)) { + /* + * The used mode changes are from OFF to + * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to + * HiFi-Uni. 
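+	 * Enhanced uni-directional mode (MedRes) is likewise only entered
+	 * from OFF; changes between the other enabled modes go through
+	 * tb_switch_tmu_change_mode().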
+ */ + if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) { + switch (sw->tmu.mode_request) { + case TB_SWITCH_TMU_MODE_LOWRES: + case TB_SWITCH_TMU_MODE_HIFI_UNI: + ret = tb_switch_tmu_enable_unidirectional(sw); + break; + + case TB_SWITCH_TMU_MODE_HIFI_BI: + ret = tb_switch_tmu_enable_bidirectional(sw); + break; + case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: + ret = tb_switch_tmu_enable_enhanced(sw); + break; + default: + ret = -EINVAL; + break; + } + } else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES || + sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI || + sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) { + ret = tb_switch_tmu_change_mode(sw); + } else { + ret = -EINVAL; + } + } else { + /* + * Host router port configurations are written as + * part of configurations for downstream port of the parent + * of the child node - see above. + * Here only the host router' rate configuration is written. + */ + ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]); + } + + if (ret) { + tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n", + tmu_mode_name(sw->tmu.mode_request), ret); + } else { + sw->tmu.mode = sw->tmu.mode_request; + tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode)); + } + + return tb_switch_tmu_set_time_disruption(sw, false); +} + +/** + * tb_switch_tmu_configure() - Configure the TMU mode + * @sw: Router whose mode to change + * @mode: Mode to configure + * + * Selects the TMU mode that is enabled when tb_switch_tmu_enable() is + * next called. + * + * Returns %0 in success and negative errno otherwise. Specifically + * returns %-EOPNOTSUPP if the requested mode is not possible (not + * supported by the router and/or topology). + */ +int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode) +{ + switch (mode) { + case TB_SWITCH_TMU_MODE_OFF: + break; + + case TB_SWITCH_TMU_MODE_LOWRES: + case TB_SWITCH_TMU_MODE_HIFI_UNI: + if (!sw->tmu.has_ucap) + return -EOPNOTSUPP; + break; + + case TB_SWITCH_TMU_MODE_HIFI_BI: + break; + + case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: { + const struct tb_switch *parent_sw = tb_switch_parent(sw); + + if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw)) + return -EOPNOTSUPP; + if (!tb_switch_tmu_enhanced_is_supported(sw)) + return -EOPNOTSUPP; + + break; + } + + default: + tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode); + return -EINVAL; + } + + if (sw->tmu.mode_request != mode) { + tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n", + tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode)); + sw->tmu.mode_request = mode; + } + + return 0; +} diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c new file mode 100644 index 0000000000..a6810fb368 --- /dev/null +++ b/drivers/thunderbolt/tunnel.c @@ -0,0 +1,2389 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt driver - Tunneling support + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2019, Intel Corporation + */ + +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/ktime.h> +#include <linux/string_helpers.h> + +#include "tunnel.h" +#include "tb.h" + +/* PCIe adapters use always HopID of 8 for both directions */ +#define TB_PCI_HOPID 8 + +#define TB_PCI_PATH_DOWN 0 +#define TB_PCI_PATH_UP 1 + +/* USB3 adapters use always HopID of 8 for both directions */ +#define TB_USB3_HOPID 8 + +#define TB_USB3_PATH_DOWN 0 +#define TB_USB3_PATH_UP 1 + +/* DP adapters use HopID 8 for AUX and 9 for Video */ +#define TB_DP_AUX_TX_HOPID 8 +#define 
TB_DP_AUX_RX_HOPID 8 +#define TB_DP_VIDEO_HOPID 9 + +#define TB_DP_VIDEO_PATH_OUT 0 +#define TB_DP_AUX_PATH_OUT 1 +#define TB_DP_AUX_PATH_IN 2 + +/* Minimum number of credits needed for PCIe path */ +#define TB_MIN_PCIE_CREDITS 6U +/* + * Number of credits we try to allocate for each DMA path if not limited + * by the host router baMaxHI. + */ +#define TB_DMA_CREDITS 14 +/* Minimum number of credits for DMA path */ +#define TB_MIN_DMA_CREDITS 1 + +static unsigned int dma_credits = TB_DMA_CREDITS; +module_param(dma_credits, uint, 0444); +MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: " + __MODULE_STRING(TB_DMA_CREDITS) ")"); + +static bool bw_alloc_mode = true; +module_param(bw_alloc_mode, bool, 0444); +MODULE_PARM_DESC(bw_alloc_mode, + "enable bandwidth allocation mode if supported (default: true)"); + +static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" }; + +#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \ + do { \ + struct tb_tunnel *__tunnel = (tunnel); \ + level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \ + tb_route(__tunnel->src_port->sw), \ + __tunnel->src_port->port, \ + tb_route(__tunnel->dst_port->sw), \ + __tunnel->dst_port->port, \ + tb_tunnel_names[__tunnel->type], \ + ## arg); \ + } while (0) + +#define tb_tunnel_WARN(tunnel, fmt, arg...) \ + __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg) +#define tb_tunnel_warn(tunnel, fmt, arg...) \ + __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg) +#define tb_tunnel_info(tunnel, fmt, arg...) \ + __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg) +#define tb_tunnel_dbg(tunnel, fmt, arg...) \ + __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg) + +static inline unsigned int tb_usable_credits(const struct tb_port *port) +{ + return port->total_credits - port->ctl_credits; +} + +/** + * tb_available_credits() - Available credits for PCIe and DMA + * @port: Lane adapter to check + * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP + * streams possible through this lane adapter + */ +static unsigned int tb_available_credits(const struct tb_port *port, + size_t *max_dp_streams) +{ + const struct tb_switch *sw = port->sw; + int credits, usb3, pcie, spare; + size_t ndp; + + usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0; + pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0; + + if (tb_acpi_is_xdomain_allowed()) { + spare = min_not_zero(sw->max_dma_credits, dma_credits); + /* Add some credits for potential second DMA tunnel */ + spare += TB_MIN_DMA_CREDITS; + } else { + spare = 0; + } + + credits = tb_usable_credits(port); + if (tb_acpi_may_tunnel_dp()) { + /* + * Maximum number of DP streams possible through the + * lane adapter. + */ + if (sw->min_dp_aux_credits + sw->min_dp_main_credits) + ndp = (credits - (usb3 + pcie + spare)) / + (sw->min_dp_aux_credits + sw->min_dp_main_credits); + else + ndp = 0; + } else { + ndp = 0; + } + credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits); + credits -= usb3; + + if (max_dp_streams) + *max_dp_streams = ndp; + + return credits > 0 ? 
credits : 0; +} + +static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths, + enum tb_tunnel_type type) +{ + struct tb_tunnel *tunnel; + + tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL); + if (!tunnel) + return NULL; + + tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL); + if (!tunnel->paths) { + tb_tunnel_free(tunnel); + return NULL; + } + + INIT_LIST_HEAD(&tunnel->list); + tunnel->tb = tb; + tunnel->npaths = npaths; + tunnel->type = type; + + return tunnel; +} + +static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable) +{ + int ret; + + /* Only supported of both routers are at least USB4 v2 */ + if (usb4_switch_version(tunnel->src_port->sw) < 2 || + usb4_switch_version(tunnel->dst_port->sw) < 2) + return 0; + + ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable); + if (ret) + return ret; + + ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable); + if (ret) + return ret; + + tb_tunnel_dbg(tunnel, "extended encapsulation %s\n", + str_enabled_disabled(enable)); + return 0; +} + +static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate) +{ + int res; + + if (activate) { + res = tb_pci_set_ext_encapsulation(tunnel, activate); + if (res) + return res; + } + + res = tb_pci_port_enable(tunnel->src_port, activate); + if (res) + return res; + + if (tb_port_is_pcie_up(tunnel->dst_port)) { + res = tb_pci_port_enable(tunnel->dst_port, activate); + if (res) + return res; + } + + return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate); +} + +static int tb_pci_init_credits(struct tb_path_hop *hop) +{ + struct tb_port *port = hop->in_port; + struct tb_switch *sw = port->sw; + unsigned int credits; + + if (tb_port_use_credit_allocation(port)) { + unsigned int available; + + available = tb_available_credits(port, NULL); + credits = min(sw->max_pcie_credits, available); + + if (credits < TB_MIN_PCIE_CREDITS) + return -ENOSPC; + + credits = max(TB_MIN_PCIE_CREDITS, credits); + } else { + if (tb_port_is_null(port)) + credits = port->bonded ? 32 : 16; + else + credits = 7; + } + + hop->initial_credits = credits; + return 0; +} + +static int tb_pci_init_path(struct tb_path *path) +{ + struct tb_path_hop *hop; + + path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_fc_enable = TB_PATH_ALL; + path->ingress_shared_buffer = TB_PATH_NONE; + path->priority = 3; + path->weight = 1; + path->drop_packages = 0; + + tb_path_for_each_hop(path, hop) { + int ret; + + ret = tb_pci_init_credits(hop); + if (ret) + return ret; + } + + return 0; +} + +/** + * tb_tunnel_discover_pci() - Discover existing PCIe tunnels + * @tb: Pointer to the domain structure + * @down: PCIe downstream adapter + * @alloc_hopid: Allocate HopIDs from visited ports + * + * If @down adapter is active, follows the tunnel to the PCIe upstream + * adapter and back. Returns the discovered tunnel or %NULL if there was + * no tunnel. + */ +struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down, + bool alloc_hopid) +{ + struct tb_tunnel *tunnel; + struct tb_path *path; + + if (!tb_pci_port_is_enabled(down)) + return NULL; + + tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI); + if (!tunnel) + return NULL; + + tunnel->activate = tb_pci_activate; + tunnel->src_port = down; + + /* + * Discover both paths even if they are not complete. We will + * clean them up by calling tb_tunnel_deactivate() below in that + * case. 
+ */ + path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1, + &tunnel->dst_port, "PCIe Up", alloc_hopid); + if (!path) { + /* Just disable the downstream port */ + tb_pci_port_enable(down, false); + goto err_free; + } + tunnel->paths[TB_PCI_PATH_UP] = path; + if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP])) + goto err_free; + + path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL, + "PCIe Down", alloc_hopid); + if (!path) + goto err_deactivate; + tunnel->paths[TB_PCI_PATH_DOWN] = path; + if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN])) + goto err_deactivate; + + /* Validate that the tunnel is complete */ + if (!tb_port_is_pcie_up(tunnel->dst_port)) { + tb_port_warn(tunnel->dst_port, + "path does not end on a PCIe adapter, cleaning up\n"); + goto err_deactivate; + } + + if (down != tunnel->src_port) { + tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n"); + goto err_deactivate; + } + + if (!tb_pci_port_is_enabled(tunnel->dst_port)) { + tb_tunnel_warn(tunnel, + "tunnel is not fully activated, cleaning up\n"); + goto err_deactivate; + } + + tb_tunnel_dbg(tunnel, "discovered\n"); + return tunnel; + +err_deactivate: + tb_tunnel_deactivate(tunnel); +err_free: + tb_tunnel_free(tunnel); + + return NULL; +} + +/** + * tb_tunnel_alloc_pci() - allocate a pci tunnel + * @tb: Pointer to the domain structure + * @up: PCIe upstream adapter port + * @down: PCIe downstream adapter port + * + * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and + * TB_TYPE_PCIE_DOWN. + * + * Return: Returns a tb_tunnel on success or NULL on failure. + */ +struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, + struct tb_port *down) +{ + struct tb_tunnel *tunnel; + struct tb_path *path; + + tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI); + if (!tunnel) + return NULL; + + tunnel->activate = tb_pci_activate; + tunnel->src_port = down; + tunnel->dst_port = up; + + path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0, + "PCIe Down"); + if (!path) + goto err_free; + tunnel->paths[TB_PCI_PATH_DOWN] = path; + if (tb_pci_init_path(path)) + goto err_free; + + path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0, + "PCIe Up"); + if (!path) + goto err_free; + tunnel->paths[TB_PCI_PATH_UP] = path; + if (tb_pci_init_path(path)) + goto err_free; + + return tunnel; + +err_free: + tb_tunnel_free(tunnel); + return NULL; +} + +static bool tb_dp_is_usb4(const struct tb_switch *sw) +{ + /* Titan Ridge DP adapters need the same treatment as USB4 */ + return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw); +} + +static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out, + int timeout_msec) +{ + ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); + u32 val; + int ret; + + /* Both ends need to support this */ + if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw)) + return 0; + + ret = tb_port_read(out, &val, TB_CFG_PORT, + out->cap_adap + DP_STATUS_CTRL, 1); + if (ret) + return ret; + + val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS; + + ret = tb_port_write(out, &val, TB_CFG_PORT, + out->cap_adap + DP_STATUS_CTRL, 1); + if (ret) + return ret; + + do { + ret = tb_port_read(out, &val, TB_CFG_PORT, + out->cap_adap + DP_STATUS_CTRL, 1); + if (ret) + return ret; + if (!(val & DP_STATUS_CTRL_CMHS)) + return 0; + usleep_range(100, 150); + } while (ktime_before(ktime_get(), timeout)); + + return -ETIMEDOUT; +} + +/* + * Returns maximum possible rate from capability supporting only DP 2.0 + * and below. 
Used when DP BW allocation mode is not enabled. + */ +static inline u32 tb_dp_cap_get_rate(u32 val) +{ + u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT; + + switch (rate) { + case DP_COMMON_CAP_RATE_RBR: + return 1620; + case DP_COMMON_CAP_RATE_HBR: + return 2700; + case DP_COMMON_CAP_RATE_HBR2: + return 5400; + case DP_COMMON_CAP_RATE_HBR3: + return 8100; + default: + return 0; + } +} + +/* + * Returns maximum possible rate from capability supporting DP 2.1 + * UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation + * mode is enabled. + */ +static inline u32 tb_dp_cap_get_rate_ext(u32 val) +{ + if (val & DP_COMMON_CAP_UHBR20) + return 20000; + else if (val & DP_COMMON_CAP_UHBR13_5) + return 13500; + else if (val & DP_COMMON_CAP_UHBR10) + return 10000; + + return tb_dp_cap_get_rate(val); +} + +static inline bool tb_dp_is_uhbr_rate(unsigned int rate) +{ + return rate >= 10000; +} + +static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate) +{ + val &= ~DP_COMMON_CAP_RATE_MASK; + switch (rate) { + default: + WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate); + fallthrough; + case 1620: + val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT; + break; + case 2700: + val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT; + break; + case 5400: + val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT; + break; + case 8100: + val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT; + break; + } + return val; +} + +static inline u32 tb_dp_cap_get_lanes(u32 val) +{ + u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT; + + switch (lanes) { + case DP_COMMON_CAP_1_LANE: + return 1; + case DP_COMMON_CAP_2_LANES: + return 2; + case DP_COMMON_CAP_4_LANES: + return 4; + default: + return 0; + } +} + +static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes) +{ + val &= ~DP_COMMON_CAP_LANES_MASK; + switch (lanes) { + default: + WARN(1, "invalid number of lanes %u passed, defaulting to 1\n", + lanes); + fallthrough; + case 1: + val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT; + break; + case 2: + val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT; + break; + case 4: + val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT; + break; + } + return val; +} + +static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes) +{ + /* Tunneling removes the DP 8b/10b 128/132b encoding */ + if (tb_dp_is_uhbr_rate(rate)) + return rate * lanes * 128 / 132; + return rate * lanes * 8 / 10; +} + +static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes, + u32 out_rate, u32 out_lanes, u32 *new_rate, + u32 *new_lanes) +{ + static const u32 dp_bw[][2] = { + /* Mb/s, lanes */ + { 8100, 4 }, /* 25920 Mb/s */ + { 5400, 4 }, /* 17280 Mb/s */ + { 8100, 2 }, /* 12960 Mb/s */ + { 2700, 4 }, /* 8640 Mb/s */ + { 5400, 2 }, /* 8640 Mb/s */ + { 8100, 1 }, /* 6480 Mb/s */ + { 1620, 4 }, /* 5184 Mb/s */ + { 5400, 1 }, /* 4320 Mb/s */ + { 2700, 2 }, /* 4320 Mb/s */ + { 1620, 2 }, /* 2592 Mb/s */ + { 2700, 1 }, /* 2160 Mb/s */ + { 1620, 1 }, /* 1296 Mb/s */ + }; + unsigned int i; + + /* + * Find a combination that can fit into max_bw and does not + * exceed the maximum rate and lanes supported by the DP OUT and + * DP IN adapters. 
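+	 * The table is ordered by decreasing bandwidth, so the first match
+	 * is the highest rate/lane combination that still fits max_bw.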
+ */ + for (i = 0; i < ARRAY_SIZE(dp_bw); i++) { + if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes) + continue; + + if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes) + continue; + + if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) { + *new_rate = dp_bw[i][0]; + *new_lanes = dp_bw[i][1]; + return 0; + } + } + + return -ENOSR; +} + +static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) +{ + u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw; + struct tb_port *out = tunnel->dst_port; + struct tb_port *in = tunnel->src_port; + int ret, max_bw; + + /* + * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for + * newer generation hardware. + */ + if (in->sw->generation < 2 || out->sw->generation < 2) + return 0; + + /* + * Perform connection manager handshake between IN and OUT ports + * before capabilities exchange can take place. + */ + ret = tb_dp_cm_handshake(in, out, 3000); + if (ret) + return ret; + + /* Read both DP_LOCAL_CAP registers */ + ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT, + in->cap_adap + DP_LOCAL_CAP, 1); + if (ret) + return ret; + + ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT, + out->cap_adap + DP_LOCAL_CAP, 1); + if (ret) + return ret; + + /* Write IN local caps to OUT remote caps */ + ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT, + out->cap_adap + DP_REMOTE_CAP, 1); + if (ret) + return ret; + + in_rate = tb_dp_cap_get_rate(in_dp_cap); + in_lanes = tb_dp_cap_get_lanes(in_dp_cap); + tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes)); + + /* + * If the tunnel bandwidth is limited (max_bw is set) then see + * if we need to reduce bandwidth to fit there. + */ + out_rate = tb_dp_cap_get_rate(out_dp_cap); + out_lanes = tb_dp_cap_get_lanes(out_dp_cap); + bw = tb_dp_bandwidth(out_rate, out_lanes); + tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + out_rate, out_lanes, bw); + + if (in->sw->config.depth < out->sw->config.depth) + max_bw = tunnel->max_down; + else + max_bw = tunnel->max_up; + + if (max_bw && bw > max_bw) { + u32 new_rate, new_lanes, new_bw; + + ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes, + out_rate, out_lanes, &new_rate, + &new_lanes); + if (ret) { + tb_port_info(out, "not enough bandwidth for DP tunnel\n"); + return ret; + } + + new_bw = tb_dp_bandwidth(new_rate, new_lanes); + tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n", + new_rate, new_lanes, new_bw); + + /* + * Set new rate and number of lanes before writing it to + * the IN port remote caps. + */ + out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate); + out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes); + } + + /* + * Titan Ridge does not disable AUX timers when it gets + * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with + * DP tunneling. 
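+	 * Work this around by setting DP_COMMON_CAP_LTTPR_NS in the remote
+	 * cap written to the DP IN adapter below.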
+ */ + if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) { + out_dp_cap |= DP_COMMON_CAP_LTTPR_NS; + tb_port_dbg(out, "disabling LTTPR\n"); + } + + return tb_port_write(in, &out_dp_cap, TB_CFG_PORT, + in->cap_adap + DP_REMOTE_CAP, 1); +} + +static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel) +{ + int ret, estimated_bw, granularity, tmp; + struct tb_port *out = tunnel->dst_port; + struct tb_port *in = tunnel->src_port; + u32 out_dp_cap, out_rate, out_lanes; + u32 in_dp_cap, in_rate, in_lanes; + u32 rate, lanes; + + if (!bw_alloc_mode) + return 0; + + ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true); + if (ret) + return ret; + + ret = usb4_dp_port_set_group_id(in, in->group->index); + if (ret) + return ret; + + /* + * Get the non-reduced rate and lanes based on the lowest + * capability of both adapters. + */ + ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT, + in->cap_adap + DP_LOCAL_CAP, 1); + if (ret) + return ret; + + ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT, + out->cap_adap + DP_LOCAL_CAP, 1); + if (ret) + return ret; + + in_rate = tb_dp_cap_get_rate(in_dp_cap); + in_lanes = tb_dp_cap_get_lanes(in_dp_cap); + out_rate = tb_dp_cap_get_rate(out_dp_cap); + out_lanes = tb_dp_cap_get_lanes(out_dp_cap); + + rate = min(in_rate, out_rate); + lanes = min(in_lanes, out_lanes); + tmp = tb_dp_bandwidth(rate, lanes); + + tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate, + lanes, tmp); + + ret = usb4_dp_port_set_nrd(in, rate, lanes); + if (ret) + return ret; + + /* + * Pick up granularity that supports maximum possible bandwidth. + * For that we use the UHBR rates too. + */ + in_rate = tb_dp_cap_get_rate_ext(in_dp_cap); + out_rate = tb_dp_cap_get_rate_ext(out_dp_cap); + rate = min(in_rate, out_rate); + tmp = tb_dp_bandwidth(rate, lanes); + + tb_port_dbg(in, + "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tmp); + + for (granularity = 250; tmp / granularity > 255 && granularity <= 1000; + granularity *= 2) + ; + + tb_port_dbg(in, "granularity %d Mb/s\n", granularity); + + /* + * Returns -EINVAL if granularity above is outside of the + * accepted ranges. + */ + ret = usb4_dp_port_set_granularity(in, granularity); + if (ret) + return ret; + + /* + * Bandwidth estimation is pretty much what we have in + * max_up/down fields. For discovery we just read what the + * estimation was set to. 
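+	 * Whether max_up or max_down applies depends on which of the two
+	 * routers is deeper in the topology.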
+ */ + if (in->sw->config.depth < out->sw->config.depth) + estimated_bw = tunnel->max_down; + else + estimated_bw = tunnel->max_up; + + tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw); + + ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw); + if (ret) + return ret; + + /* Initial allocation should be 0 according the spec */ + ret = usb4_dp_port_allocate_bandwidth(in, 0); + if (ret) + return ret; + + tb_port_dbg(in, "bandwidth allocation mode enabled\n"); + return 0; +} + +static int tb_dp_init(struct tb_tunnel *tunnel) +{ + struct tb_port *in = tunnel->src_port; + struct tb_switch *sw = in->sw; + struct tb *tb = in->sw->tb; + int ret; + + ret = tb_dp_xchg_caps(tunnel); + if (ret) + return ret; + + if (!tb_switch_is_usb4(sw)) + return 0; + + if (!usb4_dp_port_bandwidth_mode_supported(in)) + return 0; + + tb_port_dbg(in, "bandwidth allocation mode supported\n"); + + ret = usb4_dp_port_set_cm_id(in, tb->index); + if (ret) + return ret; + + return tb_dp_bandwidth_alloc_mode_enable(tunnel); +} + +static void tb_dp_deinit(struct tb_tunnel *tunnel) +{ + struct tb_port *in = tunnel->src_port; + + if (!usb4_dp_port_bandwidth_mode_supported(in)) + return; + if (usb4_dp_port_bandwidth_mode_enabled(in)) { + usb4_dp_port_set_cm_bandwidth_mode_supported(in, false); + tb_port_dbg(in, "bandwidth allocation mode disabled\n"); + } +} + +static int tb_dp_activate(struct tb_tunnel *tunnel, bool active) +{ + int ret; + + if (active) { + struct tb_path **paths; + int last; + + paths = tunnel->paths; + last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1; + + tb_dp_port_set_hops(tunnel->src_port, + paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index, + paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index, + paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index); + + tb_dp_port_set_hops(tunnel->dst_port, + paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index, + paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index, + paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index); + } else { + tb_dp_port_hpd_clear(tunnel->src_port); + tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0); + if (tb_port_is_dpout(tunnel->dst_port)) + tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0); + } + + ret = tb_dp_port_enable(tunnel->src_port, active); + if (ret) + return ret; + + if (tb_port_is_dpout(tunnel->dst_port)) + return tb_dp_port_enable(tunnel->dst_port, active); + + return 0; +} + +/* max_bw is rounded up to next granularity */ +static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel, + int *max_bw) +{ + struct tb_port *in = tunnel->src_port; + int ret, rate, lanes, nrd_bw; + u32 cap; + + /* + * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX + * read parameter values so this so we can use this to determine + * the maximum possible bandwidth over this link. + * + * See USB4 v2 spec 1.0 10.4.4.5. + */ + ret = tb_port_read(in, &cap, TB_CFG_PORT, + in->cap_adap + DP_LOCAL_CAP, 1); + if (ret) + return ret; + + rate = tb_dp_cap_get_rate_ext(cap); + if (tb_dp_is_uhbr_rate(rate)) { + /* + * When UHBR is used there is no reduction in lanes so + * we can use this directly. + */ + lanes = tb_dp_cap_get_lanes(cap); + } else { + /* + * If there is no UHBR supported then check the + * non-reduced rate and lanes. 
+ */ + ret = usb4_dp_port_nrd(in, &rate, &lanes); + if (ret) + return ret; + } + + nrd_bw = tb_dp_bandwidth(rate, lanes); + + if (max_bw) { + ret = usb4_dp_port_granularity(in); + if (ret < 0) + return ret; + *max_bw = roundup(nrd_bw, ret); + } + + return nrd_bw; +} + +static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel, + int *consumed_up, + int *consumed_down) +{ + struct tb_port *out = tunnel->dst_port; + struct tb_port *in = tunnel->src_port; + int ret, allocated_bw, max_bw; + + if (!usb4_dp_port_bandwidth_mode_enabled(in)) + return -EOPNOTSUPP; + + if (!tunnel->bw_mode) + return -EOPNOTSUPP; + + /* Read what was allocated previously if any */ + ret = usb4_dp_port_allocated_bandwidth(in); + if (ret < 0) + return ret; + allocated_bw = ret; + + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw); + if (ret < 0) + return ret; + if (allocated_bw == max_bw) + allocated_bw = ret; + + tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n", + allocated_bw); + + if (in->sw->config.depth < out->sw->config.depth) { + *consumed_up = 0; + *consumed_down = allocated_bw; + } else { + *consumed_up = allocated_bw; + *consumed_down = 0; + } + + return 0; +} + +static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up, + int *allocated_down) +{ + struct tb_port *out = tunnel->dst_port; + struct tb_port *in = tunnel->src_port; + + /* + * If we have already set the allocated bandwidth then use that. + * Otherwise we read it from the DPRX. + */ + if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) { + int ret, allocated_bw, max_bw; + + ret = usb4_dp_port_allocated_bandwidth(in); + if (ret < 0) + return ret; + allocated_bw = ret; + + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw); + if (ret < 0) + return ret; + if (allocated_bw == max_bw) + allocated_bw = ret; + + if (in->sw->config.depth < out->sw->config.depth) { + *allocated_up = 0; + *allocated_down = allocated_bw; + } else { + *allocated_up = allocated_bw; + *allocated_down = 0; + } + return 0; + } + + return tunnel->consumed_bandwidth(tunnel, allocated_up, + allocated_down); +} + +static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, + int *alloc_down) +{ + struct tb_port *out = tunnel->dst_port; + struct tb_port *in = tunnel->src_port; + int max_bw, ret, tmp; + + if (!usb4_dp_port_bandwidth_mode_enabled(in)) + return -EOPNOTSUPP; + + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw); + if (ret < 0) + return ret; + + if (in->sw->config.depth < out->sw->config.depth) { + tmp = min(*alloc_down, max_bw); + ret = usb4_dp_port_allocate_bandwidth(in, tmp); + if (ret) + return ret; + *alloc_down = tmp; + *alloc_up = 0; + } else { + tmp = min(*alloc_up, max_bw); + ret = usb4_dp_port_allocate_bandwidth(in, tmp); + if (ret) + return ret; + *alloc_down = 0; + *alloc_up = tmp; + } + + /* Now we can use BW mode registers to figure out the bandwidth */ + /* TODO: need to handle discovery too */ + tunnel->bw_mode = true; + + tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n", + tmp); + return 0; +} + +static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes, + int timeout_msec) +{ + ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); + struct tb_port *in = tunnel->src_port; + + /* + * Wait for DPRX done. Normally it should be already set for + * active tunnel. 
+ */ + do { + u32 val; + int ret; + + ret = tb_port_read(in, &val, TB_CFG_PORT, + in->cap_adap + DP_COMMON_CAP, 1); + if (ret) + return ret; + + if (val & DP_COMMON_CAP_DPRX_DONE) { + *rate = tb_dp_cap_get_rate(val); + *lanes = tb_dp_cap_get_lanes(val); + + tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n", + tb_dp_bandwidth(*rate, *lanes)); + return 0; + } + usleep_range(100, 150); + } while (ktime_before(ktime_get(), timeout)); + + return -ETIMEDOUT; +} + +/* Read cap from tunnel DP IN */ +static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate, + u32 *lanes) +{ + struct tb_port *in = tunnel->src_port; + u32 val; + int ret; + + switch (cap) { + case DP_LOCAL_CAP: + case DP_REMOTE_CAP: + break; + + default: + tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap); + return -EINVAL; + } + + /* + * Read from the copied remote cap so that we take into account + * if capabilities were reduced during exchange. + */ + ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1); + if (ret) + return ret; + + *rate = tb_dp_cap_get_rate(val); + *lanes = tb_dp_cap_get_lanes(val); + + tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap, + tb_dp_bandwidth(*rate, *lanes)); + return 0; +} + +static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, + int *max_down) +{ + struct tb_port *in = tunnel->src_port; + int ret; + + if (!usb4_dp_port_bandwidth_mode_enabled(in)) + return -EOPNOTSUPP; + + ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL); + if (ret < 0) + return ret; + + if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) { + *max_up = 0; + *max_down = ret; + } else { + *max_up = ret; + *max_down = 0; + } + + return 0; +} + +static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, + int *consumed_down) +{ + struct tb_port *in = tunnel->src_port; + const struct tb_switch *sw = in->sw; + u32 rate = 0, lanes = 0; + int ret; + + if (tb_dp_is_usb4(sw)) { + /* + * On USB4 routers check if the bandwidth allocation + * mode is enabled first and then read the bandwidth + * through those registers. + */ + ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up, + consumed_down); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + return ret; + } else if (!ret) { + return 0; + } + /* + * Then see if the DPRX negotiation is ready and if yes + * return that bandwidth (it may be smaller than the + * reduced one). Otherwise return the remote (possibly + * reduced) caps. 
+ */ + ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150); + if (ret) { + if (ret == -ETIMEDOUT) + ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, + &rate, &lanes); + if (ret) + return ret; + } + } else if (sw->generation >= 2) { + ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes); + if (ret) + return ret; + } else { + /* No bandwidth management for legacy devices */ + *consumed_up = 0; + *consumed_down = 0; + return 0; + } + + if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) { + *consumed_up = 0; + *consumed_down = tb_dp_bandwidth(rate, lanes); + } else { + *consumed_up = tb_dp_bandwidth(rate, lanes); + *consumed_down = 0; + } + + return 0; +} + +static void tb_dp_init_aux_credits(struct tb_path_hop *hop) +{ + struct tb_port *port = hop->in_port; + struct tb_switch *sw = port->sw; + + if (tb_port_use_credit_allocation(port)) + hop->initial_credits = sw->min_dp_aux_credits; + else + hop->initial_credits = 1; +} + +static void tb_dp_init_aux_path(struct tb_path *path) +{ + struct tb_path_hop *hop; + + path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_fc_enable = TB_PATH_ALL; + path->ingress_shared_buffer = TB_PATH_NONE; + path->priority = 2; + path->weight = 1; + + tb_path_for_each_hop(path, hop) + tb_dp_init_aux_credits(hop); +} + +static int tb_dp_init_video_credits(struct tb_path_hop *hop) +{ + struct tb_port *port = hop->in_port; + struct tb_switch *sw = port->sw; + + if (tb_port_use_credit_allocation(port)) { + unsigned int nfc_credits; + size_t max_dp_streams; + + tb_available_credits(port, &max_dp_streams); + /* + * Read the number of currently allocated NFC credits + * from the lane adapter. Since we only use them for DP + * tunneling we can use that to figure out how many DP + * tunnels already go through the lane adapter. 
+ */ + nfc_credits = port->config.nfc_credits & + ADP_CS_4_NFC_BUFFERS_MASK; + if (nfc_credits / sw->min_dp_main_credits > max_dp_streams) + return -ENOSPC; + + hop->nfc_credits = sw->min_dp_main_credits; + } else { + hop->nfc_credits = min(port->total_credits - 2, 12U); + } + + return 0; +} + +static int tb_dp_init_video_path(struct tb_path *path) +{ + struct tb_path_hop *hop; + + path->egress_fc_enable = TB_PATH_NONE; + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_fc_enable = TB_PATH_NONE; + path->ingress_shared_buffer = TB_PATH_NONE; + path->priority = 1; + path->weight = 1; + + tb_path_for_each_hop(path, hop) { + int ret; + + ret = tb_dp_init_video_credits(hop); + if (ret) + return ret; + } + + return 0; +} + +static void tb_dp_dump(struct tb_tunnel *tunnel) +{ + struct tb_port *in, *out; + u32 dp_cap, rate, lanes; + + in = tunnel->src_port; + out = tunnel->dst_port; + + if (tb_port_read(in, &dp_cap, TB_CFG_PORT, + in->cap_adap + DP_LOCAL_CAP, 1)) + return; + + rate = tb_dp_cap_get_rate(dp_cap); + lanes = tb_dp_cap_get_lanes(dp_cap); + + tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tb_dp_bandwidth(rate, lanes)); + + out = tunnel->dst_port; + + if (tb_port_read(out, &dp_cap, TB_CFG_PORT, + out->cap_adap + DP_LOCAL_CAP, 1)) + return; + + rate = tb_dp_cap_get_rate(dp_cap); + lanes = tb_dp_cap_get_lanes(dp_cap); + + tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tb_dp_bandwidth(rate, lanes)); + + if (tb_port_read(in, &dp_cap, TB_CFG_PORT, + in->cap_adap + DP_REMOTE_CAP, 1)) + return; + + rate = tb_dp_cap_get_rate(dp_cap); + lanes = tb_dp_cap_get_lanes(dp_cap); + + tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n", + rate, lanes, tb_dp_bandwidth(rate, lanes)); +} + +/** + * tb_tunnel_discover_dp() - Discover existing Display Port tunnels + * @tb: Pointer to the domain structure + * @in: DP in adapter + * @alloc_hopid: Allocate HopIDs from visited ports + * + * If @in adapter is active, follows the tunnel to the DP out adapter + * and back. Returns the discovered tunnel or %NULL if there was no + * tunnel. + * + * Return: DP tunnel or %NULL if no tunnel found. 
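Before the implementation, a brief usage sketch (editor-added and simplified; the example_* name is invented and the tunnel list is assumed to be connection-manager private): during the initial scan a connection manager walks the DP IN adapters of a router and records any tunnels the boot firmware already established.

static void example_discover_dp_tunnels(struct tb *tb, struct tb_switch *sw,
					struct list_head *tunnels)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		struct tb_tunnel *tunnel;

		if (!tb_port_is_dpin(port))
			continue;

		/* Returns NULL when the DP IN adapter has no active tunnel */
		tunnel = tb_tunnel_discover_dp(tb, port, true);
		if (tunnel)
			list_add_tail(&tunnel->list, tunnels);
	}
}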
+ */ +struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, + bool alloc_hopid) +{ + struct tb_tunnel *tunnel; + struct tb_port *port; + struct tb_path *path; + + if (!tb_dp_port_is_enabled(in)) + return NULL; + + tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP); + if (!tunnel) + return NULL; + + tunnel->init = tb_dp_init; + tunnel->deinit = tb_dp_deinit; + tunnel->activate = tb_dp_activate; + tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth; + tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth; + tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth; + tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; + tunnel->src_port = in; + + path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1, + &tunnel->dst_port, "Video", alloc_hopid); + if (!path) { + /* Just disable the DP IN port */ + tb_dp_port_enable(in, false); + goto err_free; + } + tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path; + if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT])) + goto err_free; + + path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX", + alloc_hopid); + if (!path) + goto err_deactivate; + tunnel->paths[TB_DP_AUX_PATH_OUT] = path; + tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]); + + path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID, + &port, "AUX RX", alloc_hopid); + if (!path) + goto err_deactivate; + tunnel->paths[TB_DP_AUX_PATH_IN] = path; + tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]); + + /* Validate that the tunnel is complete */ + if (!tb_port_is_dpout(tunnel->dst_port)) { + tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n"); + goto err_deactivate; + } + + if (!tb_dp_port_is_enabled(tunnel->dst_port)) + goto err_deactivate; + + if (!tb_dp_port_hpd_is_active(tunnel->dst_port)) + goto err_deactivate; + + if (port != tunnel->src_port) { + tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n"); + goto err_deactivate; + } + + tb_dp_dump(tunnel); + + tb_tunnel_dbg(tunnel, "discovered\n"); + return tunnel; + +err_deactivate: + tb_tunnel_deactivate(tunnel); +err_free: + tb_tunnel_free(tunnel); + + return NULL; +} + +/** + * tb_tunnel_alloc_dp() - allocate a Display Port tunnel + * @tb: Pointer to the domain structure + * @in: DP in adapter port + * @out: DP out adapter port + * @link_nr: Preferred lane adapter when the link is not bonded + * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0 + * if not limited) + * @max_down: Maximum available downstream bandwidth for the DP tunnel + * (%0 if not limited) + * + * Allocates a tunnel between @in and @out that is capable of tunneling + * Display Port traffic. + * + * Return: Returns a tb_tunnel on success or NULL on failure. 
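As a usage note (an editor-added, simplified sketch assuming the driver's internal headers; error handling and bandwidth bookkeeping are trimmed, and link_nr 0 is only a placeholder): a connection manager pairs the allocation with tb_tunnel_activate() and frees the tunnel if activation fails.

static int example_tunnel_one_dp(struct tb *tb, struct tb_port *in,
				 struct tb_port *out, int avail_up,
				 int avail_down)
{
	struct tb_tunnel *tunnel;
	int ret;

	tunnel = tb_tunnel_alloc_dp(tb, in, out, 0, avail_up, avail_down);
	if (!tunnel)
		return -ENOMEM;

	ret = tb_tunnel_activate(tunnel);
	if (ret) {
		tb_tunnel_free(tunnel);
		return ret;
	}

	return 0;
}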
+ */ +struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, + struct tb_port *out, int link_nr, + int max_up, int max_down) +{ + struct tb_tunnel *tunnel; + struct tb_path **paths; + struct tb_path *path; + + if (WARN_ON(!in->cap_adap || !out->cap_adap)) + return NULL; + + tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP); + if (!tunnel) + return NULL; + + tunnel->init = tb_dp_init; + tunnel->deinit = tb_dp_deinit; + tunnel->activate = tb_dp_activate; + tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth; + tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth; + tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth; + tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; + tunnel->src_port = in; + tunnel->dst_port = out; + tunnel->max_up = max_up; + tunnel->max_down = max_down; + + paths = tunnel->paths; + + path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID, + link_nr, "Video"); + if (!path) + goto err_free; + tb_dp_init_video_path(path); + paths[TB_DP_VIDEO_PATH_OUT] = path; + + path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out, + TB_DP_AUX_TX_HOPID, link_nr, "AUX TX"); + if (!path) + goto err_free; + tb_dp_init_aux_path(path); + paths[TB_DP_AUX_PATH_OUT] = path; + + path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in, + TB_DP_AUX_RX_HOPID, link_nr, "AUX RX"); + if (!path) + goto err_free; + tb_dp_init_aux_path(path); + paths[TB_DP_AUX_PATH_IN] = path; + + return tunnel; + +err_free: + tb_tunnel_free(tunnel); + return NULL; +} + +static unsigned int tb_dma_available_credits(const struct tb_port *port) +{ + const struct tb_switch *sw = port->sw; + int credits; + + credits = tb_available_credits(port, NULL); + if (tb_acpi_may_tunnel_pcie()) + credits -= sw->max_pcie_credits; + credits -= port->dma_credits; + + return credits > 0 ? credits : 0; +} + +static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits) +{ + struct tb_port *port = hop->in_port; + + if (tb_port_use_credit_allocation(port)) { + unsigned int available = tb_dma_available_credits(port); + + /* + * Need to have at least TB_MIN_DMA_CREDITS, otherwise + * DMA path cannot be established. + */ + if (available < TB_MIN_DMA_CREDITS) + return -ENOSPC; + + while (credits > available) + credits--; + + tb_port_dbg(port, "reserving %u credits for DMA path\n", + credits); + + port->dma_credits += credits; + } else { + if (tb_port_is_null(port)) + credits = port->bonded ? 14 : 6; + else + credits = min(port->total_credits, credits); + } + + hop->initial_credits = credits; + return 0; +} + +/* Path from lane adapter to NHI */ +static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits) +{ + struct tb_path_hop *hop; + unsigned int i, tmp; + + path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; + path->ingress_fc_enable = TB_PATH_ALL; + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_shared_buffer = TB_PATH_NONE; + path->priority = 5; + path->weight = 1; + path->clear_fc = true; + + /* + * First lane adapter is the one connected to the remote host. + * We don't tunnel other traffic over this link so can use all + * the credits (except the ones reserved for control traffic). 
+ */ + hop = &path->hops[0]; + tmp = min(tb_usable_credits(hop->in_port), credits); + hop->initial_credits = tmp; + hop->in_port->dma_credits += tmp; + + for (i = 1; i < path->path_length; i++) { + int ret; + + ret = tb_dma_reserve_credits(&path->hops[i], credits); + if (ret) + return ret; + } + + return 0; +} + +/* Path from NHI to lane adapter */ +static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits) +{ + struct tb_path_hop *hop; + + path->egress_fc_enable = TB_PATH_ALL; + path->ingress_fc_enable = TB_PATH_ALL; + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_shared_buffer = TB_PATH_NONE; + path->priority = 5; + path->weight = 1; + path->clear_fc = true; + + tb_path_for_each_hop(path, hop) { + int ret; + + ret = tb_dma_reserve_credits(hop, credits); + if (ret) + return ret; + } + + return 0; +} + +static void tb_dma_release_credits(struct tb_path_hop *hop) +{ + struct tb_port *port = hop->in_port; + + if (tb_port_use_credit_allocation(port)) { + port->dma_credits -= hop->initial_credits; + + tb_port_dbg(port, "released %u DMA path credits\n", + hop->initial_credits); + } +} + +static void tb_dma_deinit_path(struct tb_path *path) +{ + struct tb_path_hop *hop; + + tb_path_for_each_hop(path, hop) + tb_dma_release_credits(hop); +} + +static void tb_dma_deinit(struct tb_tunnel *tunnel) +{ + int i; + + for (i = 0; i < tunnel->npaths; i++) { + if (!tunnel->paths[i]) + continue; + tb_dma_deinit_path(tunnel->paths[i]); + } +} + +/** + * tb_tunnel_alloc_dma() - allocate a DMA tunnel + * @tb: Pointer to the domain structure + * @nhi: Host controller port + * @dst: Destination null port which the other domain is connected to + * @transmit_path: HopID used for transmitting packets + * @transmit_ring: NHI ring number used to send packets towards the + * other domain. Set to %-1 if TX path is not needed. + * @receive_path: HopID used for receiving packets + * @receive_ring: NHI ring number used to receive packets from the + * other domain. Set to %-1 if RX path is not needed. + * + * Return: Returns a tb_tunnel on success or NULL on failure. 
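A simplified, editor-added sketch of how the DMA tunnel API fits together (the example_* name, HopID 8 and ring 1 are illustrative values only; real callers get them from the XDomain protocol):

static int example_dma_tunnel(struct tb *tb, struct tb_port *nhi,
			      struct tb_port *dst)
{
	struct tb_tunnel *tunnel;
	int ret;

	/* HopID 8 on the lane adapter, NHI ring 1 in both directions */
	tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, 8, 1, 8, 1);
	if (!tunnel)
		return -ENOMEM;

	ret = tb_tunnel_activate(tunnel);
	if (ret) {
		tb_tunnel_free(tunnel);
		return ret;
	}

	/* ... DMA traffic flows; on teardown: */
	tb_tunnel_deactivate(tunnel);
	tb_tunnel_free(tunnel);
	return 0;
}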
+ */ +struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, + struct tb_port *dst, int transmit_path, + int transmit_ring, int receive_path, + int receive_ring) +{ + struct tb_tunnel *tunnel; + size_t npaths = 0, i = 0; + struct tb_path *path; + int credits; + + /* Ring 0 is reserved for control channel */ + if (WARN_ON(!receive_ring || !transmit_ring)) + return NULL; + + if (receive_ring > 0) + npaths++; + if (transmit_ring > 0) + npaths++; + + if (WARN_ON(!npaths)) + return NULL; + + tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA); + if (!tunnel) + return NULL; + + tunnel->src_port = nhi; + tunnel->dst_port = dst; + tunnel->deinit = tb_dma_deinit; + + credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits); + + if (receive_ring > 0) { + path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, + "DMA RX"); + if (!path) + goto err_free; + tunnel->paths[i++] = path; + if (tb_dma_init_rx_path(path, credits)) { + tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n"); + goto err_free; + } + } + + if (transmit_ring > 0) { + path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, + "DMA TX"); + if (!path) + goto err_free; + tunnel->paths[i++] = path; + if (tb_dma_init_tx_path(path, credits)) { + tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n"); + goto err_free; + } + } + + return tunnel; + +err_free: + tb_tunnel_free(tunnel); + return NULL; +} + +/** + * tb_tunnel_match_dma() - Match DMA tunnel + * @tunnel: Tunnel to match + * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore. + * @transmit_ring: NHI ring number used to send packets towards the + * other domain. Pass %-1 to ignore. + * @receive_path: HopID used for receiving packets. Pass %-1 to ignore. + * @receive_ring: NHI ring number used to receive packets from the + * other domain. Pass %-1 to ignore. + * + * This function can be used to match specific DMA tunnel, if there are + * multiple DMA tunnels going through the same XDomain connection. + * Returns true if there is match and false otherwise. 
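For illustration, an editor-added sketch of a caller that locates an existing DMA tunnel by its NHI rings while ignoring the HopIDs (the example_* name and the CM-private tunnel list are assumptions):

static struct tb_tunnel *example_find_dma_tunnel(struct list_head *tunnels,
						 int transmit_ring,
						 int receive_ring)
{
	struct tb_tunnel *tunnel;

	list_for_each_entry(tunnel, tunnels, list) {
		if (!tb_tunnel_is_dma(tunnel))
			continue;
		/* Pass -1 for the HopIDs so only the rings are compared */
		if (tb_tunnel_match_dma(tunnel, -1, transmit_ring, -1,
					receive_ring))
			return tunnel;
	}

	return NULL;
}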
+ */ +bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path, + int transmit_ring, int receive_path, int receive_ring) +{ + const struct tb_path *tx_path = NULL, *rx_path = NULL; + int i; + + if (!receive_ring || !transmit_ring) + return false; + + for (i = 0; i < tunnel->npaths; i++) { + const struct tb_path *path = tunnel->paths[i]; + + if (!path) + continue; + + if (tb_port_is_nhi(path->hops[0].in_port)) + tx_path = path; + else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port)) + rx_path = path; + } + + if (transmit_ring > 0 || transmit_path > 0) { + if (!tx_path) + return false; + if (transmit_ring > 0 && + (tx_path->hops[0].in_hop_index != transmit_ring)) + return false; + if (transmit_path > 0 && + (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path)) + return false; + } + + if (receive_ring > 0 || receive_path > 0) { + if (!rx_path) + return false; + if (receive_path > 0 && + (rx_path->hops[0].in_hop_index != receive_path)) + return false; + if (receive_ring > 0 && + (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring)) + return false; + } + + return true; +} + +static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down) +{ + int ret, up_max_rate, down_max_rate; + + ret = usb4_usb3_port_max_link_rate(up); + if (ret < 0) + return ret; + up_max_rate = ret; + + ret = usb4_usb3_port_max_link_rate(down); + if (ret < 0) + return ret; + down_max_rate = ret; + + return min(up_max_rate, down_max_rate); +} + +static int tb_usb3_init(struct tb_tunnel *tunnel) +{ + tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n", + tunnel->allocated_up, tunnel->allocated_down); + + return usb4_usb3_port_allocate_bandwidth(tunnel->src_port, + &tunnel->allocated_up, + &tunnel->allocated_down); +} + +static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate) +{ + int res; + + res = tb_usb3_port_enable(tunnel->src_port, activate); + if (res) + return res; + + if (tb_port_is_usb3_up(tunnel->dst_port)) + return tb_usb3_port_enable(tunnel->dst_port, activate); + + return 0; +} + +static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel, + int *consumed_up, int *consumed_down) +{ + int pcie_enabled = tb_acpi_may_tunnel_pcie(); + + /* + * PCIe tunneling, if enabled, affects the USB3 bandwidth so + * take that it into account here. + */ + *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3; + *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3; + return 0; +} + +static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel) +{ + int ret; + + ret = usb4_usb3_port_release_bandwidth(tunnel->src_port, + &tunnel->allocated_up, + &tunnel->allocated_down); + if (ret) + return ret; + + tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n", + tunnel->allocated_up, tunnel->allocated_down); + return 0; +} + +static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel, + int *available_up, + int *available_down) +{ + int ret, max_rate, allocate_up, allocate_down; + + ret = usb4_usb3_port_actual_link_rate(tunnel->src_port); + if (ret < 0) { + tb_tunnel_warn(tunnel, "failed to read actual link rate\n"); + return; + } else if (!ret) { + /* Use maximum link rate if the link valid is not set */ + ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port); + if (ret < 0) { + tb_tunnel_warn(tunnel, "failed to read maximum link rate\n"); + return; + } + } + + /* + * 90% of the max rate can be allocated for isochronous + * transfers. 
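Two constants in the USB3 helpers above are easier to see with numbers (the figures are illustrative): on a 10000 Mb/s USB 3.2 Gen 2 link the isochronous ceiling is 10000 * 90 / 100 = 9000 Mb/s, and with PCIe tunneling enabled a 3000 Mb/s USB3 allocation is accounted as 3000 * (3 + 1) / 3 = 4000 Mb/s consumed, because the PCIe traffic shares the same link budget.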
+ */ + max_rate = ret * 90 / 100; + + /* No need to reclaim if already at maximum */ + if (tunnel->allocated_up >= max_rate && + tunnel->allocated_down >= max_rate) + return; + + /* Don't go lower than what is already allocated */ + allocate_up = min(max_rate, *available_up); + if (allocate_up < tunnel->allocated_up) + allocate_up = tunnel->allocated_up; + + allocate_down = min(max_rate, *available_down); + if (allocate_down < tunnel->allocated_down) + allocate_down = tunnel->allocated_down; + + /* If no changes no need to do more */ + if (allocate_up == tunnel->allocated_up && + allocate_down == tunnel->allocated_down) + return; + + ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up, + &allocate_down); + if (ret) { + tb_tunnel_info(tunnel, "failed to allocate bandwidth\n"); + return; + } + + tunnel->allocated_up = allocate_up; + *available_up -= tunnel->allocated_up; + + tunnel->allocated_down = allocate_down; + *available_down -= tunnel->allocated_down; + + tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n", + tunnel->allocated_up, tunnel->allocated_down); +} + +static void tb_usb3_init_credits(struct tb_path_hop *hop) +{ + struct tb_port *port = hop->in_port; + struct tb_switch *sw = port->sw; + unsigned int credits; + + if (tb_port_use_credit_allocation(port)) { + credits = sw->max_usb3_credits; + } else { + if (tb_port_is_null(port)) + credits = port->bonded ? 32 : 16; + else + credits = 7; + } + + hop->initial_credits = credits; +} + +static void tb_usb3_init_path(struct tb_path *path) +{ + struct tb_path_hop *hop; + + path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_fc_enable = TB_PATH_ALL; + path->ingress_shared_buffer = TB_PATH_NONE; + path->priority = 3; + path->weight = 3; + path->drop_packages = 0; + + tb_path_for_each_hop(path, hop) + tb_usb3_init_credits(hop); +} + +/** + * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels + * @tb: Pointer to the domain structure + * @down: USB3 downstream adapter + * @alloc_hopid: Allocate HopIDs from visited ports + * + * If @down adapter is active, follows the tunnel to the USB3 upstream + * adapter and back. Returns the discovered tunnel or %NULL if there was + * no tunnel. + */ +struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down, + bool alloc_hopid) +{ + struct tb_tunnel *tunnel; + struct tb_path *path; + + if (!tb_usb3_port_is_enabled(down)) + return NULL; + + tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3); + if (!tunnel) + return NULL; + + tunnel->activate = tb_usb3_activate; + tunnel->src_port = down; + + /* + * Discover both paths even if they are not complete. We will + * clean them up by calling tb_tunnel_deactivate() below in that + * case. 
+ */ + path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1, + &tunnel->dst_port, "USB3 Down", alloc_hopid); + if (!path) { + /* Just disable the downstream port */ + tb_usb3_port_enable(down, false); + goto err_free; + } + tunnel->paths[TB_USB3_PATH_DOWN] = path; + tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); + + path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL, + "USB3 Up", alloc_hopid); + if (!path) + goto err_deactivate; + tunnel->paths[TB_USB3_PATH_UP] = path; + tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); + + /* Validate that the tunnel is complete */ + if (!tb_port_is_usb3_up(tunnel->dst_port)) { + tb_port_warn(tunnel->dst_port, + "path does not end on an USB3 adapter, cleaning up\n"); + goto err_deactivate; + } + + if (down != tunnel->src_port) { + tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n"); + goto err_deactivate; + } + + if (!tb_usb3_port_is_enabled(tunnel->dst_port)) { + tb_tunnel_warn(tunnel, + "tunnel is not fully activated, cleaning up\n"); + goto err_deactivate; + } + + if (!tb_route(down->sw)) { + int ret; + + /* + * Read the initial bandwidth allocation for the first + * hop tunnel. + */ + ret = usb4_usb3_port_allocated_bandwidth(down, + &tunnel->allocated_up, &tunnel->allocated_down); + if (ret) + goto err_deactivate; + + tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n", + tunnel->allocated_up, tunnel->allocated_down); + + tunnel->init = tb_usb3_init; + tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; + tunnel->release_unused_bandwidth = + tb_usb3_release_unused_bandwidth; + tunnel->reclaim_available_bandwidth = + tb_usb3_reclaim_available_bandwidth; + } + + tb_tunnel_dbg(tunnel, "discovered\n"); + return tunnel; + +err_deactivate: + tb_tunnel_deactivate(tunnel); +err_free: + tb_tunnel_free(tunnel); + + return NULL; +} + +/** + * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel + * @tb: Pointer to the domain structure + * @up: USB3 upstream adapter port + * @down: USB3 downstream adapter port + * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0 + * if not limited). + * @max_down: Maximum available downstream bandwidth for the USB3 tunnel + * (%0 if not limited). + * + * Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and + * @TB_TYPE_USB3_DOWN. + * + * Return: Returns a tb_tunnel on success or %NULL on failure. + */ +struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, + struct tb_port *down, int max_up, + int max_down) +{ + struct tb_tunnel *tunnel; + struct tb_path *path; + int max_rate = 0; + + /* + * Check that we have enough bandwidth available for the new + * USB3 tunnel. 
+ */ + if (max_up > 0 || max_down > 0) { + max_rate = tb_usb3_max_link_rate(down, up); + if (max_rate < 0) + return NULL; + + /* Only 90% can be allocated for USB3 isochronous transfers */ + max_rate = max_rate * 90 / 100; + tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n", + max_rate); + + if (max_rate > max_up || max_rate > max_down) { + tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n"); + return NULL; + } + } + + tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3); + if (!tunnel) + return NULL; + + tunnel->activate = tb_usb3_activate; + tunnel->src_port = down; + tunnel->dst_port = up; + tunnel->max_up = max_up; + tunnel->max_down = max_down; + + path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0, + "USB3 Down"); + if (!path) { + tb_tunnel_free(tunnel); + return NULL; + } + tb_usb3_init_path(path); + tunnel->paths[TB_USB3_PATH_DOWN] = path; + + path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0, + "USB3 Up"); + if (!path) { + tb_tunnel_free(tunnel); + return NULL; + } + tb_usb3_init_path(path); + tunnel->paths[TB_USB3_PATH_UP] = path; + + if (!tb_route(down->sw)) { + tunnel->allocated_up = max_rate; + tunnel->allocated_down = max_rate; + + tunnel->init = tb_usb3_init; + tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; + tunnel->release_unused_bandwidth = + tb_usb3_release_unused_bandwidth; + tunnel->reclaim_available_bandwidth = + tb_usb3_reclaim_available_bandwidth; + } + + return tunnel; +} + +/** + * tb_tunnel_free() - free a tunnel + * @tunnel: Tunnel to be freed + * + * Frees a tunnel. The tunnel does not need to be deactivated. + */ +void tb_tunnel_free(struct tb_tunnel *tunnel) +{ + int i; + + if (!tunnel) + return; + + if (tunnel->deinit) + tunnel->deinit(tunnel); + + for (i = 0; i < tunnel->npaths; i++) { + if (tunnel->paths[i]) + tb_path_free(tunnel->paths[i]); + } + + kfree(tunnel->paths); + kfree(tunnel); +} + +/** + * tb_tunnel_is_invalid - check whether an activated path is still valid + * @tunnel: Tunnel to check + */ +bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel) +{ + int i; + + for (i = 0; i < tunnel->npaths; i++) { + WARN_ON(!tunnel->paths[i]->activated); + if (tb_path_is_invalid(tunnel->paths[i])) + return true; + } + + return false; +} + +/** + * tb_tunnel_restart() - activate a tunnel after a hardware reset + * @tunnel: Tunnel to restart + * + * Return: 0 on success and negative errno in case if failure + */ +int tb_tunnel_restart(struct tb_tunnel *tunnel) +{ + int res, i; + + tb_tunnel_dbg(tunnel, "activating\n"); + + /* + * Make sure all paths are properly disabled before enabling + * them again. + */ + for (i = 0; i < tunnel->npaths; i++) { + if (tunnel->paths[i]->activated) { + tb_path_deactivate(tunnel->paths[i]); + tunnel->paths[i]->activated = false; + } + } + + if (tunnel->init) { + res = tunnel->init(tunnel); + if (res) + return res; + } + + for (i = 0; i < tunnel->npaths; i++) { + res = tb_path_activate(tunnel->paths[i]); + if (res) + goto err; + } + + if (tunnel->activate) { + res = tunnel->activate(tunnel, true); + if (res) + goto err; + } + + return 0; + +err: + tb_tunnel_warn(tunnel, "activation failed\n"); + tb_tunnel_deactivate(tunnel); + return res; +} + +/** + * tb_tunnel_activate() - activate a tunnel + * @tunnel: Tunnel to activate + * + * Return: Returns 0 on success or an error code on failure. 
+ */ +int tb_tunnel_activate(struct tb_tunnel *tunnel) +{ + int i; + + for (i = 0; i < tunnel->npaths; i++) { + if (tunnel->paths[i]->activated) { + tb_tunnel_WARN(tunnel, + "trying to activate an already activated tunnel\n"); + return -EINVAL; + } + } + + return tb_tunnel_restart(tunnel); +} + +/** + * tb_tunnel_deactivate() - deactivate a tunnel + * @tunnel: Tunnel to deactivate + */ +void tb_tunnel_deactivate(struct tb_tunnel *tunnel) +{ + int i; + + tb_tunnel_dbg(tunnel, "deactivating\n"); + + if (tunnel->activate) + tunnel->activate(tunnel, false); + + for (i = 0; i < tunnel->npaths; i++) { + if (tunnel->paths[i] && tunnel->paths[i]->activated) + tb_path_deactivate(tunnel->paths[i]); + } +} + +/** + * tb_tunnel_port_on_path() - Does the tunnel go through port + * @tunnel: Tunnel to check + * @port: Port to check + * + * Returns true if @tunnel goes through @port (direction does not matter), + * false otherwise. + */ +bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel, + const struct tb_port *port) +{ + int i; + + for (i = 0; i < tunnel->npaths; i++) { + if (!tunnel->paths[i]) + continue; + + if (tb_path_port_on_path(tunnel->paths[i], port)) + return true; + } + + return false; +} + +static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel) +{ + int i; + + for (i = 0; i < tunnel->npaths; i++) { + if (!tunnel->paths[i]) + return false; + if (!tunnel->paths[i]->activated) + return false; + } + + return true; +} + +/** + * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth + * @tunnel: Tunnel to check + * @max_up: Maximum upstream bandwidth in Mb/s + * @max_down: Maximum downstream bandwidth in Mb/s + * + * Returns maximum possible bandwidth this tunnel can go if not limited + * by other bandwidth clients. If the tunnel does not support this + * returns %-EOPNOTSUPP. + */ +int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, + int *max_down) +{ + if (!tb_tunnel_is_active(tunnel)) + return -EINVAL; + + if (tunnel->maximum_bandwidth) + return tunnel->maximum_bandwidth(tunnel, max_up, max_down); + return -EOPNOTSUPP; +} + +/** + * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel + * @tunnel: Tunnel to check + * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here + * @allocated_down: Currently allocated downstream bandwidth in Mb/s is + * stored here + * + * Returns the bandwidth allocated for the tunnel. This may be higher + * than what the tunnel actually consumes. + */ +int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up, + int *allocated_down) +{ + if (!tb_tunnel_is_active(tunnel)) + return -EINVAL; + + if (tunnel->allocated_bandwidth) + return tunnel->allocated_bandwidth(tunnel, allocated_up, + allocated_down); + return -EOPNOTSUPP; +} + +/** + * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation + * @tunnel: Tunnel whose bandwidth allocation to change + * @alloc_up: New upstream bandwidth in Mb/s + * @alloc_down: New downstream bandwidth in Mb/s + * + * Tries to change tunnel bandwidth allocation. If succeeds returns %0 + * and updates @alloc_up and @alloc_down to that was actually allocated + * (it may not be the same as passed originally). Returns negative errno + * in case of failure. 
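A simplified, editor-added sketch of how a caller combines this with tb_tunnel_maximum_bandwidth(); it assumes the DP IN adapter sits closer to the host so the stream counts as downstream, and the example_* name is invented:

static int example_request_dp_bandwidth(struct tb_tunnel *tunnel,
					int requested_down)
{
	int max_up, max_down, alloc_up = 0, alloc_down;
	int ret;

	ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
	if (ret)
		return ret;

	/* Never request more than the tunnel can possibly carry */
	alloc_down = min(requested_down, max_down);

	ret = tb_tunnel_alloc_bandwidth(tunnel, &alloc_up, &alloc_down);
	if (ret)
		return ret;

	/* alloc_up/alloc_down now hold what was actually granted */
	return alloc_down;
}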
+ */ +int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, + int *alloc_down) +{ + if (!tb_tunnel_is_active(tunnel)) + return -EINVAL; + + if (tunnel->alloc_bandwidth) + return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down); + + return -EOPNOTSUPP; +} + +/** + * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel + * @tunnel: Tunnel to check + * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port. + * Can be %NULL. + * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port. + * Can be %NULL. + * + * Stores the amount of isochronous bandwidth @tunnel consumes in + * @consumed_up and @consumed_down. In case of success returns %0, + * negative errno otherwise. + */ +int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, + int *consumed_down) +{ + int up_bw = 0, down_bw = 0; + + if (!tb_tunnel_is_active(tunnel)) + goto out; + + if (tunnel->consumed_bandwidth) { + int ret; + + ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw); + if (ret) + return ret; + + tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw, + down_bw); + } + +out: + if (consumed_up) + *consumed_up = up_bw; + if (consumed_down) + *consumed_down = down_bw; + + return 0; +} + +/** + * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth + * @tunnel: Tunnel whose unused bandwidth to release + * + * If tunnel supports dynamic bandwidth management (USB3 tunnels at the + * moment) this function makes it to release all the unused bandwidth. + * + * Returns %0 in case of success and negative errno otherwise. + */ +int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel) +{ + if (!tb_tunnel_is_active(tunnel)) + return 0; + + if (tunnel->release_unused_bandwidth) { + int ret; + + ret = tunnel->release_unused_bandwidth(tunnel); + if (ret) + return ret; + } + + return 0; +} + +/** + * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth + * @tunnel: Tunnel reclaiming available bandwidth + * @available_up: Available upstream bandwidth (in Mb/s) + * @available_down: Available downstream bandwidth (in Mb/s) + * + * Reclaims bandwidth from @available_up and @available_down and updates + * the variables accordingly (e.g decreases both according to what was + * reclaimed by the tunnel). If nothing was reclaimed the values are + * kept as is. + */ +void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel, + int *available_up, + int *available_down) +{ + if (!tb_tunnel_is_active(tunnel)) + return; + + if (tunnel->reclaim_available_bandwidth) + tunnel->reclaim_available_bandwidth(tunnel, available_up, + available_down); +} diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h new file mode 100644 index 0000000000..bf690f7bee --- /dev/null +++ b/drivers/thunderbolt/tunnel.h @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Thunderbolt driver - Tunneling support + * + * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> + * Copyright (C) 2019, Intel Corporation + */ + +#ifndef TB_TUNNEL_H_ +#define TB_TUNNEL_H_ + +#include "tb.h" + +enum tb_tunnel_type { + TB_TUNNEL_PCI, + TB_TUNNEL_DP, + TB_TUNNEL_DMA, + TB_TUNNEL_USB3, +}; + +/** + * struct tb_tunnel - Tunnel between two ports + * @tb: Pointer to the domain + * @src_port: Source port of the tunnel + * @dst_port: Destination port of the tunnel. For discovered incomplete + * tunnels may be %NULL or null adapter port instead. 
+ * @paths: All paths required by the tunnel + * @npaths: Number of paths in @paths + * @init: Optional tunnel specific initialization + * @deinit: Optional tunnel specific de-initialization + * @activate: Optional tunnel specific activation/deactivation + * @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel + * @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel + * @alloc_bandwidth: Change tunnel bandwidth allocation + * @consumed_bandwidth: Return how much bandwidth the tunnel consumes + * @release_unused_bandwidth: Release all unused bandwidth + * @reclaim_available_bandwidth: Reclaim back available bandwidth + * @list: Tunnels are linked using this field + * @type: Type of the tunnel + * @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel. + * Only set if the bandwidth needs to be limited. + * @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel. + * Only set if the bandwidth needs to be limited. + * @allocated_up: Allocated upstream bandwidth (only for USB3) + * @allocated_down: Allocated downstream bandwidth (only for USB3) + * @bw_mode: DP bandwidth allocation mode registers can be used to + * determine consumed and allocated bandwidth + */ +struct tb_tunnel { + struct tb *tb; + struct tb_port *src_port; + struct tb_port *dst_port; + struct tb_path **paths; + size_t npaths; + int (*init)(struct tb_tunnel *tunnel); + void (*deinit)(struct tb_tunnel *tunnel); + int (*activate)(struct tb_tunnel *tunnel, bool activate); + int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up, + int *max_down); + int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up, + int *allocated_down); + int (*alloc_bandwidth)(struct tb_tunnel *tunnel, int *alloc_up, + int *alloc_down); + int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up, + int *consumed_down); + int (*release_unused_bandwidth)(struct tb_tunnel *tunnel); + void (*reclaim_available_bandwidth)(struct tb_tunnel *tunnel, + int *available_up, + int *available_down); + struct list_head list; + enum tb_tunnel_type type; + int max_up; + int max_down; + int allocated_up; + int allocated_down; + bool bw_mode; +}; + +struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down, + bool alloc_hopid); +struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, + struct tb_port *down); +struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, + bool alloc_hopid); +struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, + struct tb_port *out, int link_nr, + int max_up, int max_down); +struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, + struct tb_port *dst, int transmit_path, + int transmit_ring, int receive_path, + int receive_ring); +bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path, + int transmit_ring, int receive_path, int receive_ring); +struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down, + bool alloc_hopid); +struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, + struct tb_port *down, int max_up, + int max_down); + +void tb_tunnel_free(struct tb_tunnel *tunnel); +int tb_tunnel_activate(struct tb_tunnel *tunnel); +int tb_tunnel_restart(struct tb_tunnel *tunnel); +void tb_tunnel_deactivate(struct tb_tunnel *tunnel); +bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel); +bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel, + const struct tb_port *port); +int 
tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up, + int *max_down); +int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up, + int *allocated_down); +int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up, + int *alloc_down); +int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, + int *consumed_down); +int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel); +void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel, + int *available_up, + int *available_down); + +static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel) +{ + return tunnel->type == TB_TUNNEL_PCI; +} + +static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel) +{ + return tunnel->type == TB_TUNNEL_DP; +} + +static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel) +{ + return tunnel->type == TB_TUNNEL_DMA; +} + +static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel) +{ + return tunnel->type == TB_TUNNEL_USB3; +} + +#endif + diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c new file mode 100644 index 0000000000..05ddb224c4 --- /dev/null +++ b/drivers/thunderbolt/usb4.c @@ -0,0 +1,2863 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * USB4 specific functionality + * + * Copyright (C) 2019, Intel Corporation + * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> + * Rajmohan Mani <rajmohan.mani@intel.com> + */ + +#include <linux/delay.h> +#include <linux/ktime.h> +#include <linux/units.h> + +#include "sb_regs.h" +#include "tb.h" + +#define USB4_DATA_RETRIES 3 +#define USB4_DATA_DWORDS 16 + +enum usb4_sb_target { + USB4_SB_TARGET_ROUTER, + USB4_SB_TARGET_PARTNER, + USB4_SB_TARGET_RETIMER, +}; + +#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2) +#define USB4_NVM_READ_OFFSET_SHIFT 2 +#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24) +#define USB4_NVM_READ_LENGTH_SHIFT 24 + +#define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK +#define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT + +#define USB4_DROM_ADDRESS_MASK GENMASK(14, 2) +#define USB4_DROM_ADDRESS_SHIFT 2 +#define USB4_DROM_SIZE_MASK GENMASK(19, 15) +#define USB4_DROM_SIZE_SHIFT 15 + +#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0) + +#define USB4_BA_LENGTH_MASK GENMASK(7, 0) +#define USB4_BA_INDEX_MASK GENMASK(15, 0) + +enum usb4_ba_index { + USB4_BA_MAX_USB3 = 0x1, + USB4_BA_MIN_DP_AUX = 0x2, + USB4_BA_MIN_DP_MAIN = 0x3, + USB4_BA_MAX_PCIE = 0x4, + USB4_BA_MAX_HI = 0x5, +}; + +#define USB4_BA_VALUE_MASK GENMASK(31, 16) +#define USB4_BA_VALUE_SHIFT 16 + +static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode, + u32 *metadata, u8 *status, + const void *tx_data, size_t tx_dwords, + void *rx_data, size_t rx_dwords) +{ + u32 val; + int ret; + + if (metadata) { + ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); + if (ret) + return ret; + } + if (tx_dwords) { + ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9, + tx_dwords); + if (ret) + return ret; + } + + val = opcode | ROUTER_CS_26_OV; + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); + if (ret) + return ret; + + ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); + if (ret) + return ret; + + if (val & ROUTER_CS_26_ONS) + return -EOPNOTSUPP; + + if (status) + *status = (val & ROUTER_CS_26_STATUS_MASK) >> + ROUTER_CS_26_STATUS_SHIFT; + + if (metadata) { + ret = tb_sw_read(sw, metadata, 
TB_CFG_SWITCH, ROUTER_CS_25, 1); + if (ret) + return ret; + } + if (rx_dwords) { + ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9, + rx_dwords); + if (ret) + return ret; + } + + return 0; +} + +static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, + u8 *status, const void *tx_data, size_t tx_dwords, + void *rx_data, size_t rx_dwords) +{ + const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; + + if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS) + return -EINVAL; + + /* + * If the connection manager implementation provides USB4 router + * operation proxy callback, call it here instead of running the + * operation natively. + */ + if (cm_ops->usb4_switch_op) { + int ret; + + ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status, + tx_data, tx_dwords, rx_data, + rx_dwords); + if (ret != -EOPNOTSUPP) + return ret; + + /* + * If the proxy was not supported then run the native + * router operation instead. + */ + } + + return usb4_native_switch_op(sw, opcode, metadata, status, tx_data, + tx_dwords, rx_data, rx_dwords); +} + +static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode, + u32 *metadata, u8 *status) +{ + return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0); +} + +static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode, + u32 *metadata, u8 *status, + const void *tx_data, size_t tx_dwords, + void *rx_data, size_t rx_dwords) +{ + return __usb4_switch_op(sw, opcode, metadata, status, tx_data, + tx_dwords, rx_data, rx_dwords); +} + +static void usb4_switch_check_wakes(struct tb_switch *sw) +{ + bool wakeup_usb4 = false; + struct usb4_port *usb4; + struct tb_port *port; + bool wakeup = false; + u32 val; + + if (!device_may_wakeup(&sw->dev)) + return; + + if (tb_route(sw)) { + if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1)) + return; + + tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n", + (val & ROUTER_CS_6_WOPS) ? "yes" : "no", + (val & ROUTER_CS_6_WOUS) ? "yes" : "no"); + + wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS); + } + + /* + * Check for any downstream ports for USB4 wake, + * connection wake and disconnection wake. + */ + tb_switch_for_each_port(sw, port) { + if (!port->cap_usb4) + continue; + + if (tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_18, 1)) + break; + + tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n", + (val & PORT_CS_18_WOU4S) ? "yes" : "no", + (val & PORT_CS_18_WOCS) ? "yes" : "no", + (val & PORT_CS_18_WODS) ? "yes" : "no"); + + wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS | + PORT_CS_18_WODS); + + usb4 = port->usb4; + if (device_may_wakeup(&usb4->dev) && wakeup_usb4) + pm_wakeup_event(&usb4->dev, 0); + + wakeup |= wakeup_usb4; + } + + if (wakeup) + pm_wakeup_event(&sw->dev, 0); +} + +static bool link_is_usb4(struct tb_port *port) +{ + u32 val; + + if (!port->cap_usb4) + return false; + + if (tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_18, 1)) + return false; + + return !(val & PORT_CS_18_TCM); +} + +/** + * usb4_switch_setup() - Additional setup for USB4 device + * @sw: USB4 router to setup + * + * USB4 routers need additional settings in order to enable all the + * tunneling. This function enables USB and PCIe tunneling if it can be + * enabled (e.g the parent switch also supports them). If USB tunneling + * is not available for some reason (like that there is Thunderbolt 3 + * switch upstream) then the internal xHCI controller is enabled + * instead. 
+ * + * This does not set the configuration valid bit of the router. To do + * that call usb4_switch_configuration_valid(). + */ +int usb4_switch_setup(struct tb_switch *sw) +{ + struct tb_switch *parent = tb_switch_parent(sw); + struct tb_port *down; + bool tbt3, xhci; + u32 val = 0; + int ret; + + usb4_switch_check_wakes(sw); + + if (!tb_route(sw)) + return 0; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1); + if (ret) + return ret; + + down = tb_switch_downstream_port(sw); + sw->link_usb4 = link_is_usb4(down); + tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT"); + + xhci = val & ROUTER_CS_6_HCI; + tbt3 = !(val & ROUTER_CS_6_TNS); + + tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n", + tbt3 ? "yes" : "no", xhci ? "yes" : "no"); + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 && + tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) { + val |= ROUTER_CS_5_UTO; + xhci = false; + } + + /* + * Only enable PCIe tunneling if the parent router supports it + * and it is not disabled. + */ + if (tb_acpi_may_tunnel_pcie() && + tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) { + val |= ROUTER_CS_5_PTO; + /* + * xHCI can be enabled if PCIe tunneling is supported + * and the parent does not have any USB3 dowstream + * adapters (so we cannot do USB 3.x tunneling). + */ + if (xhci) + val |= ROUTER_CS_5_HCO; + } + + /* TBT3 supported by the CM */ + val |= ROUTER_CS_5_C3S; + + return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); +} + +/** + * usb4_switch_configuration_valid() - Set tunneling configuration to be valid + * @sw: USB4 router + * + * Sets configuration valid bit for the router. Must be called before + * any tunnels can be set through the router and after + * usb4_switch_setup() has been called. Can be called to host and device + * routers (does nothing for the latter). + * + * Returns %0 in success and negative errno otherwise. + */ +int usb4_switch_configuration_valid(struct tb_switch *sw) +{ + u32 val; + int ret; + + if (!tb_route(sw)) + return 0; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + val |= ROUTER_CS_5_CV; + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR, + ROUTER_CS_6_CR, 50); +} + +/** + * usb4_switch_read_uid() - Read UID from USB4 router + * @sw: USB4 router + * @uid: UID is stored here + * + * Reads 64-bit UID from USB4 router config space. + */ +int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid) +{ + return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2); +} + +static int usb4_switch_drom_read_block(void *data, + unsigned int dwaddress, void *buf, + size_t dwords) +{ + struct tb_switch *sw = data; + u8 status = 0; + u32 metadata; + int ret; + + metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK; + metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) & + USB4_DROM_ADDRESS_MASK; + + ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata, + &status, NULL, 0, buf, dwords); + if (ret) + return ret; + + return status ? -EIO : 0; +} + +/** + * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM + * @sw: USB4 router + * @address: Byte address inside DROM to start reading + * @buf: Buffer where the DROM content is stored + * @size: Number of bytes to read from DROM + * + * Uses USB4 router operations to read router DROM. 
For devices this + * should always work but for hosts it may return %-EOPNOTSUPP in which + * case the host router does not have DROM. + */ +int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size) +{ + return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES, + usb4_switch_drom_read_block, sw); +} + +/** + * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding + * @sw: USB4 router + * + * Checks whether conditions are met so that lane bonding can be + * established with the upstream router. Call only for device routers. + */ +bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) +{ + struct tb_port *up; + int ret; + u32 val; + + up = tb_upstream_port(sw); + ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1); + if (ret) + return false; + + return !!(val & PORT_CS_18_BE); +} + +/** + * usb4_switch_set_wake() - Enabled/disable wake + * @sw: USB4 router + * @flags: Wakeup flags (%0 to disable) + * + * Enables/disables router to wake up from sleep. + */ +int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags) +{ + struct usb4_port *usb4; + struct tb_port *port; + u64 route = tb_route(sw); + u32 val; + int ret; + + /* + * Enable wakes coming from all USB4 downstream ports (from + * child routers). For device routers do this also for the + * upstream USB4 port. + */ + tb_switch_for_each_port(sw, port) { + if (!tb_port_is_null(port)) + continue; + if (!route && tb_is_upstream_port(port)) + continue; + if (!port->cap_usb4) + continue; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4); + + if (tb_is_upstream_port(port)) { + val |= PORT_CS_19_WOU4; + } else { + bool configured = val & PORT_CS_19_PC; + usb4 = port->usb4; + + if (((flags & TB_WAKE_ON_CONNECT) | + device_may_wakeup(&usb4->dev)) && !configured) + val |= PORT_CS_19_WOC; + if (((flags & TB_WAKE_ON_DISCONNECT) | + device_may_wakeup(&usb4->dev)) && configured) + val |= PORT_CS_19_WOD; + if ((flags & TB_WAKE_ON_USB4) && configured) + val |= PORT_CS_19_WOU4; + } + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + } + + /* + * Enable wakes from PCIe, USB 3.x and DP on this router. Only + * needed for device routers. + */ + if (route) { + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD); + if (flags & TB_WAKE_ON_USB3) + val |= ROUTER_CS_5_WOU; + if (flags & TB_WAKE_ON_PCIE) + val |= ROUTER_CS_5_WOP; + if (flags & TB_WAKE_ON_DP) + val |= ROUTER_CS_5_WOD; + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + } + + return 0; +} + +/** + * usb4_switch_set_sleep() - Prepare the router to enter sleep + * @sw: USB4 router + * + * Sets sleep bit for the router. Returns when the router sleep ready + * bit has been asserted. 
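For context, an editor-added outline of the suspend-time sequence a connection manager might follow (the example_* name is invented and the wake flag choice is illustrative; the real connection manager picks the flags based on its wake policy and sleep type):

static int example_router_suspend(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;
	int ret;

	if (runtime)
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT |
			 TB_WAKE_ON_USB4;

	ret = usb4_switch_set_wake(sw, flags);
	if (ret)
		return ret;

	/* Router reports sleep ready before the domain is powered down */
	return usb4_switch_set_sleep(sw);
}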
+ */ +int usb4_switch_set_sleep(struct tb_switch *sw) +{ + int ret; + u32 val; + + /* Set sleep bit and wait for sleep ready to be asserted */ + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + val |= ROUTER_CS_5_SLP; + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR, + ROUTER_CS_6_SLPR, 500); +} + +/** + * usb4_switch_nvm_sector_size() - Return router NVM sector size + * @sw: USB4 router + * + * If the router supports NVM operations this function returns the NVM + * sector size in bytes. If NVM operations are not supported returns + * %-EOPNOTSUPP. + */ +int usb4_switch_nvm_sector_size(struct tb_switch *sw) +{ + u32 metadata; + u8 status; + int ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata, + &status); + if (ret) + return ret; + + if (status) + return status == 0x2 ? -EOPNOTSUPP : -EIO; + + return metadata & USB4_NVM_SECTOR_SIZE_MASK; +} + +static int usb4_switch_nvm_read_block(void *data, + unsigned int dwaddress, void *buf, size_t dwords) +{ + struct tb_switch *sw = data; + u8 status = 0; + u32 metadata; + int ret; + + metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) & + USB4_NVM_READ_LENGTH_MASK; + metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) & + USB4_NVM_READ_OFFSET_MASK; + + ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata, + &status, NULL, 0, buf, dwords); + if (ret) + return ret; + + return status ? -EIO : 0; +} + +/** + * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM + * @sw: USB4 router + * @address: Starting address in bytes + * @buf: Read data is placed here + * @size: How many bytes to read + * + * Reads NVM contents of the router. If NVM is not supported returns + * %-EOPNOTSUPP. + */ +int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size) +{ + return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES, + usb4_switch_nvm_read_block, sw); +} + +/** + * usb4_switch_nvm_set_offset() - Set NVM write offset + * @sw: USB4 router + * @address: Start offset + * + * Explicitly sets NVM write offset. Normally when writing to NVM this + * is done automatically by usb4_switch_nvm_write(). + * + * Returns %0 in success and negative errno if there was a failure. + */ +int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address) +{ + u32 metadata, dwaddress; + u8 status = 0; + int ret; + + dwaddress = address / 4; + metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) & + USB4_NVM_SET_OFFSET_MASK; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata, + &status); + if (ret) + return ret; + + return status ? -EIO : 0; +} + +static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress, + const void *buf, size_t dwords) +{ + struct tb_switch *sw = data; + u8 status; + int ret; + + ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status, + buf, dwords, NULL, 0); + if (ret) + return ret; + + return status ? -EIO : 0; +} + +/** + * usb4_switch_nvm_write() - Write to the router NVM + * @sw: USB4 router + * @address: Start address where to write in bytes + * @buf: Pointer to the data to write + * @size: Size of @buf in bytes + * + * Writes @buf to the router NVM using USB4 router operations. If NVM + * write is not supported returns %-EOPNOTSUPP. 
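Taken together, the NVM helpers implement the usual firmware upgrade flow. Below is an editor-added outline (the example_* name is invented, the image comes from user space in the real driver, and waiting for the router to reappear after the power cycle is omitted):

static int example_nvm_upgrade(struct tb_switch *sw, const void *image,
			       size_t size)
{
	u32 status;
	int ret;

	ret = usb4_switch_nvm_write(sw, 0, image, size);
	if (ret)
		return ret;

	/* Starts authentication; the router power cycles itself */
	ret = usb4_switch_nvm_authenticate(sw);
	if (ret)
		return ret;

	/* Once the router is back, this must be the first router operation */
	ret = usb4_switch_nvm_authenticate_status(sw, &status);
	if (ret)
		return ret;

	return status ? -EIO : 0;
}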
+ */ +int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, + const void *buf, size_t size) +{ + int ret; + + ret = usb4_switch_nvm_set_offset(sw, address); + if (ret) + return ret; + + return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES, + usb4_switch_nvm_write_next_block, sw); +} + +/** + * usb4_switch_nvm_authenticate() - Authenticate new NVM + * @sw: USB4 router + * + * After the new NVM has been written via usb4_switch_nvm_write(), this + * function triggers NVM authentication process. The router gets power + * cycled and if the authentication is successful the new NVM starts + * running. In case of failure returns negative errno. + * + * The caller should call usb4_switch_nvm_authenticate_status() to read + * the status of the authentication after power cycle. It should be the + * first router operation to avoid the status being lost. + */ +int usb4_switch_nvm_authenticate(struct tb_switch *sw) +{ + int ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL); + switch (ret) { + /* + * The router is power cycled once NVM_AUTH is started so it is + * expected to get any of the following errors back. + */ + case -EACCES: + case -ENOTCONN: + case -ETIMEDOUT: + return 0; + + default: + return ret; + } +} + +/** + * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate + * @sw: USB4 router + * @status: Status code of the operation + * + * The function checks if there is status available from the last NVM + * authenticate router operation. If there is status then %0 is returned + * and the status code is placed in @status. Returns negative errno in case + * of failure. + * + * Must be called before any other router operation. + */ +int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status) +{ + const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; + u16 opcode; + u32 val; + int ret; + + if (cm_ops->usb4_switch_nvm_authenticate_status) { + ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status); + if (ret != -EOPNOTSUPP) + return ret; + } + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); + if (ret) + return ret; + + /* Check that the opcode is correct */ + opcode = val & ROUTER_CS_26_OPCODE_MASK; + if (opcode == USB4_SWITCH_OP_NVM_AUTH) { + if (val & ROUTER_CS_26_OV) + return -EBUSY; + if (val & ROUTER_CS_26_ONS) + return -EOPNOTSUPP; + + *status = (val & ROUTER_CS_26_STATUS_MASK) >> + ROUTER_CS_26_STATUS_SHIFT; + } else { + *status = 0; + } + + return 0; +} + +/** + * usb4_switch_credits_init() - Read buffer allocation parameters + * @sw: USB4 router + * + * Reads @sw buffer allocation parameters and initializes @sw buffer + * allocation fields accordingly. Specifically @sw->credits_allocation + * is set to %true if these parameters can be used in tunneling. + * + * Returns %0 on success and negative errno otherwise. 
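+ *
+ * A sketch of how a connection manager might consume the result
+ * (illustrative only; the local credit variables are assumptions):
+ *
+ *	if (tb_switch_is_usb4(sw) && !usb4_switch_credits_init(sw) &&
+ *	    sw->credit_allocation) {
+ *		pcie_credits = sw->max_pcie_credits;
+ *		dma_credits = sw->max_dma_credits;
+ *	}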
+ */ +int usb4_switch_credits_init(struct tb_switch *sw) +{ + int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma; + int ret, length, i, nports; + const struct tb_port *port; + u32 data[USB4_DATA_DWORDS]; + u32 metadata = 0; + u8 status = 0; + + memset(data, 0, sizeof(data)); + ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata, + &status, NULL, 0, data, ARRAY_SIZE(data)); + if (ret) + return ret; + if (status) + return -EIO; + + length = metadata & USB4_BA_LENGTH_MASK; + if (WARN_ON(length > ARRAY_SIZE(data))) + return -EMSGSIZE; + + max_usb3 = -1; + min_dp_aux = -1; + min_dp_main = -1; + max_pcie = -1; + max_dma = -1; + + tb_sw_dbg(sw, "credit allocation parameters:\n"); + + for (i = 0; i < length; i++) { + u16 index, value; + + index = data[i] & USB4_BA_INDEX_MASK; + value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT; + + switch (index) { + case USB4_BA_MAX_USB3: + tb_sw_dbg(sw, " USB3: %u\n", value); + max_usb3 = value; + break; + case USB4_BA_MIN_DP_AUX: + tb_sw_dbg(sw, " DP AUX: %u\n", value); + min_dp_aux = value; + break; + case USB4_BA_MIN_DP_MAIN: + tb_sw_dbg(sw, " DP main: %u\n", value); + min_dp_main = value; + break; + case USB4_BA_MAX_PCIE: + tb_sw_dbg(sw, " PCIe: %u\n", value); + max_pcie = value; + break; + case USB4_BA_MAX_HI: + tb_sw_dbg(sw, " DMA: %u\n", value); + max_dma = value; + break; + default: + tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n", + index); + break; + } + } + + /* + * Validate the buffer allocation preferences. If we find + * issues, log a warning and fall back using the hard-coded + * values. + */ + + /* Host router must report baMaxHI */ + if (!tb_route(sw) && max_dma < 0) { + tb_sw_warn(sw, "host router is missing baMaxHI\n"); + goto err_invalid; + } + + nports = 0; + tb_switch_for_each_port(sw, port) { + if (tb_port_is_null(port)) + nports++; + } + + /* Must have DP buffer allocation (multiple USB4 ports) */ + if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) { + tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n"); + goto err_invalid; + } + + tb_switch_for_each_port(sw, port) { + if (tb_port_is_dpout(port) && min_dp_main < 0) { + tb_sw_warn(sw, "missing baMinDPmain"); + goto err_invalid; + } + if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) && + min_dp_aux < 0) { + tb_sw_warn(sw, "missing baMinDPaux"); + goto err_invalid; + } + if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) && + max_usb3 < 0) { + tb_sw_warn(sw, "missing baMaxUSB3"); + goto err_invalid; + } + if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) && + max_pcie < 0) { + tb_sw_warn(sw, "missing baMaxPCIe"); + goto err_invalid; + } + } + + /* + * Buffer allocation passed the validation so we can use it in + * path creation. + */ + sw->credit_allocation = true; + if (max_usb3 > 0) + sw->max_usb3_credits = max_usb3; + if (min_dp_aux > 0) + sw->min_dp_aux_credits = min_dp_aux; + if (min_dp_main > 0) + sw->min_dp_main_credits = min_dp_main; + if (max_pcie > 0) + sw->max_pcie_credits = max_pcie; + if (max_dma > 0) + sw->max_dma_credits = max_dma; + + return 0; + +err_invalid: + return -EINVAL; +} + +/** + * usb4_switch_query_dp_resource() - Query availability of DP IN resource + * @sw: USB4 router + * @in: DP IN adapter + * + * For DP tunneling this function can be used to query availability of + * DP IN resource. Returns true if the resource is available for DP + * tunneling, false otherwise. 
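+ *
+ * A sketch of the expected calling pattern (illustrative only;
+ * example_setup_dp_tunnel() is a made-up placeholder for the actual
+ * tunnel setup):
+ *
+ *	if (usb4_switch_query_dp_resource(sw, in)) {
+ *		ret = usb4_switch_alloc_dp_resource(sw, in);
+ *		if (!ret) {
+ *			ret = example_setup_dp_tunnel(sw, in);
+ *			if (ret)
+ *				usb4_switch_dealloc_dp_resource(sw, in);
+ *		}
+ *	}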
+ */ +bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + u32 metadata = in->port; + u8 status; + int ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata, + &status); + /* + * If DP resource allocation is not supported assume it is + * always available. + */ + if (ret == -EOPNOTSUPP) + return true; + if (ret) + return false; + + return !status; +} + +/** + * usb4_switch_alloc_dp_resource() - Allocate DP IN resource + * @sw: USB4 router + * @in: DP IN adapter + * + * Allocates DP IN resource for DP tunneling using USB4 router + * operations. If the resource was allocated returns %0. Otherwise + * returns negative errno, in particular %-EBUSY if the resource is + * already allocated. + */ +int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + u32 metadata = in->port; + u8 status; + int ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata, + &status); + if (ret == -EOPNOTSUPP) + return 0; + if (ret) + return ret; + + return status ? -EBUSY : 0; +} + +/** + * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource + * @sw: USB4 router + * @in: DP IN adapter + * + * Releases the previously allocated DP IN resource. + */ +int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + u32 metadata = in->port; + u8 status; + int ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata, + &status); + if (ret == -EOPNOTSUPP) + return 0; + if (ret) + return ret; + + return status ? -EIO : 0; +} + +static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port) +{ + struct tb_port *p; + int usb4_idx = 0; + + /* Assume port is primary */ + tb_switch_for_each_port(sw, p) { + if (!tb_port_is_null(p)) + continue; + if (tb_is_upstream_port(p)) + continue; + if (!p->link_nr) { + if (p == port) + break; + usb4_idx++; + } + } + + return usb4_idx; +} + +/** + * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter + * @sw: USB4 router + * @port: USB4 port + * + * USB4 routers have direct mapping between USB4 ports and PCIe + * downstream adapters where the PCIe topology is extended. This + * function returns the corresponding downstream PCIe adapter or %NULL + * if no such mapping was possible. + */ +struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, + const struct tb_port *port) +{ + int usb4_idx = usb4_port_idx(sw, port); + struct tb_port *p; + int pcie_idx = 0; + + /* Find PCIe down port matching usb4_port */ + tb_switch_for_each_port(sw, p) { + if (!tb_port_is_pcie_down(p)) + continue; + + if (pcie_idx == usb4_idx) + return p; + + pcie_idx++; + } + + return NULL; +} + +/** + * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter + * @sw: USB4 router + * @port: USB4 port + * + * USB4 routers have direct mapping between USB4 ports and USB 3.x + * downstream adapters where the USB 3.x topology is extended. This + * function returns the corresponding downstream USB 3.x adapter or + * %NULL if no such mapping was possible. 
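+ *
+ * A short usage sketch (illustrative only) for finding the adapter to
+ * use when extending a USB 3.x tunnel through @port:
+ *
+ *	struct tb_port *down;
+ *
+ *	down = usb4_switch_map_usb3_down(sw, port);
+ *	if (!down)
+ *		return -ENODEV;
+ *
+ * The PCIe equivalent is usb4_switch_map_pcie_down() above.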
+ */ +struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, + const struct tb_port *port) +{ + int usb4_idx = usb4_port_idx(sw, port); + struct tb_port *p; + int usb_idx = 0; + + /* Find USB3 down port matching usb4_port */ + tb_switch_for_each_port(sw, p) { + if (!tb_port_is_usb3_down(p)) + continue; + + if (usb_idx == usb4_idx) + return p; + + usb_idx++; + } + + return NULL; +} + +/** + * usb4_switch_add_ports() - Add USB4 ports for this router + * @sw: USB4 router + * + * For USB4 router finds all USB4 ports and registers devices for each. + * Can be called to any router. + * + * Return %0 in case of success and negative errno in case of failure. + */ +int usb4_switch_add_ports(struct tb_switch *sw) +{ + struct tb_port *port; + + if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw)) + return 0; + + tb_switch_for_each_port(sw, port) { + struct usb4_port *usb4; + + if (!tb_port_is_null(port)) + continue; + if (!port->cap_usb4) + continue; + + usb4 = usb4_port_device_add(port); + if (IS_ERR(usb4)) { + usb4_switch_remove_ports(sw); + return PTR_ERR(usb4); + } + + port->usb4 = usb4; + } + + return 0; +} + +/** + * usb4_switch_remove_ports() - Removes USB4 ports from this router + * @sw: USB4 router + * + * Unregisters previously registered USB4 ports. + */ +void usb4_switch_remove_ports(struct tb_switch *sw) +{ + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + if (port->usb4) { + usb4_port_device_remove(port->usb4); + port->usb4 = NULL; + } + } +} + +/** + * usb4_port_unlock() - Unlock USB4 downstream port + * @port: USB4 port to unlock + * + * Unlocks USB4 downstream port so that the connection manager can + * access the router below this port. + */ +int usb4_port_unlock(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1); + if (ret) + return ret; + + val &= ~ADP_CS_4_LCK; + return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1); +} + +/** + * usb4_port_hotplug_enable() - Enables hotplug for a port + * @port: USB4 port to operate on + * + * Enables hot plug events on a given port. This is only intended + * to be used on lane, DP-IN, and DP-OUT adapters. + */ +int usb4_port_hotplug_enable(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1); + if (ret) + return ret; + + val &= ~ADP_CS_5_DHP; + return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1); +} + +static int usb4_port_set_configured(struct tb_port *port, bool configured) +{ + int ret; + u32 val; + + if (!port->cap_usb4) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + if (configured) + val |= PORT_CS_19_PC; + else + val &= ~PORT_CS_19_PC; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); +} + +/** + * usb4_port_configure() - Set USB4 port configured + * @port: USB4 router + * + * Sets the USB4 link to be configured for power management purposes. + */ +int usb4_port_configure(struct tb_port *port) +{ + return usb4_port_set_configured(port, true); +} + +/** + * usb4_port_unconfigure() - Set USB4 port unconfigured + * @port: USB4 router + * + * Sets the USB4 link to be unconfigured for power management purposes. 
+ */ +void usb4_port_unconfigure(struct tb_port *port) +{ + usb4_port_set_configured(port, false); +} + +static int usb4_set_xdomain_configured(struct tb_port *port, bool configured) +{ + int ret; + u32 val; + + if (!port->cap_usb4) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + if (configured) + val |= PORT_CS_19_PID; + else + val &= ~PORT_CS_19_PID; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); +} + +/** + * usb4_port_configure_xdomain() - Configure port for XDomain + * @port: USB4 port connected to another host + * @xd: XDomain that is connected to the port + * + * Marks the USB4 port as being connected to another host and updates + * the link type. Returns %0 in success and negative errno in failure. + */ +int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd) +{ + xd->link_usb4 = link_is_usb4(port); + return usb4_set_xdomain_configured(port, true); +} + +/** + * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain + * @port: USB4 port that was connected to another host + * + * Clears USB4 port from being marked as XDomain. + */ +void usb4_port_unconfigure_xdomain(struct tb_port *port) +{ + usb4_set_xdomain_configured(port, false); +} + +static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit, + u32 value, int timeout_msec) +{ + ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); + + do { + u32 val; + int ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1); + if (ret) + return ret; + + if ((val & bit) == value) + return 0; + + usleep_range(50, 100); + } while (ktime_before(ktime_get(), timeout)); + + return -ETIMEDOUT; +} + +static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords) +{ + if (dwords > USB4_DATA_DWORDS) + return -EINVAL; + + return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2, + dwords); +} + +static int usb4_port_write_data(struct tb_port *port, const void *data, + size_t dwords) +{ + if (dwords > USB4_DATA_DWORDS) + return -EINVAL; + + return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2, + dwords); +} + +static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, + u8 index, u8 reg, void *buf, u8 size) +{ + size_t dwords = DIV_ROUND_UP(size, 4); + int ret; + u32 val; + + if (!port->cap_usb4) + return -EINVAL; + + val = reg; + val |= size << PORT_CS_1_LENGTH_SHIFT; + val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK; + if (target == USB4_SB_TARGET_RETIMER) + val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT); + val |= PORT_CS_1_PND; + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_1, 1); + if (ret) + return ret; + + ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1, + PORT_CS_1_PND, 0, 500); + if (ret) + return ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_1, 1); + if (ret) + return ret; + + if (val & PORT_CS_1_NR) + return -ENODEV; + if (val & PORT_CS_1_RC) + return -EIO; + + return buf ? 
usb4_port_read_data(port, buf, dwords) : 0; +} + +static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target, + u8 index, u8 reg, const void *buf, u8 size) +{ + size_t dwords = DIV_ROUND_UP(size, 4); + int ret; + u32 val; + + if (!port->cap_usb4) + return -EINVAL; + + if (buf) { + ret = usb4_port_write_data(port, buf, dwords); + if (ret) + return ret; + } + + val = reg; + val |= size << PORT_CS_1_LENGTH_SHIFT; + val |= PORT_CS_1_WNR_WRITE; + val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK; + if (target == USB4_SB_TARGET_RETIMER) + val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT); + val |= PORT_CS_1_PND; + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_1, 1); + if (ret) + return ret; + + ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1, + PORT_CS_1_PND, 0, 500); + if (ret) + return ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_1, 1); + if (ret) + return ret; + + if (val & PORT_CS_1_NR) + return -ENODEV; + if (val & PORT_CS_1_RC) + return -EIO; + + return 0; +} + +static int usb4_port_sb_opcode_err_to_errno(u32 val) +{ + switch (val) { + case 0: + return 0; + case USB4_SB_OPCODE_ERR: + return -EAGAIN; + case USB4_SB_OPCODE_ONS: + return -EOPNOTSUPP; + default: + return -EIO; + } +} + +static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target, + u8 index, enum usb4_sb_opcode opcode, int timeout_msec) +{ + ktime_t timeout; + u32 val; + int ret; + + val = opcode; + ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val, + sizeof(val)); + if (ret) + return ret; + + timeout = ktime_add_ms(ktime_get(), timeout_msec); + + do { + /* Check results */ + ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE, + &val, sizeof(val)); + if (ret) + return ret; + + if (val != opcode) + return usb4_port_sb_opcode_err_to_errno(val); + } while (ktime_before(ktime_get(), timeout)); + + return -ETIMEDOUT; +} + +static int usb4_port_set_router_offline(struct tb_port *port, bool offline) +{ + u32 val = !offline; + int ret; + + ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_METADATA, &val, sizeof(val)); + if (ret) + return ret; + + val = USB4_SB_OPCODE_ROUTER_OFFLINE; + return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_OPCODE, &val, sizeof(val)); +} + +/** + * usb4_port_router_offline() - Put the USB4 port to offline mode + * @port: USB4 port + * + * This function puts the USB4 port into offline mode. In this mode the + * port does not react on hotplug events anymore. This needs to be + * called before retimer access is done when the USB4 links is not up. + * + * Returns %0 in case of success and negative errno if there was an + * error. + */ +int usb4_port_router_offline(struct tb_port *port) +{ + return usb4_port_set_router_offline(port, true); +} + +/** + * usb4_port_router_online() - Put the USB4 port back to online + * @port: USB4 port + * + * Makes the USB4 port functional again. + */ +int usb4_port_router_online(struct tb_port *port) +{ + return usb4_port_set_router_offline(port, false); +} + +/** + * usb4_port_enumerate_retimers() - Send RT broadcast transaction + * @port: USB4 port + * + * This forces the USB4 port to send broadcast RT transaction which + * makes the retimers on the link to assign index to themselves. Returns + * %0 in case of success and negative errno if there was an error. 
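+ *
+ * A sketch of offline retimer access (illustrative only; real callers
+ * also hold the domain lock and a runtime PM reference):
+ *
+ *	ret = usb4_port_router_offline(port);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = usb4_port_enumerate_retimers(port);
+ *	if (ret)
+ *		usb4_port_router_online(port);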
+ */ +int usb4_port_enumerate_retimers(struct tb_port *port) +{ + u32 val; + + val = USB4_SB_OPCODE_ENUMERATE_RETIMERS; + return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_OPCODE, &val, sizeof(val)); +} + +/** + * usb4_port_clx_supported() - Check if CLx is supported by the link + * @port: Port to check for CLx support for + * + * PORT_CS_18_CPS bit reflects if the link supports CLx including + * active cables (if connected on the link). + */ +bool usb4_port_clx_supported(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_18, 1); + if (ret) + return false; + + return !!(val & PORT_CS_18_CPS); +} + +/** + * usb4_port_margining_caps() - Read USB4 port marginig capabilities + * @port: USB4 port + * @caps: Array with at least two elements to hold the results + * + * Reads the USB4 port lane margining capabilities into @caps. + */ +int usb4_port_margining_caps(struct tb_port *port, u32 *caps) +{ + int ret; + + ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500); + if (ret) + return ret; + + return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_DATA, caps, sizeof(*caps) * 2); +} + +/** + * usb4_port_hw_margin() - Run hardware lane margining on port + * @port: USB4 port + * @lanes: Which lanes to run (must match the port capabilities). Can be + * %0, %1 or %7. + * @ber_level: BER level contour value + * @timing: Perform timing margining instead of voltage + * @right_high: Use Right/high margin instead of left/low + * @results: Array with at least two elements to hold the results + * + * Runs hardware lane margining on USB4 port and returns the result in + * @results. + */ +int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes, + unsigned int ber_level, bool timing, bool right_high, + u32 *results) +{ + u32 val; + int ret; + + val = lanes; + if (timing) + val |= USB4_MARGIN_HW_TIME; + if (right_high) + val |= USB4_MARGIN_HW_RH; + if (ber_level) + val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) & + USB4_MARGIN_HW_BER_MASK; + + ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_METADATA, &val, sizeof(val)); + if (ret) + return ret; + + ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500); + if (ret) + return ret; + + return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_DATA, results, sizeof(*results) * 2); +} + +/** + * usb4_port_sw_margin() - Run software lane margining on port + * @port: USB4 port + * @lanes: Which lanes to run (must match the port capabilities). Can be + * %0, %1 or %7. + * @timing: Perform timing margining instead of voltage + * @right_high: Use Right/high margin instead of left/low + * @counter: What to do with the error counter + * + * Runs software lane margining on USB4 port. Read back the error + * counters by calling usb4_port_sw_margin_errors(). Returns %0 in + * success and negative errno otherwise. 
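+ *
+ * A sketch of a single software margining step (illustrative only; the
+ * @lanes, @timing, @right_high and @counter values are assumed to come
+ * from the port margining capabilities):
+ *
+ *	u32 errors = 0;
+ *
+ *	ret = usb4_port_sw_margin(port, lanes, timing, right_high, counter);
+ *	if (!ret)
+ *		ret = usb4_port_sw_margin_errors(port, &errors);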
+ */ +int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing, + bool right_high, u32 counter) +{ + u32 val; + int ret; + + val = lanes; + if (timing) + val |= USB4_MARGIN_SW_TIME; + if (right_high) + val |= USB4_MARGIN_SW_RH; + val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) & + USB4_MARGIN_SW_COUNTER_MASK; + + ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_METADATA, &val, sizeof(val)); + if (ret) + return ret; + + return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500); +} + +/** + * usb4_port_sw_margin_errors() - Read the software margining error counters + * @port: USB4 port + * @errors: Error metadata is copied here. + * + * This reads back the software margining error counters from the port. + * Returns %0 in success and negative errno otherwise. + */ +int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors) +{ + int ret; + + ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150); + if (ret) + return ret; + + return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_METADATA, errors, sizeof(*errors)); +} + +static inline int usb4_port_retimer_op(struct tb_port *port, u8 index, + enum usb4_sb_opcode opcode, + int timeout_msec) +{ + return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode, + timeout_msec); +} + +/** + * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions + * @port: USB4 port + * @index: Retimer index + * + * Enables sideband channel transations on SBTX. Can be used when USB4 + * link does not go up, for example if there is no device connected. + */ +int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index) +{ + int ret; + + ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX, + 500); + + if (ret != -ENODEV) + return ret; + + /* + * Per the USB4 retimer spec, the retimer is not required to + * send an RT (Retimer Transaction) response for the first + * SET_INBOUND_SBTX command + */ + return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX, + 500); +} + +/** + * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions + * @port: USB4 port + * @index: Retimer index + * + * Disables sideband channel transations on SBTX. The reverse of + * usb4_port_retimer_set_inbound_sbtx(). + */ +int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index) +{ + return usb4_port_retimer_op(port, index, + USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500); +} + +/** + * usb4_port_retimer_read() - Read from retimer sideband registers + * @port: USB4 port + * @index: Retimer index + * @reg: Sideband register to read + * @buf: Data from @reg is stored here + * @size: Number of bytes to read + * + * Function reads retimer sideband registers starting from @reg. The + * retimer is connected to @port at @index. Returns %0 in case of + * success, and read data is copied to @buf. If there is no retimer + * present at given @index returns %-ENODEV. In any other failure + * returns negative errno. 
+ */ +int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf, + u8 size) +{ + return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf, + size); +} + +/** + * usb4_port_retimer_write() - Write to retimer sideband registers + * @port: USB4 port + * @index: Retimer index + * @reg: Sideband register to write + * @buf: Data that is written starting from @reg + * @size: Number of bytes to write + * + * Writes retimer sideband registers starting from @reg. The retimer is + * connected to @port at @index. Returns %0 in case of success. If there + * is no retimer present at given @index returns %-ENODEV. In any other + * failure returns negative errno. + */ +int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg, + const void *buf, u8 size) +{ + return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf, + size); +} + +/** + * usb4_port_retimer_is_last() - Is the retimer last on-board retimer + * @port: USB4 port + * @index: Retimer index + * + * If the retimer at @index is last one (connected directly to the + * Type-C port) this function returns %1. If it is not returns %0. If + * the retimer is not present returns %-ENODEV. Otherwise returns + * negative errno. + */ +int usb4_port_retimer_is_last(struct tb_port *port, u8 index) +{ + u32 metadata; + int ret; + + ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER, + 500); + if (ret) + return ret; + + ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata, + sizeof(metadata)); + return ret ? ret : metadata & 1; +} + +/** + * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size + * @port: USB4 port + * @index: Retimer index + * + * Reads NVM sector size (in bytes) of a retimer at @index. This + * operation can be used to determine whether the retimer supports NVM + * upgrade for example. Returns sector size in bytes or negative errno + * in case of error. Specifically returns %-ENODEV if there is no + * retimer at @index. + */ +int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index) +{ + u32 metadata; + int ret; + + ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE, + 500); + if (ret) + return ret; + + ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata, + sizeof(metadata)); + return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK; +} + +/** + * usb4_port_retimer_nvm_set_offset() - Set NVM write offset + * @port: USB4 port + * @index: Retimer index + * @address: Start offset + * + * Exlicitly sets NVM write offset. Normally when writing to NVM this is + * done automatically by usb4_port_retimer_nvm_write(). + * + * Returns %0 in success and negative errno if there was a failure. 
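+ *
+ * In the common case callers do not use this directly but go through
+ * usb4_port_retimer_nvm_write(), roughly as in this sketch (illustrative
+ * only; @image and @image_size are assumptions):
+ *
+ *	ret = usb4_port_retimer_nvm_write(port, index, 0, image, image_size);
+ *	if (!ret)
+ *		ret = usb4_port_retimer_nvm_authenticate(port, index);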
+ */ +int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index, + unsigned int address) +{ + u32 metadata, dwaddress; + int ret; + + dwaddress = address / 4; + metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) & + USB4_NVM_SET_OFFSET_MASK; + + ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata, + sizeof(metadata)); + if (ret) + return ret; + + return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET, + 500); +} + +struct retimer_info { + struct tb_port *port; + u8 index; +}; + +static int usb4_port_retimer_nvm_write_next_block(void *data, + unsigned int dwaddress, const void *buf, size_t dwords) + +{ + const struct retimer_info *info = data; + struct tb_port *port = info->port; + u8 index = info->index; + int ret; + + ret = usb4_port_retimer_write(port, index, USB4_SB_DATA, + buf, dwords * 4); + if (ret) + return ret; + + return usb4_port_retimer_op(port, index, + USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000); +} + +/** + * usb4_port_retimer_nvm_write() - Write to retimer NVM + * @port: USB4 port + * @index: Retimer index + * @address: Byte address where to start the write + * @buf: Data to write + * @size: Size in bytes how much to write + * + * Writes @size bytes from @buf to the retimer NVM. Used for NVM + * upgrade. Returns %0 if the data was written successfully and negative + * errno in case of failure. Specifically returns %-ENODEV if there is + * no retimer at @index. + */ +int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address, + const void *buf, size_t size) +{ + struct retimer_info info = { .port = port, .index = index }; + int ret; + + ret = usb4_port_retimer_nvm_set_offset(port, index, address); + if (ret) + return ret; + + return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES, + usb4_port_retimer_nvm_write_next_block, &info); +} + +/** + * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade + * @port: USB4 port + * @index: Retimer index + * + * After the new NVM image has been written via usb4_port_retimer_nvm_write() + * this function can be used to trigger the NVM upgrade process. If + * successful the retimer restarts with the new NVM and may not have the + * index set so one needs to call usb4_port_enumerate_retimers() to + * force index to be assigned. + */ +int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index) +{ + u32 val; + + /* + * We need to use the raw operation here because once the + * authentication completes the retimer index is not set anymore + * so we do not get back the status now. + */ + val = USB4_SB_OPCODE_NVM_AUTH_WRITE; + return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_OPCODE, &val, sizeof(val)); +} + +/** + * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade + * @port: USB4 port + * @index: Retimer index + * @status: Raw status code read from metadata + * + * This can be called after usb4_port_retimer_nvm_authenticate() and + * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade. + * + * Returns %0 if the authentication status was successfully read. The + * completion metadata (the result) is then stored into @status. If + * reading the status fails, returns negative errno. 
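+ *
+ * A sketch of the post-upgrade sequence (illustrative only): after the
+ * retimer restarts its index is lost, so enumerate first and then read
+ * the result:
+ *
+ *	ret = usb4_port_enumerate_retimers(port);
+ *	if (!ret)
+ *		ret = usb4_port_retimer_nvm_authenticate_status(port, index,
+ *								&status);
+ *
+ * A non-zero @status then indicates that the upgrade itself failed.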
+ */ +int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index, + u32 *status) +{ + u32 metadata, val; + int ret; + + ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val, + sizeof(val)); + if (ret) + return ret; + + ret = usb4_port_sb_opcode_err_to_errno(val); + switch (ret) { + case 0: + *status = 0; + return 0; + + case -EAGAIN: + ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, + &metadata, sizeof(metadata)); + if (ret) + return ret; + + *status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK; + return 0; + + default: + return ret; + } +} + +static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress, + void *buf, size_t dwords) +{ + const struct retimer_info *info = data; + struct tb_port *port = info->port; + u8 index = info->index; + u32 metadata; + int ret; + + metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT; + if (dwords < USB4_DATA_DWORDS) + metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT; + + ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata, + sizeof(metadata)); + if (ret) + return ret; + + ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500); + if (ret) + return ret; + + return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf, + dwords * 4); +} + +/** + * usb4_port_retimer_nvm_read() - Read contents of retimer NVM + * @port: USB4 port + * @index: Retimer index + * @address: NVM address (in bytes) to start reading + * @buf: Data read from NVM is stored here + * @size: Number of bytes to read + * + * Reads retimer NVM and copies the contents to @buf. Returns %0 if the + * read was successful and negative errno in case of failure. + * Specifically returns %-ENODEV if there is no retimer at @index. + */ +int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index, + unsigned int address, void *buf, size_t size) +{ + struct retimer_info info = { .port = port, .index = index }; + + return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES, + usb4_port_retimer_nvm_read_block, &info); +} + +static inline unsigned int +usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw) +{ + /* Take the possible bandwidth limitation into account */ + if (port->max_bw) + return min(bw, port->max_bw); + return bw; +} + +/** + * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate + * @port: USB3 adapter port + * + * Return maximum supported link rate of a USB3 adapter in Mb/s. + * Negative errno in case of error. + */ +int usb4_usb3_port_max_link_rate(struct tb_port *port) +{ + int ret, lr; + u32 val; + + if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port)) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_4, 1); + if (ret) + return ret; + + lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT; + ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000; + + return usb4_usb3_port_max_bandwidth(port, ret); +} + +/** + * usb4_usb3_port_actual_link_rate() - Established USB3 link rate + * @port: USB3 adapter port + * + * Return actual established link rate of a USB3 adapter in Mb/s. If the + * link is not up returns %0 and negative errno in case of failure. 
+ */ +int usb4_usb3_port_actual_link_rate(struct tb_port *port) +{ + int ret, lr; + u32 val; + + if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port)) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_4, 1); + if (ret) + return ret; + + if (!(val & ADP_USB3_CS_4_ULV)) + return 0; + + lr = val & ADP_USB3_CS_4_ALR_MASK; + ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000; + + return usb4_usb3_port_max_bandwidth(port, ret); +} + +static int usb4_usb3_port_cm_request(struct tb_port *port, bool request) +{ + int ret; + u32 val; + + if (!tb_port_is_usb3_down(port)) + return -EINVAL; + if (tb_route(port->sw)) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_2, 1); + if (ret) + return ret; + + if (request) + val |= ADP_USB3_CS_2_CMR; + else + val &= ~ADP_USB3_CS_2_CMR; + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_2, 1); + if (ret) + return ret; + + /* + * We can use val here directly as the CMR bit is in the same place + * as HCA. Just mask out others. + */ + val &= ADP_USB3_CS_2_CMR; + return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1, + ADP_USB3_CS_1_HCA, val, 1500); +} + +static inline int usb4_usb3_port_set_cm_request(struct tb_port *port) +{ + return usb4_usb3_port_cm_request(port, true); +} + +static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port) +{ + return usb4_usb3_port_cm_request(port, false); +} + +static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale) +{ + unsigned long uframes; + + uframes = bw * 512UL << scale; + return DIV_ROUND_CLOSEST(uframes * 8000, MEGA); +} + +static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale) +{ + unsigned long uframes; + + /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */ + uframes = ((unsigned long)mbps * MEGA) / 8000; + return DIV_ROUND_UP(uframes, 512UL << scale); +} + +static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port, + int *upstream_bw, + int *downstream_bw) +{ + u32 val, bw, scale; + int ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_2, 1); + if (ret) + return ret; + + ret = tb_port_read(port, &scale, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_3, 1); + if (ret) + return ret; + + scale &= ADP_USB3_CS_3_SCALE_MASK; + + bw = val & ADP_USB3_CS_2_AUBW_MASK; + *upstream_bw = usb3_bw_to_mbps(bw, scale); + + bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT; + *downstream_bw = usb3_bw_to_mbps(bw, scale); + + return 0; +} + +/** + * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3 + * @port: USB3 adapter port + * @upstream_bw: Allocated upstream bandwidth is stored here + * @downstream_bw: Allocated downstream bandwidth is stored here + * + * Stores currently allocated USB3 bandwidth into @upstream_bw and + * @downstream_bw in Mb/s. Returns %0 in case of success and negative + * errno in failure. 
+ */ +int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw, + int *downstream_bw) +{ + int ret; + + ret = usb4_usb3_port_set_cm_request(port); + if (ret) + return ret; + + ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw, + downstream_bw); + usb4_usb3_port_clear_cm_request(port); + + return ret; +} + +static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port, + int *upstream_bw, + int *downstream_bw) +{ + u32 val, bw, scale; + int ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_1, 1); + if (ret) + return ret; + + ret = tb_port_read(port, &scale, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_3, 1); + if (ret) + return ret; + + scale &= ADP_USB3_CS_3_SCALE_MASK; + + bw = val & ADP_USB3_CS_1_CUBW_MASK; + *upstream_bw = usb3_bw_to_mbps(bw, scale); + + bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT; + *downstream_bw = usb3_bw_to_mbps(bw, scale); + + return 0; +} + +static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port, + int upstream_bw, + int downstream_bw) +{ + u32 val, ubw, dbw, scale; + int ret, max_bw; + + /* Figure out suitable scale */ + scale = 0; + max_bw = max(upstream_bw, downstream_bw); + while (scale < 64) { + if (mbps_to_usb3_bw(max_bw, scale) < 4096) + break; + scale++; + } + + if (WARN_ON(scale >= 64)) + return -EINVAL; + + ret = tb_port_write(port, &scale, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_3, 1); + if (ret) + return ret; + + ubw = mbps_to_usb3_bw(upstream_bw, scale); + dbw = mbps_to_usb3_bw(downstream_bw, scale); + + tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale); + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_2, 1); + if (ret) + return ret; + + val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK); + val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT; + val |= ubw; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_2, 1); +} + +/** + * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3 + * @port: USB3 adapter port + * @upstream_bw: New upstream bandwidth + * @downstream_bw: New downstream bandwidth + * + * This can be used to set how much bandwidth is allocated for the USB3 + * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the + * new values programmed to the USB3 adapter allocation registers. If + * the values are lower than what is currently consumed the allocation + * is set to what is currently consumed instead (consumed bandwidth + * cannot be taken away by CM). The actual new values are returned in + * @upstream_bw and @downstream_bw. + * + * Returns %0 in case of success and negative errno if there was a + * failure. 
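+ *
+ * A sketch of asking for more isochronous bandwidth (illustrative only;
+ * the requested numbers are arbitrary examples):
+ *
+ *	int up = 2000, down = 2000;
+ *
+ *	ret = usb4_usb3_port_allocate_bandwidth(port, &up, &down);
+ *	if (!ret)
+ *		tb_port_dbg(port, "allocated %d/%d Mb/s\n", up, down);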
+ */ +int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw, + int *downstream_bw) +{ + int ret, consumed_up, consumed_down, allocate_up, allocate_down; + + ret = usb4_usb3_port_set_cm_request(port); + if (ret) + return ret; + + ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up, + &consumed_down); + if (ret) + goto err_request; + + /* Don't allow it go lower than what is consumed */ + allocate_up = max(*upstream_bw, consumed_up); + allocate_down = max(*downstream_bw, consumed_down); + + ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up, + allocate_down); + if (ret) + goto err_request; + + *upstream_bw = allocate_up; + *downstream_bw = allocate_down; + +err_request: + usb4_usb3_port_clear_cm_request(port); + return ret; +} + +/** + * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth + * @port: USB3 adapter port + * @upstream_bw: New allocated upstream bandwidth + * @downstream_bw: New allocated downstream bandwidth + * + * Releases USB3 allocated bandwidth down to what is actually consumed. + * The new bandwidth is returned in @upstream_bw and @downstream_bw. + * + * Returns 0% in success and negative errno in case of failure. + */ +int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw, + int *downstream_bw) +{ + int ret, consumed_up, consumed_down; + + ret = usb4_usb3_port_set_cm_request(port); + if (ret) + return ret; + + ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up, + &consumed_down); + if (ret) + goto err_request; + + /* + * Always keep 1000 Mb/s to make sure xHCI has at least some + * bandwidth available for isochronous traffic. + */ + if (consumed_up < 1000) + consumed_up = 1000; + if (consumed_down < 1000) + consumed_down = 1000; + + ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up, + consumed_down); + if (ret) + goto err_request; + + *upstream_bw = consumed_up; + *downstream_bw = consumed_down; + +err_request: + usb4_usb3_port_clear_cm_request(port); + return ret; +} + +static bool is_usb4_dpin(const struct tb_port *port) +{ + if (!tb_port_is_dpin(port)) + return false; + if (!tb_switch_is_usb4(port->sw)) + return false; + return true; +} + +/** + * usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter + * @port: DP IN adapter + * @cm_id: CM ID to assign + * + * Sets CM ID for the @port. Returns %0 on success and negative errno + * otherwise. Speficially returns %-EOPNOTSUPP if the @port does not + * support this. + */ +int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id) +{ + u32 val; + int ret; + + if (!is_usb4_dpin(port)) + return -EOPNOTSUPP; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); + if (ret) + return ret; + + val &= ~ADP_DP_CS_2_CM_ID_MASK; + val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); +} + +/** + * usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode + * supported + * @port: DP IN adapter to check + * + * Can be called to any DP IN adapter. Returns true if the adapter + * supports USB4 bandwidth allocation mode, false otherwise. 
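+ *
+ * A sketch of how a connection manager might probe and opt in
+ * (illustrative only; @in is the DP IN adapter):
+ *
+ *	if (usb4_dp_port_bandwidth_mode_supported(in))
+ *		ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
+ *
+ * Whether the DPTX side has actually turned the mode on can then be
+ * checked with usb4_dp_port_bandwidth_mode_enabled().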
+ */ +bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port) +{ + int ret; + u32 val; + + if (!is_usb4_dpin(port)) + return false; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + DP_LOCAL_CAP, 1); + if (ret) + return false; + + return !!(val & DP_COMMON_CAP_BW_MODE); +} + +/** + * usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode + * enabled + * @port: DP IN adapter to check + * + * Can be called to any DP IN adapter. Returns true if the bandwidth + * allocation mode has been enabled, false otherwise. + */ +bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port) +{ + int ret; + u32 val; + + if (!is_usb4_dpin(port)) + return false; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_8, 1); + if (ret) + return false; + + return !!(val & ADP_DP_CS_8_DPME); +} + +/** + * usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for + * bandwidth allocation mode + * @port: DP IN adapter + * @supported: Does the CM support bandwidth allocation mode + * + * Can be called to any DP IN adapter. Sets or clears the CM support bit + * of the DP IN adapter. Returns %0 in success and negative errno + * otherwise. Specifically returns %-OPNOTSUPP if the passed in adapter + * does not support this. + */ +int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port, + bool supported) +{ + u32 val; + int ret; + + if (!is_usb4_dpin(port)) + return -EOPNOTSUPP; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); + if (ret) + return ret; + + if (supported) + val |= ADP_DP_CS_2_CMMS; + else + val &= ~ADP_DP_CS_2_CMMS; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); +} + +/** + * usb4_dp_port_group_id() - Return Group ID assigned for the adapter + * @port: DP IN adapter + * + * Reads bandwidth allocation Group ID from the DP IN adapter and + * returns it. If the adapter does not support setting Group_ID + * %-EOPNOTSUPP is returned. + */ +int usb4_dp_port_group_id(struct tb_port *port) +{ + u32 val; + int ret; + + if (!is_usb4_dpin(port)) + return -EOPNOTSUPP; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); + if (ret) + return ret; + + return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT; +} + +/** + * usb4_dp_port_set_group_id() - Set adapter Group ID + * @port: DP IN adapter + * @group_id: Group ID for the adapter + * + * Sets bandwidth allocation mode Group ID for the DP IN adapter. + * Returns %0 in case of success and negative errno otherwise. + * Specifically returns %-EOPNOTSUPP if the adapter does not support + * this. + */ +int usb4_dp_port_set_group_id(struct tb_port *port, int group_id) +{ + u32 val; + int ret; + + if (!is_usb4_dpin(port)) + return -EOPNOTSUPP; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); + if (ret) + return ret; + + val &= ~ADP_DP_CS_2_GROUP_ID_MASK; + val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); +} + +/** + * usb4_dp_port_nrd() - Read non-reduced rate and lanes + * @port: DP IN adapter + * @rate: Non-reduced rate in Mb/s is placed here + * @lanes: Non-reduced lanes are placed here + * + * Reads the non-reduced rate and lanes from the DP IN adapter. Returns + * %0 in success and negative errno otherwise. Specifically returns + * %-EOPNOTSUPP if the adapter does not support this. 
+ */ +int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes) +{ + u32 val, tmp; + int ret; + + if (!is_usb4_dpin(port)) + return -EOPNOTSUPP; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); + if (ret) + return ret; + + tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT; + switch (tmp) { + case DP_COMMON_CAP_RATE_RBR: + *rate = 1620; + break; + case DP_COMMON_CAP_RATE_HBR: + *rate = 2700; + break; + case DP_COMMON_CAP_RATE_HBR2: + *rate = 5400; + break; + case DP_COMMON_CAP_RATE_HBR3: + *rate = 8100; + break; + } + + tmp = val & ADP_DP_CS_2_NRD_MLC_MASK; + switch (tmp) { + case DP_COMMON_CAP_1_LANE: + *lanes = 1; + break; + case DP_COMMON_CAP_2_LANES: + *lanes = 2; + break; + case DP_COMMON_CAP_4_LANES: + *lanes = 4; + break; + } + + return 0; +} + +/** + * usb4_dp_port_set_nrd() - Set non-reduced rate and lanes + * @port: DP IN adapter + * @rate: Non-reduced rate in Mb/s + * @lanes: Non-reduced lanes + * + * Before the capabilities reduction this function can be used to set + * the non-reduced values for the DP IN adapter. Returns %0 in success + * and negative errno otherwise. If the adapter does not support this + * %-EOPNOTSUPP is returned. + */ +int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes) +{ + u32 val; + int ret; + + if (!is_usb4_dpin(port)) + return -EOPNOTSUPP; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); + if (ret) + return ret; + + val &= ~ADP_DP_CS_2_NRD_MLR_MASK; + + switch (rate) { + case 1620: + break; + case 2700: + val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT) + & ADP_DP_CS_2_NRD_MLR_MASK; + break; + case 5400: + val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT) + & ADP_DP_CS_2_NRD_MLR_MASK; + break; + case 8100: + val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT) + & ADP_DP_CS_2_NRD_MLR_MASK; + break; + default: + return -EINVAL; + } + + val &= ~ADP_DP_CS_2_NRD_MLC_MASK; + + switch (lanes) { + case 1: + break; + case 2: + val |= DP_COMMON_CAP_2_LANES; + break; + case 4: + val |= DP_COMMON_CAP_4_LANES; + break; + default: + return -EINVAL; + } + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); +} + +/** + * usb4_dp_port_granularity() - Return granularity for the bandwidth values + * @port: DP IN adapter + * + * Reads the programmed granularity from @port. If the DP IN adapter does + * not support bandwidth allocation mode returns %-EOPNOTSUPP and negative + * errno in other error cases. + */ +int usb4_dp_port_granularity(struct tb_port *port) +{ + u32 val; + int ret; + + if (!is_usb4_dpin(port)) + return -EOPNOTSUPP; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); + if (ret) + return ret; + + val &= ADP_DP_CS_2_GR_MASK; + val >>= ADP_DP_CS_2_GR_SHIFT; + + switch (val) { + case ADP_DP_CS_2_GR_0_25G: + return 250; + case ADP_DP_CS_2_GR_0_5G: + return 500; + case ADP_DP_CS_2_GR_1G: + return 1000; + } + + return -EINVAL; +} + +/** + * usb4_dp_port_set_granularity() - Set granularity for the bandwidth values + * @port: DP IN adapter + * @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250. + * + * Sets the granularity used with the estimated, allocated and requested + * bandwidth. Returns %0 in success and negative errno otherwise. If the + * adapter does not support this %-EOPNOTSUPP is returned. 
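+ *
+ * Granularity is normally programmed before the estimated bandwidth, as
+ * in this sketch (illustrative only; 250 Mb/s granularity and 4500 Mb/s
+ * estimated bandwidth are arbitrary example values):
+ *
+ *	ret = usb4_dp_port_set_granularity(in, 250);
+ *	if (!ret)
+ *		ret = usb4_dp_port_set_estimated_bandwidth(in, 4500);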
+ */ +int usb4_dp_port_set_granularity(struct tb_port *port, int granularity) +{ + u32 val; + int ret; + + if (!is_usb4_dpin(port)) + return -EOPNOTSUPP; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); + if (ret) + return ret; + + val &= ~ADP_DP_CS_2_GR_MASK; + + switch (granularity) { + case 250: + val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT; + break; + case 500: + val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT; + break; + case 1000: + val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT; + break; + default: + return -EINVAL; + } + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); +} + +/** + * usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth + * @port: DP IN adapter + * @bw: Estimated bandwidth in Mb/s. + * + * Sets the estimated bandwidth to @bw. Set the granularity by calling + * usb4_dp_port_set_granularity() before calling this. The @bw is round + * down to the closest granularity multiplier. Returns %0 in success + * and negative errno otherwise. Specifically returns %-EOPNOTSUPP if + * the adapter does not support this. + */ +int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw) +{ + u32 val, granularity; + int ret; + + if (!is_usb4_dpin(port)) + return -EOPNOTSUPP; + + ret = usb4_dp_port_granularity(port); + if (ret < 0) + return ret; + granularity = ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); + if (ret) + return ret; + + val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK; + val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); +} + +/** + * usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth + * @port: DP IN adapter + * + * Reads and returns allocated bandwidth for @port in Mb/s (taking into + * account the programmed granularity). Returns negative errno in case + * of error. 
+ */ +int usb4_dp_port_allocated_bandwidth(struct tb_port *port) +{ + u32 val, granularity; + int ret; + + if (!is_usb4_dpin(port)) + return -EOPNOTSUPP; + + ret = usb4_dp_port_granularity(port); + if (ret < 0) + return ret; + granularity = ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + DP_STATUS, 1); + if (ret) + return ret; + + val &= DP_STATUS_ALLOCATED_BW_MASK; + val >>= DP_STATUS_ALLOCATED_BW_SHIFT; + + return val * granularity; +} + +static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack) +{ + u32 val; + int ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); + if (ret) + return ret; + + if (ack) + val |= ADP_DP_CS_2_CA; + else + val &= ~ADP_DP_CS_2_CA; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); +} + +static inline int usb4_dp_port_set_cm_ack(struct tb_port *port) +{ + return __usb4_dp_port_set_cm_ack(port, true); +} + +static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port, + int timeout_msec) +{ + ktime_t end; + u32 val; + int ret; + + ret = __usb4_dp_port_set_cm_ack(port, false); + if (ret) + return ret; + + end = ktime_add_ms(ktime_get(), timeout_msec); + do { + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_8, 1); + if (ret) + return ret; + + if (!(val & ADP_DP_CS_8_DR)) + break; + + usleep_range(50, 100); + } while (ktime_before(ktime_get(), end)); + + if (val & ADP_DP_CS_8_DR) + return -ETIMEDOUT; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); + if (ret) + return ret; + + val &= ~ADP_DP_CS_2_CA; + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); +} + +/** + * usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth + * @port: DP IN adapter + * @bw: New allocated bandwidth in Mb/s + * + * Communicates the new allocated bandwidth with the DPCD (graphics + * driver). Takes into account the programmed granularity. Returns %0 in + * success and negative errno in case of error. + */ +int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw) +{ + u32 val, granularity; + int ret; + + if (!is_usb4_dpin(port)) + return -EOPNOTSUPP; + + ret = usb4_dp_port_granularity(port); + if (ret < 0) + return ret; + granularity = ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + DP_STATUS, 1); + if (ret) + return ret; + + val &= ~DP_STATUS_ALLOCATED_BW_MASK; + val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT; + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + DP_STATUS, 1); + if (ret) + return ret; + + ret = usb4_dp_port_set_cm_ack(port); + if (ret) + return ret; + + return usb4_dp_port_wait_and_clear_cm_ack(port, 500); +} + +/** + * usb4_dp_port_requested_bandwidth() - Read requested bandwidth + * @port: DP IN adapter + * + * Reads the DPCD (graphics driver) requested bandwidth and returns it + * in Mb/s. Takes the programmed granularity into account. In case of + * error returns negative errno. Specifically returns %-EOPNOTSUPP if + * the adapter does not support bandwidth allocation mode, and %ENODATA + * if there is no active bandwidth request from the graphics driver. 
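+ *
+ * A sketch of handling a bandwidth request (illustrative only; granting
+ * the full request without checking what is actually available is a
+ * simplification):
+ *
+ *	ret = usb4_dp_port_requested_bandwidth(in);
+ *	if (ret == -ENODATA)
+ *		return 0;
+ *	if (ret < 0)
+ *		return ret;
+ *	return usb4_dp_port_allocate_bandwidth(in, ret);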
+ */ +int usb4_dp_port_requested_bandwidth(struct tb_port *port) +{ + u32 val, granularity; + int ret; + + if (!is_usb4_dpin(port)) + return -EOPNOTSUPP; + + ret = usb4_dp_port_granularity(port); + if (ret < 0) + return ret; + granularity = ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_8, 1); + if (ret) + return ret; + + if (!(val & ADP_DP_CS_8_DR)) + return -ENODATA; + + return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity; +} + +/** + * usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation + * @port: PCIe adapter + * @enable: Enable/disable extended encapsulation + * + * Enables or disables extended encapsulation used in PCIe tunneling. Caller + * needs to make sure both adapters support this before enabling. Returns %0 on + * success and negative errno otherwise. + */ +int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable) +{ + u32 val; + int ret; + + if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port)) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_PCIE_CS_1, 1); + if (ret) + return ret; + + if (enable) + val |= ADP_PCIE_CS_1_EE; + else + val &= ~ADP_PCIE_CS_1_EE; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_PCIE_CS_1, 1); +} diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c new file mode 100644 index 0000000000..e355bfd634 --- /dev/null +++ b/drivers/thunderbolt/usb4_port.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * USB4 port device + * + * Copyright (C) 2021, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/pm_runtime.h> +#include <linux/component.h> +#include <linux/property.h> + +#include "tb.h" + +static int connector_bind(struct device *dev, struct device *connector, void *data) +{ + int ret; + + ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector"); + if (ret) + return ret; + + ret = sysfs_create_link(&connector->kobj, &dev->kobj, dev_name(dev)); + if (ret) + sysfs_remove_link(&dev->kobj, "connector"); + + return ret; +} + +static void connector_unbind(struct device *dev, struct device *connector, void *data) +{ + sysfs_remove_link(&connector->kobj, dev_name(dev)); + sysfs_remove_link(&dev->kobj, "connector"); +} + +static const struct component_ops connector_ops = { + .bind = connector_bind, + .unbind = connector_unbind, +}; + +static ssize_t link_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct usb4_port *usb4 = tb_to_usb4_port_device(dev); + struct tb_port *port = usb4->port; + struct tb *tb = port->sw->tb; + const char *link; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + if (tb_is_upstream_port(port)) + link = port->sw->link_usb4 ? "usb4" : "tbt"; + else if (tb_port_has_remote(port)) + link = port->remote->sw->link_usb4 ? "usb4" : "tbt"; + else if (port->xdomain) + link = port->xdomain->link_usb4 ? 
"usb4" : "tbt"; + else + link = "none"; + + mutex_unlock(&tb->lock); + + return sysfs_emit(buf, "%s\n", link); +} +static DEVICE_ATTR_RO(link); + +static struct attribute *common_attrs[] = { + &dev_attr_link.attr, + NULL +}; + +static const struct attribute_group common_group = { + .attrs = common_attrs, +}; + +static int usb4_port_offline(struct usb4_port *usb4) +{ + struct tb_port *port = usb4->port; + int ret; + + ret = tb_acpi_power_on_retimers(port); + if (ret) + return ret; + + ret = usb4_port_router_offline(port); + if (ret) { + tb_acpi_power_off_retimers(port); + return ret; + } + + ret = tb_retimer_scan(port, false); + if (ret) { + usb4_port_router_online(port); + tb_acpi_power_off_retimers(port); + } + + return ret; +} + +static void usb4_port_online(struct usb4_port *usb4) +{ + struct tb_port *port = usb4->port; + + usb4_port_router_online(port); + tb_acpi_power_off_retimers(port); +} + +static ssize_t offline_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb4_port *usb4 = tb_to_usb4_port_device(dev); + + return sysfs_emit(buf, "%d\n", usb4->offline); +} + +static ssize_t offline_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb4_port *usb4 = tb_to_usb4_port_device(dev); + struct tb_port *port = usb4->port; + struct tb *tb = port->sw->tb; + bool val; + int ret; + + ret = kstrtobool(buf, &val); + if (ret) + return ret; + + pm_runtime_get_sync(&usb4->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm; + } + + if (val == usb4->offline) + goto out_unlock; + + /* Offline mode works only for ports that are not connected */ + if (tb_port_has_remote(port)) { + ret = -EBUSY; + goto out_unlock; + } + + if (val) { + ret = usb4_port_offline(usb4); + if (ret) + goto out_unlock; + } else { + usb4_port_online(usb4); + tb_retimer_remove_all(port); + } + + usb4->offline = val; + tb_port_dbg(port, "%s offline mode\n", val ? "enter" : "exit"); + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm: + pm_runtime_mark_last_busy(&usb4->dev); + pm_runtime_put_autosuspend(&usb4->dev); + + return ret ? ret : count; +} +static DEVICE_ATTR_RW(offline); + +static ssize_t rescan_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb4_port *usb4 = tb_to_usb4_port_device(dev); + struct tb_port *port = usb4->port; + struct tb *tb = port->sw->tb; + bool val; + int ret; + + ret = kstrtobool(buf, &val); + if (ret) + return ret; + + if (!val) + return count; + + pm_runtime_get_sync(&usb4->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm; + } + + /* Must be in offline mode already */ + if (!usb4->offline) { + ret = -EINVAL; + goto out_unlock; + } + + tb_retimer_remove_all(port); + ret = tb_retimer_scan(port, true); + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm: + pm_runtime_mark_last_busy(&usb4->dev); + pm_runtime_put_autosuspend(&usb4->dev); + + return ret ? ret : count; +} +static DEVICE_ATTR_WO(rescan); + +static struct attribute *service_attrs[] = { + &dev_attr_offline.attr, + &dev_attr_rescan.attr, + NULL +}; + +static umode_t service_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct usb4_port *usb4 = tb_to_usb4_port_device(dev); + + /* + * Always need some platform help to cycle the modes so that + * retimers can be accessed through the sideband. + */ + return usb4->can_offline ? 
attr->mode : 0; +} + +static const struct attribute_group service_group = { + .attrs = service_attrs, + .is_visible = service_attr_is_visible, +}; + +static const struct attribute_group *usb4_port_device_groups[] = { + &common_group, + &service_group, + NULL +}; + +static void usb4_port_device_release(struct device *dev) +{ + struct usb4_port *usb4 = container_of(dev, struct usb4_port, dev); + + kfree(usb4); +} + +struct device_type usb4_port_device_type = { + .name = "usb4_port", + .groups = usb4_port_device_groups, + .release = usb4_port_device_release, +}; + +/** + * usb4_port_device_add() - Add USB4 port device + * @port: Lane 0 adapter port to add the USB4 port + * + * Creates and registers a USB4 port device for @port. Returns the new + * USB4 port device pointer or ERR_PTR() in case of error. + */ +struct usb4_port *usb4_port_device_add(struct tb_port *port) +{ + struct usb4_port *usb4; + int ret; + + usb4 = kzalloc(sizeof(*usb4), GFP_KERNEL); + if (!usb4) + return ERR_PTR(-ENOMEM); + + usb4->port = port; + usb4->dev.type = &usb4_port_device_type; + usb4->dev.parent = &port->sw->dev; + dev_set_name(&usb4->dev, "usb4_port%d", port->port); + + ret = device_register(&usb4->dev); + if (ret) { + put_device(&usb4->dev); + return ERR_PTR(ret); + } + + if (dev_fwnode(&usb4->dev)) { + ret = component_add(&usb4->dev, &connector_ops); + if (ret) { + dev_err(&usb4->dev, "failed to add component\n"); + device_unregister(&usb4->dev); + } + } + + if (!tb_is_upstream_port(port)) + device_set_wakeup_capable(&usb4->dev, true); + + pm_runtime_no_callbacks(&usb4->dev); + pm_runtime_set_active(&usb4->dev); + pm_runtime_enable(&usb4->dev); + pm_runtime_set_autosuspend_delay(&usb4->dev, TB_AUTOSUSPEND_DELAY); + pm_runtime_mark_last_busy(&usb4->dev); + pm_runtime_use_autosuspend(&usb4->dev); + + return usb4; +} + +/** + * usb4_port_device_remove() - Removes USB4 port device + * @usb4: USB4 port device + * + * Unregisters the USB4 port device from the system. The device will be + * released when the last reference is dropped. + */ +void usb4_port_device_remove(struct usb4_port *usb4) +{ + if (dev_fwnode(&usb4->dev)) + component_del(&usb4->dev, &connector_ops); + device_unregister(&usb4->dev); +} + +/** + * usb4_port_device_resume() - Resumes USB4 port device + * @usb4: USB4 port device + * + * Used to resume USB4 port device after sleep state. + */ +int usb4_port_device_resume(struct usb4_port *usb4) +{ + return usb4->offline ? 
usb4_port_offline(usb4) : 0; +} diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c new file mode 100644 index 0000000000..9803f0bbf2 --- /dev/null +++ b/drivers/thunderbolt/xdomain.c @@ -0,0 +1,2526 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt XDomain discovery protocol support + * + * Copyright (C) 2017, Intel Corporation + * Authors: Michael Jamet <michael.jamet@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/kmod.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> +#include <linux/prandom.h> +#include <linux/string_helpers.h> +#include <linux/utsname.h> +#include <linux/uuid.h> +#include <linux/workqueue.h> + +#include "tb.h" + +#define XDOMAIN_SHORT_TIMEOUT 100 /* ms */ +#define XDOMAIN_DEFAULT_TIMEOUT 1000 /* ms */ +#define XDOMAIN_BONDING_TIMEOUT 10000 /* ms */ +#define XDOMAIN_RETRIES 10 +#define XDOMAIN_DEFAULT_MAX_HOPID 15 + +enum { + XDOMAIN_STATE_INIT, + XDOMAIN_STATE_UUID, + XDOMAIN_STATE_LINK_STATUS, + XDOMAIN_STATE_LINK_STATE_CHANGE, + XDOMAIN_STATE_LINK_STATUS2, + XDOMAIN_STATE_BONDING_UUID_LOW, + XDOMAIN_STATE_BONDING_UUID_HIGH, + XDOMAIN_STATE_PROPERTIES, + XDOMAIN_STATE_ENUMERATED, + XDOMAIN_STATE_ERROR, +}; + +static const char * const state_names[] = { + [XDOMAIN_STATE_INIT] = "INIT", + [XDOMAIN_STATE_UUID] = "UUID", + [XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS", + [XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE", + [XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2", + [XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW", + [XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH", + [XDOMAIN_STATE_PROPERTIES] = "PROPERTIES", + [XDOMAIN_STATE_ENUMERATED] = "ENUMERATED", + [XDOMAIN_STATE_ERROR] = "ERROR", +}; + +struct xdomain_request_work { + struct work_struct work; + struct tb_xdp_header *pkg; + struct tb *tb; +}; + +static bool tb_xdomain_enabled = true; +module_param_named(xdomain, tb_xdomain_enabled, bool, 0444); +MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)"); + +/* + * Serializes access to the properties and protocol handlers below. If + * you need to take both this lock and the struct tb_xdomain lock, take + * this one first. 
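+ * + * Illustrative ordering sketch only: + * + *   mutex_lock(&xdomain_lock); + *   mutex_lock(&xd->lock); + *   ... + *   mutex_unlock(&xd->lock); + *   mutex_unlock(&xdomain_lock);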
+ */ +static DEFINE_MUTEX(xdomain_lock); + +/* Properties exposed to the remote domains */ +static struct tb_property_dir *xdomain_property_dir; +static u32 xdomain_property_block_gen; + +/* Additional protocol handlers */ +static LIST_HEAD(protocol_handlers); + +/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */ +static const uuid_t tb_xdp_uuid = + UUID_INIT(0xb638d70e, 0x42ff, 0x40bb, + 0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07); + +bool tb_is_xdomain_enabled(void) +{ + return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed(); +} + +static bool tb_xdomain_match(const struct tb_cfg_request *req, + const struct ctl_pkg *pkg) +{ + switch (pkg->frame.eof) { + case TB_CFG_PKG_ERROR: + return true; + + case TB_CFG_PKG_XDOMAIN_RESP: { + const struct tb_xdp_header *res_hdr = pkg->buffer; + const struct tb_xdp_header *req_hdr = req->request; + + if (pkg->frame.size < req->response_size / 4) + return false; + + /* Make sure route matches */ + if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) != + req_hdr->xd_hdr.route_hi) + return false; + if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo) + return false; + + /* Check that the XDomain protocol matches */ + if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid)) + return false; + + return true; + } + + default: + return false; + } +} + +static bool tb_xdomain_copy(struct tb_cfg_request *req, + const struct ctl_pkg *pkg) +{ + memcpy(req->response, pkg->buffer, req->response_size); + req->result.err = 0; + return true; +} + +static void response_ready(void *data) +{ + tb_cfg_request_put(data); +} + +static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response, + size_t size, enum tb_cfg_pkg_type type) +{ + struct tb_cfg_request *req; + + req = tb_cfg_request_alloc(); + if (!req) + return -ENOMEM; + + req->match = tb_xdomain_match; + req->copy = tb_xdomain_copy; + req->request = response; + req->request_size = size; + req->request_type = type; + + return tb_cfg_request(ctl, req, response_ready, req); +} + +/** + * tb_xdomain_response() - Send a XDomain response message + * @xd: XDomain to send the message + * @response: Response to send + * @size: Size of the response + * @type: PDF type of the response + * + * This can be used to send a XDomain response message to the other + * domain. No response for the message is expected. + * + * Return: %0 in case of success and negative errno in case of failure + */ +int tb_xdomain_response(struct tb_xdomain *xd, const void *response, + size_t size, enum tb_cfg_pkg_type type) +{ + return __tb_xdomain_response(xd->tb->ctl, response, size, type); +} +EXPORT_SYMBOL_GPL(tb_xdomain_response); + +static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request, + size_t request_size, enum tb_cfg_pkg_type request_type, void *response, + size_t response_size, enum tb_cfg_pkg_type response_type, + unsigned int timeout_msec) +{ + struct tb_cfg_request *req; + struct tb_cfg_result res; + + req = tb_cfg_request_alloc(); + if (!req) + return -ENOMEM; + + req->match = tb_xdomain_match; + req->copy = tb_xdomain_copy; + req->request = request; + req->request_size = request_size; + req->request_type = request_type; + req->response = response; + req->response_size = response_size; + req->response_type = response_type; + + res = tb_cfg_request_sync(ctl, req, timeout_msec); + + tb_cfg_request_put(req); + + return res.err == 1 ? 
-EIO : res.err; +} + +/** + * tb_xdomain_request() - Send a XDomain request + * @xd: XDomain to send the request + * @request: Request to send + * @request_size: Size of the request in bytes + * @request_type: PDF type of the request + * @response: Response is copied here + * @response_size: Expected size of the response in bytes + * @response_type: Expected PDF type of the response + * @timeout_msec: Timeout in milliseconds to wait for the response + * + * This function can be used to send XDomain control channel messages to + * the other domain. The function waits until the response is received + * or when timeout triggers. Whichever comes first. + * + * Return: %0 in case of success and negative errno in case of failure + */ +int tb_xdomain_request(struct tb_xdomain *xd, const void *request, + size_t request_size, enum tb_cfg_pkg_type request_type, + void *response, size_t response_size, + enum tb_cfg_pkg_type response_type, unsigned int timeout_msec) +{ + return __tb_xdomain_request(xd->tb->ctl, request, request_size, + request_type, response, response_size, + response_type, timeout_msec); +} +EXPORT_SYMBOL_GPL(tb_xdomain_request); + +static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route, + u8 sequence, enum tb_xdp_type type, size_t size) +{ + u32 length_sn; + + length_sn = (size - sizeof(hdr->xd_hdr)) / 4; + length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK; + + hdr->xd_hdr.route_hi = upper_32_bits(route); + hdr->xd_hdr.route_lo = lower_32_bits(route); + hdr->xd_hdr.length_sn = length_sn; + hdr->type = type; + memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid)); +} + +static int tb_xdp_handle_error(const struct tb_xdp_error_response *res) +{ + if (res->hdr.type != ERROR_RESPONSE) + return 0; + + switch (res->error) { + case ERROR_UNKNOWN_PACKET: + case ERROR_UNKNOWN_DOMAIN: + return -EIO; + case ERROR_NOT_SUPPORTED: + return -ENOTSUPP; + case ERROR_NOT_READY: + return -EAGAIN; + default: + break; + } + + return 0; +} + +static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry, + uuid_t *uuid, u64 *remote_route) +{ + struct tb_xdp_uuid_response res; + struct tb_xdp_uuid req; + int ret; + + memset(&req, 0, sizeof(req)); + tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST, + sizeof(req)); + + memset(&res, 0, sizeof(res)); + ret = __tb_xdomain_request(ctl, &req, sizeof(req), + TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res), + TB_CFG_PKG_XDOMAIN_RESP, + XDOMAIN_DEFAULT_TIMEOUT); + if (ret) + return ret; + + ret = tb_xdp_handle_error(&res.err); + if (ret) + return ret; + + uuid_copy(uuid, &res.src_uuid); + *remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo; + + return 0; +} + +static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence, + const uuid_t *uuid) +{ + struct tb_xdp_uuid_response res; + + memset(&res, 0, sizeof(res)); + tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE, + sizeof(res)); + + uuid_copy(&res.src_uuid, uuid); + res.src_route_hi = upper_32_bits(route); + res.src_route_lo = lower_32_bits(route); + + return __tb_xdomain_response(ctl, &res, sizeof(res), + TB_CFG_PKG_XDOMAIN_RESP); +} + +static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence, + enum tb_xdp_error error) +{ + struct tb_xdp_error_response res; + + memset(&res, 0, sizeof(res)); + tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE, + sizeof(res)); + res.error = error; + + return __tb_xdomain_response(ctl, &res, sizeof(res), + TB_CFG_PKG_XDOMAIN_RESP); +} + +static int 
tb_xdp_properties_request(struct tb_ctl *ctl, u64 route, + const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry, + u32 **block, u32 *generation) +{ + struct tb_xdp_properties_response *res; + struct tb_xdp_properties req; + u16 data_len, len; + size_t total_size; + u32 *data = NULL; + int ret; + + total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4; + res = kzalloc(total_size, GFP_KERNEL); + if (!res) + return -ENOMEM; + + memset(&req, 0, sizeof(req)); + tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST, + sizeof(req)); + memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid)); + memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid)); + + data_len = 0; + + do { + ret = __tb_xdomain_request(ctl, &req, sizeof(req), + TB_CFG_PKG_XDOMAIN_REQ, res, + total_size, TB_CFG_PKG_XDOMAIN_RESP, + XDOMAIN_DEFAULT_TIMEOUT); + if (ret) + goto err; + + ret = tb_xdp_handle_error(&res->err); + if (ret) + goto err; + + /* + * Package length includes the whole payload without the + * XDomain header. Validate first that the package is at + * least size of the response structure. + */ + len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK; + if (len < sizeof(*res) / 4) { + ret = -EINVAL; + goto err; + } + + len += sizeof(res->hdr.xd_hdr) / 4; + len -= sizeof(*res) / 4; + + if (res->offset != req.offset) { + ret = -EINVAL; + goto err; + } + + /* + * First time allocate block that has enough space for + * the whole properties block. + */ + if (!data) { + data_len = res->data_length; + if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) { + ret = -E2BIG; + goto err; + } + + data = kcalloc(data_len, sizeof(u32), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto err; + } + } + + memcpy(data + req.offset, res->data, len * 4); + req.offset += len; + } while (!data_len || req.offset < data_len); + + *block = data; + *generation = res->generation; + + kfree(res); + + return data_len; + +err: + kfree(data); + kfree(res); + + return ret; +} + +static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl, + struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req) +{ + struct tb_xdp_properties_response *res; + size_t total_size; + u16 len; + int ret; + + /* + * Currently we expect all requests to be directed to us. The + * protocol supports forwarding, though which we might add + * support later on. 
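+ * Until then, requests whose destination UUID does not match ours are + * rejected below with ERROR_UNKNOWN_DOMAIN.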
+ */ + if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) { + tb_xdp_error_response(ctl, xd->route, sequence, + ERROR_UNKNOWN_DOMAIN); + return 0; + } + + mutex_lock(&xd->lock); + + if (req->offset >= xd->local_property_block_len) { + mutex_unlock(&xd->lock); + return -EINVAL; + } + + len = xd->local_property_block_len - req->offset; + len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH); + total_size = sizeof(*res) + len * 4; + + res = kzalloc(total_size, GFP_KERNEL); + if (!res) { + mutex_unlock(&xd->lock); + return -ENOMEM; + } + + tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE, + total_size); + res->generation = xd->local_property_block_gen; + res->data_length = xd->local_property_block_len; + res->offset = req->offset; + uuid_copy(&res->src_uuid, xd->local_uuid); + uuid_copy(&res->dst_uuid, &req->src_uuid); + memcpy(res->data, &xd->local_property_block[req->offset], len * 4); + + mutex_unlock(&xd->lock); + + ret = __tb_xdomain_response(ctl, res, total_size, + TB_CFG_PKG_XDOMAIN_RESP); + + kfree(res); + return ret; +} + +static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route, + int retry, const uuid_t *uuid) +{ + struct tb_xdp_properties_changed_response res; + struct tb_xdp_properties_changed req; + int ret; + + memset(&req, 0, sizeof(req)); + tb_xdp_fill_header(&req.hdr, route, retry % 4, + PROPERTIES_CHANGED_REQUEST, sizeof(req)); + uuid_copy(&req.src_uuid, uuid); + + memset(&res, 0, sizeof(res)); + ret = __tb_xdomain_request(ctl, &req, sizeof(req), + TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res), + TB_CFG_PKG_XDOMAIN_RESP, + XDOMAIN_DEFAULT_TIMEOUT); + if (ret) + return ret; + + return tb_xdp_handle_error(&res.err); +} + +static int +tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence) +{ + struct tb_xdp_properties_changed_response res; + + memset(&res, 0, sizeof(res)); + tb_xdp_fill_header(&res.hdr, route, sequence, + PROPERTIES_CHANGED_RESPONSE, sizeof(res)); + return __tb_xdomain_response(ctl, &res, sizeof(res), + TB_CFG_PKG_XDOMAIN_RESP); +} + +static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route, + u8 sequence, u8 *slw, u8 *tlw, + u8 *sls, u8 *tls) +{ + struct tb_xdp_link_state_status_response res; + struct tb_xdp_link_state_status req; + int ret; + + memset(&req, 0, sizeof(req)); + tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST, + sizeof(req)); + + memset(&res, 0, sizeof(res)); + ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ, + &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP, + XDOMAIN_DEFAULT_TIMEOUT); + if (ret) + return ret; + + ret = tb_xdp_handle_error(&res.err); + if (ret) + return ret; + + if (res.status != 0) + return -EREMOTEIO; + + *slw = res.slw; + *tlw = res.tlw; + *sls = res.sls; + *tls = res.tls; + + return 0; +} + +static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl, + struct tb_xdomain *xd, u8 sequence) +{ + struct tb_xdp_link_state_status_response res; + struct tb_port *port = tb_xdomain_downstream_port(xd); + u32 val[2]; + int ret; + + memset(&res, 0, sizeof(res)); + tb_xdp_fill_header(&res.hdr, xd->route, sequence, + LINK_STATE_STATUS_RESPONSE, sizeof(res)); + + ret = tb_port_read(port, val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val)); + if (ret) + return ret; + + res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> + LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; + res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >> + LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT; + res.tls = val[1] 
& LANE_ADP_CS_1_TARGET_SPEED_MASK; + res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >> + LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; + + return __tb_xdomain_response(ctl, &res, sizeof(res), + TB_CFG_PKG_XDOMAIN_RESP); +} + +static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route, + u8 sequence, u8 tlw, u8 tls) +{ + struct tb_xdp_link_state_change_response res; + struct tb_xdp_link_state_change req; + int ret; + + memset(&req, 0, sizeof(req)); + tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST, + sizeof(req)); + req.tlw = tlw; + req.tls = tls; + + memset(&res, 0, sizeof(res)); + ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ, + &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP, + XDOMAIN_DEFAULT_TIMEOUT); + if (ret) + return ret; + + ret = tb_xdp_handle_error(&res.err); + if (ret) + return ret; + + return res.status != 0 ? -EREMOTEIO : 0; +} + +static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route, + u8 sequence, u32 status) +{ + struct tb_xdp_link_state_change_response res; + + memset(&res, 0, sizeof(res)); + tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE, + sizeof(res)); + + res.status = status; + + return __tb_xdomain_response(ctl, &res, sizeof(res), + TB_CFG_PKG_XDOMAIN_RESP); +} + +/** + * tb_register_protocol_handler() - Register protocol handler + * @handler: Handler to register + * + * This allows XDomain service drivers to hook into incoming XDomain + * messages. After this function is called the service driver needs to + * be able to handle calls to callback whenever a package with the + * registered protocol is received. + */ +int tb_register_protocol_handler(struct tb_protocol_handler *handler) +{ + if (!handler->uuid || !handler->callback) + return -EINVAL; + if (uuid_equal(handler->uuid, &tb_xdp_uuid)) + return -EINVAL; + + mutex_lock(&xdomain_lock); + list_add_tail(&handler->list, &protocol_handlers); + mutex_unlock(&xdomain_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(tb_register_protocol_handler); + +/** + * tb_unregister_protocol_handler() - Unregister protocol handler + * @handler: Handler to unregister + * + * Removes the previously registered protocol handler. + */ +void tb_unregister_protocol_handler(struct tb_protocol_handler *handler) +{ + mutex_lock(&xdomain_lock); + list_del_init(&handler->list); + mutex_unlock(&xdomain_lock); +} +EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler); + +static void update_property_block(struct tb_xdomain *xd) +{ + mutex_lock(&xdomain_lock); + mutex_lock(&xd->lock); + /* + * If the local property block is not up-to-date, rebuild it now + * based on the global property template. 
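+ * Both xdomain_lock and xd->lock are held at this point, in that + * order, per the locking rule documented at the top of this file.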
+ */ + if (!xd->local_property_block || + xd->local_property_block_gen < xdomain_property_block_gen) { + struct tb_property_dir *dir; + int ret, block_len; + u32 *block; + + dir = tb_property_copy_dir(xdomain_property_dir); + if (!dir) { + dev_warn(&xd->dev, "failed to copy properties\n"); + goto out_unlock; + } + + /* Fill in non-static properties now */ + tb_property_add_text(dir, "deviceid", utsname()->nodename); + tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid); + + ret = tb_property_format_dir(dir, NULL, 0); + if (ret < 0) { + dev_warn(&xd->dev, "local property block creation failed\n"); + tb_property_free_dir(dir); + goto out_unlock; + } + + block_len = ret; + block = kcalloc(block_len, sizeof(*block), GFP_KERNEL); + if (!block) { + tb_property_free_dir(dir); + goto out_unlock; + } + + ret = tb_property_format_dir(dir, block, block_len); + if (ret) { + dev_warn(&xd->dev, "property block generation failed\n"); + tb_property_free_dir(dir); + kfree(block); + goto out_unlock; + } + + tb_property_free_dir(dir); + /* Release the previous block */ + kfree(xd->local_property_block); + /* Assign new one */ + xd->local_property_block = block; + xd->local_property_block_len = block_len; + xd->local_property_block_gen = xdomain_property_block_gen; + } + +out_unlock: + mutex_unlock(&xd->lock); + mutex_unlock(&xdomain_lock); +} + +static void start_handshake(struct tb_xdomain *xd) +{ + xd->state = XDOMAIN_STATE_INIT; + queue_delayed_work(xd->tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT)); +} + +/* Can be called from state_work */ +static void __stop_handshake(struct tb_xdomain *xd) +{ + cancel_delayed_work_sync(&xd->properties_changed_work); + xd->properties_changed_retries = 0; + xd->state_retries = 0; +} + +static void stop_handshake(struct tb_xdomain *xd) +{ + cancel_delayed_work_sync(&xd->state_work); + __stop_handshake(xd); +} + +static void tb_xdp_handle_request(struct work_struct *work) +{ + struct xdomain_request_work *xw = container_of(work, typeof(*xw), work); + const struct tb_xdp_header *pkg = xw->pkg; + const struct tb_xdomain_header *xhdr = &pkg->xd_hdr; + struct tb *tb = xw->tb; + struct tb_ctl *ctl = tb->ctl; + struct tb_xdomain *xd; + const uuid_t *uuid; + int ret = 0; + u32 sequence; + u64 route; + + route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63); + sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK; + sequence >>= TB_XDOMAIN_SN_SHIFT; + + mutex_lock(&tb->lock); + if (tb->root_switch) + uuid = tb->root_switch->uuid; + else + uuid = NULL; + mutex_unlock(&tb->lock); + + if (!uuid) { + tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY); + goto out; + } + + xd = tb_xdomain_find_by_route_locked(tb, route); + if (xd) + update_property_block(xd); + + switch (pkg->type) { + case PROPERTIES_REQUEST: + tb_dbg(tb, "%llx: received XDomain properties request\n", route); + if (xd) { + ret = tb_xdp_properties_response(tb, ctl, xd, sequence, + (const struct tb_xdp_properties *)pkg); + } + break; + + case PROPERTIES_CHANGED_REQUEST: + tb_dbg(tb, "%llx: received XDomain properties changed request\n", + route); + + ret = tb_xdp_properties_changed_response(ctl, route, sequence); + + /* + * Since the properties have been changed, let's update + * the xdomain related to this connection as well in + * case there is a change in services it offers. 
+ */ + if (xd && device_is_registered(&xd->dev)) + queue_delayed_work(tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT)); + break; + + case UUID_REQUEST_OLD: + case UUID_REQUEST: + tb_dbg(tb, "%llx: received XDomain UUID request\n", route); + ret = tb_xdp_uuid_response(ctl, route, sequence, uuid); + /* + * If we've stopped the discovery with an error such as + * timing out, we will restart the handshake now that we + * received UUID request from the remote host. + */ + if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) { + dev_dbg(&xd->dev, "restarting handshake\n"); + start_handshake(xd); + } + break; + + case LINK_STATE_STATUS_REQUEST: + tb_dbg(tb, "%llx: received XDomain link state status request\n", + route); + + if (xd) { + ret = tb_xdp_link_state_status_response(tb, ctl, xd, + sequence); + } else { + tb_xdp_error_response(ctl, route, sequence, + ERROR_NOT_READY); + } + break; + + case LINK_STATE_CHANGE_REQUEST: + tb_dbg(tb, "%llx: received XDomain link state change request\n", + route); + + if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) { + const struct tb_xdp_link_state_change *lsc = + (const struct tb_xdp_link_state_change *)pkg; + + ret = tb_xdp_link_state_change_response(ctl, route, + sequence, 0); + xd->target_link_width = lsc->tlw; + queue_delayed_work(tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT)); + } else { + tb_xdp_error_response(ctl, route, sequence, + ERROR_NOT_READY); + } + break; + + default: + tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type); + tb_xdp_error_response(ctl, route, sequence, + ERROR_NOT_SUPPORTED); + break; + } + + tb_xdomain_put(xd); + + if (ret) { + tb_warn(tb, "failed to send XDomain response for %#x\n", + pkg->type); + } + +out: + kfree(xw->pkg); + kfree(xw); + + tb_domain_put(tb); +} + +static bool +tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr, + size_t size) +{ + struct xdomain_request_work *xw; + + xw = kmalloc(sizeof(*xw), GFP_KERNEL); + if (!xw) + return false; + + INIT_WORK(&xw->work, tb_xdp_handle_request); + xw->pkg = kmemdup(hdr, size, GFP_KERNEL); + if (!xw->pkg) { + kfree(xw); + return false; + } + xw->tb = tb_domain_get(tb); + + schedule_work(&xw->work); + return true; +} + +/** + * tb_register_service_driver() - Register XDomain service driver + * @drv: Driver to register + * + * Registers new service driver from @drv to the bus. + */ +int tb_register_service_driver(struct tb_service_driver *drv) +{ + drv->driver.bus = &tb_bus_type; + return driver_register(&drv->driver); +} +EXPORT_SYMBOL_GPL(tb_register_service_driver); + +/** + * tb_unregister_service_driver() - Unregister XDomain service driver + * @drv: Driver to unregister + * + * Unregisters XDomain service driver from the bus. + */ +void tb_unregister_service_driver(struct tb_service_driver *drv) +{ + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL_GPL(tb_unregister_service_driver); + +static ssize_t key_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_service *svc = container_of(dev, struct tb_service, dev); + + /* + * It should be null terminated but anything else is pretty much + * allowed. 
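+ * The %pE specifier used below escapes any non-printable characters + * so the sysfs output stays readable.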
+ */ + return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key); +} +static DEVICE_ATTR_RO(key); + +static int get_modalias(const struct tb_service *svc, char *buf, size_t size) +{ + return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key, + svc->prtcid, svc->prtcvers, svc->prtcrevs); +} + +static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_service *svc = container_of(dev, struct tb_service, dev); + + /* Full buffer size except new line and null termination */ + get_modalias(svc, buf, PAGE_SIZE - 2); + return strlen(strcat(buf, "\n")); +} +static DEVICE_ATTR_RO(modalias); + +static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_service *svc = container_of(dev, struct tb_service, dev); + + return sysfs_emit(buf, "%u\n", svc->prtcid); +} +static DEVICE_ATTR_RO(prtcid); + +static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_service *svc = container_of(dev, struct tb_service, dev); + + return sysfs_emit(buf, "%u\n", svc->prtcvers); +} +static DEVICE_ATTR_RO(prtcvers); + +static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_service *svc = container_of(dev, struct tb_service, dev); + + return sysfs_emit(buf, "%u\n", svc->prtcrevs); +} +static DEVICE_ATTR_RO(prtcrevs); + +static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_service *svc = container_of(dev, struct tb_service, dev); + + return sysfs_emit(buf, "0x%08x\n", svc->prtcstns); +} +static DEVICE_ATTR_RO(prtcstns); + +static struct attribute *tb_service_attrs[] = { + &dev_attr_key.attr, + &dev_attr_modalias.attr, + &dev_attr_prtcid.attr, + &dev_attr_prtcvers.attr, + &dev_attr_prtcrevs.attr, + &dev_attr_prtcstns.attr, + NULL, +}; + +static const struct attribute_group tb_service_attr_group = { + .attrs = tb_service_attrs, +}; + +static const struct attribute_group *tb_service_attr_groups[] = { + &tb_service_attr_group, + NULL, +}; + +static int tb_service_uevent(const struct device *dev, struct kobj_uevent_env *env) +{ + const struct tb_service *svc = container_of_const(dev, struct tb_service, dev); + char modalias[64]; + + get_modalias(svc, modalias, sizeof(modalias)); + return add_uevent_var(env, "MODALIAS=%s", modalias); +} + +static void tb_service_release(struct device *dev) +{ + struct tb_service *svc = container_of(dev, struct tb_service, dev); + struct tb_xdomain *xd = tb_service_parent(svc); + + tb_service_debugfs_remove(svc); + ida_simple_remove(&xd->service_ids, svc->id); + kfree(svc->key); + kfree(svc); +} + +struct device_type tb_service_type = { + .name = "thunderbolt_service", + .groups = tb_service_attr_groups, + .uevent = tb_service_uevent, + .release = tb_service_release, +}; +EXPORT_SYMBOL_GPL(tb_service_type); + +static int remove_missing_service(struct device *dev, void *data) +{ + struct tb_xdomain *xd = data; + struct tb_service *svc; + + svc = tb_to_service(dev); + if (!svc) + return 0; + + if (!tb_property_find(xd->remote_properties, svc->key, + TB_PROPERTY_TYPE_DIRECTORY)) + device_unregister(dev); + + return 0; +} + +static int find_service(struct device *dev, void *data) +{ + const struct tb_property *p = data; + struct tb_service *svc; + + svc = tb_to_service(dev); + if (!svc) + return 0; + + return !strcmp(svc->key, p->key); +} + +static int populate_service(struct tb_service *svc, + struct tb_property *property) +{ + struct 
tb_property_dir *dir = property->value.dir; + struct tb_property *p; + + /* Fill in standard properties */ + p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE); + if (p) + svc->prtcid = p->value.immediate; + p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE); + if (p) + svc->prtcvers = p->value.immediate; + p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE); + if (p) + svc->prtcrevs = p->value.immediate; + p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE); + if (p) + svc->prtcstns = p->value.immediate; + + svc->key = kstrdup(property->key, GFP_KERNEL); + if (!svc->key) + return -ENOMEM; + + return 0; +} + +static void enumerate_services(struct tb_xdomain *xd) +{ + struct tb_service *svc; + struct tb_property *p; + struct device *dev; + int id; + + /* + * First remove all services that are not available anymore in + * the updated property block. + */ + device_for_each_child_reverse(&xd->dev, xd, remove_missing_service); + + /* Then re-enumerate properties creating new services as we go */ + tb_property_for_each(xd->remote_properties, p) { + if (p->type != TB_PROPERTY_TYPE_DIRECTORY) + continue; + + /* If the service exists already we are fine */ + dev = device_find_child(&xd->dev, p, find_service); + if (dev) { + put_device(dev); + continue; + } + + svc = kzalloc(sizeof(*svc), GFP_KERNEL); + if (!svc) + break; + + if (populate_service(svc, p)) { + kfree(svc); + break; + } + + id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL); + if (id < 0) { + kfree(svc->key); + kfree(svc); + break; + } + svc->id = id; + svc->dev.bus = &tb_bus_type; + svc->dev.type = &tb_service_type; + svc->dev.parent = &xd->dev; + dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id); + + tb_service_debugfs_init(svc); + + if (device_register(&svc->dev)) { + put_device(&svc->dev); + break; + } + } +} + +static int populate_properties(struct tb_xdomain *xd, + struct tb_property_dir *dir) +{ + const struct tb_property *p; + + /* Required properties */ + p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE); + if (!p) + return -EINVAL; + xd->device = p->value.immediate; + + p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE); + if (!p) + return -EINVAL; + xd->vendor = p->value.immediate; + + p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE); + /* + * USB4 inter-domain spec suggests using 15 as HopID if the + * other end does not announce it in a property. This is for + * TBT3 compatibility. + */ + xd->remote_max_hopid = p ? 
p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID; + + kfree(xd->device_name); + xd->device_name = NULL; + kfree(xd->vendor_name); + xd->vendor_name = NULL; + + /* Optional properties */ + p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT); + if (p) + xd->device_name = kstrdup(p->value.text, GFP_KERNEL); + p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT); + if (p) + xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL); + + return 0; +} + +static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd) +{ + bool change = false; + struct tb_port *port; + int ret; + + port = tb_xdomain_downstream_port(xd); + + ret = tb_port_get_link_speed(port); + if (ret < 0) + return ret; + + if (xd->link_speed != ret) + change = true; + + xd->link_speed = ret; + + ret = tb_port_get_link_width(port); + if (ret < 0) + return ret; + + if (xd->link_width != ret) + change = true; + + xd->link_width = ret; + + if (change) + kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE); + + return 0; +} + +static int tb_xdomain_get_uuid(struct tb_xdomain *xd) +{ + struct tb *tb = xd->tb; + uuid_t uuid; + u64 route; + int ret; + + dev_dbg(&xd->dev, "requesting remote UUID\n"); + + ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid, + &route); + if (ret < 0) { + if (xd->state_retries-- > 0) { + dev_dbg(&xd->dev, "failed to request UUID, retrying\n"); + return -EAGAIN; + } + dev_dbg(&xd->dev, "failed to read remote UUID\n"); + return ret; + } + + dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid); + + if (uuid_equal(&uuid, xd->local_uuid)) { + if (route == xd->route) + dev_dbg(&xd->dev, "loop back detected\n"); + else + dev_dbg(&xd->dev, "intra-domain loop detected\n"); + + /* Don't bond lanes automatically for loops */ + xd->bonding_possible = false; + } + + /* + * If the UUID is different, there is another domain connected + * so mark this one unplugged and wait for the connection + * manager to replace it. 
+ */ + if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) { + dev_dbg(&xd->dev, "remote UUID is different, unplugging\n"); + xd->is_unplugged = true; + return -ENODEV; + } + + /* First time fill in the missing UUID */ + if (!xd->remote_uuid) { + xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL); + if (!xd->remote_uuid) + return -ENOMEM; + } + + return 0; +} + +static int tb_xdomain_get_link_status(struct tb_xdomain *xd) +{ + struct tb *tb = xd->tb; + u8 slw, tlw, sls, tls; + int ret; + + dev_dbg(&xd->dev, "sending link state status request to %pUb\n", + xd->remote_uuid); + + ret = tb_xdp_link_state_status_request(tb->ctl, xd->route, + xd->state_retries, &slw, &tlw, &sls, + &tls); + if (ret) { + if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) { + dev_dbg(&xd->dev, + "failed to request remote link status, retrying\n"); + return -EAGAIN; + } + dev_dbg(&xd->dev, "failed to receive remote link status\n"); + return ret; + } + + dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls); + + if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) { + dev_dbg(&xd->dev, "remote adapter is single lane only\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int tb_xdomain_link_state_change(struct tb_xdomain *xd, + unsigned int width) +{ + struct tb_port *port = tb_xdomain_downstream_port(xd); + struct tb *tb = xd->tb; + u8 tlw, tls; + u32 val; + int ret; + + if (width == 2) + tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL; + else if (width == 1) + tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE; + else + return -EINVAL; + + /* Use the current target speed */ + ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK; + + dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n", + tlw, tls); + + ret = tb_xdp_link_state_change_request(tb->ctl, xd->route, + xd->state_retries, tlw, tls); + if (ret) { + if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) { + dev_dbg(&xd->dev, + "failed to change remote link state, retrying\n"); + return -EAGAIN; + } + dev_err(&xd->dev, "failed request link state change, aborting\n"); + return ret; + } + + dev_dbg(&xd->dev, "received link state change response\n"); + return 0; +} + +static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd) +{ + unsigned int width, width_mask; + struct tb_port *port; + int ret; + + if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) { + width = TB_LINK_WIDTH_SINGLE; + width_mask = width; + } else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) { + width = TB_LINK_WIDTH_DUAL; + width_mask = width | TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX; + } else { + if (xd->state_retries-- > 0) { + dev_dbg(&xd->dev, + "link state change request not received yet, retrying\n"); + return -EAGAIN; + } + dev_dbg(&xd->dev, "timeout waiting for link change request\n"); + return -ETIMEDOUT; + } + + port = tb_xdomain_downstream_port(xd); + + /* + * We can't use tb_xdomain_lane_bonding_enable() here because it + * is the other side that initiates lane bonding. So here we + * just set the width to both lane adapters and wait for the + * link to transition bonded. 
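+ * The width programmed below comes from the remote end's preceding + * LINK_STATE_CHANGE request (stored in xd->target_link_width).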
+ */ + ret = tb_port_set_link_width(port->dual_link_port, width); + if (ret) { + tb_port_warn(port->dual_link_port, + "failed to set link width to %d\n", width); + return ret; + } + + ret = tb_port_set_link_width(port, width); + if (ret) { + tb_port_warn(port, "failed to set link width to %d\n", width); + return ret; + } + + ret = tb_port_wait_for_link_width(port, width_mask, + XDOMAIN_BONDING_TIMEOUT); + if (ret) { + dev_warn(&xd->dev, "error waiting for link width to become %d\n", + width_mask); + return ret; + } + + port->bonded = width > TB_LINK_WIDTH_SINGLE; + port->dual_link_port->bonded = width > TB_LINK_WIDTH_SINGLE; + + tb_port_update_credits(port); + tb_xdomain_update_link_attributes(xd); + + dev_dbg(&xd->dev, "lane bonding %s\n", str_enabled_disabled(width == 2)); + return 0; +} + +static int tb_xdomain_get_properties(struct tb_xdomain *xd) +{ + struct tb_property_dir *dir; + struct tb *tb = xd->tb; + bool update = false; + u32 *block = NULL; + u32 gen = 0; + int ret; + + dev_dbg(&xd->dev, "requesting remote properties\n"); + + ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid, + xd->remote_uuid, xd->state_retries, + &block, &gen); + if (ret < 0) { + if (xd->state_retries-- > 0) { + dev_dbg(&xd->dev, + "failed to request remote properties, retrying\n"); + return -EAGAIN; + } + /* Give up now */ + dev_err(&xd->dev, "failed read XDomain properties from %pUb\n", + xd->remote_uuid); + + return ret; + } + + mutex_lock(&xd->lock); + + /* Only accept newer generation properties */ + if (xd->remote_properties && gen <= xd->remote_property_block_gen) { + ret = 0; + goto err_free_block; + } + + dir = tb_property_parse_dir(block, ret); + if (!dir) { + dev_err(&xd->dev, "failed to parse XDomain properties\n"); + ret = -ENOMEM; + goto err_free_block; + } + + ret = populate_properties(xd, dir); + if (ret) { + dev_err(&xd->dev, "missing XDomain properties in response\n"); + goto err_free_dir; + } + + /* Release the existing one */ + if (xd->remote_properties) { + tb_property_free_dir(xd->remote_properties); + update = true; + } + + xd->remote_properties = dir; + xd->remote_property_block_gen = gen; + + tb_xdomain_update_link_attributes(xd); + + mutex_unlock(&xd->lock); + + kfree(block); + + /* + * Now the device should be ready enough so we can add it to the + * bus and let userspace know about it. If the device is already + * registered, we notify the userspace that it has changed. + */ + if (!update) { + /* + * Now disable lane 1 if bonding was not enabled. Do + * this only if bonding was possible at the beginning + * (that is we are the connection manager and there are + * two lanes). 
+ */ + if (xd->bonding_possible) { + struct tb_port *port; + + port = tb_xdomain_downstream_port(xd); + if (!port->bonded) + tb_port_disable(port->dual_link_port); + } + + if (device_add(&xd->dev)) { + dev_err(&xd->dev, "failed to add XDomain device\n"); + return -ENODEV; + } + dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n", + xd->vendor, xd->device); + if (xd->vendor_name && xd->device_name) + dev_info(&xd->dev, "%s %s\n", xd->vendor_name, + xd->device_name); + + tb_xdomain_debugfs_init(xd); + } else { + kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE); + } + + enumerate_services(xd); + return 0; + +err_free_dir: + tb_property_free_dir(dir); +err_free_block: + kfree(block); + mutex_unlock(&xd->lock); + + return ret; +} + +static void tb_xdomain_queue_uuid(struct tb_xdomain *xd) +{ + xd->state = XDOMAIN_STATE_UUID; + xd->state_retries = XDOMAIN_RETRIES; + queue_delayed_work(xd->tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT)); +} + +static void tb_xdomain_queue_link_status(struct tb_xdomain *xd) +{ + xd->state = XDOMAIN_STATE_LINK_STATUS; + xd->state_retries = XDOMAIN_RETRIES; + queue_delayed_work(xd->tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT)); +} + +static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd) +{ + xd->state = XDOMAIN_STATE_LINK_STATUS2; + xd->state_retries = XDOMAIN_RETRIES; + queue_delayed_work(xd->tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT)); +} + +static void tb_xdomain_queue_bonding(struct tb_xdomain *xd) +{ + if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) { + dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n"); + xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH; + } else { + dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n"); + xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE; + } + + xd->state_retries = XDOMAIN_RETRIES; + queue_delayed_work(xd->tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT)); +} + +static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd) +{ + xd->state = XDOMAIN_STATE_BONDING_UUID_LOW; + xd->state_retries = XDOMAIN_RETRIES; + queue_delayed_work(xd->tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT)); +} + +static void tb_xdomain_queue_properties(struct tb_xdomain *xd) +{ + xd->state = XDOMAIN_STATE_PROPERTIES; + xd->state_retries = XDOMAIN_RETRIES; + queue_delayed_work(xd->tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT)); +} + +static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd) +{ + xd->properties_changed_retries = XDOMAIN_RETRIES; + queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, + msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT)); +} + +static void tb_xdomain_failed(struct tb_xdomain *xd) +{ + xd->state = XDOMAIN_STATE_ERROR; + queue_delayed_work(xd->tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT)); +} + +static void tb_xdomain_state_work(struct work_struct *work) +{ + struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work); + int ret, state = xd->state; + + if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT || + state > XDOMAIN_STATE_ERROR)) + return; + + dev_dbg(&xd->dev, "running state %s\n", state_names[state]); + + switch (state) { + case XDOMAIN_STATE_INIT: + if (xd->needs_uuid) { + tb_xdomain_queue_uuid(xd); + } else { + tb_xdomain_queue_properties_changed(xd); + tb_xdomain_queue_properties(xd); + } + break; + + case XDOMAIN_STATE_UUID: + ret = tb_xdomain_get_uuid(xd); + if (ret) { + if (ret == -EAGAIN) + 
goto retry_state; + tb_xdomain_failed(xd); + } else { + tb_xdomain_queue_properties_changed(xd); + if (xd->bonding_possible) + tb_xdomain_queue_link_status(xd); + else + tb_xdomain_queue_properties(xd); + } + break; + + case XDOMAIN_STATE_LINK_STATUS: + ret = tb_xdomain_get_link_status(xd); + if (ret) { + if (ret == -EAGAIN) + goto retry_state; + + /* + * If any of the lane bonding states fail we skip + * bonding completely and try to continue from + * reading properties. + */ + tb_xdomain_queue_properties(xd); + } else { + tb_xdomain_queue_bonding(xd); + } + break; + + case XDOMAIN_STATE_LINK_STATE_CHANGE: + ret = tb_xdomain_link_state_change(xd, 2); + if (ret) { + if (ret == -EAGAIN) + goto retry_state; + tb_xdomain_queue_properties(xd); + } else { + tb_xdomain_queue_link_status2(xd); + } + break; + + case XDOMAIN_STATE_LINK_STATUS2: + ret = tb_xdomain_get_link_status(xd); + if (ret) { + if (ret == -EAGAIN) + goto retry_state; + tb_xdomain_queue_properties(xd); + } else { + tb_xdomain_queue_bonding_uuid_low(xd); + } + break; + + case XDOMAIN_STATE_BONDING_UUID_LOW: + tb_xdomain_lane_bonding_enable(xd); + tb_xdomain_queue_properties(xd); + break; + + case XDOMAIN_STATE_BONDING_UUID_HIGH: + if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN) + goto retry_state; + tb_xdomain_queue_properties(xd); + break; + + case XDOMAIN_STATE_PROPERTIES: + ret = tb_xdomain_get_properties(xd); + if (ret) { + if (ret == -EAGAIN) + goto retry_state; + tb_xdomain_failed(xd); + } else { + xd->state = XDOMAIN_STATE_ENUMERATED; + } + break; + + case XDOMAIN_STATE_ENUMERATED: + tb_xdomain_queue_properties(xd); + break; + + case XDOMAIN_STATE_ERROR: + dev_dbg(&xd->dev, "discovery failed, stopping handshake\n"); + __stop_handshake(xd); + break; + + default: + dev_warn(&xd->dev, "unexpected state %d\n", state); + break; + } + + return; + +retry_state: + queue_delayed_work(xd->tb->wq, &xd->state_work, + msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT)); +} + +static void tb_xdomain_properties_changed(struct work_struct *work) +{ + struct tb_xdomain *xd = container_of(work, typeof(*xd), + properties_changed_work.work); + int ret; + + dev_dbg(&xd->dev, "sending properties changed notification\n"); + + ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route, + xd->properties_changed_retries, xd->local_uuid); + if (ret) { + if (xd->properties_changed_retries-- > 0) { + dev_dbg(&xd->dev, + "failed to send properties changed notification, retrying\n"); + queue_delayed_work(xd->tb->wq, + &xd->properties_changed_work, + msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT)); + } + dev_err(&xd->dev, "failed to send properties changed notification\n"); + return; + } + + xd->properties_changed_retries = XDOMAIN_RETRIES; +} + +static ssize_t device_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); + + return sysfs_emit(buf, "%#x\n", xd->device); +} +static DEVICE_ATTR_RO(device); + +static ssize_t +device_name_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); + int ret; + + if (mutex_lock_interruptible(&xd->lock)) + return -ERESTARTSYS; + ret = sysfs_emit(buf, "%s\n", xd->device_name ?: ""); + mutex_unlock(&xd->lock); + + return ret; +} +static DEVICE_ATTR_RO(device_name); + +static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); + + return sysfs_emit(buf, 
"%d\n", xd->remote_max_hopid); +} +static DEVICE_ATTR_RO(maxhopid); + +static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); + + return sysfs_emit(buf, "%#x\n", xd->vendor); +} +static DEVICE_ATTR_RO(vendor); + +static ssize_t +vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); + int ret; + + if (mutex_lock_interruptible(&xd->lock)) + return -ERESTARTSYS; + ret = sysfs_emit(buf, "%s\n", xd->vendor_name ?: ""); + mutex_unlock(&xd->lock); + + return ret; +} +static DEVICE_ATTR_RO(vendor_name); + +static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); + + return sysfs_emit(buf, "%pUb\n", xd->remote_uuid); +} +static DEVICE_ATTR_RO(unique_id); + +static ssize_t speed_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); + + return sysfs_emit(buf, "%u.0 Gb/s\n", xd->link_speed); +} + +static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL); +static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL); + +static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); + unsigned int width; + + switch (xd->link_width) { + case TB_LINK_WIDTH_SINGLE: + case TB_LINK_WIDTH_ASYM_RX: + width = 1; + break; + case TB_LINK_WIDTH_DUAL: + width = 2; + break; + case TB_LINK_WIDTH_ASYM_TX: + width = 3; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + return sysfs_emit(buf, "%u\n", width); +} +static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL); + +static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); + unsigned int width; + + switch (xd->link_width) { + case TB_LINK_WIDTH_SINGLE: + case TB_LINK_WIDTH_ASYM_TX: + width = 1; + break; + case TB_LINK_WIDTH_DUAL: + width = 2; + break; + case TB_LINK_WIDTH_ASYM_RX: + width = 3; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + return sysfs_emit(buf, "%u\n", width); +} +static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL); + +static struct attribute *xdomain_attrs[] = { + &dev_attr_device.attr, + &dev_attr_device_name.attr, + &dev_attr_maxhopid.attr, + &dev_attr_rx_lanes.attr, + &dev_attr_rx_speed.attr, + &dev_attr_tx_lanes.attr, + &dev_attr_tx_speed.attr, + &dev_attr_unique_id.attr, + &dev_attr_vendor.attr, + &dev_attr_vendor_name.attr, + NULL, +}; + +static const struct attribute_group xdomain_attr_group = { + .attrs = xdomain_attrs, +}; + +static const struct attribute_group *xdomain_attr_groups[] = { + &xdomain_attr_group, + NULL, +}; + +static void tb_xdomain_release(struct device *dev) +{ + struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev); + + put_device(xd->dev.parent); + + kfree(xd->local_property_block); + tb_property_free_dir(xd->remote_properties); + ida_destroy(&xd->out_hopids); + ida_destroy(&xd->in_hopids); + ida_destroy(&xd->service_ids); + + kfree(xd->local_uuid); + kfree(xd->remote_uuid); + kfree(xd->device_name); + kfree(xd->vendor_name); + kfree(xd); +} + +static int __maybe_unused tb_xdomain_suspend(struct device *dev) +{ + stop_handshake(tb_to_xdomain(dev)); + return 0; +} + +static int 
__maybe_unused tb_xdomain_resume(struct device *dev) +{ + start_handshake(tb_to_xdomain(dev)); + return 0; +} + +static const struct dev_pm_ops tb_xdomain_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume) +}; + +struct device_type tb_xdomain_type = { + .name = "thunderbolt_xdomain", + .release = tb_xdomain_release, + .pm = &tb_xdomain_pm_ops, +}; +EXPORT_SYMBOL_GPL(tb_xdomain_type); + +/** + * tb_xdomain_alloc() - Allocate new XDomain object + * @tb: Domain where the XDomain belongs + * @parent: Parent device (the switch through the connection to the + * other domain is reached). + * @route: Route string used to reach the other domain + * @local_uuid: Our local domain UUID + * @remote_uuid: UUID of the other domain (optional) + * + * Allocates new XDomain structure and returns pointer to that. The + * object must be released by calling tb_xdomain_put(). + */ +struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent, + u64 route, const uuid_t *local_uuid, + const uuid_t *remote_uuid) +{ + struct tb_switch *parent_sw = tb_to_switch(parent); + struct tb_xdomain *xd; + struct tb_port *down; + + /* Make sure the downstream domain is accessible */ + down = tb_port_at(route, parent_sw); + tb_port_unlock(down); + + xd = kzalloc(sizeof(*xd), GFP_KERNEL); + if (!xd) + return NULL; + + xd->tb = tb; + xd->route = route; + xd->local_max_hopid = down->config.max_in_hop_id; + ida_init(&xd->service_ids); + ida_init(&xd->in_hopids); + ida_init(&xd->out_hopids); + mutex_init(&xd->lock); + INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work); + INIT_DELAYED_WORK(&xd->properties_changed_work, + tb_xdomain_properties_changed); + + xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL); + if (!xd->local_uuid) + goto err_free; + + if (remote_uuid) { + xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), + GFP_KERNEL); + if (!xd->remote_uuid) + goto err_free_local_uuid; + } else { + xd->needs_uuid = true; + xd->bonding_possible = !!down->dual_link_port; + } + + device_initialize(&xd->dev); + xd->dev.parent = get_device(parent); + xd->dev.bus = &tb_bus_type; + xd->dev.type = &tb_xdomain_type; + xd->dev.groups = xdomain_attr_groups; + dev_set_name(&xd->dev, "%u-%llx", tb->index, route); + + dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid); + if (remote_uuid) + dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid); + + /* + * This keeps the DMA powered on as long as we have active + * connection to another host. + */ + pm_runtime_set_active(&xd->dev); + pm_runtime_get_noresume(&xd->dev); + pm_runtime_enable(&xd->dev); + + return xd; + +err_free_local_uuid: + kfree(xd->local_uuid); +err_free: + kfree(xd); + + return NULL; +} + +/** + * tb_xdomain_add() - Add XDomain to the bus + * @xd: XDomain to add + * + * This function starts XDomain discovery protocol handshake and + * eventually adds the XDomain to the bus. After calling this function + * the caller needs to call tb_xdomain_remove() in order to remove and + * release the object regardless whether the handshake succeeded or not. + */ +void tb_xdomain_add(struct tb_xdomain *xd) +{ + /* Start exchanging properties with the other host */ + start_handshake(xd); +} + +static int unregister_service(struct device *dev, void *data) +{ + device_unregister(dev); + return 0; +} + +/** + * tb_xdomain_remove() - Remove XDomain from the bus + * @xd: XDomain to remove + * + * This will stop all ongoing configuration work and remove the XDomain + * along with any services from the bus. 
When the last reference to @xd + * is released the object will be released as well. + */ +void tb_xdomain_remove(struct tb_xdomain *xd) +{ + tb_xdomain_debugfs_remove(xd); + + stop_handshake(xd); + + device_for_each_child_reverse(&xd->dev, xd, unregister_service); + + /* + * Undo runtime PM here explicitly because it is possible that + * the XDomain was never added to the bus and thus device_del() + * is not called for it (device_del() would handle this otherwise). + */ + pm_runtime_disable(&xd->dev); + pm_runtime_put_noidle(&xd->dev); + pm_runtime_set_suspended(&xd->dev); + + if (!device_is_registered(&xd->dev)) { + put_device(&xd->dev); + } else { + dev_info(&xd->dev, "host disconnected\n"); + device_unregister(&xd->dev); + } +} + +/** + * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain + * @xd: XDomain connection + * + * Lane bonding is disabled by default for XDomains. This function tries + * to enable bonding by first enabling the port and waiting for the CL0 + * state. + * + * Return: %0 in case of success and negative errno in case of error. + */ +int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd) +{ + unsigned int width_mask; + struct tb_port *port; + int ret; + + port = tb_xdomain_downstream_port(xd); + if (!port->dual_link_port) + return -ENODEV; + + ret = tb_port_enable(port->dual_link_port); + if (ret) + return ret; + + ret = tb_wait_for_port(port->dual_link_port, true); + if (ret < 0) + return ret; + if (!ret) + return -ENOTCONN; + + ret = tb_port_lane_bonding_enable(port); + if (ret) { + tb_port_warn(port, "failed to enable lane bonding\n"); + return ret; + } + + /* Any of the widths are all bonded */ + width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX | + TB_LINK_WIDTH_ASYM_RX; + + ret = tb_port_wait_for_link_width(port, width_mask, + XDOMAIN_BONDING_TIMEOUT); + if (ret) { + tb_port_warn(port, "failed to enable lane bonding\n"); + return ret; + } + + tb_port_update_credits(port); + tb_xdomain_update_link_attributes(xd); + + dev_dbg(&xd->dev, "lane bonding enabled\n"); + return 0; +} +EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable); + +/** + * tb_xdomain_lane_bonding_disable() - Disable lane bonding + * @xd: XDomain connection + * + * Lane bonding is disabled by default for XDomains. If bonding has been + * enabled, this function can be used to disable it. + */ +void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd) +{ + struct tb_port *port; + + port = tb_xdomain_downstream_port(xd); + if (port->dual_link_port) { + int ret; + + tb_port_lane_bonding_disable(port); + ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_SINGLE, 100); + if (ret == -ETIMEDOUT) + tb_port_warn(port, "timeout disabling lane bonding\n"); + tb_port_disable(port->dual_link_port); + tb_port_update_credits(port); + tb_xdomain_update_link_attributes(xd); + + dev_dbg(&xd->dev, "lane bonding disabled\n"); + } +} +EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable); + +/** + * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling + * @xd: XDomain connection + * @hopid: Preferred HopID or %-1 for next available + * + * Returns allocated HopID or negative errno. Specifically returns + * %-ENOSPC if there are no more available HopIDs. Returned HopID is + * guaranteed to be within range supported by the input lane adapter. + * Call tb_xdomain_release_in_hopid() to release the allocated HopID. 
+ */
+int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
+{
+	if (hopid < 0)
+		hopid = TB_PATH_MIN_HOPID;
+	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
+		return -EINVAL;
+
+	return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
+			       GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
+
+/**
+ * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
+ * @xd: XDomain connection
+ * @hopid: Preferred HopID or %-1 for next available
+ *
+ * Returns allocated HopID or negative errno. Specifically returns
+ * %-ENOSPC if there are no more available HopIDs. Returned HopID is
+ * guaranteed to be within range supported by the output lane adapter.
+ * Call tb_xdomain_release_out_hopid() to release the allocated HopID.
+ */
+int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
+{
+	if (hopid < 0)
+		hopid = TB_PATH_MIN_HOPID;
+	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
+		return -EINVAL;
+
+	return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
+			       GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);
+
+/**
+ * tb_xdomain_release_in_hopid() - Release input HopID
+ * @xd: XDomain connection
+ * @hopid: HopID to release
+ */
+void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
+{
+	ida_free(&xd->in_hopids, hopid);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);
+
+/**
+ * tb_xdomain_release_out_hopid() - Release output HopID
+ * @xd: XDomain connection
+ * @hopid: HopID to release
+ */
+void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
+{
+	ida_free(&xd->out_hopids, hopid);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
+
+/**
+ * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ * @transmit_path: HopID we are using to send out packets
+ * @transmit_ring: DMA ring used to send out packets
+ * @receive_path: HopID the other end is using to send packets to us
+ * @receive_ring: DMA ring used to receive packets from @receive_path
+ *
+ * The function enables DMA paths accordingly so that after successful
+ * return the caller can send and receive packets using high-speed DMA
+ * path. If a transmit or receive path is not needed, pass %-1 for those
+ * parameters.
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
+			    int transmit_ring, int receive_path,
+			    int receive_ring)
+{
+	return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
+					       transmit_ring, receive_path,
+					       receive_ring);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
+
+/**
+ * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ * @transmit_path: HopID we are using to send out packets
+ * @transmit_ring: DMA ring used to send out packets
+ * @receive_path: HopID the other end is using to send packets to us
+ * @receive_ring: DMA ring used to receive packets from @receive_path
+ *
+ * This does the opposite of tb_xdomain_enable_paths(). After a call to
+ * this the caller is not expected to use the rings anymore. Passing %-1
+ * as path/ring parameter means don't care. Normally the callers should
+ * pass the same values here as they do when paths are enabled.
+ * + * Return: %0 in case of success and negative errno in case of error + */ +int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path, + int transmit_ring, int receive_path, + int receive_ring) +{ + return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path, + transmit_ring, receive_path, + receive_ring); +} +EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths); + +struct tb_xdomain_lookup { + const uuid_t *uuid; + u8 link; + u8 depth; + u64 route; +}; + +static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw, + const struct tb_xdomain_lookup *lookup) +{ + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + struct tb_xdomain *xd; + + if (port->xdomain) { + xd = port->xdomain; + + if (lookup->uuid) { + if (xd->remote_uuid && + uuid_equal(xd->remote_uuid, lookup->uuid)) + return xd; + } else { + if (lookup->link && lookup->link == xd->link && + lookup->depth == xd->depth) + return xd; + if (lookup->route && lookup->route == xd->route) + return xd; + } + } else if (tb_port_has_remote(port)) { + xd = switch_find_xdomain(port->remote->sw, lookup); + if (xd) + return xd; + } + } + + return NULL; +} + +/** + * tb_xdomain_find_by_uuid() - Find an XDomain by UUID + * @tb: Domain where the XDomain belongs to + * @uuid: UUID to look for + * + * Finds XDomain by walking through the Thunderbolt topology below @tb. + * The returned XDomain will have its reference count increased so the + * caller needs to call tb_xdomain_put() when it is done with the + * object. + * + * This will find all XDomains including the ones that are not yet added + * to the bus (handshake is still in progress). + * + * The caller needs to hold @tb->lock. + */ +struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid) +{ + struct tb_xdomain_lookup lookup; + struct tb_xdomain *xd; + + memset(&lookup, 0, sizeof(lookup)); + lookup.uuid = uuid; + + xd = switch_find_xdomain(tb->root_switch, &lookup); + return tb_xdomain_get(xd); +} +EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid); + +/** + * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth + * @tb: Domain where the XDomain belongs to + * @link: Root switch link number + * @depth: Depth in the link + * + * Finds XDomain by walking through the Thunderbolt topology below @tb. + * The returned XDomain will have its reference count increased so the + * caller needs to call tb_xdomain_put() when it is done with the + * object. + * + * This will find all XDomains including the ones that are not yet added + * to the bus (handshake is still in progress). + * + * The caller needs to hold @tb->lock. + */ +struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link, + u8 depth) +{ + struct tb_xdomain_lookup lookup; + struct tb_xdomain *xd; + + memset(&lookup, 0, sizeof(lookup)); + lookup.link = link; + lookup.depth = depth; + + xd = switch_find_xdomain(tb->root_switch, &lookup); + return tb_xdomain_get(xd); +} + +/** + * tb_xdomain_find_by_route() - Find an XDomain by route string + * @tb: Domain where the XDomain belongs to + * @route: XDomain route string + * + * Finds XDomain by walking through the Thunderbolt topology below @tb. + * The returned XDomain will have its reference count increased so the + * caller needs to call tb_xdomain_put() when it is done with the + * object. + * + * This will find all XDomains including the ones that are not yet added + * to the bus (handshake is still in progress). + * + * The caller needs to hold @tb->lock. 
+ */ +struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route) +{ + struct tb_xdomain_lookup lookup; + struct tb_xdomain *xd; + + memset(&lookup, 0, sizeof(lookup)); + lookup.route = route; + + xd = switch_find_xdomain(tb->root_switch, &lookup); + return tb_xdomain_get(xd); +} +EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route); + +bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type, + const void *buf, size_t size) +{ + const struct tb_protocol_handler *handler, *tmp; + const struct tb_xdp_header *hdr = buf; + unsigned int length; + int ret = 0; + + /* We expect the packet is at least size of the header */ + length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK; + if (length != size / 4 - sizeof(hdr->xd_hdr) / 4) + return true; + if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4) + return true; + + /* + * Handle XDomain discovery protocol packets directly here. For + * other protocols (based on their UUID) we call registered + * handlers in turn. + */ + if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) { + if (type == TB_CFG_PKG_XDOMAIN_REQ) + return tb_xdp_schedule_request(tb, hdr, size); + return false; + } + + mutex_lock(&xdomain_lock); + list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) { + if (!uuid_equal(&hdr->uuid, handler->uuid)) + continue; + + mutex_unlock(&xdomain_lock); + ret = handler->callback(buf, size, handler->data); + mutex_lock(&xdomain_lock); + + if (ret) + break; + } + mutex_unlock(&xdomain_lock); + + return ret > 0; +} + +static int update_xdomain(struct device *dev, void *data) +{ + struct tb_xdomain *xd; + + xd = tb_to_xdomain(dev); + if (xd) { + queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, + msecs_to_jiffies(50)); + } + + return 0; +} + +static void update_all_xdomains(void) +{ + bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain); +} + +static bool remove_directory(const char *key, const struct tb_property_dir *dir) +{ + struct tb_property *p; + + p = tb_property_find(xdomain_property_dir, key, + TB_PROPERTY_TYPE_DIRECTORY); + if (p && p->value.dir == dir) { + tb_property_remove(p); + return true; + } + return false; +} + +/** + * tb_register_property_dir() - Register property directory to the host + * @key: Key (name) of the directory to add + * @dir: Directory to add + * + * Service drivers can use this function to add new property directory + * to the host available properties. The other connected hosts are + * notified so they can re-read properties of this host if they are + * interested. + * + * Return: %0 on success and negative errno on failure + */ +int tb_register_property_dir(const char *key, struct tb_property_dir *dir) +{ + int ret; + + if (WARN_ON(!xdomain_property_dir)) + return -EAGAIN; + + if (!key || strlen(key) > 8) + return -EINVAL; + + mutex_lock(&xdomain_lock); + if (tb_property_find(xdomain_property_dir, key, + TB_PROPERTY_TYPE_DIRECTORY)) { + ret = -EEXIST; + goto err_unlock; + } + + ret = tb_property_add_dir(xdomain_property_dir, key, dir); + if (ret) + goto err_unlock; + + xdomain_property_block_gen++; + + mutex_unlock(&xdomain_lock); + update_all_xdomains(); + return 0; + +err_unlock: + mutex_unlock(&xdomain_lock); + return ret; +} +EXPORT_SYMBOL_GPL(tb_register_property_dir); + +/** + * tb_unregister_property_dir() - Removes property directory from host + * @key: Key (name) of the directory + * @dir: Directory to remove + * + * This will remove the existing directory from this host and notify the + * connected hosts about the change. 
+ */
+void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
+{
+	mutex_lock(&xdomain_lock);
+	if (remove_directory(key, dir))
+		xdomain_property_block_gen++;
+	mutex_unlock(&xdomain_lock);
+
+	update_all_xdomains();
+}
+EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
+
+int tb_xdomain_init(void)
+{
+	xdomain_property_dir = tb_property_create_dir(NULL);
+	if (!xdomain_property_dir)
+		return -ENOMEM;
+
+	/*
+	 * Initialize the standard set of properties without any service
+	 * directories. Those will be added by service drivers
+	 * themselves when they are loaded.
+	 *
+	 * The rest of the properties are filled dynamically based on
+	 * these when the P2P connection is made.
+	 */
+	tb_property_add_immediate(xdomain_property_dir, "vendorid",
+				  PCI_VENDOR_ID_INTEL);
+	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
+	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
+	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
+
+	xdomain_property_block_gen = get_random_u32();
+	return 0;
+}
+
+void tb_xdomain_exit(void)
+{
+	tb_property_free_dir(xdomain_property_dir);
+}
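
The exported HopID and DMA path helpers above are what a service driver uses to bring up a high-speed connection over an XDomain link. Below is a minimal, hypothetical sketch (example_enable_dma(), example_disable_dma() and the ring numbers are not part of this patch). It assumes only the kerneldoc semantics documented above: the transmit HopID comes from the output adapter range (tb_xdomain_alloc_out_hopid()) and the receive HopID from the input adapter range (tb_xdomain_alloc_in_hopid()). How the two ends exchange these values is protocol specific and omitted here.

#include <linux/thunderbolt.h>

/* Hypothetical helper, not part of xdomain.c */
static int example_enable_dma(struct tb_xdomain *xd, int tx_ring, int rx_ring,
			      int *tx_hopid, int *rx_hopid)
{
	int ret;

	/* Let the core pick any HopID within the supported ranges */
	*tx_hopid = tb_xdomain_alloc_out_hopid(xd, -1);
	if (*tx_hopid < 0)
		return *tx_hopid;

	*rx_hopid = tb_xdomain_alloc_in_hopid(xd, -1);
	if (*rx_hopid < 0) {
		tb_xdomain_release_out_hopid(xd, *tx_hopid);
		return *rx_hopid;
	}

	/* Program both directions of the DMA path in one call */
	ret = tb_xdomain_enable_paths(xd, *tx_hopid, tx_ring, *rx_hopid, rx_ring);
	if (ret) {
		tb_xdomain_release_in_hopid(xd, *rx_hopid);
		tb_xdomain_release_out_hopid(xd, *tx_hopid);
	}
	return ret;
}

/* Teardown mirrors the setup: pass the same values, then release the HopIDs */
static void example_disable_dma(struct tb_xdomain *xd, int tx_ring, int rx_ring,
				int tx_hopid, int rx_hopid)
{
	tb_xdomain_disable_paths(xd, tx_hopid, tx_ring, rx_hopid, rx_ring);
	tb_xdomain_release_in_hopid(xd, rx_hopid);
	tb_xdomain_release_out_hopid(xd, tx_hopid);
}

A real driver would take tx_ring and rx_ring from the NHI rings it has allocated and would negotiate the HopIDs with the remote host over the XDomain protocol before enabling the paths.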
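tb_register_property_dir() and tb_unregister_property_dir() are the hooks a service driver uses to export its own properties to the remote host. The following hypothetical sketch (the "example" key and the property names are illustrative only) uses the same property helpers that tb_xdomain_init() uses above.

#include <linux/thunderbolt.h>

static struct tb_property_dir *example_dir;

static int example_register_properties(void)
{
	int ret;

	example_dir = tb_property_create_dir(NULL);
	if (!example_dir)
		return -ENOMEM;

	/* Protocol specific values; purely illustrative here */
	tb_property_add_immediate(example_dir, "prtcid", 1);
	tb_property_add_immediate(example_dir, "prtcvers", 1);

	/* Key must be non-NULL and at most 8 characters long */
	ret = tb_register_property_dir("example", example_dir);
	if (ret)
		tb_property_free_dir(example_dir);
	return ret;
}

static void example_unregister_properties(void)
{
	tb_unregister_property_dir("example", example_dir);
	tb_property_free_dir(example_dir);
}

Registering or removing a directory bumps the property block generation and queues a properties-changed notification for every connected host (update_all_xdomains() above), so peers re-read the block.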
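The lookup helpers tb_xdomain_find_by_uuid() and tb_xdomain_find_by_route() require the caller to hold @tb->lock and return the XDomain with its reference count raised. A small hypothetical sketch of that contract (example_find() and the route value are illustrative only):

#include <linux/thunderbolt.h>

/* Hypothetical lookup following the locking rule documented above */
static struct tb_xdomain *example_find(struct tb *tb, u64 route)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_route(tb, route);
	mutex_unlock(&tb->lock);

	/* May be NULL; otherwise drop with tb_xdomain_put() when done */
	return xd;
}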