author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/thunderbolt
parent     Initial commit.
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--  drivers/thunderbolt/Kconfig    |   34
-rw-r--r--  drivers/thunderbolt/Makefile   |    9
-rw-r--r--  drivers/thunderbolt/acpi.c     |  130
-rw-r--r--  drivers/thunderbolt/cap.c      |  243
-rw-r--r--  drivers/thunderbolt/ctl.c      | 1040
-rw-r--r--  drivers/thunderbolt/ctl.h      |  144
-rw-r--r--  drivers/thunderbolt/debugfs.c  |  702
-rw-r--r--  drivers/thunderbolt/dma_port.c |  520
-rw-r--r--  drivers/thunderbolt/dma_port.h |   31
-rw-r--r--  drivers/thunderbolt/domain.c   |  859
-rw-r--r--  drivers/thunderbolt/eeprom.c   |  633
-rw-r--r--  drivers/thunderbolt/icm.c      | 2324
-rw-r--r--  drivers/thunderbolt/lc.c       |  473
-rw-r--r--  drivers/thunderbolt/nhi.c      | 1391
-rw-r--r--  drivers/thunderbolt/nhi.h      |   85
-rw-r--r--  drivers/thunderbolt/nhi_ops.c  |  185
-rw-r--r--  drivers/thunderbolt/nhi_regs.h |  164
-rw-r--r--  drivers/thunderbolt/nvm.c      |  170
-rw-r--r--  drivers/thunderbolt/path.c     |  591
-rw-r--r--  drivers/thunderbolt/property.c |  681
-rw-r--r--  drivers/thunderbolt/quirks.c   |   42
-rw-r--r--  drivers/thunderbolt/retimer.c  |  485
-rw-r--r--  drivers/thunderbolt/sb_regs.h  |   33
-rw-r--r--  drivers/thunderbolt/switch.c   | 2937
-rw-r--r--  drivers/thunderbolt/tb.c       | 1542
-rw-r--r--  drivers/thunderbolt/tb.h       | 1044
-rw-r--r--  drivers/thunderbolt/tb_msgs.h  |  571
-rw-r--r--  drivers/thunderbolt/tb_regs.h  |  457
-rw-r--r--  drivers/thunderbolt/test.c     | 1637
-rw-r--r--  drivers/thunderbolt/tmu.c      |  383
-rw-r--r--  drivers/thunderbolt/tunnel.c   | 1460
-rw-r--r--  drivers/thunderbolt/tunnel.h   |  116
-rw-r--r--  drivers/thunderbolt/usb4.c     | 1769
-rw-r--r--  drivers/thunderbolt/xdomain.c  | 1752
34 files changed, 24637 insertions, 0 deletions
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
new file mode 100644
index 000000000..7fc058f81
--- /dev/null
+++ b/drivers/thunderbolt/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig USB4
+ tristate "Unified support for USB4 and Thunderbolt"
+ depends on PCI
+ select APPLE_PROPERTIES if EFI_STUB && X86
+ select CRC32
+ select CRYPTO
+ select CRYPTO_HASH
+ select NVMEM
+ help
+ USB4 and Thunderbolt driver. USB4 is the public specification
+ based on the Thunderbolt 3 protocol. This driver is required if
+ you want to hotplug Thunderbolt and USB4 compliant devices on
+ Apple hardware or on PCs with Intel Falcon Ridge or newer.
+
+ To compile this driver as a module, choose M here. The module will be
+ called thunderbolt.
+
+if USB4
+
+config USB4_DEBUGFS_WRITE
+ bool "Enable write by debugfs to configuration spaces (DANGEROUS)"
+ help
+ Enables writing to device configuration registers through
+ debugfs interface.
+
+ Only enable this if you know what you are doing! Never enable
+ this for production systems or distro kernels.
+
+config USB4_KUNIT_TEST
+ bool "KUnit tests"
+ depends on KUNIT=y
+
+endif # USB4
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
new file mode 100644
index 000000000..571537371
--- /dev/null
+++ b/drivers/thunderbolt/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-${CONFIG_USB4} := thunderbolt.o
+thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
+thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
+thunderbolt-objs += nvm.o retimer.o quirks.o
+
+thunderbolt-${CONFIG_ACPI} += acpi.o
+thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o
+thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
new file mode 100644
index 000000000..6355fdf7d
--- /dev/null
+++ b/drivers/thunderbolt/acpi.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI support
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/pm_runtime.h>
+
+#include "tb.h"
+
+static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
+ void **return_value)
+{
+ struct fwnode_reference_args args;
+ struct fwnode_handle *fwnode;
+ struct tb_nhi *nhi = data;
+ struct acpi_device *adev;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ fwnode = acpi_fwnode_handle(adev);
+ ret = fwnode_property_get_reference_args(fwnode, "usb4-host-interface",
+ NULL, 0, 0, &args);
+ if (ret)
+ return AE_OK;
+
+ /* It needs to reference this NHI */
+ if (nhi->pdev->dev.fwnode != args.fwnode)
+ goto out_put;
+
+ /*
+ * Try to find the physical device by walking up the hierarchy.
+ * We need to do this because the xHCI driver might not yet be
+ * bound so the USB3 SuperSpeed ports are not yet created.
+ */
+ dev = acpi_get_first_physical_node(adev);
+ while (!dev) {
+ adev = adev->parent;
+ if (!adev)
+ break;
+ dev = acpi_get_first_physical_node(adev);
+ }
+
+ if (!dev)
+ goto out_put;
+
+ /*
+ * Check that the device is PCIe. This is because USB3
+ * SuperSpeed ports have this property and they are not power
+ * managed with the xHCI and the SuperSpeed hub so we create the
+ * link from xHCI instead.
+ */
+ while (dev && !dev_is_pci(dev))
+ dev = dev->parent;
+
+ if (!dev)
+ goto out_put;
+
+ /*
+ * Check that this actually matches the type of device we
+ * expect. It should either be xHCI or PCIe root/downstream
+ * port.
+ */
+ pdev = to_pci_dev(dev);
+ if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI ||
+ (pci_is_pcie(pdev) &&
+ (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
+ const struct device_link *link;
+
+ /*
+ * Make them both active first to make sure the NHI does
+ * not runtime suspend before the consumer. The
+ * pm_runtime_put() below then allows the consumer to
+ * runtime suspend again (which then allows NHI runtime
+ * suspend too now that the device link is established).
+ */
+ pm_runtime_get_sync(&pdev->dev);
+
+ link = device_link_add(&pdev->dev, &nhi->pdev->dev,
+ DL_FLAG_AUTOREMOVE_SUPPLIER |
+ DL_FLAG_RPM_ACTIVE |
+ DL_FLAG_PM_RUNTIME);
+ if (link) {
+ dev_dbg(&nhi->pdev->dev, "created link from %s\n",
+ dev_name(&pdev->dev));
+ } else {
+ dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
+ dev_name(&pdev->dev));
+ }
+
+ pm_runtime_put(&pdev->dev);
+ }
+
+out_put:
+ fwnode_handle_put(args.fwnode);
+ return AE_OK;
+}
+
+/**
+ * tb_acpi_add_links() - Add device links based on ACPI description
+ * @nhi: Pointer to NHI
+ *
+ * Goes over the ACPI namespace finding tunneled ports that reference the
+ * @nhi ACPI node. For each reference a device link is added. The link
+ * is automatically removed by the driver core.
+ */
+void tb_acpi_add_links(struct tb_nhi *nhi)
+{
+ acpi_status status;
+
+ if (!has_acpi_companion(&nhi->pdev->dev))
+ return;
+
+ /*
+ * Find all devices that have the usb4-host-interface device
+ * property that references this NHI.
+ */
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 32,
+ tb_acpi_add_link, NULL, nhi, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(&nhi->pdev->dev, "failed to enumerate tunneled ports\n");
+}
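
For illustration only (not part of the patch above), a minimal sketch of how a host interface driver might consume tb_acpi_add_links() while probing the NHI. The wrapper name is hypothetical; the call is harmless on non-ACPI systems because the function returns early when the NHI has no ACPI companion.

static void example_nhi_acpi_setup(struct tb_nhi *nhi)
{
	/*
	 * Create device links from tunneled USB3/PCIe ports to this
	 * NHI so that the NHI does not runtime suspend while the
	 * consumer ports are still active.
	 */
	tb_acpi_add_links(nhi);
}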
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
new file mode 100644
index 000000000..6f571e912
--- /dev/null
+++ b/drivers/thunderbolt/cap.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt driver - capabilities lookup
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/errno.h>
+
+#include "tb.h"
+
+#define CAP_OFFSET_MAX 0xff
+#define VSE_CAP_OFFSET_MAX 0xffff
+#define TMU_ACCESS_EN BIT(20)
+
+static int tb_port_enable_tmu(struct tb_port *port, bool enable)
+{
+ struct tb_switch *sw = port->sw;
+ u32 value, offset;
+ int ret;
+
+ /*
+ * Legacy devices need to have TMU access enabled before port
+ * space can be fully accessed.
+ */
+ if (tb_switch_is_light_ridge(sw))
+ offset = 0x26;
+ else if (tb_switch_is_eagle_ridge(sw))
+ offset = 0x2a;
+ else
+ return 0;
+
+ ret = tb_sw_read(sw, &value, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if (enable)
+ value |= TMU_ACCESS_EN;
+ else
+ value &= ~TMU_ACCESS_EN;
+
+ return tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
+}
+
+static void tb_port_dummy_read(struct tb_port *port)
+{
+ /*
+ * When reading from next capability pointer location in port
+ * config space the read data is not cleared on LR. To avoid
+ * reading stale data on next read perform one dummy read after
+ * port capabilities are walked.
+ */
+ if (tb_switch_is_light_ridge(port->sw)) {
+ u32 dummy;
+
+ tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1);
+ }
+}
+
+/**
+ * tb_port_next_cap() - Return next capability in the linked list
+ * @port: Port to find the capability for
+ * @offset: Previous capability offset (%0 for start)
+ *
+ * Finds dword offset of the next capability in port config space
+ * capability list and returns it. Passing %0 returns the first entry in
+ * the capability list. If no next capability is found returns %0. In case
+ * of failure returns negative errno.
+ */
+int tb_port_next_cap(struct tb_port *port, unsigned int offset)
+{
+ struct tb_cap_any header;
+ int ret;
+
+ if (!offset)
+ return port->config.first_cap_offset;
+
+ ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
+ if (ret)
+ return ret;
+
+ return header.basic.next;
+}
+
+static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
+{
+ int offset = 0;
+
+ do {
+ struct tb_cap_any header;
+ int ret;
+
+ offset = tb_port_next_cap(port, offset);
+ if (offset < 0)
+ return offset;
+
+ ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
+ if (ret)
+ return ret;
+
+ if (header.basic.cap == cap)
+ return offset;
+ } while (offset > 0);
+
+ return -ENOENT;
+}
+
+/**
+ * tb_port_find_cap() - Find port capability
+ * @port: Port to find the capability for
+ * @cap: Capability to look for
+ *
+ * Returns offset to start of capability or %-ENOENT if no such
+ * capability was found. Negative errno is returned if there was an
+ * error.
+ */
+int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
+{
+ int ret;
+
+ ret = tb_port_enable_tmu(port, true);
+ if (ret)
+ return ret;
+
+ ret = __tb_port_find_cap(port, cap);
+
+ tb_port_dummy_read(port);
+ tb_port_enable_tmu(port, false);
+
+ return ret;
+}
+
+/**
+ * tb_switch_next_cap() - Return next capability in the linked list
+ * @sw: Switch to find the capability for
+ * @offset: Previous capability offset (%0 for start)
+ *
+ * Finds dword offset of the next capability in router config space
+ * capability list and returns it. Passing %0 returns the first entry in
+ * the capability list. If no next capability is found returns %0. In case
+ * of failure returns negative errno.
+ */
+int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset)
+{
+ struct tb_cap_any header;
+ int ret;
+
+ if (!offset)
+ return sw->config.first_cap_offset;
+
+ ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
+ if (ret)
+ return ret;
+
+ switch (header.basic.cap) {
+ case TB_SWITCH_CAP_TMU:
+ ret = header.basic.next;
+ break;
+
+ case TB_SWITCH_CAP_VSE:
+ if (!header.extended_short.length)
+ ret = header.extended_long.next;
+ else
+ ret = header.extended_short.next;
+ break;
+
+ default:
+ tb_sw_dbg(sw, "unknown capability %#x at %#x\n",
+ header.basic.cap, offset);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret >= VSE_CAP_OFFSET_MAX ? 0 : ret;
+}
+
+/**
+ * tb_switch_find_cap() - Find switch capability
+ * @sw: Switch to find the capability for
+ * @cap: Capability to look for
+ *
+ * Returns offset to start of capability or %-ENOENT if no such
+ * capability was found. Negative errno is returned if there was an
+ * error.
+ */
+int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
+{
+ int offset = 0;
+
+ do {
+ struct tb_cap_any header;
+ int ret;
+
+ offset = tb_switch_next_cap(sw, offset);
+ if (offset < 0)
+ return offset;
+
+ ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if (header.basic.cap == cap)
+ return offset;
+ } while (offset);
+
+ return -ENOENT;
+}
+
+/**
+ * tb_switch_find_vse_cap() - Find switch vendor specific capability
+ * @sw: Switch to find the capability for
+ * @vsec: Vendor specific capability to look for
+ *
+ * Function enumerates vendor specific capabilities (VSEC) of a switch
+ * and returns offset when capability matching @vsec is found. If no
+ * such capability is found returns %-ENOENT. In case of error returns
+ * negative errno.
+ */
+int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec)
+{
+ int offset = 0;
+
+ do {
+ struct tb_cap_any header;
+ int ret;
+
+ offset = tb_switch_next_cap(sw, offset);
+ if (offset < 0)
+ return offset;
+
+ ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if (header.extended_short.cap == TB_SWITCH_CAP_VSE &&
+ header.extended_short.vsec_id == vsec)
+ return offset;
+ } while (offset);
+
+ return -ENOENT;
+}
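
For illustration only (not part of the patch above), a minimal sketch of how the lookup helpers in cap.c are typically combined: locate a vendor specific capability on a router and read its first dword. The function name is hypothetical and TB_VSE_CAP_PLUG_EVENTS is assumed to be one of the enum tb_switch_vse_cap values defined elsewhere in the driver.

static int example_read_vse_cap(struct tb_switch *sw, u32 *value)
{
	int offset, ret;

	/* Walks the capability list via tb_switch_next_cap() internally */
	offset = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
	if (offset < 0)
		return offset;	/* -ENOENT when the capability is missing */

	/* Read the first dword of the capability from router config space */
	ret = tb_sw_read(sw, value, TB_CFG_SWITCH, offset, 1);
	return ret;
}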
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
new file mode 100644
index 000000000..772acb190
--- /dev/null
+++ b/drivers/thunderbolt/ctl.c
@@ -0,0 +1,1040 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt driver - control channel and configuration commands
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+#include <linux/workqueue.h>
+
+#include "ctl.h"
+
+
+#define TB_CTL_RX_PKG_COUNT 10
+#define TB_CTL_RETRIES 4
+
+/**
+ * struct tb_ctl - Thunderbolt control channel
+ */
+struct tb_ctl {
+ struct tb_nhi *nhi;
+ struct tb_ring *tx;
+ struct tb_ring *rx;
+
+ struct dma_pool *frame_pool;
+ struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
+ struct mutex request_queue_lock;
+ struct list_head request_queue;
+ bool running;
+
+ event_cb callback;
+ void *callback_data;
+};
+
+
+#define tb_ctl_WARN(ctl, format, arg...) \
+ dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+#define tb_ctl_err(ctl, format, arg...) \
+ dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+#define tb_ctl_warn(ctl, format, arg...) \
+ dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+#define tb_ctl_info(ctl, format, arg...) \
+ dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+#define tb_ctl_dbg(ctl, format, arg...) \
+ dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
+
+static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
+/* Serializes access to request kref_get/put */
+static DEFINE_MUTEX(tb_cfg_request_lock);
+
+/**
+ * tb_cfg_request_alloc() - Allocates a new config request
+ *
+ * This is a refcounted object so when you are done with it, call
+ * tb_cfg_request_put() to release it.
+ */
+struct tb_cfg_request *tb_cfg_request_alloc(void)
+{
+ struct tb_cfg_request *req;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return NULL;
+
+ kref_init(&req->kref);
+
+ return req;
+}
+
+/**
+ * tb_cfg_request_get() - Increase refcount of a request
+ * @req: Request whose refcount is increased
+ */
+void tb_cfg_request_get(struct tb_cfg_request *req)
+{
+ mutex_lock(&tb_cfg_request_lock);
+ kref_get(&req->kref);
+ mutex_unlock(&tb_cfg_request_lock);
+}
+
+static void tb_cfg_request_destroy(struct kref *kref)
+{
+ struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);
+
+ kfree(req);
+}
+
+/**
+ * tb_cfg_request_put() - Decrease refcount and possibly release the request
+ * @req: Request whose refcount is decreased
+ *
+ * Call this function when you are done with the request. When refcount
+ * goes to %0 the object is released.
+ */
+void tb_cfg_request_put(struct tb_cfg_request *req)
+{
+ mutex_lock(&tb_cfg_request_lock);
+ kref_put(&req->kref, tb_cfg_request_destroy);
+ mutex_unlock(&tb_cfg_request_lock);
+}
+
+static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
+ struct tb_cfg_request *req)
+{
+ WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
+ WARN_ON(req->ctl);
+
+ mutex_lock(&ctl->request_queue_lock);
+ if (!ctl->running) {
+ mutex_unlock(&ctl->request_queue_lock);
+ return -ENOTCONN;
+ }
+ req->ctl = ctl;
+ list_add_tail(&req->list, &ctl->request_queue);
+ set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+ mutex_unlock(&ctl->request_queue_lock);
+ return 0;
+}
+
+static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
+{
+ struct tb_ctl *ctl = req->ctl;
+
+ mutex_lock(&ctl->request_queue_lock);
+ list_del(&req->list);
+ clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+ if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
+ wake_up(&tb_cfg_request_cancel_queue);
+ mutex_unlock(&ctl->request_queue_lock);
+}
+
+static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
+{
+ return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
+}
+
+static struct tb_cfg_request *
+tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
+{
+ struct tb_cfg_request *req;
+ bool found = false;
+
+ mutex_lock(&pkg->ctl->request_queue_lock);
+ list_for_each_entry(req, &pkg->ctl->request_queue, list) {
+ tb_cfg_request_get(req);
+ if (req->match(req, pkg)) {
+ found = true;
+ break;
+ }
+ tb_cfg_request_put(req);
+ }
+ mutex_unlock(&pkg->ctl->request_queue_lock);
+
+ return found ? req : NULL;
+}
+
+/* utility functions */
+
+
+static int check_header(const struct ctl_pkg *pkg, u32 len,
+ enum tb_cfg_pkg_type type, u64 route)
+{
+ struct tb_cfg_header *header = pkg->buffer;
+
+ /* check frame, TODO: frame flags */
+ if (WARN(len != pkg->frame.size,
+ "wrong framesize (expected %#x, got %#x)\n",
+ len, pkg->frame.size))
+ return -EIO;
+ if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
+ type, pkg->frame.eof))
+ return -EIO;
+ if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
+ pkg->frame.sof))
+ return -EIO;
+
+ /* check header */
+ if (WARN(header->unknown != 1 << 9,
+ "header->unknown is %#x\n", header->unknown))
+ return -EIO;
+ if (WARN(route != tb_cfg_get_route(header),
+ "wrong route (expected %llx, got %llx)",
+ route, tb_cfg_get_route(header)))
+ return -EIO;
+ return 0;
+}
+
+static int check_config_address(struct tb_cfg_address addr,
+ enum tb_cfg_space space, u32 offset,
+ u32 length)
+{
+ if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
+ return -EIO;
+ if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
+ space, addr.space))
+ return -EIO;
+ if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
+ offset, addr.offset))
+ return -EIO;
+ if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
+ length, addr.length))
+ return -EIO;
+ /*
+ * We cannot check addr->port as it is set to the upstream port of the
+ * sender.
+ */
+ return 0;
+}
+
+static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
+{
+ struct cfg_error_pkg *pkg = response->buffer;
+ struct tb_ctl *ctl = response->ctl;
+ struct tb_cfg_result res = { 0 };
+ res.response_route = tb_cfg_get_route(&pkg->header);
+ res.response_port = 0;
+ res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
+ tb_cfg_get_route(&pkg->header));
+ if (res.err)
+ return res;
+
+ if (pkg->zero1)
+ tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
+ if (pkg->zero2)
+ tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
+ if (pkg->zero3)
+ tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);
+
+ res.err = 1;
+ res.tb_error = pkg->error;
+ res.response_port = pkg->port;
+ return res;
+
+}
+
+static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
+ enum tb_cfg_pkg_type type, u64 route)
+{
+ struct tb_cfg_header *header = pkg->buffer;
+ struct tb_cfg_result res = { 0 };
+
+ if (pkg->frame.eof == TB_CFG_PKG_ERROR)
+ return decode_error(pkg);
+
+ res.response_port = 0; /* will be updated later for cfg_read/write */
+ res.response_route = tb_cfg_get_route(header);
+ res.err = check_header(pkg, len, type, route);
+ return res;
+}
+
+static void tb_cfg_print_error(struct tb_ctl *ctl,
+ const struct tb_cfg_result *res)
+{
+ WARN_ON(res->err != 1);
+ switch (res->tb_error) {
+ case TB_CFG_ERROR_PORT_NOT_CONNECTED:
+ /* Port is not connected. This can happen during surprise
+ * removal. Do not warn. */
+ return;
+ case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
+ /*
+ * Invalid cfg_space/offset/length combination in
+ * cfg_read/cfg_write.
+ */
+ tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
+ res->response_route, res->response_port);
+ return;
+ case TB_CFG_ERROR_NO_SUCH_PORT:
+ /*
+ * - The route contains a non-existent port.
+ * - The route contains a non-PHY port (e.g. PCIe).
+ * - The port in cfg_read/cfg_write does not exist.
+ */
+ tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
+ res->response_route, res->response_port);
+ return;
+ case TB_CFG_ERROR_LOOP:
+ tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
+ res->response_route, res->response_port);
+ return;
+ case TB_CFG_ERROR_LOCK:
+ tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
+ res->response_route, res->response_port);
+ return;
+ default:
+ /* 5,6,7,9 and 11 are also valid error codes */
+ tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
+ res->response_route, res->response_port);
+ return;
+ }
+}
+
+static __be32 tb_crc(const void *data, size_t len)
+{
+ return cpu_to_be32(~__crc32c_le(~0, data, len));
+}
+
+static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
+{
+ if (pkg) {
+ dma_pool_free(pkg->ctl->frame_pool,
+ pkg->buffer, pkg->frame.buffer_phy);
+ kfree(pkg);
+ }
+}
+
+static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
+{
+ struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
+ if (!pkg)
+ return NULL;
+ pkg->ctl = ctl;
+ pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
+ &pkg->frame.buffer_phy);
+ if (!pkg->buffer) {
+ kfree(pkg);
+ return NULL;
+ }
+ return pkg;
+}
+
+
+/* RX/TX handling */
+
+static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
+ bool canceled)
+{
+ struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
+ tb_ctl_pkg_free(pkg);
+}
+
+/**
+ * tb_ctl_tx() - transmit a packet on the control channel
+ *
+ * len must be a multiple of four.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
+ enum tb_cfg_pkg_type type)
+{
+ int res;
+ struct ctl_pkg *pkg;
+ if (len % 4 != 0) { /* required for le->be conversion */
+ tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
+ return -EINVAL;
+ }
+ if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
+ tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
+ len, TB_FRAME_SIZE - 4);
+ return -EINVAL;
+ }
+ pkg = tb_ctl_pkg_alloc(ctl);
+ if (!pkg)
+ return -ENOMEM;
+ pkg->frame.callback = tb_ctl_tx_callback;
+ pkg->frame.size = len + 4;
+ pkg->frame.sof = type;
+ pkg->frame.eof = type;
+ cpu_to_be32_array(pkg->buffer, data, len / 4);
+ *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
+
+ res = tb_ring_tx(ctl->tx, &pkg->frame);
+ if (res) /* ring is stopped */
+ tb_ctl_pkg_free(pkg);
+ return res;
+}
+
+/**
+ * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
+ */
+static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
+ struct ctl_pkg *pkg, size_t size)
+{
+ return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
+}
+
+static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
+{
+ tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
+ * We ignore failures during stop.
+ * All rx packets are referenced
+ * from ctl->rx_packets, so we do
+ * not lose them.
+ */
+}
+
+static int tb_async_error(const struct ctl_pkg *pkg)
+{
+ const struct cfg_error_pkg *error = pkg->buffer;
+
+ if (pkg->frame.eof != TB_CFG_PKG_ERROR)
+ return false;
+
+ switch (error->error) {
+ case TB_CFG_ERROR_LINK_ERROR:
+ case TB_CFG_ERROR_HEC_ERROR_DETECTED:
+ case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
+ bool canceled)
+{
+ struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
+ struct tb_cfg_request *req;
+ __be32 crc32;
+
+ if (canceled)
+ return; /*
+ * ring is stopped, packet is referenced from
+ * ctl->rx_packets.
+ */
+
+ if (frame->size < 4 || frame->size % 4 != 0) {
+ tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
+ frame->size);
+ goto rx;
+ }
+
+ frame->size -= 4; /* remove checksum */
+ crc32 = tb_crc(pkg->buffer, frame->size);
+ be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);
+
+ switch (frame->eof) {
+ case TB_CFG_PKG_READ:
+ case TB_CFG_PKG_WRITE:
+ case TB_CFG_PKG_ERROR:
+ case TB_CFG_PKG_OVERRIDE:
+ case TB_CFG_PKG_RESET:
+ if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
+ tb_ctl_err(pkg->ctl,
+ "RX: checksum mismatch, dropping packet\n");
+ goto rx;
+ }
+ if (tb_async_error(pkg)) {
+ tb_ctl_handle_event(pkg->ctl, frame->eof,
+ pkg, frame->size);
+ goto rx;
+ }
+ break;
+
+ case TB_CFG_PKG_EVENT:
+ case TB_CFG_PKG_XDOMAIN_RESP:
+ case TB_CFG_PKG_XDOMAIN_REQ:
+ if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
+ tb_ctl_err(pkg->ctl,
+ "RX: checksum mismatch, dropping packet\n");
+ goto rx;
+ }
+ fallthrough;
+ case TB_CFG_PKG_ICM_EVENT:
+ if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
+ goto rx;
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * The received packet will be processed only if there is an
+ * active request and the packet is what is expected. This
+ * prevents packets such as replies coming after timeout has
+ * triggered from messing with the active requests.
+ */
+ req = tb_cfg_request_find(pkg->ctl, pkg);
+ if (req) {
+ if (req->copy(req, pkg))
+ schedule_work(&req->work);
+ tb_cfg_request_put(req);
+ }
+
+rx:
+ tb_ctl_rx_submit(pkg);
+}
+
+static void tb_cfg_request_work(struct work_struct *work)
+{
+ struct tb_cfg_request *req = container_of(work, typeof(*req), work);
+
+ if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
+ req->callback(req->callback_data);
+
+ tb_cfg_request_dequeue(req);
+ tb_cfg_request_put(req);
+}
+
+/**
+ * tb_cfg_request() - Start control request not waiting for it to complete
+ * @ctl: Control channel to use
+ * @req: Request to start
+ * @callback: Callback called when the request is completed
+ * @callback_data: Data to be passed to @callback
+ *
+ * This queues @req on the given control channel without waiting for it
+ * to complete. When the request completes @callback is called.
+ */
+int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
+ void (*callback)(void *), void *callback_data)
+{
+ int ret;
+
+ req->flags = 0;
+ req->callback = callback;
+ req->callback_data = callback_data;
+ INIT_WORK(&req->work, tb_cfg_request_work);
+ INIT_LIST_HEAD(&req->list);
+
+ tb_cfg_request_get(req);
+ ret = tb_cfg_request_enqueue(ctl, req);
+ if (ret)
+ goto err_put;
+
+ ret = tb_ctl_tx(ctl, req->request, req->request_size,
+ req->request_type);
+ if (ret)
+ goto err_dequeue;
+
+ if (!req->response)
+ schedule_work(&req->work);
+
+ return 0;
+
+err_dequeue:
+ tb_cfg_request_dequeue(req);
+err_put:
+ tb_cfg_request_put(req);
+
+ return ret;
+}
+
+/**
+ * tb_cfg_request_cancel() - Cancel a control request
+ * @req: Request to cancel
+ * @err: Error to assign to the request
+ *
+ * This function can be used to cancel an ongoing request. It will wait
+ * until the request is not active anymore.
+ */
+void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
+{
+ set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
+ schedule_work(&req->work);
+ wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
+ req->result.err = err;
+}
+
+static void tb_cfg_request_complete(void *data)
+{
+ complete(data);
+}
+
+/**
+ * tb_cfg_request_sync() - Start control request and wait until it completes
+ * @ctl: Control channel to use
+ * @req: Request to start
+ * @timeout_msec: Timeout (in ms) how long to wait for @req to complete
+ *
+ * Starts a control request and waits until it completes. If the timeout
+ * triggers, the request is canceled before the function returns. Note the
+ * caller needs to make sure only one message for given switch is active
+ * at a time.
+ */
+struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
+ struct tb_cfg_request *req,
+ int timeout_msec)
+{
+ unsigned long timeout = msecs_to_jiffies(timeout_msec);
+ struct tb_cfg_result res = { 0 };
+ DECLARE_COMPLETION_ONSTACK(done);
+ int ret;
+
+ ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
+ if (ret) {
+ res.err = ret;
+ return res;
+ }
+
+ if (!wait_for_completion_timeout(&done, timeout))
+ tb_cfg_request_cancel(req, -ETIMEDOUT);
+
+ flush_work(&req->work);
+
+ return req->result;
+}
+
+/* public interface, alloc/start/stop/free */
+
+/**
+ * tb_ctl_alloc() - allocate a control channel
+ *
+ * cb will be invoked once for every hot plug event.
+ *
+ * Return: Returns a pointer on success or NULL on failure.
+ */
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
+{
+ int i;
+ struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return NULL;
+ ctl->nhi = nhi;
+ ctl->callback = cb;
+ ctl->callback_data = cb_data;
+
+ mutex_init(&ctl->request_queue_lock);
+ INIT_LIST_HEAD(&ctl->request_queue);
+ ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
+ TB_FRAME_SIZE, 4, 0);
+ if (!ctl->frame_pool)
+ goto err;
+
+ ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
+ if (!ctl->tx)
+ goto err;
+
+ ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff,
+ 0xffff, NULL, NULL);
+ if (!ctl->rx)
+ goto err;
+
+ for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
+ ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
+ if (!ctl->rx_packets[i])
+ goto err;
+ ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
+ }
+
+ tb_ctl_dbg(ctl, "control channel created\n");
+ return ctl;
+err:
+ tb_ctl_free(ctl);
+ return NULL;
+}
+
+/**
+ * tb_ctl_free() - free a control channel
+ *
+ * Must be called after tb_ctl_stop.
+ *
+ * Must NOT be called from ctl->callback.
+ */
+void tb_ctl_free(struct tb_ctl *ctl)
+{
+ int i;
+
+ if (!ctl)
+ return;
+
+ if (ctl->rx)
+ tb_ring_free(ctl->rx);
+ if (ctl->tx)
+ tb_ring_free(ctl->tx);
+
+ /* free RX packets */
+ for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
+ tb_ctl_pkg_free(ctl->rx_packets[i]);
+
+
+ dma_pool_destroy(ctl->frame_pool);
+ kfree(ctl);
+}
+
+/**
+ * tb_ctl_start() - start/resume the control channel
+ */
+void tb_ctl_start(struct tb_ctl *ctl)
+{
+ int i;
+ tb_ctl_dbg(ctl, "control channel starting...\n");
+ tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
+ tb_ring_start(ctl->rx);
+ for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
+ tb_ctl_rx_submit(ctl->rx_packets[i]);
+
+ ctl->running = true;
+}
+
+/**
+ * tb_ctl_stop() - pause the control channel
+ *
+ * All invocations of ctl->callback will have finished after this method
+ * returns.
+ *
+ * Must NOT be called from ctl->callback.
+ */
+void tb_ctl_stop(struct tb_ctl *ctl)
+{
+ mutex_lock(&ctl->request_queue_lock);
+ ctl->running = false;
+ mutex_unlock(&ctl->request_queue_lock);
+
+ tb_ring_stop(ctl->rx);
+ tb_ring_stop(ctl->tx);
+
+ if (!list_empty(&ctl->request_queue))
+ tb_ctl_WARN(ctl, "dangling request in request_queue\n");
+ INIT_LIST_HEAD(&ctl->request_queue);
+ tb_ctl_dbg(ctl, "control channel stopped\n");
+}
+
+/* public interface, commands */
+
+/**
+ * tb_cfg_ack_plug() - Ack hot plug/unplug event
+ * @ctl: Control channel to use
+ * @route: Router that originated the event
+ * @port: Port where the hot plug/unplug happened
+ * @unplug: Ack hot plug or unplug
+ *
+ * Call this as response for hot plug/unplug event to ack it.
+ * Returns %0 on success or an error code on failure.
+ */
+int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
+{
+ struct cfg_error_pkg pkg = {
+ .header = tb_cfg_make_header(route),
+ .port = port,
+ .error = TB_CFG_ERROR_ACK_PLUG_EVENT,
+ .pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
+ : TB_CFG_ERROR_PG_HOT_PLUG,
+ };
+ tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
+ unplug ? "un" : "", route, port);
+ return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
+}
+
+static bool tb_cfg_match(const struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg)
+{
+ u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
+
+ if (pkg->frame.eof == TB_CFG_PKG_ERROR)
+ return true;
+
+ if (pkg->frame.eof != req->response_type)
+ return false;
+ if (route != tb_cfg_get_route(req->request))
+ return false;
+ if (pkg->frame.size != req->response_size)
+ return false;
+
+ if (pkg->frame.eof == TB_CFG_PKG_READ ||
+ pkg->frame.eof == TB_CFG_PKG_WRITE) {
+ const struct cfg_read_pkg *req_hdr = req->request;
+ const struct cfg_read_pkg *res_hdr = pkg->buffer;
+
+ if (req_hdr->addr.seq != res_hdr->addr.seq)
+ return false;
+ }
+
+ return true;
+}
+
+static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+ struct tb_cfg_result res;
+
+ /* Now make sure it is in expected format */
+ res = parse_header(pkg, req->response_size, req->response_type,
+ tb_cfg_get_route(req->request));
+ if (!res.err)
+ memcpy(req->response, pkg->buffer, req->response_size);
+
+ req->result = res;
+
+ /* Always complete when first response is received */
+ return true;
+}
+
+/**
+ * tb_cfg_reset() - send a reset packet and wait for a response
+ *
+ * If the switch at route is incorrectly configured then we will not receive a
+ * reply (even though the switch will reset). The caller should check for
+ * -ETIMEDOUT and attempt to reconfigure the switch.
+ */
+struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
+ int timeout_msec)
+{
+ struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
+ struct tb_cfg_result res = { 0 };
+ struct tb_cfg_header reply;
+ struct tb_cfg_request *req;
+
+ req = tb_cfg_request_alloc();
+ if (!req) {
+ res.err = -ENOMEM;
+ return res;
+ }
+
+ req->match = tb_cfg_match;
+ req->copy = tb_cfg_copy;
+ req->request = &request;
+ req->request_size = sizeof(request);
+ req->request_type = TB_CFG_PKG_RESET;
+ req->response = &reply;
+ req->response_size = sizeof(reply);
+ req->response_type = TB_CFG_PKG_RESET;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+ tb_cfg_request_put(req);
+
+ return res;
+}
+
+/**
+ * tb_cfg_read_raw() - read from config space into buffer
+ *
+ * Offset and length are in dwords.
+ */
+struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
+ u64 route, u32 port, enum tb_cfg_space space,
+ u32 offset, u32 length, int timeout_msec)
+{
+ struct tb_cfg_result res = { 0 };
+ struct cfg_read_pkg request = {
+ .header = tb_cfg_make_header(route),
+ .addr = {
+ .port = port,
+ .space = space,
+ .offset = offset,
+ .length = length,
+ },
+ };
+ struct cfg_write_pkg reply;
+ int retries = 0;
+
+ while (retries < TB_CTL_RETRIES) {
+ struct tb_cfg_request *req;
+
+ req = tb_cfg_request_alloc();
+ if (!req) {
+ res.err = -ENOMEM;
+ return res;
+ }
+
+ request.addr.seq = retries++;
+
+ req->match = tb_cfg_match;
+ req->copy = tb_cfg_copy;
+ req->request = &request;
+ req->request_size = sizeof(request);
+ req->request_type = TB_CFG_PKG_READ;
+ req->response = &reply;
+ req->response_size = 12 + 4 * length;
+ req->response_type = TB_CFG_PKG_READ;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+ tb_cfg_request_put(req);
+
+ if (res.err != -ETIMEDOUT)
+ break;
+
+ /* Wait a bit (arbitrary time) until we send a retry */
+ usleep_range(10, 100);
+ }
+
+ if (res.err)
+ return res;
+
+ res.response_port = reply.addr.port;
+ res.err = check_config_address(reply.addr, space, offset, length);
+ if (!res.err)
+ memcpy(buffer, &reply.data, 4 * length);
+ return res;
+}
+
+/**
+ * tb_cfg_write_raw() - write from buffer into config space
+ *
+ * Offset and length are in dwords.
+ */
+struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
+ u64 route, u32 port, enum tb_cfg_space space,
+ u32 offset, u32 length, int timeout_msec)
+{
+ struct tb_cfg_result res = { 0 };
+ struct cfg_write_pkg request = {
+ .header = tb_cfg_make_header(route),
+ .addr = {
+ .port = port,
+ .space = space,
+ .offset = offset,
+ .length = length,
+ },
+ };
+ struct cfg_read_pkg reply;
+ int retries = 0;
+
+ memcpy(&request.data, buffer, length * 4);
+
+ while (retries < TB_CTL_RETRIES) {
+ struct tb_cfg_request *req;
+
+ req = tb_cfg_request_alloc();
+ if (!req) {
+ res.err = -ENOMEM;
+ return res;
+ }
+
+ request.addr.seq = retries++;
+
+ req->match = tb_cfg_match;
+ req->copy = tb_cfg_copy;
+ req->request = &request;
+ req->request_size = 12 + 4 * length;
+ req->request_type = TB_CFG_PKG_WRITE;
+ req->response = &reply;
+ req->response_size = sizeof(reply);
+ req->response_type = TB_CFG_PKG_WRITE;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+ tb_cfg_request_put(req);
+
+ if (res.err != -ETIMEDOUT)
+ break;
+
+ /* Wait a bit (arbitrary time) until we send a retry */
+ usleep_range(10, 100);
+ }
+
+ if (res.err)
+ return res;
+
+ res.response_port = reply.addr.port;
+ res.err = check_config_address(reply.addr, space, offset, length);
+ return res;
+}
+
+static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
+ const struct tb_cfg_result *res)
+{
+ /*
+ * For unimplemented ports access to port config space may return
+ * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
+ * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
+ * that the caller can mark the port as disabled.
+ */
+ if (space == TB_CFG_PORT &&
+ res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
+ return -ENODEV;
+
+ tb_cfg_print_error(ctl, res);
+
+ if (res->tb_error == TB_CFG_ERROR_LOCK)
+ return -EACCES;
+ return -EIO;
+}
+
+int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
+ enum tb_cfg_space space, u32 offset, u32 length)
+{
+ struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
+ space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
+ switch (res.err) {
+ case 0:
+ /* Success */
+ break;
+
+ case 1:
+ /* Thunderbolt error, tb_error holds the actual number */
+ return tb_cfg_get_error(ctl, space, &res);
+
+ case -ETIMEDOUT:
+ tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
+ route, space, offset);
+ break;
+
+ default:
+ WARN(1, "tb_cfg_read: %d\n", res.err);
+ break;
+ }
+ return res.err;
+}
+
+int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
+ enum tb_cfg_space space, u32 offset, u32 length)
+{
+ struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
+ space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
+ switch (res.err) {
+ case 0:
+ /* Success */
+ break;
+
+ case 1:
+ /* Thunderbolt error, tb_error holds the actual number */
+ return tb_cfg_get_error(ctl, space, &res);
+
+ case -ETIMEDOUT:
+ tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
+ route, space, offset);
+ break;
+
+ default:
+ WARN(1, "tb_cfg_write: %d\n", res.err);
+ break;
+ }
+ return res.err;
+}
+
+/**
+ * tb_cfg_get_upstream_port() - get upstream port number of switch at route
+ *
+ * Reads the first dword from the switch's TB_CFG_SWITCH config area and
+ * returns the port number from which the reply originated.
+ *
+ * Return: Returns the upstream port number on success or an error code on
+ * failure.
+ */
+int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
+{
+ u32 dummy;
+ struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
+ TB_CFG_SWITCH, 0, 1,
+ TB_CFG_DEFAULT_TIMEOUT);
+ if (res.err == 1)
+ return -EIO;
+ if (res.err)
+ return res.err;
+ return res.response_port;
+}
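
For illustration only (not part of the patch above), a minimal sketch of a config space read through the convenience wrapper tb_cfg_read(). Offsets and lengths are in dwords; the underlying tb_cfg_read_raw() retries automatically when a request times out. The function name is hypothetical and the data buffer must have room for two dwords.

static int example_read_router_config(struct tb_ctl *ctl, u64 route, u32 *data)
{
	/*
	 * Read two dwords starting at dword offset 0 of the router
	 * (TB_CFG_SWITCH) config space. Port 0 is used here, as in
	 * tb_cfg_get_upstream_port() above. On success the dwords
	 * have already been converted to CPU byte order by the RX path.
	 */
	return tb_cfg_read(ctl, data, route, 0, TB_CFG_SWITCH, 0, 2);
}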
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
new file mode 100644
index 000000000..97cb03b38
--- /dev/null
+++ b/drivers/thunderbolt/ctl.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt driver - control channel and configuration commands
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
+ */
+
+#ifndef _TB_CFG
+#define _TB_CFG
+
+#include <linux/kref.h>
+#include <linux/thunderbolt.h>
+
+#include "nhi.h"
+#include "tb_msgs.h"
+
+/* control channel */
+struct tb_ctl;
+
+typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size);
+
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data);
+void tb_ctl_start(struct tb_ctl *ctl);
+void tb_ctl_stop(struct tb_ctl *ctl);
+void tb_ctl_free(struct tb_ctl *ctl);
+
+/* configuration commands */
+
+#define TB_CFG_DEFAULT_TIMEOUT 5000 /* msec */
+
+struct tb_cfg_result {
+ u64 response_route;
+ u32 response_port; /*
+ * If err = 1 then this is the port that sent the
+ * error.
+ * If err = 0 and if this was a cfg_read/write then
+ * this is the upstream port of the responding
+ * switch.
+ * Otherwise the field is set to zero.
+ */
+ int err; /* negative errors, 0 for success, 1 for tb errors */
+ enum tb_cfg_error tb_error; /* valid if err == 1 */
+};
+
+struct ctl_pkg {
+ struct tb_ctl *ctl;
+ void *buffer;
+ struct ring_frame frame;
+};
+
+/**
+ * struct tb_cfg_request - Control channel request
+ * @kref: Reference count
+ * @ctl: Pointer to the control channel structure. Only set when the
+ * request is queued.
+ * @request: Request is stored here
+ * @request_size: Size of the request packet (in bytes)
+ * @request_type: Type of the request packet
+ * @response: Response is stored here
+ * @response_size: Maximum size of one response packet
+ * @response_type: Expected type of the response packet
+ * @npackets: Number of packets expected to be returned with this request
+ * @match: Function used to match the incoming packet
+ * @copy: Function used to copy the incoming packet to @response
+ * @callback: Callback called when the request is finished successfully
+ * @callback_data: Data to be passed to @callback
+ * @flags: Flags for the request
+ * @work: Work item used to complete the request
+ * @result: Result after the request has been completed
+ * @list: Requests are queued using this field
+ *
+ * An arbitrary request over the Thunderbolt control channel. For standard
+ * control channel messages, one should use tb_cfg_read/write() and
+ * friends if possible.
+ */
+struct tb_cfg_request {
+ struct kref kref;
+ struct tb_ctl *ctl;
+ const void *request;
+ size_t request_size;
+ enum tb_cfg_pkg_type request_type;
+ void *response;
+ size_t response_size;
+ enum tb_cfg_pkg_type response_type;
+ size_t npackets;
+ bool (*match)(const struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg);
+ bool (*copy)(struct tb_cfg_request *req, const struct ctl_pkg *pkg);
+ void (*callback)(void *callback_data);
+ void *callback_data;
+ unsigned long flags;
+ struct work_struct work;
+ struct tb_cfg_result result;
+ struct list_head list;
+};
+
+#define TB_CFG_REQUEST_ACTIVE 0
+#define TB_CFG_REQUEST_CANCELED 1
+
+struct tb_cfg_request *tb_cfg_request_alloc(void);
+void tb_cfg_request_get(struct tb_cfg_request *req);
+void tb_cfg_request_put(struct tb_cfg_request *req);
+int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
+ void (*callback)(void *), void *callback_data);
+void tb_cfg_request_cancel(struct tb_cfg_request *req, int err);
+struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
+ struct tb_cfg_request *req, int timeout_msec);
+
+static inline u64 tb_cfg_get_route(const struct tb_cfg_header *header)
+{
+ return (u64) header->route_hi << 32 | header->route_lo;
+}
+
+static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
+{
+ struct tb_cfg_header header = {
+ .route_hi = route >> 32,
+ .route_lo = route,
+ };
+ /* check for overflow, route_hi is not 32 bits! */
+ WARN_ON(tb_cfg_get_route(&header) != route);
+ return header;
+}
+
+int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug);
+struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
+ int timeout_msec);
+struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
+ u64 route, u32 port,
+ enum tb_cfg_space space, u32 offset,
+ u32 length, int timeout_msec);
+struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
+ u64 route, u32 port,
+ enum tb_cfg_space space, u32 offset,
+ u32 length, int timeout_msec);
+int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
+ enum tb_cfg_space space, u32 offset, u32 length);
+int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
+ enum tb_cfg_space space, u32 offset, u32 length);
+int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route);
+
+
+#endif
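
For illustration only (not part of the patch above), a minimal sketch of the control channel life cycle exposed by this header. All names prefixed with example_ are hypothetical. Per the comments in ctl.c, tb_ctl_stop() and tb_ctl_free() must not be called from the event callback.

static bool example_event_cb(void *data, enum tb_cfg_pkg_type type,
			     const void *buf, size_t size)
{
	/*
	 * Handle hot plug and other notifications here. Returning
	 * true tells tb_ctl_rx_callback() that the packet was
	 * consumed and should not be matched against pending requests.
	 */
	return true;
}

static struct tb_ctl *example_ctl_setup(struct tb_nhi *nhi, void *cb_data)
{
	struct tb_ctl *ctl;

	ctl = tb_ctl_alloc(nhi, example_event_cb, cb_data);
	if (!ctl)
		return NULL;

	/* Start ring 0 and begin receiving control packets */
	tb_ctl_start(ctl);
	return ctl;
}

Tear down happens in the opposite order: tb_ctl_stop(ctl) first, then tb_ctl_free(ctl).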
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
new file mode 100644
index 000000000..ed65d2b13
--- /dev/null
+++ b/drivers/thunderbolt/debugfs.c
@@ -0,0 +1,702 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Debugfs interface
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Authors: Gil Fine <gil.fine@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+#include <linux/uaccess.h>
+
+#include "tb.h"
+
+#define PORT_CAP_PCIE_LEN 1
+#define PORT_CAP_POWER_LEN 2
+#define PORT_CAP_LANE_LEN 3
+#define PORT_CAP_USB3_LEN 5
+#define PORT_CAP_DP_LEN 8
+#define PORT_CAP_TMU_LEN 8
+#define PORT_CAP_BASIC_LEN 9
+#define PORT_CAP_USB4_LEN 20
+
+#define SWITCH_CAP_TMU_LEN 26
+#define SWITCH_CAP_BASIC_LEN 27
+
+#define PATH_LEN 2
+
+#define COUNTER_SET_LEN 3
+
+#define DEBUGFS_ATTR(__space, __write) \
+static int __space ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __space ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __space ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __space ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .write = __write, \
+ .llseek = seq_lseek, \
+}
+
+#define DEBUGFS_ATTR_RO(__space) \
+ DEBUGFS_ATTR(__space, NULL)
+
+#define DEBUGFS_ATTR_RW(__space) \
+ DEBUGFS_ATTR(__space, __space ## _write)
+
+static struct dentry *tb_debugfs_root;
+
+static void *validate_and_copy_from_user(const void __user *user_buf,
+ size_t *count)
+{
+ size_t nbytes;
+ void *buf;
+
+ if (!*count)
+ return ERR_PTR(-EINVAL);
+
+ if (!access_ok(user_buf, *count))
+ return ERR_PTR(-EFAULT);
+
+ buf = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ nbytes = min_t(size_t, *count, PAGE_SIZE);
+ if (copy_from_user(buf, user_buf, nbytes)) {
+ free_page((unsigned long)buf);
+ return ERR_PTR(-EFAULT);
+ }
+
+ *count = nbytes;
+ return buf;
+}
+
+static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
+ int long_fmt_len)
+{
+ char *token;
+ u32 v[5];
+ int ret;
+
+ token = strsep(line, "\n");
+ if (!token)
+ return false;
+
+ /*
+ * For Adapter/Router configuration space:
+ * Short format is: offset value\n
+ * v[0] v[1]
+ * Long format as produced from the read side:
+ * offset relative_offset cap_id vs_cap_id value\n
+ * v[0] v[1] v[2] v[3] v[4]
+ *
+ * For Counter configuration space:
+ * Short format is: offset\n
+ * v[0]
+ * Long format as produced from the read side:
+ * offset relative_offset counter_id value\n
+ * v[0] v[1] v[2] v[3]
+ */
+ ret = sscanf(token, "%i %i %i %i %i", &v[0], &v[1], &v[2], &v[3], &v[4]);
+ /* In case of Counters, clear counter, "val" content is NA */
+ if (ret == short_fmt_len) {
+ *offs = v[0];
+ *val = v[short_fmt_len - 1];
+ return true;
+ } else if (ret == long_fmt_len) {
+ *offs = v[0];
+ *val = v[long_fmt_len - 1];
+ return true;
+ }
+
+ return false;
+}
+
+#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
+static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct tb *tb = sw->tb;
+ char *line, *buf;
+ u32 val, offset;
+ int ret = 0;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ /* The user is making hardware changes behind the driver's back */
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+ line = buf;
+ while (parse_line(&line, &offset, &val, 2, 5)) {
+ if (port)
+ ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1);
+ else
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&tb->lock);
+
+out:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+ free_page((unsigned long)buf);
+
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+
+ return regs_write(port->sw, port, user_buf, count, ppos);
+}
+
+static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_switch *sw = s->private;
+
+ return regs_write(sw, NULL, user_buf, count, ppos);
+}
+#define DEBUGFS_MODE 0600
+#else
+#define port_regs_write NULL
+#define switch_regs_write NULL
+#define DEBUGFS_MODE 0400
+#endif
+
+static int port_clear_all_counters(struct tb_port *port)
+{
+ u32 *buf;
+ int ret;
+
+ buf = kcalloc(COUNTER_SET_LEN * port->config.max_counters, sizeof(u32),
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = tb_port_write(port, buf, TB_CFG_COUNTERS, 0,
+ COUNTER_SET_LEN * port->config.max_counters);
+ kfree(buf);
+
+ return ret;
+}
+
+static ssize_t counters_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct tb_port *port = s->private;
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = port->sw->tb;
+ char *buf;
+ int ret;
+
+ buf = validate_and_copy_from_user(user_buf, &count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ /* If only the delimiter was written, clear all counters in one shot */
+ if (buf[0] == '\n') {
+ ret = port_clear_all_counters(port);
+ } else {
+ char *line = buf;
+ u32 val, offset;
+
+ ret = -EINVAL;
+ while (parse_line(&line, &offset, &val, 1, 4)) {
+ ret = tb_port_write(port, &val, TB_CFG_COUNTERS,
+ offset, 1);
+ if (ret)
+ break;
+ }
+ }
+
+ mutex_unlock(&tb->lock);
+
+out:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+ free_page((unsigned long)buf);
+
+ return ret < 0 ? ret : count;
+}
+
+static void cap_show(struct seq_file *s, struct tb_switch *sw,
+ struct tb_port *port, unsigned int cap, u8 cap_id,
+ u8 vsec_id, int length)
+{
+ int ret, offset = 0;
+
+ while (length > 0) {
+ int i, dwords = min(length, TB_MAX_CONFIG_RW_LENGTH);
+ u32 data[TB_MAX_CONFIG_RW_LENGTH];
+
+ if (port)
+ ret = tb_port_read(port, data, TB_CFG_PORT, cap + offset,
+ dwords);
+ else
+ ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords);
+ if (ret) {
+ seq_printf(s, "0x%04x <not accessible>\n",
+ cap + offset);
+ if (dwords > 1)
+ seq_printf(s, "0x%04x ...\n", cap + offset + 1);
+ return;
+ }
+
+ for (i = 0; i < dwords; i++) {
+ seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n",
+ cap + offset + i, offset + i,
+ cap_id, vsec_id, data[i]);
+ }
+
+ length -= dwords;
+ offset += dwords;
+ }
+}
+
+static void port_cap_show(struct tb_port *port, struct seq_file *s,
+ unsigned int cap)
+{
+ struct tb_cap_any header;
+ u8 vsec_id = 0;
+ size_t length;
+ int ret;
+
+ ret = tb_port_read(port, &header, TB_CFG_PORT, cap, 1);
+ if (ret) {
+ seq_printf(s, "0x%04x <capability read failed>\n", cap);
+ return;
+ }
+
+ switch (header.basic.cap) {
+ case TB_PORT_CAP_PHY:
+ length = PORT_CAP_LANE_LEN;
+ break;
+
+ case TB_PORT_CAP_TIME1:
+ length = PORT_CAP_TMU_LEN;
+ break;
+
+ case TB_PORT_CAP_POWER:
+ length = PORT_CAP_POWER_LEN;
+ break;
+
+ case TB_PORT_CAP_ADAP:
+ if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
+ length = PORT_CAP_PCIE_LEN;
+ } else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
+ length = PORT_CAP_DP_LEN;
+ } else if (tb_port_is_usb3_down(port) ||
+ tb_port_is_usb3_up(port)) {
+ length = PORT_CAP_USB3_LEN;
+ } else {
+ seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
+ cap, header.basic.cap);
+ return;
+ }
+ break;
+
+ case TB_PORT_CAP_VSE:
+ if (!header.extended_short.length) {
+ ret = tb_port_read(port, (u32 *)&header + 1, TB_CFG_PORT,
+ cap + 1, 1);
+ if (ret) {
+ seq_printf(s, "0x%04x <capability read failed>\n",
+ cap + 1);
+ return;
+ }
+ length = header.extended_long.length;
+ vsec_id = header.extended_short.vsec_id;
+ } else {
+ length = header.extended_short.length;
+ vsec_id = header.extended_short.vsec_id;
+ /*
+ * Ice Lake and Tiger Lake do not implement the
+ * full length of the capability, only the first 32
+ * dwords, so hard-code it here.
+ */
+ if (!vsec_id &&
+ (tb_switch_is_ice_lake(port->sw) ||
+ tb_switch_is_tiger_lake(port->sw)))
+ length = 32;
+ }
+ break;
+
+ case TB_PORT_CAP_USB4:
+ length = PORT_CAP_USB4_LEN;
+ break;
+
+ default:
+ seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
+ cap, header.basic.cap);
+ return;
+ }
+
+ cap_show(s, NULL, port, cap, header.basic.cap, vsec_id, length);
+}
+
+static void port_caps_show(struct tb_port *port, struct seq_file *s)
+{
+ int cap;
+
+ cap = tb_port_next_cap(port, 0);
+ while (cap > 0) {
+ port_cap_show(port, s, cap);
+ cap = tb_port_next_cap(port, cap);
+ }
+}
+
+static int port_basic_regs_show(struct tb_port *port, struct seq_file *s)
+{
+ u32 data[PORT_CAP_BASIC_LEN];
+ int ret, i;
+
+ ret = tb_port_read(port, data, TB_CFG_PORT, 0, ARRAY_SIZE(data));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(data); i++)
+ seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);
+
+ return 0;
+}
+
+static int port_regs_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = sw->tb;
+ int ret;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm_put;
+ }
+
+ seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");
+
+ ret = port_basic_regs_show(port, s);
+ if (ret)
+ goto out_unlock;
+
+ port_caps_show(port, s);
+
+out_unlock:
+ mutex_unlock(&tb->lock);
+out_rpm_put:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+DEBUGFS_ATTR_RW(port_regs);
+
+static void switch_cap_show(struct tb_switch *sw, struct seq_file *s,
+ unsigned int cap)
+{
+ struct tb_cap_any header;
+ int ret, length;
+ u8 vsec_id = 0;
+
+ ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, cap, 1);
+ if (ret) {
+ seq_printf(s, "0x%04x <capability read failed>\n", cap);
+ return;
+ }
+
+ if (header.basic.cap == TB_SWITCH_CAP_VSE) {
+ if (!header.extended_short.length) {
+ ret = tb_sw_read(sw, (u32 *)&header + 1, TB_CFG_SWITCH,
+ cap + 1, 1);
+ if (ret) {
+ seq_printf(s, "0x%04x <capability read failed>\n",
+ cap + 1);
+ return;
+ }
+ length = header.extended_long.length;
+ } else {
+ length = header.extended_short.length;
+ }
+ vsec_id = header.extended_short.vsec_id;
+ } else {
+ if (header.basic.cap == TB_SWITCH_CAP_TMU) {
+ length = SWITCH_CAP_TMU_LEN;
+ } else {
+ seq_printf(s, "0x%04x <unknown capability 0x%02x>\n",
+ cap, header.basic.cap);
+ return;
+ }
+ }
+
+ cap_show(s, sw, NULL, cap, header.basic.cap, vsec_id, length);
+}
+
+static void switch_caps_show(struct tb_switch *sw, struct seq_file *s)
+{
+ int cap;
+
+ cap = tb_switch_next_cap(sw, 0);
+ while (cap > 0) {
+ switch_cap_show(sw, s, cap);
+ cap = tb_switch_next_cap(sw, cap);
+ }
+}
+
+static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s)
+{
+ u32 data[SWITCH_CAP_BASIC_LEN];
+ size_t dwords;
+ int ret, i;
+
+ /* Only USB4 has the additional registers */
+ if (tb_switch_is_usb4(sw))
+ dwords = ARRAY_SIZE(data);
+ else
+ dwords = 7;
+
+ ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dwords; i++)
+ seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);
+
+ return 0;
+}
+
+static int switch_regs_show(struct seq_file *s, void *not_used)
+{
+ struct tb_switch *sw = s->private;
+ struct tb *tb = sw->tb;
+ int ret;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm_put;
+ }
+
+ seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");
+
+ ret = switch_basic_regs_show(sw, s);
+ if (ret)
+ goto out_unlock;
+
+ switch_caps_show(sw, s);
+
+out_unlock:
+ mutex_unlock(&tb->lock);
+out_rpm_put:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+DEBUGFS_ATTR_RW(switch_regs);
+
+static int path_show_one(struct tb_port *port, struct seq_file *s, int hopid)
+{
+ u32 data[PATH_LEN];
+ int ret, i;
+
+ ret = tb_port_read(port, data, TB_CFG_HOPS, hopid * PATH_LEN,
+ ARRAY_SIZE(data));
+ if (ret) {
+ seq_printf(s, "0x%04x <not accessible>\n", hopid * PATH_LEN);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
+ hopid * PATH_LEN + i, i, hopid, data[i]);
+ }
+
+ return 0;
+}
+
+static int path_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = sw->tb;
+ int start, i, ret = 0;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out_rpm_put;
+ }
+
+ seq_puts(s, "# offset relative_offset in_hop_id value\n");
+
+ /* NHI and lane adapters have entry for path 0 */
+ if (tb_port_is_null(port) || tb_port_is_nhi(port)) {
+ ret = path_show_one(port, s, 0);
+ if (ret)
+ goto out_unlock;
+ }
+
+ start = tb_port_is_nhi(port) ? 1 : TB_PATH_MIN_HOPID;
+
+ for (i = start; i <= port->config.max_in_hop_id; i++) {
+ ret = path_show_one(port, s, i);
+ if (ret)
+ break;
+ }
+
+out_unlock:
+ mutex_unlock(&tb->lock);
+out_rpm_put:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+DEBUGFS_ATTR_RO(path);
+
+static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
+ int counter)
+{
+ u32 data[COUNTER_SET_LEN];
+ int ret, i;
+
+ ret = tb_port_read(port, data, TB_CFG_COUNTERS,
+ counter * COUNTER_SET_LEN, ARRAY_SIZE(data));
+ if (ret) {
+ seq_printf(s, "0x%04x <not accessible>\n",
+ counter * COUNTER_SET_LEN);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
+ seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
+ counter * COUNTER_SET_LEN + i, i, counter, data[i]);
+ }
+
+ return 0;
+}
+
+static int counters_show(struct seq_file *s, void *not_used)
+{
+ struct tb_port *port = s->private;
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = sw->tb;
+ int i, ret = 0;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ seq_puts(s, "# offset relative_offset counter_id value\n");
+
+ for (i = 0; i < port->config.max_counters; i++) {
+ ret = counter_set_regs_show(port, s, i);
+ if (ret)
+ break;
+ }
+
+ mutex_unlock(&tb->lock);
+
+out:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+DEBUGFS_ATTR_RW(counters);
+
+/**
+ * tb_switch_debugfs_init() - Add debugfs entries for router
+ * @sw: Pointer to the router
+ *
+ * Adds debugfs directories and files for the given router.
+ */
+void tb_switch_debugfs_init(struct tb_switch *sw)
+{
+ struct dentry *debugfs_dir;
+ struct tb_port *port;
+
+ debugfs_dir = debugfs_create_dir(dev_name(&sw->dev), tb_debugfs_root);
+ sw->debugfs_dir = debugfs_dir;
+ debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
+ &switch_regs_fops);
+
+ tb_switch_for_each_port(sw, port) {
+ struct dentry *debugfs_dir;
+ char dir_name[10];
+
+ if (port->disabled)
+ continue;
+ if (port->config.type == TB_TYPE_INACTIVE)
+ continue;
+
+ snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
+ debugfs_dir = debugfs_create_dir(dir_name, sw->debugfs_dir);
+ debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir,
+ port, &port_regs_fops);
+ debugfs_create_file("path", 0400, debugfs_dir, port,
+ &path_fops);
+ if (port->config.counters_support)
+ debugfs_create_file("counters", 0600, debugfs_dir, port,
+ &counters_fops);
+ }
+}
+
+/**
+ * tb_switch_debugfs_remove() - Remove all router debugfs entries
+ * @sw: Pointer to the router
+ *
+ * Removes all previously added debugfs entries under this router.
+ */
+void tb_switch_debugfs_remove(struct tb_switch *sw)
+{
+ debugfs_remove_recursive(sw->debugfs_dir);
+}
+
+void tb_debugfs_init(void)
+{
+ tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL);
+}
+
+void tb_debugfs_exit(void)
+{
+ debugfs_remove_recursive(tb_debugfs_root);
+}
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
new file mode 100644
index 000000000..de219953c
--- /dev/null
+++ b/drivers/thunderbolt/dma_port.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt DMA configuration based mailbox support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "dma_port.h"
+#include "tb_regs.h"
+
+#define DMA_PORT_CAP 0x3e
+
+#define MAIL_DATA 1
+#define MAIL_DATA_DWORDS 16
+
+#define MAIL_IN 17
+#define MAIL_IN_CMD_SHIFT 28
+#define MAIL_IN_CMD_MASK GENMASK(31, 28)
+#define MAIL_IN_CMD_FLASH_WRITE 0x0
+#define MAIL_IN_CMD_FLASH_UPDATE_AUTH 0x1
+#define MAIL_IN_CMD_FLASH_READ 0x2
+#define MAIL_IN_CMD_POWER_CYCLE 0x4
+#define MAIL_IN_DWORDS_SHIFT 24
+#define MAIL_IN_DWORDS_MASK GENMASK(27, 24)
+#define MAIL_IN_ADDRESS_SHIFT 2
+#define MAIL_IN_ADDRESS_MASK GENMASK(23, 2)
+#define MAIL_IN_CSS BIT(1)
+#define MAIL_IN_OP_REQUEST BIT(0)
+
+#define MAIL_OUT 18
+#define MAIL_OUT_STATUS_RESPONSE BIT(29)
+#define MAIL_OUT_STATUS_CMD_SHIFT 4
+#define MAIL_OUT_STATUS_CMD_MASK GENMASK(7, 4)
+#define MAIL_OUT_STATUS_MASK GENMASK(3, 0)
+#define MAIL_OUT_STATUS_COMPLETED 0
+#define MAIL_OUT_STATUS_ERR_AUTH 1
+#define MAIL_OUT_STATUS_ERR_ACCESS 2
+
+#define DMA_PORT_TIMEOUT 5000 /* ms */
+#define DMA_PORT_RETRIES 3
+
+/**
+ * struct tb_dma_port - DMA control port
+ * @sw: Switch the DMA port belongs to
+ * @port: Switch port number where DMA capability is found
+ * @base: Start offset of the mailbox registers
+ * @buf: Temporary buffer to store a single block
+ */
+struct tb_dma_port {
+ struct tb_switch *sw;
+ u8 port;
+ u32 base;
+ u8 *buf;
+};
+
+/*
+ * When the switch is in safe mode it supports very little functionality
+ * so we don't validate that much here.
+ */
+static bool dma_port_match(const struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg)
+{
+ u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
+
+ if (pkg->frame.eof == TB_CFG_PKG_ERROR)
+ return true;
+ if (pkg->frame.eof != req->response_type)
+ return false;
+ if (route != tb_cfg_get_route(req->request))
+ return false;
+ if (pkg->frame.size != req->response_size)
+ return false;
+
+ return true;
+}
+
+static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+ memcpy(req->response, pkg->buffer, req->response_size);
+ return true;
+}
+
+static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
+ u32 port, u32 offset, u32 length, int timeout_msec)
+{
+ struct cfg_read_pkg request = {
+ .header = tb_cfg_make_header(route),
+ .addr = {
+ .seq = 1,
+ .port = port,
+ .space = TB_CFG_PORT,
+ .offset = offset,
+ .length = length,
+ },
+ };
+ struct tb_cfg_request *req;
+ struct cfg_write_pkg reply;
+ struct tb_cfg_result res;
+
+ req = tb_cfg_request_alloc();
+ if (!req)
+ return -ENOMEM;
+
+ req->match = dma_port_match;
+ req->copy = dma_port_copy;
+ req->request = &request;
+ req->request_size = sizeof(request);
+ req->request_type = TB_CFG_PKG_READ;
+ req->response = &reply;
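+ /* The reply is the 12-byte header (route + address) followed by the data dwords */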
+ req->response_size = 12 + 4 * length;
+ req->response_type = TB_CFG_PKG_READ;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+ tb_cfg_request_put(req);
+
+ if (res.err)
+ return res.err;
+
+ memcpy(buffer, &reply.data, 4 * length);
+ return 0;
+}
+
+static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
+ u32 port, u32 offset, u32 length, int timeout_msec)
+{
+ struct cfg_write_pkg request = {
+ .header = tb_cfg_make_header(route),
+ .addr = {
+ .seq = 1,
+ .port = port,
+ .space = TB_CFG_PORT,
+ .offset = offset,
+ .length = length,
+ },
+ };
+ struct tb_cfg_request *req;
+ struct cfg_read_pkg reply;
+ struct tb_cfg_result res;
+
+ memcpy(&request.data, buffer, length * 4);
+
+ req = tb_cfg_request_alloc();
+ if (!req)
+ return -ENOMEM;
+
+ req->match = dma_port_match;
+ req->copy = dma_port_copy;
+ req->request = &request;
+ req->request_size = 12 + 4 * length;
+ req->request_type = TB_CFG_PKG_WRITE;
+ req->response = &reply;
+ req->response_size = sizeof(reply);
+ req->response_type = TB_CFG_PKG_WRITE;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+ tb_cfg_request_put(req);
+
+ return res.err;
+}
+
+static int dma_find_port(struct tb_switch *sw)
+{
+ static const int ports[] = { 3, 5, 7 };
+ int i;
+
+ /*
+ * The DMA (NHI) port is either 3, 5 or 7 depending on the
+ * controller. Try all of them.
+ */
+ for (i = 0; i < ARRAY_SIZE(ports); i++) {
+ u32 type;
+ int ret;
+
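+ /* The adapter type is in dword 2 of the port config space; look for the NHI adapter */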
+ ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
+ 2, 1, DMA_PORT_TIMEOUT);
+ if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
+ return ports[i];
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * dma_port_alloc() - Finds the DMA control port of a switch
+ * @sw: Switch from which to find the DMA port
+ *
+ * Function checks if the switch NHI port supports the DMA configuration
+ * based mailbox capability and if it does, allocates and initializes
+ * the DMA port structure. Returns %NULL if the capability was not found.
+ *
+ * The DMA control port is functional also when the switch is in safe
+ * mode.
+ */
+struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
+{
+ struct tb_dma_port *dma;
+ int port;
+
+ port = dma_find_port(sw);
+ if (port < 0)
+ return NULL;
+
+ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return NULL;
+
+ dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
+ if (!dma->buf) {
+ kfree(dma);
+ return NULL;
+ }
+
+ dma->sw = sw;
+ dma->port = port;
+ dma->base = DMA_PORT_CAP;
+
+ return dma;
+}
+
+/**
+ * dma_port_free() - Release DMA control port structure
+ * @dma: DMA control port
+ */
+void dma_port_free(struct tb_dma_port *dma)
+{
+ if (dma) {
+ kfree(dma->buf);
+ kfree(dma);
+ }
+}
+
+static int dma_port_wait_for_completion(struct tb_dma_port *dma,
+ unsigned int timeout)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(timeout);
+ struct tb_switch *sw = dma->sw;
+
+ do {
+ int ret;
+ u32 in;
+
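+ /* Poll MAIL_IN until the request bit is cleared */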
+ ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
+ dma->base + MAIL_IN, 1, 50);
+ if (ret) {
+ if (ret != -ETIMEDOUT)
+ return ret;
+ } else if (!(in & MAIL_IN_OP_REQUEST)) {
+ return 0;
+ }
+
+ usleep_range(50, 100);
+ } while (time_before(jiffies, end));
+
+ return -ETIMEDOUT;
+}
+
+static int status_to_errno(u32 status)
+{
+ switch (status & MAIL_OUT_STATUS_MASK) {
+ case MAIL_OUT_STATUS_COMPLETED:
+ return 0;
+ case MAIL_OUT_STATUS_ERR_AUTH:
+ return -EINVAL;
+ case MAIL_OUT_STATUS_ERR_ACCESS:
+ return -EACCES;
+ }
+
+ return -EIO;
+}
+
+static int dma_port_request(struct tb_dma_port *dma, u32 in,
+ unsigned int timeout)
+{
+ struct tb_switch *sw = dma->sw;
+ u32 out;
+ int ret;
+
+ ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
+ dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = dma_port_wait_for_completion(dma, timeout);
+ if (ret)
+ return ret;
+
+ ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
+ dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return status_to_errno(out);
+}
+
+static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
+ void *buf, u32 size)
+{
+ struct tb_switch *sw = dma->sw;
+ u32 in, dwaddress, dwords;
+ int ret;
+
+ dwaddress = address / 4;
+ dwords = size / 4;
+
+ in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
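+ /* The dword count field is left zero when requesting a full 16 dword (64 byte) transfer */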
+ if (dwords < MAIL_DATA_DWORDS)
+ in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
+ in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
+ in |= MAIL_IN_OP_REQUEST;
+
+ ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
+ dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
+}
+
+static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
+ const void *buf, u32 size)
+{
+ struct tb_switch *sw = dma->sw;
+ u32 in, dwaddress, dwords;
+ int ret;
+
+ dwords = size / 4;
+
+ /* Write the block to MAIL_DATA registers */
+ ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
+ dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
+
+ in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
+
+ /* CSS header write is always done to the same magic address */
+ if (address >= DMA_PORT_CSS_ADDRESS) {
+ dwaddress = DMA_PORT_CSS_ADDRESS;
+ in |= MAIL_IN_CSS;
+ } else {
+ dwaddress = address / 4;
+ }
+
+ in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
+ in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
+ in |= MAIL_IN_OP_REQUEST;
+
+ return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
+}
+
+/**
+ * dma_port_flash_read() - Read from active flash region
+ * @dma: DMA control port
+ * @address: Address relative to the start of active region
+ * @buf: Buffer where the data is read
+ * @size: Size of the buffer
+ */
+int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
+ void *buf, size_t size)
+{
+ unsigned int retries = DMA_PORT_RETRIES;
+
+ do {
+ unsigned int offset;
+ size_t nbytes;
+ int ret;
+
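+ /* Reads are done in dword aligned blocks of at most 16 dwords (64 bytes) */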
+ offset = address & 3;
+ nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);
+
+ ret = dma_port_flash_read_block(dma, address, dma->buf,
+ ALIGN(nbytes, 4));
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ if (retries--)
+ continue;
+ ret = -EIO;
+ }
+ return ret;
+ }
+
+ nbytes -= offset;
+ memcpy(buf, dma->buf + offset, nbytes);
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
+
+/**
+ * dma_port_flash_write() - Write to non-active flash region
+ * @dma: DMA control port
+ * @address: Address relative to the start of non-active region
+ * @buf: Data to write
+ * @size: Size of the buffer
+ *
+ * Writes a block of data to the non-active flash region of the switch. If
+ * the address is given as %DMA_PORT_CSS_ADDRESS the block is written
+ * using the CSS command.
+ */
+int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
+ const void *buf, size_t size)
+{
+ unsigned int retries = DMA_PORT_RETRIES;
+ unsigned int offset;
+
+ if (address >= DMA_PORT_CSS_ADDRESS) {
+ offset = 0;
+ if (size > DMA_PORT_CSS_MAX_SIZE)
+ return -E2BIG;
+ } else {
+ offset = address & 3;
+ address = address & ~3;
+ }
+
+ do {
+ u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
+ int ret;
+
+ memcpy(dma->buf + offset, buf, nbytes);
+
+ ret = dma_port_flash_write_block(dma, address, buf, nbytes);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ if (retries--)
+ continue;
+ ret = -EIO;
+ }
+ return ret;
+ }
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
+
+/**
+ * dma_port_flash_update_auth() - Starts flash authenticate cycle
+ * @dma: DMA control port
+ *
+ * Starts the flash update authentication cycle. If the image in the
+ * non-active area was valid, the switch starts the upgrade process where
+ * the active and non-active areas get swapped in the end. The caller
+ * should call dma_port_flash_update_auth_status() to get the status of
+ * this command, because if the switch in question is the root switch the
+ * Thunderbolt host controller gets reset as well.
+ */
+int dma_port_flash_update_auth(struct tb_dma_port *dma)
+{
+ u32 in;
+
+ in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
+ in |= MAIL_IN_OP_REQUEST;
+
+ return dma_port_request(dma, in, 150);
+}
+
+/**
+ * dma_port_flash_update_auth_status() - Reads status of update auth command
+ * @dma: DMA control port
+ * @status: Status code of the operation
+ *
+ * The function checks if there is status available from the last update
+ * auth command. Returns %0 if there is no status and no further
+ * action is required. If there is status, %1 is returned instead and
+ * @status holds the failure code.
+ *
+ * Negative return means there was an error reading status from the
+ * switch.
+ */
+int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
+{
+ struct tb_switch *sw = dma->sw;
+ u32 out, cmd;
+ int ret;
+
+ ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
+ dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Check if the status relates to flash update auth */
+ cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
+ if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
+ if (status)
+ *status = out & MAIL_OUT_STATUS_MASK;
+
+ /* Reset is needed in any case */
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * dma_port_power_cycle() - Power cycles the switch
+ * @dma: DMA control port
+ *
+ * Triggers a power cycle of the switch.
+ */
+int dma_port_power_cycle(struct tb_dma_port *dma)
+{
+ u32 in;
+
+ in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
+ in |= MAIL_IN_OP_REQUEST;
+
+ return dma_port_request(dma, in, 150);
+}
diff --git a/drivers/thunderbolt/dma_port.h b/drivers/thunderbolt/dma_port.h
new file mode 100644
index 000000000..7deadd97c
--- /dev/null
+++ b/drivers/thunderbolt/dma_port.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt DMA configuration based mailbox support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#ifndef DMA_PORT_H_
+#define DMA_PORT_H_
+
+#include "tb.h"
+
+struct tb_switch;
+struct tb_dma_port;
+
+#define DMA_PORT_CSS_ADDRESS 0x3fffff
+#define DMA_PORT_CSS_MAX_SIZE SZ_128
+
+struct tb_dma_port *dma_port_alloc(struct tb_switch *sw);
+void dma_port_free(struct tb_dma_port *dma);
+int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
+ void *buf, size_t size);
+int dma_port_flash_update_auth(struct tb_dma_port *dma);
+int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status);
+int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
+ const void *buf, size_t size);
+int dma_port_power_cycle(struct tb_dma_port *dma);
+
+#endif
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
new file mode 100644
index 000000000..f0de94f7a
--- /dev/null
+++ b/drivers/thunderbolt/domain.c
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt bus support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dmar.h>
+#include <linux/idr.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <crypto/hash.h>
+
+#include "tb.h"
+
+static DEFINE_IDA(tb_domain_ida);
+
+static bool match_service_id(const struct tb_service_id *id,
+ const struct tb_service *svc)
+{
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
+ if (strcmp(id->protocol_key, svc->key))
+ return false;
+ }
+
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
+ if (id->protocol_id != svc->prtcid)
+ return false;
+ }
+
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
+ if (id->protocol_version != svc->prtcvers)
+ return false;
+ }
+
+ if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
+ if (id->protocol_revision != svc->prtcrevs)
+ return false;
+ }
+
+ return true;
+}
+
+static const struct tb_service_id *__tb_service_match(struct device *dev,
+ struct device_driver *drv)
+{
+ struct tb_service_driver *driver;
+ const struct tb_service_id *ids;
+ struct tb_service *svc;
+
+ svc = tb_to_service(dev);
+ if (!svc)
+ return NULL;
+
+ driver = container_of(drv, struct tb_service_driver, driver);
+ if (!driver->id_table)
+ return NULL;
+
+ for (ids = driver->id_table; ids->match_flags != 0; ids++) {
+ if (match_service_id(ids, svc))
+ return ids;
+ }
+
+ return NULL;
+}
+
+static int tb_service_match(struct device *dev, struct device_driver *drv)
+{
+ return !!__tb_service_match(dev, drv);
+}
+
+static int tb_service_probe(struct device *dev)
+{
+ struct tb_service *svc = tb_to_service(dev);
+ struct tb_service_driver *driver;
+ const struct tb_service_id *id;
+
+ driver = container_of(dev->driver, struct tb_service_driver, driver);
+ id = __tb_service_match(dev, &driver->driver);
+
+ return driver->probe(svc, id);
+}
+
+static int tb_service_remove(struct device *dev)
+{
+ struct tb_service *svc = tb_to_service(dev);
+ struct tb_service_driver *driver;
+
+ driver = container_of(dev->driver, struct tb_service_driver, driver);
+ if (driver->remove)
+ driver->remove(svc);
+
+ return 0;
+}
+
+static void tb_service_shutdown(struct device *dev)
+{
+ struct tb_service_driver *driver;
+ struct tb_service *svc;
+
+ svc = tb_to_service(dev);
+ if (!svc || !dev->driver)
+ return;
+
+ driver = container_of(dev->driver, struct tb_service_driver, driver);
+ if (driver->shutdown)
+ driver->shutdown(svc);
+}
+
+static const char * const tb_security_names[] = {
+ [TB_SECURITY_NONE] = "none",
+ [TB_SECURITY_USER] = "user",
+ [TB_SECURITY_SECURE] = "secure",
+ [TB_SECURITY_DPONLY] = "dponly",
+ [TB_SECURITY_USBONLY] = "usbonly",
+};
+
+static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb *tb = container_of(dev, struct tb, dev);
+ uuid_t *uuids;
+ ssize_t ret;
+ int i;
+
+ uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
+ if (!uuids)
+ return -ENOMEM;
+
+ pm_runtime_get_sync(&tb->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+ ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
+ if (ret) {
+ mutex_unlock(&tb->lock);
+ goto out;
+ }
+ mutex_unlock(&tb->lock);
+
+ for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
+ if (!uuid_is_null(&uuids[i]))
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%pUb",
+ &uuids[i]);
+
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s",
+ i < tb->nboot_acl - 1 ? "," : "\n");
+ }
+
+out:
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
+ kfree(uuids);
+
+ return ret;
+}
+
+static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tb *tb = container_of(dev, struct tb, dev);
+ char *str, *s, *uuid_str;
+ ssize_t ret = 0;
+ uuid_t *acl;
+ int i = 0;
+
+ /*
+ * Make sure the value is not bigger than tb->nboot_acl * UUID
+ * length + commas and optional "\n". Also the smallest allowable
+ * string is tb->nboot_acl * ",".
+ */
+ if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
+ return -EINVAL;
+ if (count < tb->nboot_acl - 1)
+ return -EINVAL;
+
+ str = kstrdup(buf, GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+
+ acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
+ if (!acl) {
+ ret = -ENOMEM;
+ goto err_free_str;
+ }
+
+ uuid_str = strim(str);
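+ /* Empty entries leave the corresponding ACL slot as a null UUID (acl was zeroed by kcalloc) */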
+ while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
+ size_t len = strlen(s);
+
+ if (len) {
+ if (len != UUID_STRING_LEN) {
+ ret = -EINVAL;
+ goto err_free_acl;
+ }
+ ret = uuid_parse(s, &acl[i]);
+ if (ret)
+ goto err_free_acl;
+ }
+
+ i++;
+ }
+
+ if (s || i < tb->nboot_acl) {
+ ret = -EINVAL;
+ goto err_free_acl;
+ }
+
+ pm_runtime_get_sync(&tb->dev);
+
+ if (mutex_lock_interruptible(&tb->lock)) {
+ ret = -ERESTARTSYS;
+ goto err_rpm_put;
+ }
+ ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
+ if (!ret) {
+ /* Notify userspace about the change */
+ kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
+ }
+ mutex_unlock(&tb->lock);
+
+err_rpm_put:
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
+err_free_acl:
+ kfree(acl);
+err_free_str:
+ kfree(str);
+
+ return ret ?: count;
+}
+static DEVICE_ATTR_RW(boot_acl);
+
+static ssize_t iommu_dma_protection_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ /*
+ * Kernel DMA protection is a feature where Thunderbolt security is
+ * handled natively using the IOMMU. It is enabled when the IOMMU is
+ * enabled and the ACPI DMAR table has DMAR_PLATFORM_OPT_IN set.
+ */
+ return sprintf(buf, "%d\n",
+ iommu_present(&pci_bus_type) && dmar_platform_optin());
+}
+static DEVICE_ATTR_RO(iommu_dma_protection);
+
+static ssize_t security_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb *tb = container_of(dev, struct tb, dev);
+ const char *name = "unknown";
+
+ if (tb->security_level < ARRAY_SIZE(tb_security_names))
+ name = tb_security_names[tb->security_level];
+
+ return sprintf(buf, "%s\n", name);
+}
+static DEVICE_ATTR_RO(security);
+
+static struct attribute *domain_attrs[] = {
+ &dev_attr_boot_acl.attr,
+ &dev_attr_iommu_dma_protection.attr,
+ &dev_attr_security.attr,
+ NULL,
+};
+
+static umode_t domain_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tb *tb = container_of(dev, struct tb, dev);
+
+ if (attr == &dev_attr_boot_acl.attr) {
+ if (tb->nboot_acl &&
+ tb->cm_ops->get_boot_acl &&
+ tb->cm_ops->set_boot_acl)
+ return attr->mode;
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static struct attribute_group domain_attr_group = {
+ .is_visible = domain_attr_is_visible,
+ .attrs = domain_attrs,
+};
+
+static const struct attribute_group *domain_attr_groups[] = {
+ &domain_attr_group,
+ NULL,
+};
+
+struct bus_type tb_bus_type = {
+ .name = "thunderbolt",
+ .match = tb_service_match,
+ .probe = tb_service_probe,
+ .remove = tb_service_remove,
+ .shutdown = tb_service_shutdown,
+};
+
+static void tb_domain_release(struct device *dev)
+{
+ struct tb *tb = container_of(dev, struct tb, dev);
+
+ tb_ctl_free(tb->ctl);
+ destroy_workqueue(tb->wq);
+ ida_simple_remove(&tb_domain_ida, tb->index);
+ mutex_destroy(&tb->lock);
+ kfree(tb);
+}
+
+struct device_type tb_domain_type = {
+ .name = "thunderbolt_domain",
+ .release = tb_domain_release,
+};
+
+/**
+ * tb_domain_alloc() - Allocate a domain
+ * @nhi: Pointer to the host controller
+ * @privsize: Size of the connection manager private data
+ *
+ * Allocates and initializes a new Thunderbolt domain. Connection
+ * managers are expected to call this and then fill in @cm_ops
+ * accordingly.
+ *
+ * Call tb_domain_put() to release the domain before it has been added
+ * to the system.
+ *
+ * Return: allocated domain structure or %NULL in case of error
+ */
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
+{
+ struct tb *tb;
+
+ /*
+ * Make sure the structure sizes match what the hardware
+ * expects because bit-fields are being used.
+ */
+ BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
+ BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
+ BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
+
+ tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
+ if (!tb)
+ return NULL;
+
+ tb->nhi = nhi;
+ mutex_init(&tb->lock);
+
+ tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
+ if (tb->index < 0)
+ goto err_free;
+
+ tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
+ if (!tb->wq)
+ goto err_remove_ida;
+
+ tb->dev.parent = &nhi->pdev->dev;
+ tb->dev.bus = &tb_bus_type;
+ tb->dev.type = &tb_domain_type;
+ tb->dev.groups = domain_attr_groups;
+ dev_set_name(&tb->dev, "domain%d", tb->index);
+ device_initialize(&tb->dev);
+
+ return tb;
+
+err_remove_ida:
+ ida_simple_remove(&tb_domain_ida, tb->index);
+err_free:
+ kfree(tb);
+
+ return NULL;
+}
+
+static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size)
+{
+ struct tb *tb = data;
+
+ if (!tb->cm_ops->handle_event) {
+ tb_warn(tb, "domain does not have event handler\n");
+ return true;
+ }
+
+ switch (type) {
+ case TB_CFG_PKG_XDOMAIN_REQ:
+ case TB_CFG_PKG_XDOMAIN_RESP:
+ return tb_xdomain_handle_request(tb, type, buf, size);
+
+ default:
+ tb->cm_ops->handle_event(tb, type, buf, size);
+ }
+
+ return true;
+}
+
+/**
+ * tb_domain_add() - Add domain to the system
+ * @tb: Domain to add
+ *
+ * Starts the domain and adds it to the system. Hotplugging devices will
+ * work after this function has returned successfully. In order to remove
+ * and release the domain after this function has been called, call
+ * tb_domain_remove().
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_domain_add(struct tb *tb)
+{
+ int ret;
+
+ if (WARN_ON(!tb->cm_ops))
+ return -EINVAL;
+
+ mutex_lock(&tb->lock);
+
+ tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb);
+ if (!tb->ctl) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ /*
+ * tb_schedule_hotplug_handler may be called as soon as the config
+ * channel is started. That's why we have to hold the lock here.
+ */
+ tb_ctl_start(tb->ctl);
+
+ if (tb->cm_ops->driver_ready) {
+ ret = tb->cm_ops->driver_ready(tb);
+ if (ret)
+ goto err_ctl_stop;
+ }
+
+ ret = device_add(&tb->dev);
+ if (ret)
+ goto err_ctl_stop;
+
+ /* Start the domain */
+ if (tb->cm_ops->start) {
+ ret = tb->cm_ops->start(tb);
+ if (ret)
+ goto err_domain_del;
+ }
+
+ /* This starts event processing */
+ mutex_unlock(&tb->lock);
+
+ device_init_wakeup(&tb->dev, true);
+
+ pm_runtime_no_callbacks(&tb->dev);
+ pm_runtime_set_active(&tb->dev);
+ pm_runtime_enable(&tb->dev);
+ pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_use_autosuspend(&tb->dev);
+
+ return 0;
+
+err_domain_del:
+ device_del(&tb->dev);
+err_ctl_stop:
+ tb_ctl_stop(tb->ctl);
+err_unlock:
+ mutex_unlock(&tb->lock);
+
+ return ret;
+}
+
+/**
+ * tb_domain_remove() - Removes and releases a domain
+ * @tb: Domain to remove
+ *
+ * Stops the domain, removes it from the system and releases all
+ * resources once the last reference has been released.
+ */
+void tb_domain_remove(struct tb *tb)
+{
+ mutex_lock(&tb->lock);
+ if (tb->cm_ops->stop)
+ tb->cm_ops->stop(tb);
+ /* Stop the domain control traffic */
+ tb_ctl_stop(tb->ctl);
+ mutex_unlock(&tb->lock);
+
+ flush_workqueue(tb->wq);
+ device_unregister(&tb->dev);
+}
+
+/**
+ * tb_domain_suspend_noirq() - Suspend a domain
+ * @tb: Domain to suspend
+ *
+ * Suspends all devices in the domain and stops the control channel.
+ */
+int tb_domain_suspend_noirq(struct tb *tb)
+{
+ int ret = 0;
+
+ /*
+ * The control channel interrupt is left enabled during suspend
+ * and taking the lock here prevents any events from happening before
+ * we have actually stopped the domain and the control channel.
+ */
+ mutex_lock(&tb->lock);
+ if (tb->cm_ops->suspend_noirq)
+ ret = tb->cm_ops->suspend_noirq(tb);
+ if (!ret)
+ tb_ctl_stop(tb->ctl);
+ mutex_unlock(&tb->lock);
+
+ return ret;
+}
+
+/**
+ * tb_domain_resume_noirq() - Resume a domain
+ * @tb: Domain to resume
+ *
+ * Restarts the control channel and resumes all devices connected to
+ * the domain.
+ */
+int tb_domain_resume_noirq(struct tb *tb)
+{
+ int ret = 0;
+
+ mutex_lock(&tb->lock);
+ tb_ctl_start(tb->ctl);
+ if (tb->cm_ops->resume_noirq)
+ ret = tb->cm_ops->resume_noirq(tb);
+ mutex_unlock(&tb->lock);
+
+ return ret;
+}
+
+int tb_domain_suspend(struct tb *tb)
+{
+ return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
+}
+
+int tb_domain_freeze_noirq(struct tb *tb)
+{
+ int ret = 0;
+
+ mutex_lock(&tb->lock);
+ if (tb->cm_ops->freeze_noirq)
+ ret = tb->cm_ops->freeze_noirq(tb);
+ if (!ret)
+ tb_ctl_stop(tb->ctl);
+ mutex_unlock(&tb->lock);
+
+ return ret;
+}
+
+int tb_domain_thaw_noirq(struct tb *tb)
+{
+ int ret = 0;
+
+ mutex_lock(&tb->lock);
+ tb_ctl_start(tb->ctl);
+ if (tb->cm_ops->thaw_noirq)
+ ret = tb->cm_ops->thaw_noirq(tb);
+ mutex_unlock(&tb->lock);
+
+ return ret;
+}
+
+void tb_domain_complete(struct tb *tb)
+{
+ if (tb->cm_ops->complete)
+ tb->cm_ops->complete(tb);
+}
+
+int tb_domain_runtime_suspend(struct tb *tb)
+{
+ if (tb->cm_ops->runtime_suspend) {
+ int ret = tb->cm_ops->runtime_suspend(tb);
+ if (ret)
+ return ret;
+ }
+ tb_ctl_stop(tb->ctl);
+ return 0;
+}
+
+int tb_domain_runtime_resume(struct tb *tb)
+{
+ tb_ctl_start(tb->ctl);
+ if (tb->cm_ops->runtime_resume) {
+ int ret = tb->cm_ops->runtime_resume(tb);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/**
+ * tb_domain_approve_switch() - Approve switch
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * This will approve the switch by connection manager specific means. In
+ * case of success the connection manager will create tunnels for all
+ * supported protocols.
+ */
+int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+ struct tb_switch *parent_sw;
+
+ if (!tb->cm_ops->approve_switch)
+ return -EPERM;
+
+ /* The parent switch must be authorized before this one */
+ parent_sw = tb_to_switch(sw->dev.parent);
+ if (!parent_sw || !parent_sw->authorized)
+ return -EINVAL;
+
+ return tb->cm_ops->approve_switch(tb, sw);
+}
+
+/**
+ * tb_domain_approve_switch_key() - Approve switch and add key
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * For switches that support secure connect, this function first adds
+ * the key to the switch NVM using connection manager specific means. If
+ * adding the key is successful, the switch is approved and connected.
+ *
+ * Return: %0 on success and negative errno in case of failure.
+ */
+int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+ struct tb_switch *parent_sw;
+ int ret;
+
+ if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
+ return -EPERM;
+
+ /* The parent switch must be authorized before this one */
+ parent_sw = tb_to_switch(sw->dev.parent);
+ if (!parent_sw || !parent_sw->authorized)
+ return -EINVAL;
+
+ ret = tb->cm_ops->add_switch_key(tb, sw);
+ if (ret)
+ return ret;
+
+ return tb->cm_ops->approve_switch(tb, sw);
+}
+
+/**
+ * tb_domain_challenge_switch_key() - Challenge and approve switch
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * For switches that support secure connect, this function generates
+ * a random challenge and sends it to the switch. The switch responds to
+ * this and if the response matches our random challenge, the switch is
+ * approved and connected.
+ *
+ * Return: %0 on success and negative errno in case of failure.
+ */
+int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+ u8 challenge[TB_SWITCH_KEY_SIZE];
+ u8 response[TB_SWITCH_KEY_SIZE];
+ u8 hmac[TB_SWITCH_KEY_SIZE];
+ struct tb_switch *parent_sw;
+ struct crypto_shash *tfm;
+ struct shash_desc *shash;
+ int ret;
+
+ if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
+ return -EPERM;
+
+ /* The parent switch must be authorized before this one */
+ parent_sw = tb_to_switch(sw->dev.parent);
+ if (!parent_sw || !parent_sw->authorized)
+ return -EINVAL;
+
+ get_random_bytes(challenge, sizeof(challenge));
+ ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
+ if (ret)
+ return ret;
+
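+ /* Compute HMAC-SHA256 of the challenge using the switch key and compare it to the response */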
+ tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
+ if (ret)
+ goto err_free_tfm;
+
+ shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!shash) {
+ ret = -ENOMEM;
+ goto err_free_tfm;
+ }
+
+ shash->tfm = tfm;
+
+ memset(hmac, 0, sizeof(hmac));
+ ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
+ if (ret)
+ goto err_free_shash;
+
+ /* The returned HMAC must match the one we calculated */
+ if (memcmp(response, hmac, sizeof(hmac))) {
+ ret = -EKEYREJECTED;
+ goto err_free_shash;
+ }
+
+ crypto_free_shash(tfm);
+ kfree(shash);
+
+ return tb->cm_ops->approve_switch(tb, sw);
+
+err_free_shash:
+ kfree(shash);
+err_free_tfm:
+ crypto_free_shash(tfm);
+
+ return ret;
+}
+
+/**
+ * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
+ * @tb: Domain whose PCIe paths to disconnect
+ *
+ * This needs to be called in preparation for NVM upgrade of the host
+ * controller. Makes sure all PCIe paths are disconnected.
+ *
+ * Return: %0 on success and negative errno in case of error.
+ */
+int tb_domain_disconnect_pcie_paths(struct tb *tb)
+{
+ if (!tb->cm_ops->disconnect_pcie_paths)
+ return -EPERM;
+
+ return tb->cm_ops->disconnect_pcie_paths(tb);
+}
+
+/**
+ * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
+ * @tb: Domain enabling the DMA paths
+ * @xd: XDomain DMA paths are created to
+ *
+ * Calls connection manager specific method to enable DMA paths to the
+ * XDomain in question.
+ *
+ * Return: %0 in case of success and negative errno otherwise. In
+ * particular returns %-ENOTSUPP if the connection manager
+ * implementation does not support XDomains.
+ */
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ if (!tb->cm_ops->approve_xdomain_paths)
+ return -ENOTSUPP;
+
+ return tb->cm_ops->approve_xdomain_paths(tb, xd);
+}
+
+/**
+ * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
+ * @tb: Domain disabling the DMA paths
+ * @xd: XDomain whose DMA paths are disconnected
+ *
+ * Calls connection manager specific method to disconnect DMA paths to
+ * the XDomain in question.
+ *
+ * Return: %0 in case of success and negative errno otherwise. In
+ * particular returns %-ENOTSUPP if the connection manager
+ * implementation does not support XDomains.
+ */
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ if (!tb->cm_ops->disconnect_xdomain_paths)
+ return -ENOTSUPP;
+
+ return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
+}
+
+static int disconnect_xdomain(struct device *dev, void *data)
+{
+ struct tb_xdomain *xd;
+ struct tb *tb = data;
+ int ret = 0;
+
+ xd = tb_to_xdomain(dev);
+ if (xd && xd->tb == tb)
+ ret = tb_xdomain_disable_paths(xd);
+
+ return ret;
+}
+
+/**
+ * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
+ * @tb: Domain whose paths are disconnected
+ *
+ * This function can be used to disconnect all paths (PCIe, XDomain) for
+ * example in preparation for host NVM firmware upgrade. After this is
+ * called the paths cannot be established without resetting the switch.
+ *
+ * Return: %0 in case of success and negative errno otherwise.
+ */
+int tb_domain_disconnect_all_paths(struct tb *tb)
+{
+ int ret;
+
+ ret = tb_domain_disconnect_pcie_paths(tb);
+ if (ret)
+ return ret;
+
+ return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
+}
+
+int tb_domain_init(void)
+{
+ int ret;
+
+ tb_test_init();
+
+ tb_debugfs_init();
+ ret = tb_xdomain_init();
+ if (ret)
+ goto err_debugfs;
+ ret = bus_register(&tb_bus_type);
+ if (ret)
+ goto err_xdomain;
+
+ return 0;
+
+err_xdomain:
+ tb_xdomain_exit();
+err_debugfs:
+ tb_debugfs_exit();
+ tb_test_exit();
+
+ return ret;
+}
+
+void tb_domain_exit(void)
+{
+ bus_unregister(&tb_bus_type);
+ ida_destroy(&tb_domain_ida);
+ tb_nvm_exit();
+ tb_xdomain_exit();
+ tb_debugfs_exit();
+ tb_test_exit();
+}
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
new file mode 100644
index 000000000..0c8471be3
--- /dev/null
+++ b/drivers/thunderbolt/eeprom.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt driver - eeprom access
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include "tb.h"
+
+/**
+ * tb_eeprom_ctl_write() - write control word
+ */
+static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
+{
+ return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
+}
+
+/**
+ * tb_eeprom_ctl_read() - read control word
+ */
+static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
+{
+ return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
+}
+
+enum tb_eeprom_transfer {
+ TB_EEPROM_IN,
+ TB_EEPROM_OUT,
+};
+
+/**
+ * tb_eeprom_active - enable rom access
+ *
+ * WARNING: Always disable access after usage. Otherwise the controller will
+ * fail to reprobe.
+ */
+static int tb_eeprom_active(struct tb_switch *sw, bool enable)
+{
+ struct tb_eeprom_ctl ctl;
+ int res = tb_eeprom_ctl_read(sw, &ctl);
+ if (res)
+ return res;
+ if (enable) {
+ ctl.access_high = 1;
+ res = tb_eeprom_ctl_write(sw, &ctl);
+ if (res)
+ return res;
+ ctl.access_low = 0;
+ return tb_eeprom_ctl_write(sw, &ctl);
+ } else {
+ ctl.access_low = 1;
+ res = tb_eeprom_ctl_write(sw, &ctl);
+ if (res)
+ return res;
+ ctl.access_high = 0;
+ return tb_eeprom_ctl_write(sw, &ctl);
+ }
+}
+
+/**
+ * tb_eeprom_transfer - transfer one bit
+ *
+ * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
+ * If TB_EEPROM_OUT is passed, then ctl->data_out will be written.
+ */
+static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
+ enum tb_eeprom_transfer direction)
+{
+ int res;
+ if (direction == TB_EEPROM_OUT) {
+ res = tb_eeprom_ctl_write(sw, ctl);
+ if (res)
+ return res;
+ }
+ ctl->clock = 1;
+ res = tb_eeprom_ctl_write(sw, ctl);
+ if (res)
+ return res;
+ if (direction == TB_EEPROM_IN) {
+ res = tb_eeprom_ctl_read(sw, ctl);
+ if (res)
+ return res;
+ }
+ ctl->clock = 0;
+ return tb_eeprom_ctl_write(sw, ctl);
+}
+
+/**
+ * tb_eeprom_out - write one byte to the bus
+ */
+static int tb_eeprom_out(struct tb_switch *sw, u8 val)
+{
+ struct tb_eeprom_ctl ctl;
+ int i;
+ int res = tb_eeprom_ctl_read(sw, &ctl);
+ if (res)
+ return res;
+ for (i = 0; i < 8; i++) {
+ ctl.data_out = val & 0x80;
+ res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
+ if (res)
+ return res;
+ val <<= 1;
+ }
+ return 0;
+}
+
+/**
+ * tb_eeprom_in - read one byte from the bus
+ */
+static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
+{
+ struct tb_eeprom_ctl ctl;
+ int i;
+ int res = tb_eeprom_ctl_read(sw, &ctl);
+ if (res)
+ return res;
+ *val = 0;
+ for (i = 0; i < 8; i++) {
+ *val <<= 1;
+ res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
+ if (res)
+ return res;
+ *val |= ctl.data_in;
+ }
+ return 0;
+}
+
+/**
+ * tb_eeprom_get_drom_offset - get drom offset within eeprom
+ */
+static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
+{
+ struct tb_cap_plug_events cap;
+ int res;
+
+ if (!sw->cap_plug_events) {
+ tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
+ return -ENODEV;
+ }
+ res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
+ sizeof(cap) / 4);
+ if (res)
+ return res;
+
+ if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
+ tb_sw_warn(sw, "no NVM\n");
+ return -ENODEV;
+ }
+
+ if (cap.drom_offset > 0xffff) {
+ tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
+ cap.drom_offset);
+ return -ENXIO;
+ }
+ *offset = cap.drom_offset;
+ return 0;
+}
+
+/**
+ * tb_eeprom_read_n - read count bytes from offset into val
+ */
+static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
+ size_t count)
+{
+ u16 drom_offset;
+ int i, res;
+
+ res = tb_eeprom_get_drom_offset(sw, &drom_offset);
+ if (res)
+ return res;
+
+ offset += drom_offset;
+
+ res = tb_eeprom_active(sw, true);
+ if (res)
+ return res;
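+ /* Send the EEPROM read opcode (0x3) followed by the 16-bit offset, MSB first */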
+ res = tb_eeprom_out(sw, 3);
+ if (res)
+ return res;
+ res = tb_eeprom_out(sw, offset >> 8);
+ if (res)
+ return res;
+ res = tb_eeprom_out(sw, offset);
+ if (res)
+ return res;
+ for (i = 0; i < count; i++) {
+ res = tb_eeprom_in(sw, val + i);
+ if (res)
+ return res;
+ }
+ return tb_eeprom_active(sw, false);
+}
+
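+/* CRC8 with polynomial 0x7 and initial value 0xff, used for the DROM UID field */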
+static u8 tb_crc8(u8 *data, int len)
+{
+ int i, j;
+ u8 val = 0xff;
+ for (i = 0; i < len; i++) {
+ val ^= data[i];
+ for (j = 0; j < 8; j++)
+ val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
+ }
+ return val;
+}
+
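+/* CRC32C (Castagnoli) over the DROM data section */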
+static u32 tb_crc32(void *data, size_t len)
+{
+ return ~__crc32c_le(~0, data, len);
+}
+
+#define TB_DROM_DATA_START 13
+struct tb_drom_header {
+ /* BYTE 0 */
+ u8 uid_crc8; /* checksum for uid */
+ /* BYTES 1-8 */
+ u64 uid;
+ /* BYTES 9-12 */
+ u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
+ /* BYTE 13 */
+ u8 device_rom_revision; /* should be <= 1 */
+ u16 data_len:10;
+ u8 __unknown1:6;
+ /* BYTES 16-21 */
+ u16 vendor_id;
+ u16 model_id;
+ u8 model_rev;
+ u8 eeprom_rev;
+} __packed;
+
+enum tb_drom_entry_type {
+ /* force unsigned to prevent "one-bit signed bitfield" warning */
+ TB_DROM_ENTRY_GENERIC = 0U,
+ TB_DROM_ENTRY_PORT,
+};
+
+struct tb_drom_entry_header {
+ u8 len;
+ u8 index:6;
+ bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
+ enum tb_drom_entry_type type:1;
+} __packed;
+
+struct tb_drom_entry_generic {
+ struct tb_drom_entry_header header;
+ u8 data[];
+} __packed;
+
+struct tb_drom_entry_port {
+ /* BYTES 0-1 */
+ struct tb_drom_entry_header header;
+ /* BYTE 2 */
+ u8 dual_link_port_rid:4;
+ u8 link_nr:1;
+ u8 unknown1:2;
+ bool has_dual_link_port:1;
+
+ /* BYTE 3 */
+ u8 dual_link_port_nr:6;
+ u8 unknown2:2;
+
+ /* BYTES 4 - 5 TODO decode */
+ u8 micro2:4;
+ u8 micro1:4;
+ u8 micro3;
+
+ /* BYTES 6-7, TODO: verify (find hardware that has these set) */
+ u8 peer_port_rid:4;
+ u8 unknown3:3;
+ bool has_peer_port:1;
+ u8 peer_port_nr:6;
+ u8 unknown4:2;
+} __packed;
+
+
+/**
+ * tb_drom_read_uid_only - read uid directly from drom
+ *
+ * Does not use the cached copy in sw->drom. Used during resume to check switch
+ * identity.
+ */
+int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
+{
+ u8 data[9];
+ u8 crc;
+ int res;
+
+ /* read uid */
+ res = tb_eeprom_read_n(sw, 0, data, 9);
+ if (res)
+ return res;
+
+ crc = tb_crc8(data + 1, 8);
+ if (crc != data[0]) {
+ tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
+ data[0], crc);
+ return -EIO;
+ }
+
+ *uid = *(u64 *)(data+1);
+ return 0;
+}
+
+static int tb_drom_parse_entry_generic(struct tb_switch *sw,
+ struct tb_drom_entry_header *header)
+{
+ const struct tb_drom_entry_generic *entry =
+ (const struct tb_drom_entry_generic *)header;
+
+ switch (header->index) {
+ case 1:
+ /* Length includes 2 bytes header so remove it before copy */
+ sw->vendor_name = kstrndup(entry->data,
+ header->len - sizeof(*header), GFP_KERNEL);
+ if (!sw->vendor_name)
+ return -ENOMEM;
+ break;
+
+ case 2:
+ sw->device_name = kstrndup(entry->data,
+ header->len - sizeof(*header), GFP_KERNEL);
+ if (!sw->device_name)
+ return -ENOMEM;
+ break;
+ }
+
+ return 0;
+}
+
+static int tb_drom_parse_entry_port(struct tb_switch *sw,
+ struct tb_drom_entry_header *header)
+{
+ struct tb_port *port;
+ int res;
+ enum tb_port_type type;
+
+ /*
+ * Some DROMs list more ports than the controller actually has
+ * so we skip those but allow the parser to continue.
+ */
+ if (header->index > sw->config.max_port_number) {
+ dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
+ return 0;
+ }
+
+ port = &sw->ports[header->index];
+ port->disabled = header->port_disabled;
+ if (port->disabled)
+ return 0;
+
+ res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
+ if (res)
+ return res;
+ type &= 0xffffff;
+
+ if (type == TB_TYPE_PORT) {
+ struct tb_drom_entry_port *entry = (void *) header;
+ if (header->len != sizeof(*entry)) {
+ tb_sw_warn(sw,
+ "port entry has size %#x (expected %#zx)\n",
+ header->len, sizeof(struct tb_drom_entry_port));
+ return -EIO;
+ }
+ port->link_nr = entry->link_nr;
+ if (entry->has_dual_link_port)
+ port->dual_link_port =
+ &port->sw->ports[entry->dual_link_port_nr];
+ }
+ return 0;
+}
+
+/**
+ * tb_drom_parse_entries - parse the linked list of drom entries
+ *
+ * Drom must have been copied to sw->drom.
+ */
+static int tb_drom_parse_entries(struct tb_switch *sw)
+{
+ struct tb_drom_header *header = (void *) sw->drom;
+ u16 pos = sizeof(*header);
+ u16 drom_size = header->data_len + TB_DROM_DATA_START;
+ int res;
+
+ while (pos < drom_size) {
+ struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
+ if (pos + 1 == drom_size || pos + entry->len > drom_size
+ || !entry->len) {
+ tb_sw_warn(sw, "DROM buffer overrun\n");
+ return -EILSEQ;
+ }
+
+ switch (entry->type) {
+ case TB_DROM_ENTRY_GENERIC:
+ res = tb_drom_parse_entry_generic(sw, entry);
+ break;
+ case TB_DROM_ENTRY_PORT:
+ res = tb_drom_parse_entry_port(sw, entry);
+ break;
+ }
+ if (res)
+ return res;
+
+ pos += entry->len;
+ }
+ return 0;
+}
+
+/**
+ * tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
+ */
+static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
+{
+ struct device *dev = &sw->tb->nhi->pdev->dev;
+ int len, res;
+
+ len = device_property_count_u8(dev, "ThunderboltDROM");
+ if (len < 0 || len < sizeof(struct tb_drom_header))
+ return -EINVAL;
+
+ sw->drom = kmalloc(len, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+ res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
+ len);
+ if (res)
+ goto err;
+
+ *size = ((struct tb_drom_header *)sw->drom)->data_len +
+ TB_DROM_DATA_START;
+ if (*size > len)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(sw->drom);
+ sw->drom = NULL;
+ return -EINVAL;
+}
+
+static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
+{
+ u32 drom_offset;
+ int ret;
+
+ if (!sw->dma_port)
+ return -ENODEV;
+
+ ret = tb_sw_read(sw, &drom_offset, TB_CFG_SWITCH,
+ sw->cap_plug_events + 12, 1);
+ if (ret)
+ return ret;
+
+ if (!drom_offset)
+ return -ENODEV;
+
+ ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
+ sizeof(*size));
+ if (ret)
+ return ret;
+
+ /* Size includes CRC8 + UID + CRC32 */
+ *size += 1 + 8 + 4;
+ sw->drom = kzalloc(*size, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+ ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
+ if (ret)
+ goto err_free;
+
+ /*
+ * Read UID from the minimal DROM because the one in NVM is just
+ * a placeholder.
+ */
+ tb_drom_read_uid_only(sw, &sw->uid);
+ return 0;
+
+err_free:
+ kfree(sw->drom);
+ sw->drom = NULL;
+ return ret;
+}
+
+static int usb4_copy_host_drom(struct tb_switch *sw, u16 *size)
+{
+ int ret;
+
+ ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size));
+ if (ret)
+ return ret;
+
+ /* Size includes CRC8 + UID + CRC32 */
+ *size += 1 + 8 + 4;
+ sw->drom = kzalloc(*size, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+ ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
+ if (ret) {
+ kfree(sw->drom);
+ sw->drom = NULL;
+ }
+
+ return ret;
+}
+
+static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
+ size_t count)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_drom_read(sw, offset, val, count);
+ return tb_eeprom_read_n(sw, offset, val, count);
+}
+
+/**
+ * tb_drom_read - copy drom to sw->drom and parse it
+ */
+int tb_drom_read(struct tb_switch *sw)
+{
+ u16 size;
+ u32 crc;
+ struct tb_drom_header *header;
+ int res, retries = 1;
+
+ if (sw->drom)
+ return 0;
+
+ if (tb_route(sw) == 0) {
+ /*
+ * Apple's NHI EFI driver supplies a DROM for the root switch
+ * in a device property. Use it if available.
+ */
+ if (tb_drom_copy_efi(sw, &size) == 0)
+ goto parse;
+
+ /* Non-Apple hardware has the DROM as part of NVM */
+ if (tb_drom_copy_nvm(sw, &size) == 0)
+ goto parse;
+
+ /*
+ * USB4 hosts may support reading DROM through router
+ * operations.
+ */
+ if (tb_switch_is_usb4(sw)) {
+ usb4_switch_read_uid(sw, &sw->uid);
+ if (!usb4_copy_host_drom(sw, &size))
+ goto parse;
+ } else {
+ /*
+ * The root switch contains only a dummy drom
+ * (header only, no entries). Hardcode the
+ * configuration here.
+ */
+ tb_drom_read_uid_only(sw, &sw->uid);
+ }
+
+ return 0;
+ }
+
+ res = tb_drom_read_n(sw, 14, (u8 *) &size, 2);
+ if (res)
+ return res;
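+ /* The DROM length is the low 10 bits of the word at offset 14 (data_len in struct tb_drom_header) */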
+ size &= 0x3ff;
+ size += TB_DROM_DATA_START;
+ tb_sw_dbg(sw, "reading drom (length: %#x)\n", size);
+ if (size < sizeof(*header)) {
+ tb_sw_warn(sw, "drom too small, aborting\n");
+ return -EIO;
+ }
+
+ sw->drom = kzalloc(size, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+ res = tb_drom_read_n(sw, 0, sw->drom, size);
+ if (res)
+ goto err;
+
+parse:
+ header = (void *) sw->drom;
+
+ if (header->data_len + TB_DROM_DATA_START != size) {
+ tb_sw_warn(sw, "drom size mismatch, aborting\n");
+ goto err;
+ }
+
+ crc = tb_crc8((u8 *) &header->uid, 8);
+ if (crc != header->uid_crc8) {
+ tb_sw_warn(sw,
+ "drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
+ header->uid_crc8, crc);
+ goto err;
+ }
+ if (!sw->uid)
+ sw->uid = header->uid;
+ sw->vendor = header->vendor_id;
+ sw->device = header->model_id;
+ tb_check_quirks(sw);
+
+ crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
+ if (crc != header->data_crc32) {
+ tb_sw_warn(sw,
+ "drom data crc32 mismatch (expected: %#x, got: %#x), continuing\n",
+ header->data_crc32, crc);
+ }
+
+ if (header->device_rom_revision > 2)
+ tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
+ header->device_rom_revision);
+
+ res = tb_drom_parse_entries(sw);
+ /* If the DROM parsing fails, wait a moment and retry once */
+ if (res == -EILSEQ && retries--) {
+ tb_sw_warn(sw, "parsing DROM failed, retrying\n");
+ msleep(100);
+ res = tb_drom_read_n(sw, 0, sw->drom, size);
+ if (!res)
+ goto parse;
+ }
+
+ return res;
+err:
+ kfree(sw->drom);
+ sw->drom = NULL;
+ return -EIO;
+
+}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
new file mode 100644
index 000000000..90f1d9a53
--- /dev/null
+++ b/drivers/thunderbolt/icm.c
@@ -0,0 +1,2324 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Internal Thunderbolt Connection Manager. This is firmware running on
+ * the Thunderbolt host controller that performs most of the low-level
+ * handling.
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/x86/apple.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "ctl.h"
+#include "nhi_regs.h"
+#include "tb.h"
+
+#define PCIE2CIO_CMD 0x30
+#define PCIE2CIO_CMD_TIMEOUT BIT(31)
+#define PCIE2CIO_CMD_START BIT(30)
+#define PCIE2CIO_CMD_WRITE BIT(21)
+#define PCIE2CIO_CMD_CS_MASK GENMASK(20, 19)
+#define PCIE2CIO_CMD_CS_SHIFT 19
+#define PCIE2CIO_CMD_PORT_MASK GENMASK(18, 13)
+#define PCIE2CIO_CMD_PORT_SHIFT 13
+
+#define PCIE2CIO_WRDATA 0x34
+#define PCIE2CIO_RDDATA 0x38
+
+#define PHY_PORT_CS1 0x37
+#define PHY_PORT_CS1_LINK_DISABLE BIT(14)
+#define PHY_PORT_CS1_LINK_STATE_MASK GENMASK(29, 26)
+#define PHY_PORT_CS1_LINK_STATE_SHIFT 26
+
+#define ICM_TIMEOUT 5000 /* ms */
+#define ICM_RETRIES 3
+#define ICM_APPROVE_TIMEOUT 10000 /* ms */
+#define ICM_MAX_LINK 4
+
+static bool start_icm;
+module_param(start_icm, bool, 0444);
+MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");
+
+/**
+ * struct icm - Internal connection manager private data
+ * @request_lock: Makes sure only one message is sent to the ICM at a time
+ * @rescan_work: Work used to rescan the surviving switches after resume
+ * @upstream_port: Pointer to the PCIe upstream port this host
+ * controller is connected to. This is only set for systems
+ * where ICM needs to be started manually
+ * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
+ * (only set when @upstream_port is not %NULL)
+ * @safe_mode: ICM is in safe mode
+ * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
+ * @rpm: Does the controller support runtime PM (RTD3)
+ * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller
+ * @veto: Is RTD3 veto in effect
+ * @is_supported: Checks if we can support ICM on this controller
+ * @cio_reset: Trigger CIO reset
+ * @get_mode: Read and return the ICM firmware mode (optional)
+ * @get_route: Find a route string for given switch
+ * @save_devices: Ask ICM to save devices to ACL when suspending (optional)
+ * @driver_ready: Send driver ready message to ICM
+ * @set_uuid: Set UUID for the root switch (optional)
+ * @device_connected: Handle device connected ICM message
+ * @device_disconnected: Handle device disconnected ICM message
+ * @xdomain_connected: Handle XDomain connected ICM message
+ * @xdomain_disconnected: Handle XDomain disconnected ICM message
+ * @rtd3_veto: Handle RTD3 veto notification ICM message
+ */
+struct icm {
+ struct mutex request_lock;
+ struct delayed_work rescan_work;
+ struct pci_dev *upstream_port;
+ size_t max_boot_acl;
+ int vnd_cap;
+ bool safe_mode;
+ bool rpm;
+ bool can_upgrade_nvm;
+ bool veto;
+ bool (*is_supported)(struct tb *tb);
+ int (*cio_reset)(struct tb *tb);
+ int (*get_mode)(struct tb *tb);
+ int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
+ void (*save_devices)(struct tb *tb);
+ int (*driver_ready)(struct tb *tb,
+ enum tb_security_level *security_level,
+ size_t *nboot_acl, bool *rpm);
+ void (*set_uuid)(struct tb *tb);
+ void (*device_connected)(struct tb *tb,
+ const struct icm_pkg_header *hdr);
+ void (*device_disconnected)(struct tb *tb,
+ const struct icm_pkg_header *hdr);
+ void (*xdomain_connected)(struct tb *tb,
+ const struct icm_pkg_header *hdr);
+ void (*xdomain_disconnected)(struct tb *tb,
+ const struct icm_pkg_header *hdr);
+ void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr);
+};
+
+struct icm_notification {
+ struct work_struct work;
+ struct icm_pkg_header *pkg;
+ struct tb *tb;
+};
+
+struct ep_name_entry {
+ u8 len;
+ u8 type;
+ u8 data[];
+};
+
+#define EP_NAME_INTEL_VSS 0x10
+
+/* Intel Vendor specific structure */
+struct intel_vss {
+ u16 vendor;
+ u16 model;
+ u8 mc;
+ u8 flags;
+ u16 pci_devid;
+ u32 nvm_version;
+};
+
+#define INTEL_VSS_FLAGS_RTD3 BIT(0)
+
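+/*
+ * Walk the length-prefixed ep_name entries and return the Intel vendor
+ * specific (VSS) entry if one is present.
+ */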
+static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
+{
+ const void *end = ep_name + size;
+
+ while (ep_name < end) {
+ const struct ep_name_entry *ep = ep_name;
+
+ if (!ep->len)
+ break;
+ if (ep_name + ep->len > end)
+ break;
+
+ if (ep->type == EP_NAME_INTEL_VSS)
+ return (const struct intel_vss *)ep->data;
+
+ ep_name += ep->len;
+ }
+
+ return NULL;
+}
+
+static bool intel_vss_is_rtd3(const void *ep_name, size_t size)
+{
+ const struct intel_vss *vss;
+
+ vss = parse_intel_vss(ep_name, size);
+ if (vss)
+ return !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
+
+ return false;
+}
+
+static inline struct tb *icm_to_tb(struct icm *icm)
+{
+ return ((void *)icm - sizeof(struct tb));
+}
+
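+/* Extract the last hop of the route string and map it to a physical port */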
+static inline u8 phy_port_from_route(u64 route, u8 depth)
+{
+ u8 link;
+
+ link = depth ? route >> ((depth - 1) * 8) : route;
+ return tb_phy_port_from_link(link);
+}
+
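+/* Return the other link of a dual-link pair (1 <-> 2, 3 <-> 4), or 0 */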
+static inline u8 dual_link_from_link(u8 link)
+{
+ return link ? ((link - 1) ^ 0x01) + 1 : 0;
+}
+
+static inline u64 get_route(u32 route_hi, u32 route_lo)
+{
+ return (u64)route_hi << 32 | route_lo;
+}
+
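+/* Strip the last hop from the route string to get the parent switch route */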
+static inline u64 get_parent_route(u64 route)
+{
+ int depth = tb_route_length(route);
+ return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
+}
+
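+/* Poll the PCIe2CIO command register until the operation completes or times out */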
+static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
+{
+ unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
+ u32 cmd;
+
+ do {
+ pci_read_config_dword(icm->upstream_port,
+ icm->vnd_cap + PCIE2CIO_CMD, &cmd);
+ if (!(cmd & PCIE2CIO_CMD_START)) {
+ if (cmd & PCIE2CIO_CMD_TIMEOUT)
+ break;
+ return 0;
+ }
+
+ msleep(50);
+ } while (time_before(jiffies, end));
+
+ return -ETIMEDOUT;
+}
+
+static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
+ unsigned int port, unsigned int index, u32 *data)
+{
+ struct pci_dev *pdev = icm->upstream_port;
+ int ret, vnd_cap = icm->vnd_cap;
+ u32 cmd;
+
+ cmd = index;
+ cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+ cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+ cmd |= PCIE2CIO_CMD_START;
+ pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
+
+ ret = pci2cio_wait_completion(icm, 5000);
+ if (ret)
+ return ret;
+
+ pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
+ return 0;
+}
+
+static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
+ unsigned int port, unsigned int index, u32 data)
+{
+ struct pci_dev *pdev = icm->upstream_port;
+ int vnd_cap = icm->vnd_cap;
+ u32 cmd;
+
+ pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);
+
+ cmd = index;
+ cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+ cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+ cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
+ pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
+
+ return pci2cio_wait_completion(icm, 5000);
+}
+
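+/* A response matches the request when the packet type and message code agree */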
+static bool icm_match(const struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg)
+{
+ const struct icm_pkg_header *res_hdr = pkg->buffer;
+ const struct icm_pkg_header *req_hdr = req->request;
+
+ if (pkg->frame.eof != req->response_type)
+ return false;
+ if (res_hdr->code != req_hdr->code)
+ return false;
+
+ return true;
+}
+
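+/*
+ * Copy each received packet to its slot in the response buffer. Returns
+ * true once the last packet of the response has arrived.
+ */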
+static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+ const struct icm_pkg_header *hdr = pkg->buffer;
+
+ if (hdr->packet_id < req->npackets) {
+ size_t offset = hdr->packet_id * req->response_size;
+
+ memcpy(req->response + offset, pkg->buffer, req->response_size);
+ }
+
+ return hdr->packet_id == hdr->total_packets - 1;
+}
+
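+/*
+ * Send a request to the ICM firmware and wait for the response. Requests
+ * are serialized with icm->request_lock and a timed out request is
+ * retried the given number of times.
+ */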
+static int icm_request(struct tb *tb, const void *request, size_t request_size,
+ void *response, size_t response_size, size_t npackets,
+ int retries, unsigned int timeout_msec)
+{
+ struct icm *icm = tb_priv(tb);
+
+ do {
+ struct tb_cfg_request *req;
+ struct tb_cfg_result res;
+
+ req = tb_cfg_request_alloc();
+ if (!req)
+ return -ENOMEM;
+
+ req->match = icm_match;
+ req->copy = icm_copy;
+ req->request = request;
+ req->request_size = request_size;
+ req->request_type = TB_CFG_PKG_ICM_CMD;
+ req->response = response;
+ req->npackets = npackets;
+ req->response_size = response_size;
+ req->response_type = TB_CFG_PKG_ICM_RESP;
+
+ mutex_lock(&icm->request_lock);
+ res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
+ mutex_unlock(&icm->request_lock);
+
+ tb_cfg_request_put(req);
+
+ if (res.err != -ETIMEDOUT)
+ return res.err == 1 ? -EIO : res.err;
+
+ usleep_range(20, 50);
+ } while (retries--);
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * If rescan is queued to run (we are resuming), postpone it to give the
+ * firmware some more time to send device connected notifications for the next
+ * devices in the chain.
+ */
+static void icm_postpone_rescan(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ if (delayed_work_pending(&icm->rescan_work))
+ mod_delayed_work(tb->wq, &icm->rescan_work,
+ msecs_to_jiffies(500));
+}
+
+static void icm_veto_begin(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ if (!icm->veto) {
+ icm->veto = true;
+ /* Keep the domain powered while veto is in effect */
+ pm_runtime_get(&tb->dev);
+ }
+}
+
+static void icm_veto_end(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ if (icm->veto) {
+ icm->veto = false;
+ /* Allow the domain suspend now */
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
+ }
+}
+
+static bool icm_firmware_running(const struct tb_nhi *nhi)
+{
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ return !!(val & REG_FW_STS_ICM_EN);
+}
+
+static bool icm_fr_is_supported(struct tb *tb)
+{
+ return !x86_apple_machine;
+}
+
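+/*
+ * Return the switch index encoded in a topology response port entry, or
+ * 0 if there is no switch behind the port.
+ */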
+static inline int icm_fr_get_switch_index(u32 port)
+{
+ int index;
+
+ if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
+ return 0;
+
+ index = port >> ICM_PORT_INDEX_SHIFT;
+ return index != 0xff ? index : 0;
+}
+
+static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+{
+ struct icm_fr_pkg_get_topology_response *switches, *sw;
+ struct icm_fr_pkg_get_topology request = {
+ .hdr = { .code = ICM_GET_TOPOLOGY },
+ };
+ size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
+ int ret, index;
+ u8 i;
+
+ switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
+ if (!switches)
+ return -ENOMEM;
+
+ ret = icm_request(tb, &request, sizeof(request), switches,
+ sizeof(*switches), npackets, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ goto err_free;
+
+ sw = &switches[0];
+ index = icm_fr_get_switch_index(sw->ports[link]);
+ if (!index) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ sw = &switches[index];
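+ /* Walk the topology response downstream until the requested depth */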
+ for (i = 1; i < depth; i++) {
+ unsigned int j;
+
+ if (!(sw->first_data & ICM_SWITCH_USED)) {
+ ret = -ENODEV;
+ goto err_free;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
+ index = icm_fr_get_switch_index(sw->ports[j]);
+ if (index > sw->switch_index) {
+ sw = &switches[index];
+ break;
+ }
+ }
+ }
+
+ *route = get_route(sw->route_hi, sw->route_lo);
+
+err_free:
+ kfree(switches);
+ return ret;
+}
+
+static void icm_fr_save_devices(struct tb *tb)
+{
+ nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
+}
+
+static int
+icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ size_t *nboot_acl, bool *rpm)
+{
+ struct icm_fr_pkg_driver_ready_response reply;
+ struct icm_pkg_driver_ready request = {
+ .hdr.code = ICM_DRIVER_READY,
+ };
+ int ret;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (security_level)
+ *security_level = reply.security_level & ICM_FR_SLEVEL_MASK;
+
+ return 0;
+}
+
+static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+ struct icm_fr_pkg_approve_device request;
+ struct icm_fr_pkg_approve_device reply;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+ request.hdr.code = ICM_APPROVE_DEVICE;
+ request.connection_id = sw->connection_id;
+ request.connection_key = sw->connection_key;
+
+ memset(&reply, 0, sizeof(reply));
+ /* Use larger timeout as establishing tunnels can take some time */
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+ tb_warn(tb, "PCIe tunnel creation failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+ struct icm_fr_pkg_add_device_key request;
+ struct icm_fr_pkg_add_device_key_response reply;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+ request.hdr.code = ICM_ADD_DEVICE_KEY;
+ request.connection_id = sw->connection_id;
+ request.connection_key = sw->connection_key;
+ memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+ tb_warn(tb, "Adding key to switch failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
+ const u8 *challenge, u8 *response)
+{
+ struct icm_fr_pkg_challenge_device request;
+ struct icm_fr_pkg_challenge_device_response reply;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+ request.hdr.code = ICM_CHALLENGE_DEVICE;
+ request.connection_id = sw->connection_id;
+ request.connection_key = sw->connection_key;
+ memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR)
+ return -EKEYREJECTED;
+ if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
+ return -ENOKEY;
+
+ memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
+
+ return 0;
+}
+
+static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ struct icm_fr_pkg_approve_xdomain_response reply;
+ struct icm_fr_pkg_approve_xdomain request;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.hdr.code = ICM_APPROVE_XDOMAIN;
+ request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
+ memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
+
+ request.transmit_path = xd->transmit_path;
+ request.transmit_ring = xd->transmit_ring;
+ request.receive_path = xd->receive_path;
+ request.receive_ring = xd->receive_ring;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR)
+ return -EIO;
+
+ return 0;
+}
+
+static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ u8 phy_port;
+ u8 cmd;
+
+ phy_port = tb_phy_port_from_link(xd->link);
+ if (phy_port == 0)
+ cmd = NHI_MAILBOX_DISCONNECT_PA;
+ else
+ cmd = NHI_MAILBOX_DISCONNECT_PB;
+
+ nhi_mailbox_cmd(tb->nhi, cmd, 1);
+ usleep_range(10, 50);
+ nhi_mailbox_cmd(tb->nhi, cmd, 2);
+ return 0;
+}
+
+static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route,
+ const uuid_t *uuid)
+{
+ struct tb *tb = parent_sw->tb;
+ struct tb_switch *sw;
+
+ sw = tb_switch_alloc(tb, &parent_sw->dev, route);
+ if (IS_ERR(sw)) {
+ tb_warn(tb, "failed to allocate switch at %llx\n", route);
+ return sw;
+ }
+
+ sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
+ if (!sw->uuid) {
+ tb_switch_put(sw);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init_completion(&sw->rpm_complete);
+ return sw;
+}
+
+static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
+{
+ u64 route = tb_route(sw);
+ int ret;
+
+ /* Link the two switches now */
+ tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
+ tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
+
+ ret = tb_switch_add(sw);
+ if (ret)
+ tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+
+ return ret;
+}
+
+static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw,
+ u64 route, u8 connection_id, u8 connection_key,
+ u8 link, u8 depth, bool boot)
+{
+ /* Disconnect from parent */
+ tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+ /* Re-connect via updated port */
+ tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
+
+ /* Update with the new addressing information */
+ sw->config.route_hi = upper_32_bits(route);
+ sw->config.route_lo = lower_32_bits(route);
+ sw->connection_id = connection_id;
+ sw->connection_key = connection_key;
+ sw->link = link;
+ sw->depth = depth;
+ sw->boot = boot;
+
+ /* This switch still exists */
+ sw->is_unplugged = false;
+
+ /* Runtime resume is now complete */
+ complete(&sw->rpm_complete);
+}
+
+static void remove_switch(struct tb_switch *sw)
+{
+ struct tb_switch *parent_sw;
+
+ parent_sw = tb_to_switch(sw->dev.parent);
+ tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+ tb_switch_remove(sw);
+}
+
+static void add_xdomain(struct tb_switch *sw, u64 route,
+ const uuid_t *local_uuid, const uuid_t *remote_uuid,
+ u8 link, u8 depth)
+{
+ struct tb_xdomain *xd;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
+ if (!xd)
+ goto out;
+
+ xd->link = link;
+ xd->depth = depth;
+
+ tb_port_at(route, sw)->xdomain = xd;
+
+ tb_xdomain_add(xd);
+
+out:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+}
+
+static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
+{
+ xd->link = link;
+ xd->route = route;
+ xd->is_unplugged = false;
+}
+
+static void remove_xdomain(struct tb_xdomain *xd)
+{
+ struct tb_switch *sw;
+
+ sw = tb_to_switch(xd->dev.parent);
+ tb_port_at(xd->route, sw)->xdomain = NULL;
+ tb_xdomain_remove(xd);
+}
+
+static void
+icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_fr_event_device_connected *pkg =
+ (const struct icm_fr_event_device_connected *)hdr;
+ enum tb_security_level security_level;
+ struct tb_switch *sw, *parent_sw;
+ bool boot, dual_lane, speed_gen3;
+ struct icm *icm = tb_priv(tb);
+ bool authorized = false;
+ struct tb_xdomain *xd;
+ u8 link, depth;
+ u64 route;
+ int ret;
+
+ icm_postpone_rescan(tb);
+
+ link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+ depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+ ICM_LINK_INFO_DEPTH_SHIFT;
+ authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
+ security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
+ ICM_FLAGS_SLEVEL_SHIFT;
+ boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+ dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
+ speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
+
+ if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
+ tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
+ link, depth);
+ return;
+ }
+
+ sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
+ if (sw) {
+ u8 phy_port, sw_phy_port;
+
+ parent_sw = tb_to_switch(sw->dev.parent);
+ sw_phy_port = tb_phy_port_from_link(sw->link);
+ phy_port = tb_phy_port_from_link(link);
+
+ /*
+ * On resume ICM will send us connected events for the
+ * devices that are still present. However, that
+ * information might have changed, for example because a
+ * switch on a dual-link connection might now have been
+ * enumerated using the other link. Make sure our
+ * bookkeeping matches that.
+ */
+ if (sw->depth == depth && sw_phy_port == phy_port &&
+ !!sw->authorized == authorized) {
+ /*
+ * It was enumerated through another link so update
+ * route string accordingly.
+ */
+ if (sw->link != link) {
+ ret = icm->get_route(tb, link, depth, &route);
+ if (ret) {
+ tb_err(tb, "failed to update route string for switch at %u.%u\n",
+ link, depth);
+ tb_switch_put(sw);
+ return;
+ }
+ } else {
+ route = tb_route(sw);
+ }
+
+ update_switch(parent_sw, sw, route, pkg->connection_id,
+ pkg->connection_key, link, depth, boot);
+ tb_switch_put(sw);
+ return;
+ }
+
+ /*
+ * User connected the same switch to another physical
+ * port or to another part of the topology. Remove the
+ * existing switch now before adding the new one.
+ */
+ remove_switch(sw);
+ tb_switch_put(sw);
+ }
+
+ /*
+ * If the switch was not found by UUID, look for a switch on
+ * same physical port (taking possible link aggregation into
+ * account) and depth. If we found one it is definitely a stale
+ * one so remove it first.
+ */
+ sw = tb_switch_find_by_link_depth(tb, link, depth);
+ if (!sw) {
+ u8 dual_link;
+
+ dual_link = dual_link_from_link(link);
+ if (dual_link)
+ sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
+ }
+ if (sw) {
+ remove_switch(sw);
+ tb_switch_put(sw);
+ }
+
+ /* Remove existing XDomain connection if found */
+ xd = tb_xdomain_find_by_link_depth(tb, link, depth);
+ if (xd) {
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+
+ parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
+ if (!parent_sw) {
+ tb_err(tb, "failed to find parent switch for %u.%u\n",
+ link, depth);
+ return;
+ }
+
+ ret = icm->get_route(tb, link, depth, &route);
+ if (ret) {
+ tb_err(tb, "failed to find route string for switch at %u.%u\n",
+ link, depth);
+ tb_switch_put(parent_sw);
+ return;
+ }
+
+ pm_runtime_get_sync(&parent_sw->dev);
+
+ sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
+ if (!IS_ERR(sw)) {
+ sw->connection_id = pkg->connection_id;
+ sw->connection_key = pkg->connection_key;
+ sw->link = link;
+ sw->depth = depth;
+ sw->authorized = authorized;
+ sw->security_level = security_level;
+ sw->boot = boot;
+ sw->link_speed = speed_gen3 ? 20 : 10;
+ sw->link_width = dual_lane ? 2 : 1;
+ sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));
+
+ if (add_switch(parent_sw, sw))
+ tb_switch_put(sw);
+ }
+
+ pm_runtime_mark_last_busy(&parent_sw->dev);
+ pm_runtime_put_autosuspend(&parent_sw->dev);
+
+ tb_switch_put(parent_sw);
+}
+
+static void
+icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_fr_event_device_disconnected *pkg =
+ (const struct icm_fr_event_device_disconnected *)hdr;
+ struct tb_switch *sw;
+ u8 link, depth;
+
+ link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+ depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+ ICM_LINK_INFO_DEPTH_SHIFT;
+
+ if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
+ tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
+ return;
+ }
+
+ sw = tb_switch_find_by_link_depth(tb, link, depth);
+ if (!sw) {
+ tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
+ depth);
+ return;
+ }
+
+ remove_switch(sw);
+ tb_switch_put(sw);
+}
+
+static void
+icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_fr_event_xdomain_connected *pkg =
+ (const struct icm_fr_event_xdomain_connected *)hdr;
+ struct tb_xdomain *xd;
+ struct tb_switch *sw;
+ u8 link, depth;
+ u64 route;
+
+ link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+ depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+ ICM_LINK_INFO_DEPTH_SHIFT;
+
+ if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
+ tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
+ return;
+ }
+
+ route = get_route(pkg->local_route_hi, pkg->local_route_lo);
+
+ xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
+ if (xd) {
+ u8 xd_phy_port, phy_port;
+
+ xd_phy_port = phy_port_from_route(xd->route, xd->depth);
+ phy_port = phy_port_from_route(route, depth);
+
+ if (xd->depth == depth && xd_phy_port == phy_port) {
+ update_xdomain(xd, route, link);
+ tb_xdomain_put(xd);
+ return;
+ }
+
+ /*
+ * If we find an existing XDomain connection remove it
+ * now. We need to go through login handshake and
+ * everything anyway to be able to re-establish the
+ * connection.
+ */
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+
+ /*
+ * Check if an XDomain already exists in the same place as
+ * the new one and in that case remove it because it is
+ * most likely another host that got disconnected.
+ */
+ xd = tb_xdomain_find_by_link_depth(tb, link, depth);
+ if (!xd) {
+ u8 dual_link;
+
+ dual_link = dual_link_from_link(link);
+ if (dual_link)
+ xd = tb_xdomain_find_by_link_depth(tb, dual_link,
+ depth);
+ }
+ if (xd) {
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+
+ /*
+ * If the user disconnected a switch during suspend and
+ * connected another host to the same port, remove the switch
+ * first.
+ */
+ sw = tb_switch_find_by_route(tb, route);
+ if (sw) {
+ remove_switch(sw);
+ tb_switch_put(sw);
+ }
+
+ sw = tb_switch_find_by_link_depth(tb, link, depth);
+ if (!sw) {
+ tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
+ depth);
+ return;
+ }
+
+ add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
+ depth);
+ tb_switch_put(sw);
+}
+
+static void
+icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_fr_event_xdomain_disconnected *pkg =
+ (const struct icm_fr_event_xdomain_disconnected *)hdr;
+ struct tb_xdomain *xd;
+
+ /*
+ * If the connection is through one or multiple devices, the
+ * XDomain device is removed along with them so it is fine if we
+ * cannot find it here.
+ */
+ xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
+ if (xd) {
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+}
+
+static int icm_tr_cio_reset(struct tb *tb)
+{
+ return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1));
+}
+
+static int
+icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ size_t *nboot_acl, bool *rpm)
+{
+ struct icm_tr_pkg_driver_ready_response reply;
+ struct icm_pkg_driver_ready request = {
+ .hdr.code = ICM_DRIVER_READY,
+ };
+ int ret;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, 10, 2000);
+ if (ret)
+ return ret;
+
+ if (security_level)
+ *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
+ if (nboot_acl)
+ *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
+ ICM_TR_INFO_BOOT_ACL_SHIFT;
+ if (rpm)
+ *rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);
+
+ return 0;
+}
+
+static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+ struct icm_tr_pkg_approve_device request;
+ struct icm_tr_pkg_approve_device reply;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+ request.hdr.code = ICM_APPROVE_DEVICE;
+ request.route_lo = sw->config.route_lo;
+ request.route_hi = sw->config.route_hi;
+ request.connection_id = sw->connection_id;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+ tb_warn(tb, "PCIe tunnel creation failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+ struct icm_tr_pkg_add_device_key_response reply;
+ struct icm_tr_pkg_add_device_key request;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+ request.hdr.code = ICM_ADD_DEVICE_KEY;
+ request.route_lo = sw->config.route_lo;
+ request.route_hi = sw->config.route_hi;
+ request.connection_id = sw->connection_id;
+ memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+ tb_warn(tb, "Adding key to switch failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
+ const u8 *challenge, u8 *response)
+{
+ struct icm_tr_pkg_challenge_device_response reply;
+ struct icm_tr_pkg_challenge_device request;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+ request.hdr.code = ICM_CHALLENGE_DEVICE;
+ request.route_lo = sw->config.route_lo;
+ request.route_hi = sw->config.route_hi;
+ request.connection_id = sw->connection_id;
+ memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR)
+ return -EKEYREJECTED;
+ if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
+ return -ENOKEY;
+
+ memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
+
+ return 0;
+}
+
+static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ struct icm_tr_pkg_approve_xdomain_response reply;
+ struct icm_tr_pkg_approve_xdomain request;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.hdr.code = ICM_APPROVE_XDOMAIN;
+ request.route_hi = upper_32_bits(xd->route);
+ request.route_lo = lower_32_bits(xd->route);
+ request.transmit_path = xd->transmit_path;
+ request.transmit_ring = xd->transmit_ring;
+ request.receive_path = xd->receive_path;
+ request.receive_ring = xd->receive_ring;
+ memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR)
+ return -EIO;
+
+ return 0;
+}
+
+static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
+ int stage)
+{
+ struct icm_tr_pkg_disconnect_xdomain_response reply;
+ struct icm_tr_pkg_disconnect_xdomain request;
+ int ret;
+
+ memset(&request, 0, sizeof(request));
+ request.hdr.code = ICM_DISCONNECT_XDOMAIN;
+ request.stage = stage;
+ request.route_hi = upper_32_bits(xd->route);
+ request.route_lo = lower_32_bits(xd->route);
+ memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR)
+ return -EIO;
+
+ return 0;
+}
+
+static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ int ret;
+
+ ret = icm_tr_xdomain_tear_down(tb, xd, 1);
+ if (ret)
+ return ret;
+
+ usleep_range(10, 50);
+ return icm_tr_xdomain_tear_down(tb, xd, 2);
+}
+
+static void
+__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
+ bool force_rtd3)
+{
+ const struct icm_tr_event_device_connected *pkg =
+ (const struct icm_tr_event_device_connected *)hdr;
+ bool authorized, boot, dual_lane, speed_gen3;
+ enum tb_security_level security_level;
+ struct tb_switch *sw, *parent_sw;
+ struct tb_xdomain *xd;
+ u64 route;
+
+ icm_postpone_rescan(tb);
+
+ /*
+ * Currently we don't use the QoS information coming with the
+ * device connected message so simply ignore that extra
+ * packet for now.
+ */
+ if (pkg->hdr.packet_id)
+ return;
+
+ route = get_route(pkg->route_hi, pkg->route_lo);
+ authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
+ security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
+ ICM_FLAGS_SLEVEL_SHIFT;
+ boot = pkg->link_info & ICM_LINK_INFO_BOOT;
+ dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
+ speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
+
+ if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
+ tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
+ route);
+ return;
+ }
+
+ sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
+ if (sw) {
+ /* Update the switch if it is still in the same place */
+ if (tb_route(sw) == route && !!sw->authorized == authorized) {
+ parent_sw = tb_to_switch(sw->dev.parent);
+ update_switch(parent_sw, sw, route, pkg->connection_id,
+ 0, 0, 0, boot);
+ tb_switch_put(sw);
+ return;
+ }
+
+ remove_switch(sw);
+ tb_switch_put(sw);
+ }
+
+ /* Another switch with the same address */
+ sw = tb_switch_find_by_route(tb, route);
+ if (sw) {
+ remove_switch(sw);
+ tb_switch_put(sw);
+ }
+
+ /* XDomain connection with the same address */
+ xd = tb_xdomain_find_by_route(tb, route);
+ if (xd) {
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+
+ parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
+ if (!parent_sw) {
+ tb_err(tb, "failed to find parent switch for %llx\n", route);
+ return;
+ }
+
+ pm_runtime_get_sync(&parent_sw->dev);
+
+ sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
+ if (!IS_ERR(sw)) {
+ sw->connection_id = pkg->connection_id;
+ sw->authorized = authorized;
+ sw->security_level = security_level;
+ sw->boot = boot;
+ sw->link_speed = speed_gen3 ? 20 : 10;
+ sw->link_width = dual_lane ? 2 : 1;
+ sw->rpm = force_rtd3;
+ if (!sw->rpm)
+ sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
+ sizeof(pkg->ep_name));
+
+ if (add_switch(parent_sw, sw))
+ tb_switch_put(sw);
+ }
+
+ pm_runtime_mark_last_busy(&parent_sw->dev);
+ pm_runtime_put_autosuspend(&parent_sw->dev);
+
+ tb_switch_put(parent_sw);
+}
+
+static void
+icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ __icm_tr_device_connected(tb, hdr, false);
+}
+
+static void
+icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_tr_event_device_disconnected *pkg =
+ (const struct icm_tr_event_device_disconnected *)hdr;
+ struct tb_switch *sw;
+ u64 route;
+
+ route = get_route(pkg->route_hi, pkg->route_lo);
+
+ sw = tb_switch_find_by_route(tb, route);
+ if (!sw) {
+ tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
+ return;
+ }
+
+ remove_switch(sw);
+ tb_switch_put(sw);
+}
+
+static void
+icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_tr_event_xdomain_connected *pkg =
+ (const struct icm_tr_event_xdomain_connected *)hdr;
+ struct tb_xdomain *xd;
+ struct tb_switch *sw;
+ u64 route;
+
+ if (!tb->root_switch)
+ return;
+
+ route = get_route(pkg->local_route_hi, pkg->local_route_lo);
+
+ xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
+ if (xd) {
+ if (xd->route == route) {
+ update_xdomain(xd, route, 0);
+ tb_xdomain_put(xd);
+ return;
+ }
+
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+
+ /* An existing xdomain with the same address */
+ xd = tb_xdomain_find_by_route(tb, route);
+ if (xd) {
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+
+ /*
+ * If the user disconnected a switch during suspend and
+ * connected another host to the same port, remove the switch
+ * first.
+ */
+ sw = tb_switch_find_by_route(tb, route);
+ if (sw) {
+ remove_switch(sw);
+ tb_switch_put(sw);
+ }
+
+ sw = tb_switch_find_by_route(tb, get_parent_route(route));
+ if (!sw) {
+ tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
+ return;
+ }
+
+ add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
+ tb_switch_put(sw);
+}
+
+static void
+icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_tr_event_xdomain_disconnected *pkg =
+ (const struct icm_tr_event_xdomain_disconnected *)hdr;
+ struct tb_xdomain *xd;
+ u64 route;
+
+ route = get_route(pkg->route_hi, pkg->route_lo);
+
+ xd = tb_xdomain_find_by_route(tb, route);
+ if (xd) {
+ remove_xdomain(xd);
+ tb_xdomain_put(xd);
+ }
+}
+
+static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
+{
+ struct pci_dev *parent;
+
+ parent = pci_upstream_bridge(pdev);
+ while (parent) {
+ if (!pci_is_pcie(parent))
+ return NULL;
+ if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
+ break;
+ parent = pci_upstream_bridge(parent);
+ }
+
+ if (!parent)
+ return NULL;
+
+ switch (parent->device) {
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+ return parent;
+ }
+
+ return NULL;
+}
+
+static bool icm_ar_is_supported(struct tb *tb)
+{
+ struct pci_dev *upstream_port;
+ struct icm *icm = tb_priv(tb);
+
+ /*
+ * Starting from Alpine Ridge we can use ICM on Apple machines
+ * as well. We just need to reset and re-enable it first.
+ * However, only start it if explicitly asked by the user.
+ */
+ if (icm_firmware_running(tb->nhi))
+ return true;
+ if (!start_icm)
+ return false;
+
+ /*
+ * Find the upstream PCIe port in case we need to do reset
+ * through its vendor specific registers.
+ */
+ upstream_port = get_upstream_port(tb->nhi->pdev);
+ if (upstream_port) {
+ int cap;
+
+ cap = pci_find_ext_capability(upstream_port,
+ PCI_EXT_CAP_ID_VNDR);
+ if (cap > 0) {
+ icm->upstream_port = upstream_port;
+ icm->vnd_cap = cap;
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int icm_ar_cio_reset(struct tb *tb)
+{
+ return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9));
+}
+
+static int icm_ar_get_mode(struct tb *tb)
+{
+ struct tb_nhi *nhi = tb->nhi;
+ int retries = 60;
+ u32 val;
+
+ do {
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ if (val & REG_FW_STS_NVM_AUTH_DONE)
+ break;
+ msleep(50);
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
+ return -ENODEV;
+ }
+
+ return nhi_mailbox_mode(nhi);
+}
+
+static int
+icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ size_t *nboot_acl, bool *rpm)
+{
+ struct icm_ar_pkg_driver_ready_response reply;
+ struct icm_pkg_driver_ready request = {
+ .hdr.code = ICM_DRIVER_READY,
+ };
+ int ret;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (security_level)
+ *security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
+ if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
+ *nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
+ ICM_AR_INFO_BOOT_ACL_SHIFT;
+ if (rpm)
+ *rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);
+
+ return 0;
+}
+
+static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+{
+ struct icm_ar_pkg_get_route_response reply;
+ struct icm_ar_pkg_get_route request = {
+ .hdr = { .code = ICM_GET_ROUTE },
+ .link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
+ };
+ int ret;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR)
+ return -EIO;
+
+ *route = get_route(reply.route_hi, reply.route_lo);
+ return 0;
+}
+
+static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
+{
+ struct icm_ar_pkg_preboot_acl_response reply;
+ struct icm_ar_pkg_preboot_acl request = {
+ .hdr = { .code = ICM_PREBOOT_ACL },
+ };
+ int ret, i;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR)
+ return -EIO;
+
+ for (i = 0; i < nuuids; i++) {
+ u32 *uuid = (u32 *)&uuids[i];
+
+ uuid[0] = reply.acl[i].uuid_lo;
+ uuid[1] = reply.acl[i].uuid_hi;
+
+ if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
+ /* Map empty entries to null UUID */
+ uuid[0] = 0;
+ uuid[1] = 0;
+ } else if (uuid[0] != 0 || uuid[1] != 0) {
+ /* Upper two DWs are always ones */
+ uuid[2] = 0xffffffff;
+ uuid[3] = 0xffffffff;
+ }
+ }
+
+ return ret;
+}
+
+static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
+ size_t nuuids)
+{
+ struct icm_ar_pkg_preboot_acl_response reply;
+ struct icm_ar_pkg_preboot_acl request = {
+ .hdr = {
+ .code = ICM_PREBOOT_ACL,
+ .flags = ICM_FLAGS_WRITE,
+ },
+ };
+ int ret, i;
+
+ for (i = 0; i < nuuids; i++) {
+ const u32 *uuid = (const u32 *)&uuids[i];
+
+ if (uuid_is_null(&uuids[i])) {
+ /*
+ * Map null UUID to the empty (all ones) entries
+ * for ICM.
+ */
+ request.acl[i].uuid_lo = 0xffffffff;
+ request.acl[i].uuid_hi = 0xffffffff;
+ } else {
+ /* Two high DWs need to be set to all ones */
+ if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
+ return -EINVAL;
+
+ request.acl[i].uuid_lo = uuid[0];
+ request.acl[i].uuid_hi = uuid[1];
+ }
+ }
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, ICM_TIMEOUT);
+ if (ret)
+ return ret;
+
+ if (reply.hdr.flags & ICM_FLAGS_ERROR)
+ return -EIO;
+
+ return 0;
+}
+
+static int
+icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ size_t *nboot_acl, bool *rpm)
+{
+ struct icm_tr_pkg_driver_ready_response reply;
+ struct icm_pkg_driver_ready request = {
+ .hdr.code = ICM_DRIVER_READY,
+ };
+ int ret;
+
+ memset(&reply, 0, sizeof(reply));
+ ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+ 1, ICM_RETRIES, 20000);
+ if (ret)
+ return ret;
+
+ /* Ice Lake always supports RTD3 */
+ if (rpm)
+ *rpm = true;
+
+ return 0;
+}
+
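+/* Build the root switch UUID from the NHI vendor specific registers */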
+static void icm_icl_set_uuid(struct tb *tb)
+{
+ struct tb_nhi *nhi = tb->nhi;
+ u32 uuid[4];
+
+ pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]);
+ pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]);
+ uuid[2] = 0xffffffff;
+ uuid[3] = 0xffffffff;
+
+ tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+}
+
+static void
+icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ __icm_tr_device_connected(tb, hdr, true);
+}
+
+static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+ const struct icm_icl_event_rtd3_veto *pkg =
+ (const struct icm_icl_event_rtd3_veto *)hdr;
+
+ tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason);
+
+ if (pkg->veto_reason)
+ icm_veto_begin(tb);
+ else
+ icm_veto_end(tb);
+}
+
+static bool icm_tgl_is_supported(struct tb *tb)
+{
+ u32 val;
+
+ /*
+ * If the firmware is not running, use the software CM. This platform
+ * should fully support both.
+ */
+ val = ioread32(tb->nhi->iobase + REG_FW_STS);
+ return !!(val & REG_FW_STS_NVM_AUTH_DONE);
+}
+
+static void icm_handle_notification(struct work_struct *work)
+{
+ struct icm_notification *n = container_of(work, typeof(*n), work);
+ struct tb *tb = n->tb;
+ struct icm *icm = tb_priv(tb);
+
+ mutex_lock(&tb->lock);
+
+ /*
+ * When the domain is stopped we flush its workqueue but before
+ * that the root switch is removed. In that case we should treat
+ * the queued events as being canceled.
+ */
+ if (tb->root_switch) {
+ switch (n->pkg->code) {
+ case ICM_EVENT_DEVICE_CONNECTED:
+ icm->device_connected(tb, n->pkg);
+ break;
+ case ICM_EVENT_DEVICE_DISCONNECTED:
+ icm->device_disconnected(tb, n->pkg);
+ break;
+ case ICM_EVENT_XDOMAIN_CONNECTED:
+ icm->xdomain_connected(tb, n->pkg);
+ break;
+ case ICM_EVENT_XDOMAIN_DISCONNECTED:
+ icm->xdomain_disconnected(tb, n->pkg);
+ break;
+ case ICM_EVENT_RTD3_VETO:
+ icm->rtd3_veto(tb, n->pkg);
+ break;
+ }
+ }
+
+ mutex_unlock(&tb->lock);
+
+ kfree(n->pkg);
+ kfree(n);
+}
+
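+/*
+ * ICM events are deferred to the domain workqueue so that
+ * icm_handle_notification() can process them under the domain lock.
+ */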
+static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size)
+{
+ struct icm_notification *n;
+
+ n = kmalloc(sizeof(*n), GFP_KERNEL);
+ if (!n)
+ return;
+
+ INIT_WORK(&n->work, icm_handle_notification);
+ n->pkg = kmemdup(buf, size, GFP_KERNEL);
+ n->tb = tb;
+
+ queue_work(tb->wq, &n->work);
+}
+
+static int
+__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
+ size_t *nboot_acl, bool *rpm)
+{
+ struct icm *icm = tb_priv(tb);
+ unsigned int retries = 50;
+ int ret;
+
+ ret = icm->driver_ready(tb, security_level, nboot_acl, rpm);
+ if (ret) {
+ tb_err(tb, "failed to send driver ready to ICM\n");
+ return ret;
+ }
+
+ /*
+ * Hold on here until the switch config space is accessible so
+ * that we can read root switch config successfully.
+ */
+ do {
+ struct tb_cfg_result res;
+ u32 tmp;
+
+ res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
+ 0, 1, 100);
+ if (!res.err)
+ return 0;
+
+ msleep(50);
+ } while (--retries);
+
+ tb_err(tb, "failed to read root switch config space, giving up\n");
+ return -ETIMEDOUT;
+}
+
+static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
+{
+ struct icm *icm = tb_priv(tb);
+ u32 val;
+
+ if (!icm->upstream_port)
+ return -ENODEV;
+
+ /* Put ARC to wait for CIO reset event to happen */
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ val |= REG_FW_STS_CIO_RESET_REQ;
+ iowrite32(val, nhi->iobase + REG_FW_STS);
+
+ /* Re-start ARC */
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ val |= REG_FW_STS_ICM_EN_INVERT;
+ val |= REG_FW_STS_ICM_EN_CPU;
+ iowrite32(val, nhi->iobase + REG_FW_STS);
+
+ /* Trigger CIO reset now */
+ return icm->cio_reset(tb);
+}
+
+static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
+{
+ unsigned int retries = 10;
+ int ret;
+ u32 val;
+
+ /* Check if the ICM firmware is already running */
+ if (icm_firmware_running(nhi))
+ return 0;
+
+ dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
+
+ ret = icm_firmware_reset(tb, nhi);
+ if (ret)
+ return ret;
+
+ /* Wait until the ICM firmware tells us it is up and running */
+ do {
+ /* Check that the ICM firmware is running */
+ val = ioread32(nhi->iobase + REG_FW_STS);
+ if (val & REG_FW_STS_NVM_AUTH_DONE)
+ return 0;
+
+ msleep(300);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
+
+static int icm_reset_phy_port(struct tb *tb, int phy_port)
+{
+ struct icm *icm = tb_priv(tb);
+ u32 state0, state1;
+ int port0, port1;
+ u32 val0, val1;
+ int ret;
+
+ if (!icm->upstream_port)
+ return 0;
+
+ if (phy_port) {
+ port0 = 3;
+ port1 = 4;
+ } else {
+ port0 = 1;
+ port1 = 2;
+ }
+
+ /*
+ * Read link status of both null ports belonging to a single
+ * physical port.
+ */
+ ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
+ if (ret)
+ return ret;
+ ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
+ if (ret)
+ return ret;
+
+ state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
+ state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
+ state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
+ state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
+
+ /* If they are both up we need to reset them now */
+ if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
+ return 0;
+
+ val0 |= PHY_PORT_CS1_LINK_DISABLE;
+ ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
+ if (ret)
+ return ret;
+
+ val1 |= PHY_PORT_CS1_LINK_DISABLE;
+ ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
+ if (ret)
+ return ret;
+
+ /* Wait a bit and then re-enable both ports */
+ usleep_range(10, 100);
+
+ ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
+ if (ret)
+ return ret;
+ ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
+ if (ret)
+ return ret;
+
+ val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
+ ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
+ if (ret)
+ return ret;
+
+ val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
+ return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
+}
+
+static int icm_firmware_init(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ ret = icm_firmware_start(tb, nhi);
+ if (ret) {
+ dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
+ return ret;
+ }
+
+ if (icm->get_mode) {
+ ret = icm->get_mode(tb);
+
+ switch (ret) {
+ case NHI_FW_SAFE_MODE:
+ icm->safe_mode = true;
+ break;
+
+ case NHI_FW_CM_MODE:
+ /* Ask ICM to accept all Thunderbolt devices */
+ nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
+ break;
+
+ default:
+ if (ret < 0)
+ return ret;
+
+ tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
+ return -ENODEV;
+ }
+ }
+
+ /*
+ * Reset both physical ports if there is anything connected to
+ * them already.
+ */
+ ret = icm_reset_phy_port(tb, 0);
+ if (ret)
+ dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
+ ret = icm_reset_phy_port(tb, 1);
+ if (ret)
+ dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
+
+ return 0;
+}
+
+static int icm_driver_ready(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+ int ret;
+
+ ret = icm_firmware_init(tb);
+ if (ret)
+ return ret;
+
+ if (icm->safe_mode) {
+ tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
+ tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
+ tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
+ return 0;
+ }
+
+ ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl,
+ &icm->rpm);
+ if (ret)
+ return ret;
+
+ /*
+ * Make sure the number of supported preboot ACL matches what we
+ * expect or disable the whole feature.
+ */
+ if (tb->nboot_acl > icm->max_boot_acl)
+ tb->nboot_acl = 0;
+
+ return 0;
+}
+
+static int icm_suspend(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ if (icm->save_devices)
+ icm->save_devices(tb);
+
+ nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
+ return 0;
+}
+
+/*
+ * Mark all switches (except root switch) below this one unplugged. ICM
+ * firmware will send us an updated list of switches after we have sent
+ * it the driver ready command. If a switch is not in that list it will be
+ * removed when we perform the rescan.
+ */
+static void icm_unplug_children(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ if (tb_route(sw))
+ sw->is_unplugged = true;
+
+ tb_switch_for_each_port(sw, port) {
+ if (port->xdomain)
+ port->xdomain->is_unplugged = true;
+ else if (tb_port_has_remote(port))
+ icm_unplug_children(port->remote->sw);
+ }
+}
+
+static int complete_rpm(struct device *dev, void *data)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ if (sw)
+ complete(&sw->rpm_complete);
+ return 0;
+}
+
+static void remove_unplugged_switch(struct tb_switch *sw)
+{
+ struct device *parent = get_device(sw->dev.parent);
+
+ pm_runtime_get_sync(parent);
+
+ /*
+ * Signal rpm_complete for this switch and the switches below it
+ * because tb_switch_remove() calls pm_runtime_get_sync(), which then
+ * waits for it.
+ */
+ complete_rpm(&sw->dev, NULL);
+ bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
+ tb_switch_remove(sw);
+
+ pm_runtime_mark_last_busy(parent);
+ pm_runtime_put_autosuspend(parent);
+
+ put_device(parent);
+}
+
+static void icm_free_unplugged_children(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (port->xdomain && port->xdomain->is_unplugged) {
+ tb_xdomain_remove(port->xdomain);
+ port->xdomain = NULL;
+ } else if (tb_port_has_remote(port)) {
+ if (port->remote->sw->is_unplugged) {
+ remove_unplugged_switch(port->remote->sw);
+ port->remote = NULL;
+ } else {
+ icm_free_unplugged_children(port->remote->sw);
+ }
+ }
+ }
+}
+
+static void icm_rescan_work(struct work_struct *work)
+{
+ struct icm *icm = container_of(work, struct icm, rescan_work.work);
+ struct tb *tb = icm_to_tb(icm);
+
+ mutex_lock(&tb->lock);
+ if (tb->root_switch)
+ icm_free_unplugged_children(tb->root_switch);
+ mutex_unlock(&tb->lock);
+}
+
+static void icm_complete(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ if (tb->nhi->going_away)
+ return;
+
+ /*
+ * If RTD3 was vetoed before we entered system suspend allow it
+ * again now before driver ready is sent. Firmware sends a new RTD3
+ * veto if it is still the case after we have sent it the driver ready
+ * command.
+ */
+ icm_veto_end(tb);
+ icm_unplug_children(tb->root_switch);
+
+ /*
+ * Now all existing children should be resumed; start events
+ * from ICM to get updated status.
+ */
+ __icm_driver_ready(tb, NULL, NULL, NULL);
+
+ /*
+ * We do not get notifications of devices that have been
+ * unplugged during suspend so schedule a rescan to clean them up
+ * if any.
+ */
+ queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
+}
+
+static int icm_runtime_suspend(struct tb *tb)
+{
+ nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
+ return 0;
+}
+
+static int icm_runtime_suspend_switch(struct tb_switch *sw)
+{
+ if (tb_route(sw))
+ reinit_completion(&sw->rpm_complete);
+ return 0;
+}
+
+static int icm_runtime_resume_switch(struct tb_switch *sw)
+{
+ if (tb_route(sw)) {
+ if (!wait_for_completion_timeout(&sw->rpm_complete,
+ msecs_to_jiffies(500))) {
+ dev_dbg(&sw->dev, "runtime resuming timed out\n");
+ }
+ }
+ return 0;
+}
+
+static int icm_runtime_resume(struct tb *tb)
+{
+ /*
+ * We can reuse the same resume functionality as with system
+ * suspend.
+ */
+ icm_complete(tb);
+ return 0;
+}
+
+static int icm_start(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+ int ret;
+
+ if (icm->safe_mode)
+ tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
+ else
+ tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
+ if (IS_ERR(tb->root_switch))
+ return PTR_ERR(tb->root_switch);
+
+ tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm;
+ tb->root_switch->rpm = icm->rpm;
+
+ if (icm->set_uuid)
+ icm->set_uuid(tb);
+
+ ret = tb_switch_add(tb->root_switch);
+ if (ret) {
+ tb_switch_put(tb->root_switch);
+ tb->root_switch = NULL;
+ }
+
+ return ret;
+}
+
+static void icm_stop(struct tb *tb)
+{
+ struct icm *icm = tb_priv(tb);
+
+ cancel_delayed_work(&icm->rescan_work);
+ tb_switch_remove(tb->root_switch);
+ tb->root_switch = NULL;
+ nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
+}
+
+static int icm_disconnect_pcie_paths(struct tb *tb)
+{
+ return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
+}
+
+/* Falcon Ridge */
+static const struct tb_cm_ops icm_fr_ops = {
+ .driver_ready = icm_driver_ready,
+ .start = icm_start,
+ .stop = icm_stop,
+ .suspend = icm_suspend,
+ .complete = icm_complete,
+ .handle_event = icm_handle_event,
+ .approve_switch = icm_fr_approve_switch,
+ .add_switch_key = icm_fr_add_switch_key,
+ .challenge_switch_key = icm_fr_challenge_switch_key,
+ .disconnect_pcie_paths = icm_disconnect_pcie_paths,
+ .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
+ .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
+};
+
+/* Alpine Ridge */
+static const struct tb_cm_ops icm_ar_ops = {
+ .driver_ready = icm_driver_ready,
+ .start = icm_start,
+ .stop = icm_stop,
+ .suspend = icm_suspend,
+ .complete = icm_complete,
+ .runtime_suspend = icm_runtime_suspend,
+ .runtime_resume = icm_runtime_resume,
+ .runtime_suspend_switch = icm_runtime_suspend_switch,
+ .runtime_resume_switch = icm_runtime_resume_switch,
+ .handle_event = icm_handle_event,
+ .get_boot_acl = icm_ar_get_boot_acl,
+ .set_boot_acl = icm_ar_set_boot_acl,
+ .approve_switch = icm_fr_approve_switch,
+ .add_switch_key = icm_fr_add_switch_key,
+ .challenge_switch_key = icm_fr_challenge_switch_key,
+ .disconnect_pcie_paths = icm_disconnect_pcie_paths,
+ .approve_xdomain_paths = icm_fr_approve_xdomain_paths,
+ .disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
+};
+
+/* Titan Ridge */
+static const struct tb_cm_ops icm_tr_ops = {
+ .driver_ready = icm_driver_ready,
+ .start = icm_start,
+ .stop = icm_stop,
+ .suspend = icm_suspend,
+ .complete = icm_complete,
+ .runtime_suspend = icm_runtime_suspend,
+ .runtime_resume = icm_runtime_resume,
+ .runtime_suspend_switch = icm_runtime_suspend_switch,
+ .runtime_resume_switch = icm_runtime_resume_switch,
+ .handle_event = icm_handle_event,
+ .get_boot_acl = icm_ar_get_boot_acl,
+ .set_boot_acl = icm_ar_set_boot_acl,
+ .approve_switch = icm_tr_approve_switch,
+ .add_switch_key = icm_tr_add_switch_key,
+ .challenge_switch_key = icm_tr_challenge_switch_key,
+ .disconnect_pcie_paths = icm_disconnect_pcie_paths,
+ .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
+ .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
+};
+
+/* Ice Lake */
+static const struct tb_cm_ops icm_icl_ops = {
+ .driver_ready = icm_driver_ready,
+ .start = icm_start,
+ .stop = icm_stop,
+ .complete = icm_complete,
+ .runtime_suspend = icm_runtime_suspend,
+ .runtime_resume = icm_runtime_resume,
+ .handle_event = icm_handle_event,
+ .approve_xdomain_paths = icm_tr_approve_xdomain_paths,
+ .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
+};
+
+struct tb *icm_probe(struct tb_nhi *nhi)
+{
+ struct icm *icm;
+ struct tb *tb;
+
+ tb = tb_domain_alloc(nhi, sizeof(struct icm));
+ if (!tb)
+ return NULL;
+
+ icm = tb_priv(tb);
+ INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
+ mutex_init(&icm->request_lock);
+
+ switch (nhi->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+ icm->can_upgrade_nvm = true;
+ icm->is_supported = icm_fr_is_supported;
+ icm->get_route = icm_fr_get_route;
+ icm->save_devices = icm_fr_save_devices;
+ icm->driver_ready = icm_fr_driver_ready;
+ icm->device_connected = icm_fr_device_connected;
+ icm->device_disconnected = icm_fr_device_disconnected;
+ icm->xdomain_connected = icm_fr_xdomain_connected;
+ icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
+ tb->cm_ops = &icm_fr_ops;
+ break;
+
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
+ icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
+ /*
+ * NVM upgrade has not been tested on Apple systems and
+ * they don't provide images publicly either. To be on
+ * the safe side prevent root switch NVM upgrade on Macs
+ * for now.
+ */
+ icm->can_upgrade_nvm = !x86_apple_machine;
+ icm->is_supported = icm_ar_is_supported;
+ icm->cio_reset = icm_ar_cio_reset;
+ icm->get_mode = icm_ar_get_mode;
+ icm->get_route = icm_ar_get_route;
+ icm->save_devices = icm_fr_save_devices;
+ icm->driver_ready = icm_ar_driver_ready;
+ icm->device_connected = icm_fr_device_connected;
+ icm->device_disconnected = icm_fr_device_disconnected;
+ icm->xdomain_connected = icm_fr_xdomain_connected;
+ icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
+ tb->cm_ops = &icm_ar_ops;
+ break;
+
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
+ icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
+ icm->can_upgrade_nvm = !x86_apple_machine;
+ icm->is_supported = icm_ar_is_supported;
+ icm->cio_reset = icm_tr_cio_reset;
+ icm->get_mode = icm_ar_get_mode;
+ icm->driver_ready = icm_tr_driver_ready;
+ icm->device_connected = icm_tr_device_connected;
+ icm->device_disconnected = icm_tr_device_disconnected;
+ icm->xdomain_connected = icm_tr_xdomain_connected;
+ icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+ tb->cm_ops = &icm_tr_ops;
+ break;
+
+ case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI1:
+ icm->is_supported = icm_fr_is_supported;
+ icm->driver_ready = icm_icl_driver_ready;
+ icm->set_uuid = icm_icl_set_uuid;
+ icm->device_connected = icm_icl_device_connected;
+ icm->device_disconnected = icm_tr_device_disconnected;
+ icm->xdomain_connected = icm_tr_xdomain_connected;
+ icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+ icm->rtd3_veto = icm_icl_rtd3_veto;
+ tb->cm_ops = &icm_icl_ops;
+ break;
+
+ case PCI_DEVICE_ID_INTEL_TGL_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_NHI1:
+ case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
+ icm->is_supported = icm_tgl_is_supported;
+ icm->driver_ready = icm_icl_driver_ready;
+ icm->set_uuid = icm_icl_set_uuid;
+ icm->device_connected = icm_icl_device_connected;
+ icm->device_disconnected = icm_tr_device_disconnected;
+ icm->xdomain_connected = icm_tr_xdomain_connected;
+ icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+ icm->rtd3_veto = icm_icl_rtd3_veto;
+ tb->cm_ops = &icm_icl_ops;
+ break;
+
+ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
+ icm->is_supported = icm_tgl_is_supported;
+ icm->get_mode = icm_ar_get_mode;
+ icm->driver_ready = icm_tr_driver_ready;
+ icm->device_connected = icm_tr_device_connected;
+ icm->device_disconnected = icm_tr_device_disconnected;
+ icm->xdomain_connected = icm_tr_xdomain_connected;
+ icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
+ tb->cm_ops = &icm_tr_ops;
+ break;
+ }
+
+ if (!icm->is_supported || !icm->is_supported(tb)) {
+ dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
+ tb_domain_put(tb);
+ return NULL;
+ }
+
+ return tb;
+}
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
new file mode 100644
index 000000000..41e6c738f
--- /dev/null
+++ b/drivers/thunderbolt/lc.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt link controller support
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include "tb.h"
+
+/**
+ * tb_lc_read_uuid() - Read switch UUID from link controller common register
+ * @sw: Switch whose UUID is read
+ * @uuid: UUID is placed here
+ */
+int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
+{
+ if (!sw->cap_lc)
+ return -EINVAL;
+ return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
+}
+
+static int read_lc_desc(struct tb_switch *sw, u32 *desc)
+{
+ if (!sw->cap_lc)
+ return -EINVAL;
+ return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
+}
+
+static int find_port_lc_cap(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ int start, phys, ret, size;
+ u32 desc;
+
+ ret = read_lc_desc(sw, &desc);
+ if (ret)
+ return ret;
+
+ /* Start of port LC registers */
+ start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
+ size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
+ phys = tb_phy_port_from_link(port->port);
+
+ return sw->cap_lc + start + phys * size;
+}
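+
+/*
+ * Worked example (illustrative only, the values are hypothetical): if the
+ * LC descriptor reports a start offset of 0x10 and a per-port size of
+ * 0x20, the helper above returns sw->cap_lc + 0x10 + 1 * 0x20 for a port
+ * whose physical port number resolves to 1, i.e. the base of that port's
+ * LC register block.
+ */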
+
+static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
+{
+ bool upstream = tb_is_upstream_port(port);
+ struct tb_switch *sw = port->sw;
+ u32 ctrl, lane;
+ int cap, ret;
+
+ if (sw->generation < 2)
+ return 0;
+
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return cap;
+
+ ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+ if (ret)
+ return ret;
+
+ /* Resolve correct lane */
+ if (port->port % 2)
+ lane = TB_LC_SX_CTRL_L1C;
+ else
+ lane = TB_LC_SX_CTRL_L2C;
+
+ if (configured) {
+ ctrl |= lane;
+ if (upstream)
+ ctrl |= TB_LC_SX_CTRL_UPSTREAM;
+ } else {
+ ctrl &= ~lane;
+ if (upstream)
+ ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
+ }
+
+ return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+}
+
+/**
+ * tb_lc_configure_port() - Let LC know about configured port
+ * @port: Port that is set as configured
+ *
+ * Sets the port configured for power management purposes.
+ */
+int tb_lc_configure_port(struct tb_port *port)
+{
+ return tb_lc_set_port_configured(port, true);
+}
+
+/**
+ * tb_lc_unconfigure_port() - Let LC know about unconfigured port
+ * @port: Port that is set as unconfigured
+ *
+ * Sets the port unconfigured for power management purposes.
+ */
+void tb_lc_unconfigure_port(struct tb_port *port)
+{
+ tb_lc_set_port_configured(port, false);
+}
+
+static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
+{
+ struct tb_switch *sw = port->sw;
+ u32 ctrl, lane;
+ int cap, ret;
+
+ if (sw->generation < 2)
+ return 0;
+
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return cap;
+
+ ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+ if (ret)
+ return ret;
+
+ /* Resolve correct lane */
+ if (port->port % 2)
+ lane = TB_LC_SX_CTRL_L1D;
+ else
+ lane = TB_LC_SX_CTRL_L2D;
+
+ if (configure)
+ ctrl |= lane;
+ else
+ ctrl &= ~lane;
+
+ return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
+}
+
+/**
+ * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
+ * @port: Switch downstream port connected to another host
+ *
+ * Sets the lane configured for XDomain accordingly so that the LC knows
+ * about this. Returns %0 on success and negative errno on failure.
+ */
+int tb_lc_configure_xdomain(struct tb_port *port)
+{
+ return tb_lc_set_xdomain_configured(port, true);
+}
+
+/**
+ * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
+ * @port: Switch downstream port that was connected to another host
+ *
+ * Unsets the lane XDomain configuration.
+ */
+void tb_lc_unconfigure_xdomain(struct tb_port *port)
+{
+ tb_lc_set_xdomain_configured(port, false);
+}
+
+static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
+ unsigned int flags)
+{
+ u32 ctrl;
+ int ret;
+
+ /*
+ * Set the wake sources requested in flags: device connect/disconnect,
+ * USB4 wake and PCIe wake (wakes coming from another router).
+ */
+ ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
+ offset + TB_LC_SX_CTRL, 1);
+ if (ret)
+ return ret;
+
+ ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WOP |
+ TB_LC_SX_CTRL_WOU4);
+
+ if (flags & TB_WAKE_ON_CONNECT)
+ ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
+ if (flags & TB_WAKE_ON_USB4)
+ ctrl |= TB_LC_SX_CTRL_WOU4;
+ if (flags & TB_WAKE_ON_PCIE)
+ ctrl |= TB_LC_SX_CTRL_WOP;
+
+ return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
+}
+
+/**
+ * tb_lc_set_wake() - Enable/disable wake
+ * @sw: Switch whose wakes to configure
+ * @flags: Wakeup flags (%0 to disable)
+ *
+ * Sets the wake bits accordingly for each link controller.
+ */
+int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
+{
+ int start, size, nlc, ret, i;
+ u32 desc;
+
+ if (sw->generation < 2)
+ return 0;
+
+ if (!tb_route(sw))
+ return 0;
+
+ ret = read_lc_desc(sw, &desc);
+ if (ret)
+ return ret;
+
+ /* Figure out number of link controllers */
+ nlc = desc & TB_LC_DESC_NLC_MASK;
+ start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
+ size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
+
+ /* For each link controller set the wake bits */
+ for (i = 0; i < nlc; i++) {
+ unsigned int offset = sw->cap_lc + start + i * size;
+
+ ret = tb_lc_set_wake_one(sw, offset, flags);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
+ * @sw: Switch to set sleep
+ *
+ * Let the switch link controllers know that the switch is going to
+ * sleep.
+ */
+int tb_lc_set_sleep(struct tb_switch *sw)
+{
+ int start, size, nlc, ret, i;
+ u32 desc;
+
+ if (sw->generation < 2)
+ return 0;
+
+ ret = read_lc_desc(sw, &desc);
+ if (ret)
+ return ret;
+
+ /* Figure out number of link controllers */
+ nlc = desc & TB_LC_DESC_NLC_MASK;
+ start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
+ size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
+
+ /* For each link controller set sleep bit */
+ for (i = 0; i < nlc; i++) {
+ unsigned int offset = sw->cap_lc + start + i * size;
+ u32 ctrl;
+
+ ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
+ offset + TB_LC_SX_CTRL, 1);
+ if (ret)
+ return ret;
+
+ ctrl |= TB_LC_SX_CTRL_SLP;
+ ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
+ offset + TB_LC_SX_CTRL, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
+ * @sw: Switch to check
+ *
+ * Checks whether lane bonding from the parent to @sw is possible.
+ */
+bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ int cap, ret;
+ u32 val;
+
+ if (sw->generation < 2)
+ return false;
+
+ up = tb_upstream_port(sw);
+ cap = find_port_lc_cap(up);
+ if (cap < 0)
+ return false;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
+ if (ret)
+ return false;
+
+ return !!(val & TB_LC_PORT_ATTR_BE);
+}
+
+static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
+ struct tb_port *in)
+{
+ struct tb_port *port;
+
+ /*
+ * The first DP IN port is sink 0 and the second is sink 1. The
+ * comparison below returns 0 when @in is the first DP IN port
+ * found and 1 otherwise.
+ */
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_is_dpin(port))
+ return in != port;
+ }
+
+ return -EINVAL;
+}
+
+static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
+{
+ u32 val, alloc;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Sink is available for CM/SW to use if the allocation value is
+ * either 0 or 1.
+ */
+ if (!sink) {
+ alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
+ return 0;
+ } else {
+ alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
+ TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
+ if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/**
+ * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
+ * @sw: Switch whose DP sink is queried
+ * @in: DP IN port to check
+ *
+ * Queries through LC SNK_ALLOCATION registers whether DP sink is available
+ * for the given DP IN port or not.
+ */
+bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
+{
+ int sink;
+
+ /*
+ * For older generations sink is always available as there is no
+ * allocation mechanism.
+ */
+ if (sw->generation < 3)
+ return true;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return false;
+
+ return !tb_lc_dp_sink_available(sw, sink);
+}
+
+/**
+ * tb_lc_dp_sink_alloc() - Allocate DP sink
+ * @sw: Switch whose DP sink is allocated
+ * @in: DP IN port the DP sink is allocated for
+ *
+ * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
+ * resource is available and the allocation is successful, returns %0. In all
+ * other cases returns a negative errno. In particular, %-EBUSY is returned if
+ * the resource was not available.
+ */
+int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
+{
+ int ret, sink;
+ u32 val;
+
+ if (sw->generation < 3)
+ return 0;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return sink;
+
+ ret = tb_lc_dp_sink_available(sw, sink);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ if (!sink) {
+ val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
+ } else {
+ val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
+ val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
+ TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
+ }
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+
+ if (ret)
+ return ret;
+
+ tb_port_dbg(in, "sink %d allocated\n", sink);
+ return 0;
+}
+
+/**
+ * tb_lc_dp_sink_dealloc() - De-allocate DP sink
+ * @sw: Switch whose DP sink is de-allocated
+ * @in: DP IN port whose DP sink is de-allocated
+ *
+ * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
+ */
+int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
+{
+ int ret, sink;
+ u32 val;
+
+ if (sw->generation < 3)
+ return 0;
+
+ sink = tb_lc_dp_sink_from_port(sw, in);
+ if (sink < 0)
+ return sink;
+
+ /* Needs to be owned by CM/SW */
+ ret = tb_lc_dp_sink_available(sw, sink);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ if (!sink)
+ val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
+ else
+ val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
+ if (ret)
+ return ret;
+
+ tb_port_dbg(in, "sink %d de-allocated\n", sink);
+ return 0;
+}
+
+/**
+ * tb_lc_force_power() - Forces LC to be powered on
+ * @sw: Thunderbolt switch
+ *
+ * This is useful to let the authentication cycle pass even without
+ * a Thunderbolt link present.
+ */
+int tb_lc_force_power(struct tb_switch *sw)
+{
+ u32 in = 0xffff;
+
+ return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
+}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
new file mode 100644
index 000000000..fd1b59397
--- /dev/null
+++ b/drivers/thunderbolt/nhi.c
@@ -0,0 +1,1391 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Thunderbolt driver - NHI driver
+ *
+ * The NHI (native host interface) is the PCI device that allows us to send
+ * and receive frames from the Thunderbolt bus.
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/property.h>
+#include <linux/platform_data/x86/apple.h>
+
+#include "nhi.h"
+#include "nhi_regs.h"
+#include "tb.h"
+
+#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
+
+#define RING_FIRST_USABLE_HOPID 1
+
+/*
+ * Minimal number of vectors when we use MSI-X. Two are for the control
+ * channel Rx/Tx and the remaining four are for cross domain DMA paths.
+ */
+#define MSIX_MIN_VECS 6
+#define MSIX_MAX_VECS 16
+
+#define NHI_MAILBOX_TIMEOUT 500 /* ms */
+
+static int ring_interrupt_index(const struct tb_ring *ring)
+{
+ int bit = ring->hop;
+ if (!ring->is_tx)
+ bit += ring->nhi->hop_count;
+ return bit;
+}
+
+/**
+ * ring_interrupt_active() - activate/deactivate interrupts for a single ring
+ *
+ * ring->nhi->lock must be held.
+ */
+static void ring_interrupt_active(struct tb_ring *ring, bool active)
+{
+ int reg = REG_RING_INTERRUPT_BASE +
+ ring_interrupt_index(ring) / 32 * 4;
+ int bit = ring_interrupt_index(ring) & 31;
+ int mask = 1 << bit;
+ u32 old, new;
+
+ if (ring->irq > 0) {
+ u32 step, shift, ivr, misc;
+ void __iomem *ivr_base;
+ int index;
+
+ if (ring->is_tx)
+ index = ring->hop;
+ else
+ index = ring->hop + ring->nhi->hop_count;
+
+ /*
+ * Ask the hardware to clear interrupt status bits automatically
+ * since we already know which interrupt was triggered.
+ */
+ misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+ if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
+ misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
+ iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
+ }
+
+ ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
+ step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+ shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
+ ivr = ioread32(ivr_base + step);
+ ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
+ if (active)
+ ivr |= ring->vector << shift;
+ iowrite32(ivr, ivr_base + step);
+ }
+
+ old = ioread32(ring->nhi->iobase + reg);
+ if (active)
+ new = old | mask;
+ else
+ new = old & ~mask;
+
+ dev_dbg(&ring->nhi->pdev->dev,
+ "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
+ active ? "enabling" : "disabling", reg, bit, old, new);
+
+ if (new == old)
+ dev_WARN(&ring->nhi->pdev->dev,
+ "interrupt for %s %d is already %s\n",
+ RING_TYPE(ring), ring->hop,
+ active ? "enabled" : "disabled");
+ iowrite32(new, ring->nhi->iobase + reg);
+}
+
+/**
+ * nhi_disable_interrupts() - disable interrupts for all rings
+ *
+ * Use only during init and shutdown.
+ */
+static void nhi_disable_interrupts(struct tb_nhi *nhi)
+{
+ int i = 0;
+ /* disable interrupts */
+ for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
+ iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
+
+ /* clear interrupt status bits */
+ for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
+ ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
+}
+
+/* ring helper methods */
+
+static void __iomem *ring_desc_base(struct tb_ring *ring)
+{
+ void __iomem *io = ring->nhi->iobase;
+ io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
+ io += ring->hop * 16;
+ return io;
+}
+
+static void __iomem *ring_options_base(struct tb_ring *ring)
+{
+ void __iomem *io = ring->nhi->iobase;
+ io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
+ io += ring->hop * 32;
+ return io;
+}
+
+static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
+{
+ /*
+ * The other 16 bits in the register are read-only and writes to
+ * them are ignored by the hardware, so we can save one ioread32() by
+ * filling the read-only bits with zeroes.
+ */
+ iowrite32(cons, ring_desc_base(ring) + 8);
+}
+
+static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
+{
+ /* See ring_iowrite_cons() above for explanation */
+ iowrite32(prod << 16, ring_desc_base(ring) + 8);
+}
+
+static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
+{
+ iowrite32(value, ring_desc_base(ring) + offset);
+}
+
+static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
+{
+ iowrite32(value, ring_desc_base(ring) + offset);
+ iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
+}
+
+static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
+{
+ iowrite32(value, ring_options_base(ring) + offset);
+}
+
+static bool ring_full(struct tb_ring *ring)
+{
+ return ((ring->head + 1) % ring->size) == ring->tail;
+}
+
+static bool ring_empty(struct tb_ring *ring)
+{
+ return ring->head == ring->tail;
+}
+
+/**
+ * ring_write_descriptors() - post frames from ring->queue to the controller
+ *
+ * ring->lock is held.
+ */
+static void ring_write_descriptors(struct tb_ring *ring)
+{
+ struct ring_frame *frame, *n;
+ struct ring_desc *descriptor;
+ list_for_each_entry_safe(frame, n, &ring->queue, list) {
+ if (ring_full(ring))
+ break;
+ list_move_tail(&frame->list, &ring->in_flight);
+ descriptor = &ring->descriptors[ring->head];
+ descriptor->phys = frame->buffer_phy;
+ descriptor->time = 0;
+ descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
+ if (ring->is_tx) {
+ descriptor->length = frame->size;
+ descriptor->eof = frame->eof;
+ descriptor->sof = frame->sof;
+ }
+ ring->head = (ring->head + 1) % ring->size;
+ if (ring->is_tx)
+ ring_iowrite_prod(ring, ring->head);
+ else
+ ring_iowrite_cons(ring, ring->head);
+ }
+}
+
+/**
+ * ring_work() - progress completed frames
+ *
+ * If the ring is shutting down then all frames are marked as canceled and
+ * their callbacks are invoked.
+ *
+ * Otherwise we collect all completed frames from the ring buffer, write new
+ * frames to the ring buffer and invoke the callbacks for the completed frames.
+ */
+static void ring_work(struct work_struct *work)
+{
+ struct tb_ring *ring = container_of(work, typeof(*ring), work);
+ struct ring_frame *frame;
+ bool canceled = false;
+ unsigned long flags;
+ LIST_HEAD(done);
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ if (!ring->running) {
+ /* Move all frames to done and mark them as canceled. */
+ list_splice_tail_init(&ring->in_flight, &done);
+ list_splice_tail_init(&ring->queue, &done);
+ canceled = true;
+ goto invoke_callback;
+ }
+
+ while (!ring_empty(ring)) {
+ if (!(ring->descriptors[ring->tail].flags
+ & RING_DESC_COMPLETED))
+ break;
+ frame = list_first_entry(&ring->in_flight, typeof(*frame),
+ list);
+ list_move_tail(&frame->list, &done);
+ if (!ring->is_tx) {
+ frame->size = ring->descriptors[ring->tail].length;
+ frame->eof = ring->descriptors[ring->tail].eof;
+ frame->sof = ring->descriptors[ring->tail].sof;
+ frame->flags = ring->descriptors[ring->tail].flags;
+ }
+ ring->tail = (ring->tail + 1) % ring->size;
+ }
+ ring_write_descriptors(ring);
+
+invoke_callback:
+ /* allow callbacks to schedule new work */
+ spin_unlock_irqrestore(&ring->lock, flags);
+ while (!list_empty(&done)) {
+ frame = list_first_entry(&done, typeof(*frame), list);
+ /*
+ * The callback may reenqueue or delete frame.
+ * Do not hold on to it.
+ */
+ list_del_init(&frame->list);
+ if (frame->callback)
+ frame->callback(ring, frame, canceled);
+ }
+}
+
+int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (ring->running) {
+ list_add_tail(&frame->list, &ring->queue);
+ ring_write_descriptors(ring);
+ } else {
+ ret = -ESHUTDOWN;
+ }
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
+
+/**
+ * tb_ring_poll() - Poll one completed frame from the ring
+ * @ring: Ring to poll
+ *
+ * This function can be called once the @start_poll callback of the @ring
+ * has been called. It will read one completed frame from the ring and
+ * return it to the caller. Returns %NULL if there are no more completed
+ * frames.
+ */
+struct ring_frame *tb_ring_poll(struct tb_ring *ring)
+{
+ struct ring_frame *frame = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (!ring->running)
+ goto unlock;
+ if (ring_empty(ring))
+ goto unlock;
+
+ if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
+ frame = list_first_entry(&ring->in_flight, typeof(*frame),
+ list);
+ list_del_init(&frame->list);
+
+ if (!ring->is_tx) {
+ frame->size = ring->descriptors[ring->tail].length;
+ frame->eof = ring->descriptors[ring->tail].eof;
+ frame->sof = ring->descriptors[ring->tail].sof;
+ frame->flags = ring->descriptors[ring->tail].flags;
+ }
+
+ ring->tail = (ring->tail + 1) % ring->size;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return frame;
+}
+EXPORT_SYMBOL_GPL(tb_ring_poll);
+
+static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
+{
+ int idx = ring_interrupt_index(ring);
+ int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
+ int bit = idx % 32;
+ u32 val;
+
+ val = ioread32(ring->nhi->iobase + reg);
+ if (mask)
+ val &= ~BIT(bit);
+ else
+ val |= BIT(bit);
+ iowrite32(val, ring->nhi->iobase + reg);
+}
+
+/* Both @nhi->lock and @ring->lock should be held */
+static void __ring_interrupt(struct tb_ring *ring)
+{
+ if (!ring->running)
+ return;
+
+ if (ring->start_poll) {
+ __ring_interrupt_mask(ring, true);
+ ring->start_poll(ring->poll_data);
+ } else {
+ schedule_work(&ring->work);
+ }
+}
+
+/**
+ * tb_ring_poll_complete() - Re-start interrupt for the ring
+ * @ring: Ring to re-start the interrupt
+ *
+ * This will re-start (unmask) the ring interrupt once the user is done
+ * with polling.
+ */
+void tb_ring_poll_complete(struct tb_ring *ring)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->nhi->lock, flags);
+ spin_lock(&ring->lock);
+ if (ring->start_poll)
+ __ring_interrupt_mask(ring, false);
+ spin_unlock(&ring->lock);
+ spin_unlock_irqrestore(&ring->nhi->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
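+
+/*
+ * Illustrative sketch (not part of the driver; the names below are
+ * hypothetical): a client that registered @start_poll typically drains
+ * the ring from process context and then re-enables the interrupt:
+ *
+ *     static void my_poll_work(struct work_struct *work)
+ *     {
+ *         struct my_ctx *ctx = container_of(work, struct my_ctx, work);
+ *         struct ring_frame *frame;
+ *
+ *         while ((frame = tb_ring_poll(ctx->ring)))
+ *             my_handle_frame(ctx, frame);
+ *
+ *         tb_ring_poll_complete(ctx->ring);
+ *     }
+ *
+ * The @start_poll callback may be invoked in hard interrupt context
+ * (MSI-X) with the ring interrupt already masked, so it should only
+ * schedule deferred work like the above.
+ */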
+
+static irqreturn_t ring_msix(int irq, void *data)
+{
+ struct tb_ring *ring = data;
+
+ spin_lock(&ring->nhi->lock);
+ spin_lock(&ring->lock);
+ __ring_interrupt(ring);
+ spin_unlock(&ring->lock);
+ spin_unlock(&ring->nhi->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
+{
+ struct tb_nhi *nhi = ring->nhi;
+ unsigned long irqflags;
+ int ret;
+
+ if (!nhi->pdev->msix_enabled)
+ return 0;
+
+ ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ ring->vector = ret;
+
+ ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
+ if (ret < 0)
+ goto err_ida_remove;
+
+ ring->irq = ret;
+
+ irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
+ ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
+ if (ret)
+ goto err_ida_remove;
+
+ return 0;
+
+err_ida_remove:
+ ida_simple_remove(&nhi->msix_ida, ring->vector);
+
+ return ret;
+}
+
+static void ring_release_msix(struct tb_ring *ring)
+{
+ if (ring->irq <= 0)
+ return;
+
+ free_irq(ring->irq, ring);
+ ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+ ring->vector = 0;
+ ring->irq = 0;
+}
+
+static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
+{
+ int ret = 0;
+
+ spin_lock_irq(&nhi->lock);
+
+ if (ring->hop < 0) {
+ unsigned int i;
+
+ /*
+ * Automatically allocate HopID from the non-reserved
+ * range 1 .. hop_count - 1.
+ */
+ for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
+ if (ring->is_tx) {
+ if (!nhi->tx_rings[i]) {
+ ring->hop = i;
+ break;
+ }
+ } else {
+ if (!nhi->rx_rings[i]) {
+ ring->hop = i;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
+ dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ if (ring->is_tx && nhi->tx_rings[ring->hop]) {
+ dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
+ ring->hop);
+ ret = -EBUSY;
+ goto err_unlock;
+ } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
+ dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
+ ring->hop);
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ if (ring->is_tx)
+ nhi->tx_rings[ring->hop] = ring;
+ else
+ nhi->rx_rings[ring->hop] = ring;
+
+err_unlock:
+ spin_unlock_irq(&nhi->lock);
+
+ return ret;
+}
+
+static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
+ bool transmit, unsigned int flags,
+ u16 sof_mask, u16 eof_mask,
+ void (*start_poll)(void *),
+ void *poll_data)
+{
+ struct tb_ring *ring = NULL;
+
+ dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
+ transmit ? "TX" : "RX", hop, size);
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ spin_lock_init(&ring->lock);
+ INIT_LIST_HEAD(&ring->queue);
+ INIT_LIST_HEAD(&ring->in_flight);
+ INIT_WORK(&ring->work, ring_work);
+
+ ring->nhi = nhi;
+ ring->hop = hop;
+ ring->is_tx = transmit;
+ ring->size = size;
+ ring->flags = flags;
+ ring->sof_mask = sof_mask;
+ ring->eof_mask = eof_mask;
+ ring->head = 0;
+ ring->tail = 0;
+ ring->running = false;
+ ring->start_poll = start_poll;
+ ring->poll_data = poll_data;
+
+ ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
+ size * sizeof(*ring->descriptors),
+ &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
+ if (!ring->descriptors)
+ goto err_free_ring;
+
+ if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
+ goto err_free_descs;
+
+ if (nhi_alloc_hop(nhi, ring))
+ goto err_release_msix;
+
+ return ring;
+
+err_release_msix:
+ ring_release_msix(ring);
+err_free_descs:
+ dma_free_coherent(&ring->nhi->pdev->dev,
+ ring->size * sizeof(*ring->descriptors),
+ ring->descriptors, ring->descriptors_dma);
+err_free_ring:
+ kfree(ring);
+
+ return NULL;
+}
+
+/**
+ * tb_ring_alloc_tx() - Allocate DMA ring for transmit
+ * @nhi: Pointer to the NHI the ring is to be allocated
+ * @hop: HopID (ring) to allocate
+ * @size: Number of entries in the ring
+ * @flags: Flags for the ring
+ */
+struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
+ unsigned int flags)
+{
+ return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
+
+/**
+ * tb_ring_alloc_rx() - Allocate DMA ring for receive
+ * @nhi: Pointer to the NHI the ring is to be allocated
+ * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
+ * @size: Number of entries in the ring
+ * @flags: Flags for the ring
+ * @sof_mask: Mask of PDF values that start a frame
+ * @eof_mask: Mask of PDF values that end a frame
+ * @start_poll: If not %NULL the ring will call this function when an
+ * interrupt is triggered and masked, instead of callback
+ * in each Rx frame.
+ * @poll_data: Optional data passed to @start_poll
+ */
+struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
+ unsigned int flags, u16 sof_mask, u16 eof_mask,
+ void (*start_poll)(void *), void *poll_data)
+{
+ return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
+ start_poll, poll_data);
+}
+EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
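+
+/*
+ * Illustrative sketch (not part of the driver): a minimal user would
+ * allocate a TX/RX ring pair, start both and only then enqueue frames
+ * (e.g. with tb_ring_tx()/tb_ring_rx() from include/linux/thunderbolt.h).
+ * The hop, size and PDF masks below are example values only:
+ *
+ *     struct tb_ring *tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
+ *     struct tb_ring *rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND,
+ *                                           0xffff, 0xffff, NULL, NULL);
+ *
+ *     if (tx && rx) {
+ *         tb_ring_start(tx);
+ *         tb_ring_start(rx);
+ *     }
+ *
+ * Passing %-1 as @hop to tb_ring_alloc_rx() lets nhi_alloc_hop() pick a
+ * free HopID automatically.
+ */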
+
+/**
+ * tb_ring_start() - enable a ring
+ *
+ * Must not be invoked in parallel with tb_ring_stop().
+ */
+void tb_ring_start(struct tb_ring *ring)
+{
+ u16 frame_size;
+ u32 flags;
+
+ spin_lock_irq(&ring->nhi->lock);
+ spin_lock(&ring->lock);
+ if (ring->nhi->going_away)
+ goto err;
+ if (ring->running) {
+ dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
+ goto err;
+ }
+ dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
+ RING_TYPE(ring), ring->hop);
+
+ if (ring->flags & RING_FLAG_FRAME) {
+ /* Means 4096 */
+ frame_size = 0;
+ flags = RING_FLAG_ENABLE;
+ } else {
+ frame_size = TB_FRAME_SIZE;
+ flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
+ }
+
+ ring_iowrite64desc(ring, ring->descriptors_dma, 0);
+ if (ring->is_tx) {
+ ring_iowrite32desc(ring, ring->size, 12);
+ ring_iowrite32options(ring, 0, 4); /* time related? */
+ ring_iowrite32options(ring, flags, 0);
+ } else {
+ u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
+
+ ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
+ ring_iowrite32options(ring, sof_eof_mask, 4);
+ ring_iowrite32options(ring, flags, 0);
+ }
+ ring_interrupt_active(ring, true);
+ ring->running = true;
+err:
+ spin_unlock(&ring->lock);
+ spin_unlock_irq(&ring->nhi->lock);
+}
+EXPORT_SYMBOL_GPL(tb_ring_start);
+
+/**
+ * tb_ring_stop() - shutdown a ring
+ *
+ * Must not be invoked from a callback.
+ *
+ * This method will disable the ring. Further calls to
+ * tb_ring_tx()/tb_ring_rx() will return %-ESHUTDOWN until the ring is
+ * started again with tb_ring_start().
+ *
+ * All enqueued frames will be canceled and their callbacks will be executed
+ * with frame->canceled set to true (on the callback thread). This method
+ * returns only after all callback invocations have finished.
+ */
+void tb_ring_stop(struct tb_ring *ring)
+{
+ spin_lock_irq(&ring->nhi->lock);
+ spin_lock(&ring->lock);
+ dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
+ RING_TYPE(ring), ring->hop);
+ if (ring->nhi->going_away)
+ goto err;
+ if (!ring->running) {
+ dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
+ RING_TYPE(ring), ring->hop);
+ goto err;
+ }
+ ring_interrupt_active(ring, false);
+
+ ring_iowrite32options(ring, 0, 0);
+ ring_iowrite64desc(ring, 0, 0);
+ ring_iowrite32desc(ring, 0, 8);
+ ring_iowrite32desc(ring, 0, 12);
+ ring->head = 0;
+ ring->tail = 0;
+ ring->running = false;
+
+err:
+ spin_unlock(&ring->lock);
+ spin_unlock_irq(&ring->nhi->lock);
+
+ /*
+ * schedule ring->work to invoke callbacks on all remaining frames.
+ */
+ schedule_work(&ring->work);
+ flush_work(&ring->work);
+}
+EXPORT_SYMBOL_GPL(tb_ring_stop);
+
+/*
+ * tb_ring_free() - free ring
+ *
+ * When this method returns all invocations of ring->callback will have
+ * finished.
+ *
+ * Ring must be stopped.
+ *
+ * Must NOT be called from ring_frame->callback!
+ */
+void tb_ring_free(struct tb_ring *ring)
+{
+ spin_lock_irq(&ring->nhi->lock);
+ /*
+ * Dissociate the ring from the NHI. This also ensures that
+ * nhi_interrupt_work cannot reschedule ring->work.
+ */
+ if (ring->is_tx)
+ ring->nhi->tx_rings[ring->hop] = NULL;
+ else
+ ring->nhi->rx_rings[ring->hop] = NULL;
+
+ if (ring->running) {
+ dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
+ RING_TYPE(ring), ring->hop);
+ }
+ spin_unlock_irq(&ring->nhi->lock);
+
+ ring_release_msix(ring);
+
+ dma_free_coherent(&ring->nhi->pdev->dev,
+ ring->size * sizeof(*ring->descriptors),
+ ring->descriptors, ring->descriptors_dma);
+
+ ring->descriptors = NULL;
+ ring->descriptors_dma = 0;
+
+ dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
+ ring->hop);
+
+ /*
+ * ring->work can no longer be scheduled (it is scheduled only
+ * by nhi_interrupt_work, tb_ring_stop and ring_msix). Wait for it
+ * to finish before freeing the ring.
+ */
+ flush_work(&ring->work);
+ kfree(ring);
+}
+EXPORT_SYMBOL_GPL(tb_ring_free);
+
+/**
+ * nhi_mailbox_cmd() - Send a command through NHI mailbox
+ * @nhi: Pointer to the NHI structure
+ * @cmd: Command to send
+ * @data: Data to be sent with the command
+ *
+ * Sends a mailbox command to the firmware running on the NHI. Returns %0 in
+ * case of success and negative errno in case of failure.
+ */
+int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
+{
+ ktime_t timeout;
+ u32 val;
+
+ iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
+
+ val = ioread32(nhi->iobase + REG_INMAIL_CMD);
+ val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
+ val |= REG_INMAIL_OP_REQUEST | cmd;
+ iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
+
+ timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
+ do {
+ val = ioread32(nhi->iobase + REG_INMAIL_CMD);
+ if (!(val & REG_INMAIL_OP_REQUEST))
+ break;
+ usleep_range(10, 20);
+ } while (ktime_before(ktime_get(), timeout));
+
+ if (val & REG_INMAIL_OP_REQUEST)
+ return -ETIMEDOUT;
+ if (val & REG_INMAIL_ERROR)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * nhi_mailbox_mode() - Return current firmware operation mode
+ * @nhi: Pointer to the NHI structure
+ *
+ * The function reads current firmware operation mode using NHI mailbox
+ * registers and returns it to the caller.
+ */
+enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
+{
+ u32 val;
+
+ val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
+ val &= REG_OUTMAIL_CMD_OPMODE_MASK;
+ val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
+
+ return (enum nhi_fw_mode)val;
+}
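+
+/*
+ * Illustrative sketch (not part of the driver; the real call sites live
+ * in icm.c): a typical use pairs the two helpers above, for example when
+ * the firmware connection manager is told that the driver is unloading:
+ *
+ *     if (nhi_mailbox_mode(nhi) == NHI_FW_CM_MODE)
+ *         nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
+ *
+ * The command and the mode check are example values only.
+ */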
+
+static void nhi_interrupt_work(struct work_struct *work)
+{
+ struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
+ int value = 0; /* Suppress uninitialized usage warning. */
+ int bit;
+ int hop = -1;
+ int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
+ struct tb_ring *ring;
+
+ spin_lock_irq(&nhi->lock);
+
+ /*
+ * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
+ * (TX, RX, RX overflow). We iterate over the bits and read new
+ * dwords as required. The registers are cleared on read.
+ */
+ for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
+ if (bit % 32 == 0)
+ value = ioread32(nhi->iobase
+ + REG_RING_NOTIFY_BASE
+ + 4 * (bit / 32));
+ if (++hop == nhi->hop_count) {
+ hop = 0;
+ type++;
+ }
+ if ((value & (1 << (bit % 32))) == 0)
+ continue;
+ if (type == 2) {
+ dev_warn(&nhi->pdev->dev,
+ "RX overflow for ring %d\n",
+ hop);
+ continue;
+ }
+ if (type == 0)
+ ring = nhi->tx_rings[hop];
+ else
+ ring = nhi->rx_rings[hop];
+ if (ring == NULL) {
+ dev_warn(&nhi->pdev->dev,
+ "got interrupt for inactive %s ring %d\n",
+ type ? "RX" : "TX",
+ hop);
+ continue;
+ }
+
+ spin_lock(&ring->lock);
+ __ring_interrupt(ring);
+ spin_unlock(&ring->lock);
+ }
+ spin_unlock_irq(&nhi->lock);
+}
+
+static irqreturn_t nhi_msi(int irq, void *data)
+{
+ struct tb_nhi *nhi = data;
+ schedule_work(&nhi->interrupt_work);
+ return IRQ_HANDLED;
+}
+
+static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ ret = tb_domain_suspend_noirq(tb);
+ if (ret)
+ return ret;
+
+ if (nhi->ops && nhi->ops->suspend_noirq) {
+ ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nhi_suspend_noirq(struct device *dev)
+{
+ return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
+}
+
+static int nhi_freeze_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ return tb_domain_freeze_noirq(tb);
+}
+
+static int nhi_thaw_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ return tb_domain_thaw_noirq(tb);
+}
+
+static bool nhi_wake_supported(struct pci_dev *pdev)
+{
+ u8 val;
+
+ /*
+ * If power rails are sustainable for wakeup from S4 this
+ * property is set by the BIOS.
+ */
+ if (device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
+ return !!val;
+
+ return true;
+}
+
+static int nhi_poweroff_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ bool wakeup;
+
+ wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
+ return __nhi_suspend_noirq(dev, wakeup);
+}
+
+static void nhi_enable_int_throttling(struct tb_nhi *nhi)
+{
+ /* Throttling is specified in 256ns increments */
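+ /* e.g. 128 us of throttling: DIV_ROUND_UP(128 * 1000, 256) == 500 units of 256 ns */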
+ u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
+ unsigned int i;
+
+ /*
+ * Configure interrupt throttling for all vectors even if we
+ * only use a few.
+ */
+ for (i = 0; i < MSIX_MAX_VECS; i++) {
+ u32 reg = REG_INT_THROTTLING_RATE + i * 4;
+ iowrite32(throttle, nhi->iobase + reg);
+ }
+}
+
+static int nhi_resume_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ /*
+ * Check that the device is still there. It may be that the user
+ * unplugged the last device, which causes the host controller to go
+ * away on PCs.
+ */
+ if (!pci_device_is_present(pdev)) {
+ nhi->going_away = true;
+ } else {
+ if (nhi->ops && nhi->ops->resume_noirq) {
+ ret = nhi->ops->resume_noirq(nhi);
+ if (ret)
+ return ret;
+ }
+ nhi_enable_int_throttling(tb->nhi);
+ }
+
+ return tb_domain_resume_noirq(tb);
+}
+
+static int nhi_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ return tb_domain_suspend(tb);
+}
+
+static void nhi_complete(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+
+ /*
+ * If we were runtime suspended when system suspend started,
+ * schedule runtime resume now. It should bring the domain back
+ * to functional state.
+ */
+ if (pm_runtime_suspended(&pdev->dev))
+ pm_runtime_resume(&pdev->dev);
+ else
+ tb_domain_complete(tb);
+}
+
+static int nhi_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ ret = tb_domain_runtime_suspend(tb);
+ if (ret)
+ return ret;
+
+ if (nhi->ops && nhi->ops->runtime_suspend) {
+ ret = nhi->ops->runtime_suspend(tb->nhi);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int nhi_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+ int ret;
+
+ if (nhi->ops && nhi->ops->runtime_resume) {
+ ret = nhi->ops->runtime_resume(nhi);
+ if (ret)
+ return ret;
+ }
+
+ nhi_enable_int_throttling(nhi);
+ return tb_domain_runtime_resume(tb);
+}
+
+static void nhi_shutdown(struct tb_nhi *nhi)
+{
+ int i;
+
+ dev_dbg(&nhi->pdev->dev, "shutdown\n");
+
+ for (i = 0; i < nhi->hop_count; i++) {
+ if (nhi->tx_rings[i])
+ dev_WARN(&nhi->pdev->dev,
+ "TX ring %d is still active\n", i);
+ if (nhi->rx_rings[i])
+ dev_WARN(&nhi->pdev->dev,
+ "RX ring %d is still active\n", i);
+ }
+ nhi_disable_interrupts(nhi);
+ /*
+ * We have to release the irq before calling flush_work. Otherwise an
+ * already executing IRQ handler could call schedule_work again.
+ */
+ if (!nhi->pdev->msix_enabled) {
+ devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
+ flush_work(&nhi->interrupt_work);
+ }
+ ida_destroy(&nhi->msix_ida);
+
+ if (nhi->ops && nhi->ops->shutdown)
+ nhi->ops->shutdown(nhi);
+}
+
+static int nhi_init_msi(struct tb_nhi *nhi)
+{
+ struct pci_dev *pdev = nhi->pdev;
+ int res, irq, nvec;
+
+ /* In case someone left them on. */
+ nhi_disable_interrupts(nhi);
+
+ nhi_enable_int_throttling(nhi);
+
+ ida_init(&nhi->msix_ida);
+
+ /*
+ * The NHI has 16 MSI-X vectors or a single MSI. We first try to
+ * get all MSI-X vectors and if we succeed, each ring will have
+ * one MSI-X vector. If for some reason that does not work out, we
+ * fall back to a single MSI.
+ */
+ nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
+ PCI_IRQ_MSIX);
+ if (nvec < 0) {
+ nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (nvec < 0)
+ return nvec;
+
+ INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
+
+ irq = pci_irq_vector(nhi->pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ res = devm_request_irq(&pdev->dev, irq, nhi_msi,
+ IRQF_NO_SUSPEND, "thunderbolt", nhi);
+ if (res) {
+ dev_err(&pdev->dev, "request_irq failed, aborting\n");
+ return res;
+ }
+ }
+
+ return 0;
+}
+
+static bool nhi_imr_valid(struct pci_dev *pdev)
+{
+ u8 val;
+
+ if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
+ return !!val;
+
+ return true;
+}
+
+/*
+ * During suspend the Thunderbolt controller is reset and all PCIe
+ * tunnels are lost. The NHI driver will try to reestablish all tunnels
+ * during resume. This adds device links between the tunneled PCIe
+ * downstream ports and the NHI so that the device core will make sure
+ * NHI is resumed first before the rest.
+ */
+static void tb_apple_add_links(struct tb_nhi *nhi)
+{
+ struct pci_dev *upstream, *pdev;
+
+ if (!x86_apple_machine)
+ return;
+
+ switch (nhi->pdev->device) {
+ case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+ break;
+ default:
+ return;
+ }
+
+ upstream = pci_upstream_bridge(nhi->pdev);
+ while (upstream) {
+ if (!pci_is_pcie(upstream))
+ return;
+ if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
+ break;
+ upstream = pci_upstream_bridge(upstream);
+ }
+
+ if (!upstream)
+ return;
+
+ /*
+ * For each hotplug downstream port, create a device link
+ * back to NHI so that PCIe tunnels can be re-established after
+ * sleep.
+ */
+ for_each_pci_bridge(pdev, upstream->subordinate) {
+ const struct device_link *link;
+
+ if (!pci_is_pcie(pdev))
+ continue;
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
+ !pdev->is_hotplug_bridge)
+ continue;
+
+ link = device_link_add(&pdev->dev, &nhi->pdev->dev,
+ DL_FLAG_AUTOREMOVE_SUPPLIER |
+ DL_FLAG_PM_RUNTIME);
+ if (link) {
+ dev_dbg(&nhi->pdev->dev, "created link from %s\n",
+ dev_name(&pdev->dev));
+ } else {
+ dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
+ dev_name(&pdev->dev));
+ }
+ }
+}
+
+static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct tb_nhi *nhi;
+ struct tb *tb;
+ int res;
+
+ if (!nhi_imr_valid(pdev)) {
+ dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
+ return -ENODEV;
+ }
+
+ res = pcim_enable_device(pdev);
+ if (res) {
+ dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
+ return res;
+ }
+
+ res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
+ if (res) {
+ dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
+ return res;
+ }
+
+ nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
+ if (!nhi)
+ return -ENOMEM;
+
+ nhi->pdev = pdev;
+ nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
+ /* cannot fail - table is allocated in pcim_iomap_regions() */
+ nhi->iobase = pcim_iomap_table(pdev)[0];
+ nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
+ dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);
+
+ nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
+ sizeof(*nhi->tx_rings), GFP_KERNEL);
+ nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
+ sizeof(*nhi->rx_rings), GFP_KERNEL);
+ if (!nhi->tx_rings || !nhi->rx_rings)
+ return -ENOMEM;
+
+ res = nhi_init_msi(nhi);
+ if (res) {
+ dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
+ return res;
+ }
+
+ spin_lock_init(&nhi->lock);
+
+ res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (res)
+ res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (res) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ return res;
+ }
+
+ pci_set_master(pdev);
+
+ if (nhi->ops && nhi->ops->init) {
+ res = nhi->ops->init(nhi);
+ if (res)
+ return res;
+ }
+
+ tb_apple_add_links(nhi);
+ tb_acpi_add_links(nhi);
+
+ tb = icm_probe(nhi);
+ if (!tb)
+ tb = tb_probe(nhi);
+ if (!tb) {
+ dev_err(&nhi->pdev->dev,
+ "failed to determine connection manager, aborting\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+
+ res = tb_domain_add(tb);
+ if (res) {
+ /*
+ * At this point the RX/TX rings might already have been
+ * activated. Do a proper shutdown.
+ */
+ tb_domain_put(tb);
+ nhi_shutdown(nhi);
+ return res;
+ }
+ pci_set_drvdata(pdev, tb);
+
+ device_wakeup_enable(&pdev->dev);
+
+ pm_runtime_allow(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+}
+
+static void nhi_remove(struct pci_dev *pdev)
+{
+ struct tb *tb = pci_get_drvdata(pdev);
+ struct tb_nhi *nhi = tb->nhi;
+
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_forbid(&pdev->dev);
+
+ tb_domain_remove(tb);
+ nhi_shutdown(nhi);
+}
+
+/*
+ * The tunneled PCI bridges are siblings of us. Use resume_noirq to re-enable
+ * the tunnels as early as possible. A corresponding PCI quirk blocks the
+ * downstream bridges' resume_noirq until we are done.
+ */
+static const struct dev_pm_ops nhi_pm_ops = {
+ .suspend_noirq = nhi_suspend_noirq,
+ .resume_noirq = nhi_resume_noirq,
+ .freeze_noirq = nhi_freeze_noirq, /*
+ * we just disable hotplug, the
+ * pci-tunnels stay alive.
+ */
+ .thaw_noirq = nhi_thaw_noirq,
+ .restore_noirq = nhi_resume_noirq,
+ .suspend = nhi_suspend,
+ .poweroff_noirq = nhi_poweroff_noirq,
+ .poweroff = nhi_suspend,
+ .complete = nhi_complete,
+ .runtime_suspend = nhi_runtime_suspend,
+ .runtime_resume = nhi_runtime_resume,
+};
+
+static struct pci_device_id nhi_ids[] = {
+ /*
+ * We have to specify the class; the TB bridges use the same device and
+ * vendor (sub)IDs on gen 1 and gen 2 controllers.
+ */
+ {
+ .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
+ .subvendor = 0x2222, .subdevice = 0x1111,
+ },
+ {
+ .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
+ .subvendor = 0x2222, .subdevice = 0x1111,
+ },
+ {
+ .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
+ },
+ {
+ .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
+ },
+
+ /* Thunderbolt 3 */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+
+ /* Any USB4 compliant host */
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
+
+ { 0,}
+};
+
+MODULE_DEVICE_TABLE(pci, nhi_ids);
+MODULE_LICENSE("GPL");
+
+static struct pci_driver nhi_driver = {
+ .name = "thunderbolt",
+ .id_table = nhi_ids,
+ .probe = nhi_probe,
+ .remove = nhi_remove,
+ .shutdown = nhi_remove,
+ .driver.pm = &nhi_pm_ops,
+};
+
+static int __init nhi_init(void)
+{
+ int ret;
+
+ ret = tb_domain_init();
+ if (ret)
+ return ret;
+ ret = pci_register_driver(&nhi_driver);
+ if (ret)
+ tb_domain_exit();
+ return ret;
+}
+
+static void __exit nhi_unload(void)
+{
+ pci_unregister_driver(&nhi_driver);
+ tb_domain_exit();
+}
+
+rootfs_initcall(nhi_init);
+module_exit(nhi_unload);
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
new file mode 100644
index 000000000..7ad6d3f05
--- /dev/null
+++ b/drivers/thunderbolt/nhi.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt driver - NHI driver
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
+ */
+
+#ifndef DSL3510_H_
+#define DSL3510_H_
+
+#include <linux/thunderbolt.h>
+
+enum nhi_fw_mode {
+ NHI_FW_SAFE_MODE,
+ NHI_FW_AUTH_MODE,
+ NHI_FW_EP_MODE,
+ NHI_FW_CM_MODE,
+};
+
+enum nhi_mailbox_cmd {
+ NHI_MAILBOX_SAVE_DEVS = 0x05,
+ NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06,
+ NHI_MAILBOX_DRV_UNLOADS = 0x07,
+ NHI_MAILBOX_DISCONNECT_PA = 0x10,
+ NHI_MAILBOX_DISCONNECT_PB = 0x11,
+ NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
+};
+
+int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data);
+enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi);
+
+/**
+ * struct tb_nhi_ops - NHI specific optional operations
+ * @init: NHI specific initialization
+ * @suspend_noirq: NHI specific suspend_noirq hook
+ * @resume_noirq: NHI specific resume_noirq hook
+ * @runtime_suspend: NHI specific runtime_suspend hook
+ * @runtime_resume: NHI specific runtime_resume hook
+ * @shutdown: NHI specific shutdown
+ */
+struct tb_nhi_ops {
+ int (*init)(struct tb_nhi *nhi);
+ int (*suspend_noirq)(struct tb_nhi *nhi, bool wakeup);
+ int (*resume_noirq)(struct tb_nhi *nhi);
+ int (*runtime_suspend)(struct tb_nhi *nhi);
+ int (*runtime_resume)(struct tb_nhi *nhi);
+ void (*shutdown)(struct tb_nhi *nhi);
+};
+
+extern const struct tb_nhi_ops icl_nhi_ops;
+
+/*
+ * PCI IDs used in this driver from Win Ridge forward. There is no
+ * need for the PCI quirk anymore as we will use ICM also on Apple
+ * hardware.
+ */
+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134
+#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137
+#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d
+#define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI 0x15bf
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE 0x15c0
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI 0x15d2
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE 0x15d3
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI 0x15d9
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE 0x15da
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI 0x15dc
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI 0x15dd
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI 0x15de
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE 0x15e7
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI 0x15e8
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE 0x15ea
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI 0x15eb
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
+#define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d
+#define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17
+#define PCI_DEVICE_ID_INTEL_TGL_NHI0 0x9a1b
+#define PCI_DEVICE_ID_INTEL_TGL_NHI1 0x9a1d
+#define PCI_DEVICE_ID_INTEL_TGL_H_NHI0 0x9a1f
+#define PCI_DEVICE_ID_INTEL_TGL_H_NHI1 0x9a21
+
+#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
+
+#endif
diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c
new file mode 100644
index 000000000..96da07e88
--- /dev/null
+++ b/drivers/thunderbolt/nhi_ops.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NHI specific operations
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/suspend.h>
+
+#include "nhi.h"
+#include "nhi_regs.h"
+#include "tb.h"
+
+/* Ice Lake specific NHI operations */
+
+#define ICL_LC_MAILBOX_TIMEOUT 500 /* ms */
+
+static int check_for_device(struct device *dev, void *data)
+{
+ return tb_is_switch(dev);
+}
+
+static bool icl_nhi_is_device_connected(struct tb_nhi *nhi)
+{
+ struct tb *tb = pci_get_drvdata(nhi->pdev);
+ int ret;
+
+ ret = device_for_each_child(&tb->root_switch->dev, NULL,
+ check_for_device);
+ return ret > 0;
+}
+
+static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
+{
+ u32 vs_cap;
+
+ /*
+ * The Thunderbolt host controller is always present in Ice Lake
+ * but the firmware may not be loaded and running (depending on
+ * whether there is a device connected and so on). Each time the
+ * controller is used we need to "Force Power" it first and wait
+ * for the firmware to indicate it is up and running. This "Force
+ * Power" does not actually power the controller on/off, so the
+ * controller is accessible even when "Force Power" is off.
+ *
+ * The actual power management happens inside shared ACPI power
+ * resources using standard ACPI methods.
+ */
+ pci_read_config_dword(nhi->pdev, VS_CAP_22, &vs_cap);
+ if (power) {
+ vs_cap &= ~VS_CAP_22_DMA_DELAY_MASK;
+ vs_cap |= 0x22 << VS_CAP_22_DMA_DELAY_SHIFT;
+ vs_cap |= VS_CAP_22_FORCE_POWER;
+ } else {
+ vs_cap &= ~VS_CAP_22_FORCE_POWER;
+ }
+ pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap);
+
+ if (power) {
+ unsigned int retries = 350;
+ u32 val;
+
+ /* Wait until the firmware tells it is up and running */
+ do {
+ pci_read_config_dword(nhi->pdev, VS_CAP_9, &val);
+ if (val & VS_CAP_9_FW_READY)
+ return 0;
+ usleep_range(3000, 3100);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd cmd)
+{
+ u32 data;
+
+ data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK;
+ pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID);
+}
+
+static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout)
+{
+ unsigned long end;
+ u32 data;
+
+ if (!timeout)
+ goto clear;
+
+ end = jiffies + msecs_to_jiffies(timeout);
+ do {
+ pci_read_config_dword(nhi->pdev, VS_CAP_18, &data);
+ if (data & VS_CAP_18_DONE)
+ goto clear;
+ usleep_range(1000, 1100);
+ } while (time_before(jiffies, end));
+
+ return -ETIMEDOUT;
+
+clear:
+ /* Clear the valid bit */
+ pci_write_config_dword(nhi->pdev, VS_CAP_19, 0);
+ return 0;
+}
+
+static void icl_nhi_set_ltr(struct tb_nhi *nhi)
+{
+ u32 max_ltr, ltr;
+
+ pci_read_config_dword(nhi->pdev, VS_CAP_16, &max_ltr);
+ max_ltr &= 0xffff;
+ /* Program the same value for both snoop and no-snoop */
+ ltr = max_ltr << 16 | max_ltr;
+ pci_write_config_dword(nhi->pdev, VS_CAP_15, ltr);
+}
+
+static int icl_nhi_suspend(struct tb_nhi *nhi)
+{
+ struct tb *tb = pci_get_drvdata(nhi->pdev);
+ int ret;
+
+ if (icl_nhi_is_device_connected(nhi))
+ return 0;
+
+ if (tb_switch_is_icm(tb->root_switch)) {
+ /*
+ * If there is no device connected we need to perform both
+ * a handshake through the LC mailbox and a force power
+ * down before entering D3.
+ */
+ icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
+ ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+
+ return icl_nhi_force_power(nhi, false);
+}
+
+static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup)
+{
+ struct tb *tb = pci_get_drvdata(nhi->pdev);
+ enum icl_lc_mailbox_cmd cmd;
+
+ if (!pm_suspend_via_firmware())
+ return icl_nhi_suspend(nhi);
+
+ if (!tb_switch_is_icm(tb->root_switch))
+ return 0;
+
+ cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE;
+ icl_nhi_lc_mailbox_cmd(nhi, cmd);
+ return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
+}
+
+static int icl_nhi_resume(struct tb_nhi *nhi)
+{
+ int ret;
+
+ ret = icl_nhi_force_power(nhi, true);
+ if (ret)
+ return ret;
+
+ icl_nhi_set_ltr(nhi);
+ return 0;
+}
+
+static void icl_nhi_shutdown(struct tb_nhi *nhi)
+{
+ icl_nhi_force_power(nhi, false);
+}
+
+const struct tb_nhi_ops icl_nhi_ops = {
+ .init = icl_nhi_resume,
+ .suspend_noirq = icl_nhi_suspend_noirq,
+ .resume_noirq = icl_nhi_resume,
+ .runtime_suspend = icl_nhi_suspend,
+ .runtime_resume = icl_nhi_resume,
+ .shutdown = icl_nhi_shutdown,
+};
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
new file mode 100644
index 000000000..0d4970dce
--- /dev/null
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt driver - NHI registers
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
+ */
+
+#ifndef NHI_REGS_H_
+#define NHI_REGS_H_
+
+#include <linux/types.h>
+
+enum ring_flags {
+ RING_FLAG_ISOCH_ENABLE = 1 << 27, /* TX only? */
+ RING_FLAG_E2E_FLOW_CONTROL = 1 << 28,
+ RING_FLAG_PCI_NO_SNOOP = 1 << 29,
+ RING_FLAG_RAW = 1 << 30, /* ignore EOF/SOF mask, include checksum */
+ RING_FLAG_ENABLE = 1 << 31,
+};
+
+/**
+ * struct ring_desc - TX/RX ring entry
+ *
+ * For TX set length/eof/sof.
+ * For RX length/eof/sof are set by the NHI.
+ */
+struct ring_desc {
+ u64 phys;
+ u32 length:12;
+ u32 eof:4;
+ u32 sof:4;
+ enum ring_desc_flags flags:12;
+ u32 time; /* write zero */
+} __packed;
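+
+/*
+ * Note: the bit fields above pack into one 32-bit word (12 + 4 + 4 + 12
+ * bits), so a descriptor is 16 bytes in total (8 + 4 + 4), which matches
+ * the "16 bytes per entry" ring layouts documented below.
+ */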
+
+/* NHI registers in bar 0 */
+
+/*
+ * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: physical pointer to an array of struct ring_desc
+ * 08: ring tail (set by NHI)
+ * 10: ring head (index of first non-posted descriptor)
+ * 12: descriptor count
+ */
+#define REG_TX_RING_BASE 0x00000
+
+/*
+ * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: physical pointer to an array of struct ring_desc
+ * 08: ring head (index of first non-posted descriptor)
+ * 10: ring tail (set by NHI)
+ * 12: descriptor count
+ * 14: max frame sizes (anything larger than 0x100 has no effect)
+ */
+#define REG_RX_RING_BASE 0x08000
+
+/*
+ * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: enum ring_flags
+ * 04: isoch time stamp ?? (write 0)
+ * ..: unknown
+ */
+#define REG_TX_OPTIONS_BASE 0x19800
+
+/*
+ * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT)
+ * 00: enum ring_flags
+ *     If RING_FLAG_E2E_FLOW_CONTROL is set then bits 12-22 (see
+ *     REG_RX_OPTIONS_E2E_HOP_MASK below) must be set to the
+ *     corresponding TX hop id.
+ * 04: EOF/SOF mask (ignored for RING_FLAG_RAW rings)
+ * ..: unknown
+ */
+#define REG_RX_OPTIONS_BASE 0x29800
+#define REG_RX_OPTIONS_E2E_HOP_MASK GENMASK(22, 12)
+#define REG_RX_OPTIONS_E2E_HOP_SHIFT 12
+
+/*
+ * three bitfields: tx, rx, rx overflow
+ * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
+ * cleared on read. New interrupts are fired only after ALL registers have been
+ * read (even those containing only disabled rings).
+ */
+#define REG_RING_NOTIFY_BASE 0x37800
+#define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
+
+/*
+ * two bitfields: rx, tx
+ * Both bitfields contain one bit for every hop (REG_HOP_COUNT). To
+ * enable/disable interrupts set/clear the corresponding bits.
+ */
+#define REG_RING_INTERRUPT_BASE 0x38200
+#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
+
+#define REG_INT_THROTTLING_RATE 0x38c00
+
+/* Interrupt Vector Allocation */
+#define REG_INT_VEC_ALLOC_BASE 0x38c40
+#define REG_INT_VEC_ALLOC_BITS 4
+#define REG_INT_VEC_ALLOC_MASK GENMASK(3, 0)
+#define REG_INT_VEC_ALLOC_REGS (32 / REG_INT_VEC_ALLOC_BITS)
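Each ring gets a 4-bit vector field, so eight rings share one 32-bit
register starting at REG_INT_VEC_ALLOC_BASE. A hedged sketch of the index
arithmetic this layout implies (the actual programming is done in nhi.c;
the helper name and the direct use of nhi->iobase are illustrative):

    /* Illustrative: locate and program a ring's 4-bit vector field. */
    static void example_set_int_vec(struct tb_nhi *nhi, unsigned int ring,
                                    u32 vector)
    {
            u32 reg = REG_INT_VEC_ALLOC_BASE +
                      (ring / REG_INT_VEC_ALLOC_REGS) * 4;
            u32 shift = (ring % REG_INT_VEC_ALLOC_REGS) *
                        REG_INT_VEC_ALLOC_BITS;
            u32 val;

            val = ioread32(nhi->iobase + reg);
            val &= ~(REG_INT_VEC_ALLOC_MASK << shift);
            val |= (vector & REG_INT_VEC_ALLOC_MASK) << shift;
            iowrite32(val, nhi->iobase + reg);
    }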
+
+/* The last 11 bits contain the number of hops supported by the NHI port. */
+#define REG_HOP_COUNT 0x39640
+
+#define REG_DMA_MISC 0x39864
+#define REG_DMA_MISC_INT_AUTO_CLEAR BIT(2)
+
+#define REG_INMAIL_DATA 0x39900
+
+#define REG_INMAIL_CMD 0x39904
+#define REG_INMAIL_CMD_MASK GENMASK(7, 0)
+#define REG_INMAIL_ERROR BIT(30)
+#define REG_INMAIL_OP_REQUEST BIT(31)
+
+#define REG_OUTMAIL_CMD 0x3990c
+#define REG_OUTMAIL_CMD_OPMODE_SHIFT 8
+#define REG_OUTMAIL_CMD_OPMODE_MASK GENMASK(11, 8)
+
+#define REG_FW_STS 0x39944
+#define REG_FW_STS_NVM_AUTH_DONE BIT(31)
+#define REG_FW_STS_CIO_RESET_REQ BIT(30)
+#define REG_FW_STS_ICM_EN_CPU BIT(2)
+#define REG_FW_STS_ICM_EN_INVERT BIT(1)
+#define REG_FW_STS_ICM_EN BIT(0)
+
+/* ICL NHI VSEC registers */
+
+/* FW ready */
+#define VS_CAP_9 0xc8
+#define VS_CAP_9_FW_READY BIT(31)
+/* UUID */
+#define VS_CAP_10 0xcc
+#define VS_CAP_11 0xd0
+/* LTR */
+#define VS_CAP_15 0xe0
+#define VS_CAP_16 0xe4
+/* TBT2PCIe */
+#define VS_CAP_18 0xec
+#define VS_CAP_18_DONE BIT(0)
+/* PCIe2TBT */
+#define VS_CAP_19 0xf0
+#define VS_CAP_19_VALID BIT(0)
+#define VS_CAP_19_CMD_SHIFT 1
+#define VS_CAP_19_CMD_MASK GENMASK(7, 1)
+/* Force power */
+#define VS_CAP_22 0xfc
+#define VS_CAP_22_FORCE_POWER BIT(1)
+#define VS_CAP_22_DMA_DELAY_MASK GENMASK(31, 24)
+#define VS_CAP_22_DMA_DELAY_SHIFT 24
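VS_CAP_22 carries the force-power control that the ICL suspend/resume hooks
in nhi_ops.c toggle through icl_nhi_force_power(). A minimal sketch of what
that amounts to, assuming a plain read-modify-write plus a firmware-ready
poll; the in-tree helper is more involved (for example it also programs the
DMA delay field above), so treat this as an illustration only:

    static int example_force_power(struct tb_nhi *nhi, bool power)
    {
            unsigned int retries = 100;     /* arbitrary for the example */
            u32 vs_cap, val;

            pci_read_config_dword(nhi->pdev, VS_CAP_22, &vs_cap);
            if (power)
                    vs_cap |= VS_CAP_22_FORCE_POWER;
            else
                    vs_cap &= ~VS_CAP_22_FORCE_POWER;
            pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap);

            if (!power)
                    return 0;

            /* Wait until the firmware reports it is up and running */
            do {
                    pci_read_config_dword(nhi->pdev, VS_CAP_9, &val);
                    if (val & VS_CAP_9_FW_READY)
                            return 0;
                    usleep_range(3000, 3100);
            } while (--retries);

            return -ETIMEDOUT;
    }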
+
+/**
+ * enum icl_lc_mailbox_cmd - ICL specific LC mailbox commands
+ * @ICL_LC_GO2SX: Ask LC to enter Sx with wake enabled
+ * @ICL_LC_GO2SX_NO_WAKE: Ask LC to enter Sx without wake
+ * @ICL_LC_PREPARE_FOR_RESET: Prepare LC for reset
+ */
+enum icl_lc_mailbox_cmd {
+ ICL_LC_GO2SX = 0x02,
+ ICL_LC_GO2SX_NO_WAKE = 0x03,
+ ICL_LC_PREPARE_FOR_RESET = 0x21,
+};
+
+#endif
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
new file mode 100644
index 000000000..29de6d95c
--- /dev/null
+++ b/drivers/thunderbolt/nvm.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVM helpers
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "tb.h"
+
+static DEFINE_IDA(nvm_ida);
+
+/**
+ * tb_nvm_alloc() - Allocate new NVM structure
+ * @dev: Device owning the NVM
+ *
+ * Allocates new NVM structure with unique @id and returns it. In case
+ * of error returns ERR_PTR().
+ */
+struct tb_nvm *tb_nvm_alloc(struct device *dev)
+{
+ struct tb_nvm *nvm;
+ int ret;
+
+ nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+ if (!nvm)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(nvm);
+ return ERR_PTR(ret);
+ }
+
+ nvm->id = ret;
+ nvm->dev = dev;
+
+ return nvm;
+}
+
+/**
+ * tb_nvm_add_active() - Adds active NVMem device to NVM
+ * @nvm: NVM structure
+ * @size: Size of the active NVM in bytes
+ * @reg_read: Pointer to the function to read the NVM (passed directly to the
+ * NVMem device)
+ *
+ * Registers new active NVMem device for @nvm. The @reg_read is called
+ * directly from NVMem so it must handle possible concurrent access if
+ * needed. The first parameter passed to @reg_read is the @nvm structure.
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read)
+{
+ struct nvmem_config config;
+ struct nvmem_device *nvmem;
+
+ memset(&config, 0, sizeof(config));
+
+ config.name = "nvm_active";
+ config.reg_read = reg_read;
+ config.read_only = true;
+ config.id = nvm->id;
+ config.stride = 4;
+ config.word_size = 4;
+ config.size = size;
+ config.dev = nvm->dev;
+ config.owner = THIS_MODULE;
+ config.priv = nvm;
+
+ nvmem = nvmem_register(&config);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ nvm->active = nvmem;
+ return 0;
+}
+
+/**
+ * tb_nvm_write_buf() - Write data to @nvm buffer
+ * @nvm: NVM structure
+ * @offset: Offset where to write the data
+ * @val: Data buffer to write
+ * @bytes: Number of bytes to write
+ *
+ * Helper function to cache the new NVM image before it is actually
+ * written to the flash. Copies @bytes from @val to @nvm->buf starting
+ * from @offset.
+ */
+int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
+ size_t bytes)
+{
+ if (!nvm->buf) {
+ nvm->buf = vmalloc(NVM_MAX_SIZE);
+ if (!nvm->buf)
+ return -ENOMEM;
+ }
+
+ nvm->flushed = false;
+ nvm->buf_data_size = offset + bytes;
+ memcpy(nvm->buf + offset, val, bytes);
+ return 0;
+}
+
+/**
+ * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
+ * @nvm: NVM structure
+ * @size: Size of the non-active NVM in bytes
+ * @reg_write: Pointer to the function to write the NVM (passed directly
+ * to the NVMem device)
+ *
+ * Registers new non-active NVMem device for @nvm. The @reg_write is called
+ * directly from NVMem so it must handle possible concurrent access if
+ * needed. The first parameter passed to @reg_write is the @nvm structure.
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
+ nvmem_reg_write_t reg_write)
+{
+ struct nvmem_config config;
+ struct nvmem_device *nvmem;
+
+ memset(&config, 0, sizeof(config));
+
+ config.name = "nvm_non_active";
+ config.reg_write = reg_write;
+ config.root_only = true;
+ config.id = nvm->id;
+ config.stride = 4;
+ config.word_size = 4;
+ config.size = size;
+ config.dev = nvm->dev;
+ config.owner = THIS_MODULE;
+ config.priv = nvm;
+
+ nvmem = nvmem_register(&config);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
+
+ nvm->non_active = nvmem;
+ return 0;
+}
+
+/**
+ * tb_nvm_free() - Release NVM and its resources
+ * @nvm: NVM structure to release
+ *
+ * Releases NVM and the NVMem devices if they were registered.
+ */
+void tb_nvm_free(struct tb_nvm *nvm)
+{
+ if (nvm) {
+ if (nvm->non_active)
+ nvmem_unregister(nvm->non_active);
+ if (nvm->active)
+ nvmem_unregister(nvm->active);
+ vfree(nvm->buf);
+ ida_simple_remove(&nvm_ida, nvm->id);
+ }
+ kfree(nvm);
+}
+
+void tb_nvm_exit(void)
+{
+ ida_destroy(&nvm_ida);
+}
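A hedged sketch of how a caller elsewhere in the driver (the switch and
retimer code) wires these helpers together; the callback body, helper names
and the way the result is stored are placeholders:

    /* Hypothetical read callback; NVMem passes the struct tb_nvm as priv. */
    static int example_nvm_read(void *priv, unsigned int offset, void *val,
                                size_t bytes)
    {
            struct tb_nvm *nvm = priv;

            /* ... read @bytes at @offset from the hardware behind nvm->dev ... */
            return nvm ? 0 : -ENODEV;
    }

    static struct tb_nvm *example_register_nvm(struct device *dev, size_t size)
    {
            struct tb_nvm *nvm;
            int ret;

            nvm = tb_nvm_alloc(dev);
            if (IS_ERR(nvm))
                    return nvm;

            ret = tb_nvm_add_active(nvm, size, example_nvm_read);
            if (ret) {
                    tb_nvm_free(nvm);
                    return ERR_PTR(ret);
            }

            /* A writable half would be added with tb_nvm_add_non_active() */
            return nvm;
    }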
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
new file mode 100644
index 000000000..03e7b714d
--- /dev/null
+++ b/drivers/thunderbolt/path.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt driver - path/tunnel functionality
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+
+#include "tb.h"
+
+static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop *regs)
+{
+ const struct tb_port *port = hop->in_port;
+
+ tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n",
+ hop->in_hop_index, regs->out_port, regs->next_hop);
+ tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
+ regs->weight, regs->priority,
+ regs->initial_credits, regs->drop_packages);
+ tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
+ regs->counter_enable, regs->counter);
+ tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
+ regs->ingress_fc, regs->egress_fc,
+ regs->ingress_shared_buffer, regs->egress_shared_buffer);
+ tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
+ regs->unknown1, regs->unknown2, regs->unknown3);
+}
+
+static struct tb_port *tb_path_find_dst_port(struct tb_port *src, int src_hopid,
+ int dst_hopid)
+{
+ struct tb_port *port, *out_port = NULL;
+ struct tb_regs_hop hop;
+ struct tb_switch *sw;
+ int i, ret, hopid;
+
+ hopid = src_hopid;
+ port = src;
+
+ for (i = 0; port && i < TB_PATH_MAX_HOPS; i++) {
+ sw = port->sw;
+
+ ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hopid, 2);
+ if (ret) {
+ tb_port_warn(port, "failed to read path at %d\n", hopid);
+ return NULL;
+ }
+
+ if (!hop.enable)
+ return NULL;
+
+ out_port = &sw->ports[hop.out_port];
+ hopid = hop.next_hop;
+ port = out_port->remote;
+ }
+
+ return out_port && hopid == dst_hopid ? out_port : NULL;
+}
+
+static int tb_path_find_src_hopid(struct tb_port *src,
+ const struct tb_port *dst, int dst_hopid)
+{
+ struct tb_port *out;
+ int i;
+
+ for (i = TB_PATH_MIN_HOPID; i <= src->config.max_in_hop_id; i++) {
+ out = tb_path_find_dst_port(src, i, dst_hopid);
+ if (out == dst)
+ return i;
+ }
+
+ return 0;
+}
+
+/**
+ * tb_path_discover() - Discover a path
+ * @src: First input port of a path
+ * @src_hopid: Starting HopID of a path (%-1 if don't care)
+ * @dst: Expected destination port of the path (%NULL if don't care)
+ * @dst_hopid: HopID to the @dst (%-1 if don't care)
+ * @last: Last port is filled here if not %NULL
+ * @name: Name of the path
+ *
+ * Follows a path starting from @src and @src_hopid to the last output
+ * port of the path. Allocates HopIDs for the visited ports. Call
+ * tb_path_free() to release the path and allocated HopIDs when the path
+ * is not needed anymore.
+ *
+ * Note that the function also discovers incomplete paths, so the caller
+ * should check
+ * that the @dst port is the expected one. If it is not, the path can be
+ * cleaned up by calling tb_path_deactivate() before tb_path_free().
+ *
+ * Return: Discovered path on success, %NULL in case of failure
+ */
+struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
+ struct tb_port *dst, int dst_hopid,
+ struct tb_port **last, const char *name)
+{
+ struct tb_port *out_port;
+ struct tb_regs_hop hop;
+ struct tb_path *path;
+ struct tb_switch *sw;
+ struct tb_port *p;
+ size_t num_hops;
+ int ret, i, h;
+
+ if (src_hopid < 0 && dst) {
+ /*
+ * For incomplete paths the intermediate HopID can be
+ * different from the one used by the protocol adapter
+ * so in that case find a path that ends on @dst with
+ * matching @dst_hopid. That should give us the correct
+ * HopID for the @src.
+ */
+ src_hopid = tb_path_find_src_hopid(src, dst, dst_hopid);
+ if (!src_hopid)
+ return NULL;
+ }
+
+ p = src;
+ h = src_hopid;
+ num_hops = 0;
+
+ for (i = 0; p && i < TB_PATH_MAX_HOPS; i++) {
+ sw = p->sw;
+
+ ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
+ if (ret) {
+ tb_port_warn(p, "failed to read path at %d\n", h);
+ return NULL;
+ }
+
+ /* If the hop is not enabled we got an incomplete path */
+ if (!hop.enable)
+ break;
+
+ out_port = &sw->ports[hop.out_port];
+ if (last)
+ *last = out_port;
+
+ h = hop.next_hop;
+ p = out_port->remote;
+ num_hops++;
+ }
+
+ path = kzalloc(sizeof(*path), GFP_KERNEL);
+ if (!path)
+ return NULL;
+
+ path->name = name;
+ path->tb = src->sw->tb;
+ path->path_length = num_hops;
+ path->activated = true;
+
+ path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
+ if (!path->hops) {
+ kfree(path);
+ return NULL;
+ }
+
+ p = src;
+ h = src_hopid;
+
+ for (i = 0; i < num_hops; i++) {
+ int next_hop;
+
+ sw = p->sw;
+
+ ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
+ if (ret) {
+ tb_port_warn(p, "failed to read path at %d\n", h);
+ goto err;
+ }
+
+ if (tb_port_alloc_in_hopid(p, h, h) < 0)
+ goto err;
+
+ out_port = &sw->ports[hop.out_port];
+ next_hop = hop.next_hop;
+
+ if (tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
+ tb_port_release_in_hopid(p, h);
+ goto err;
+ }
+
+ path->hops[i].in_port = p;
+ path->hops[i].in_hop_index = h;
+ path->hops[i].in_counter_index = -1;
+ path->hops[i].out_port = out_port;
+ path->hops[i].next_hop_index = next_hop;
+
+ h = next_hop;
+ p = out_port->remote;
+ }
+
+ return path;
+
+err:
+ tb_port_warn(src, "failed to discover path starting at HopID %d\n",
+ src_hopid);
+ tb_path_free(path);
+ return NULL;
+}
+
+/**
+ * tb_path_alloc() - allocate a thunderbolt path between two ports
+ * @tb: Domain pointer
+ * @src: Source port of the path
+ * @src_hopid: HopID used for the first ingress port in the path
+ * @dst: Destination port of the path
+ * @dst_hopid: HopID used for the last egress port in the path
+ * @link_nr: Preferred link if there are dual links on the path
+ * @name: Name of the path
+ *
+ * Creates a path between two ports starting with the given @src_hopid.
+ * Reserves HopIDs for each port (they can be different from @src_hopid
+ * depending on how many HopIDs each port has already reserved). If there
+ * are dual
+ * links on the path, prioritizes using @link_nr but takes into account
+ * that the lanes may be bonded.
+ *
+ * Return: Returns a tb_path on success or NULL on failure.
+ */
+struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
+ struct tb_port *dst, int dst_hopid, int link_nr,
+ const char *name)
+{
+ struct tb_port *in_port, *out_port, *first_port, *last_port;
+ int in_hopid, out_hopid;
+ struct tb_path *path;
+ size_t num_hops;
+ int i, ret;
+
+ path = kzalloc(sizeof(*path), GFP_KERNEL);
+ if (!path)
+ return NULL;
+
+ first_port = last_port = NULL;
+ i = 0;
+ tb_for_each_port_on_path(src, dst, in_port) {
+ if (!first_port)
+ first_port = in_port;
+ last_port = in_port;
+ i++;
+ }
+
+ /* Check that src and dst are reachable */
+ if (first_port != src || last_port != dst) {
+ kfree(path);
+ return NULL;
+ }
+
+ /* Each hop takes two ports */
+ num_hops = i / 2;
+
+ path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
+ if (!path->hops) {
+ kfree(path);
+ return NULL;
+ }
+
+ in_hopid = src_hopid;
+ out_port = NULL;
+
+ for (i = 0; i < num_hops; i++) {
+ in_port = tb_next_port_on_path(src, dst, out_port);
+ if (!in_port)
+ goto err;
+
+ /* When lanes are bonded primary link must be used */
+ if (!in_port->bonded && in_port->dual_link_port &&
+ in_port->link_nr != link_nr)
+ in_port = in_port->dual_link_port;
+
+ ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
+ if (ret < 0)
+ goto err;
+ in_hopid = ret;
+
+ out_port = tb_next_port_on_path(src, dst, in_port);
+ if (!out_port)
+ goto err;
+
+ /*
+ * Pick up right port when going from non-bonded to
+ * bonded or from bonded to non-bonded.
+ */
+ if (out_port->dual_link_port) {
+ if (!in_port->bonded && out_port->bonded &&
+ out_port->link_nr) {
+ /*
+ * Use primary link when going from
+ * non-bonded to bonded.
+ */
+ out_port = out_port->dual_link_port;
+ } else if (!out_port->bonded &&
+ out_port->link_nr != link_nr) {
+ /*
+ * If out port is not bonded follow
+ * link_nr.
+ */
+ out_port = out_port->dual_link_port;
+ }
+ }
+
+ if (i == num_hops - 1)
+ ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
+ dst_hopid);
+ else
+ ret = tb_port_alloc_out_hopid(out_port, -1, -1);
+
+ if (ret < 0)
+ goto err;
+ out_hopid = ret;
+
+ path->hops[i].in_hop_index = in_hopid;
+ path->hops[i].in_port = in_port;
+ path->hops[i].in_counter_index = -1;
+ path->hops[i].out_port = out_port;
+ path->hops[i].next_hop_index = out_hopid;
+
+ in_hopid = out_hopid;
+ }
+
+ path->tb = tb;
+ path->path_length = num_hops;
+ path->name = name;
+
+ return path;
+
+err:
+ tb_path_free(path);
+ return NULL;
+}
+
+/**
+ * tb_path_free() - free a path
+ * @path: Path to free
+ *
+ * Frees a path. The path does not need to be deactivated.
+ */
+void tb_path_free(struct tb_path *path)
+{
+ int i;
+
+ for (i = 0; i < path->path_length; i++) {
+ const struct tb_path_hop *hop = &path->hops[i];
+
+ if (hop->in_port)
+ tb_port_release_in_hopid(hop->in_port,
+ hop->in_hop_index);
+ if (hop->out_port)
+ tb_port_release_out_hopid(hop->out_port,
+ hop->next_hop_index);
+ }
+
+ kfree(path->hops);
+ kfree(path);
+}
+
+static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
+{
+ int i, res;
+ for (i = first_hop; i < path->path_length; i++) {
+ res = tb_port_add_nfc_credits(path->hops[i].in_port,
+ -path->nfc_credits);
+ if (res)
+ tb_port_warn(path->hops[i].in_port,
+ "nfc credits deallocation failed for hop %d\n",
+ i);
+ }
+}
+
+static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
+ bool clear_fc)
+{
+ struct tb_regs_hop hop;
+ ktime_t timeout;
+ int ret;
+
+ /* Disable the path */
+ ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
+ if (ret)
+ return ret;
+
+ /* Already disabled */
+ if (!hop.enable)
+ return 0;
+
+ hop.enable = 0;
+
+ ret = tb_port_write(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
+ if (ret)
+ return ret;
+
+ /* Wait until it is drained */
+ timeout = ktime_add_ms(ktime_get(), 500);
+ do {
+ ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
+ if (ret)
+ return ret;
+
+ if (!hop.pending) {
+ if (clear_fc) {
+ /* Clear flow control */
+ hop.ingress_fc = 0;
+ hop.egress_fc = 0;
+ hop.ingress_shared_buffer = 0;
+ hop.egress_shared_buffer = 0;
+
+ return tb_port_write(port, &hop, TB_CFG_HOPS,
+ 2 * hop_index, 2);
+ }
+
+ return 0;
+ }
+
+ usleep_range(10, 20);
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
+
+static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
+{
+ int i, res;
+
+ for (i = first_hop; i < path->path_length; i++) {
+ res = __tb_path_deactivate_hop(path->hops[i].in_port,
+ path->hops[i].in_hop_index,
+ path->clear_fc);
+ if (res && res != -ENODEV)
+ tb_port_warn(path->hops[i].in_port,
+ "hop deactivation failed for hop %d, index %d\n",
+ i, path->hops[i].in_hop_index);
+ }
+}
+
+void tb_path_deactivate(struct tb_path *path)
+{
+ if (!path->activated) {
+ tb_WARN(path->tb, "trying to deactivate an inactive path\n");
+ return;
+ }
+ tb_dbg(path->tb,
+ "deactivating %s path from %llx:%x to %llx:%x\n",
+ path->name, tb_route(path->hops[0].in_port->sw),
+ path->hops[0].in_port->port,
+ tb_route(path->hops[path->path_length - 1].out_port->sw),
+ path->hops[path->path_length - 1].out_port->port);
+ __tb_path_deactivate_hops(path, 0);
+ __tb_path_deallocate_nfc(path, 0);
+ path->activated = false;
+}
+
+/**
+ * tb_path_activate() - activate a path
+ * @path: Path to activate
+ *
+ * Activate a path starting with the last hop and iterating backwards. The
+ * caller must fill path->hops before calling tb_path_activate().
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_path_activate(struct tb_path *path)
+{
+ int i, res;
+ enum tb_path_port out_mask, in_mask;
+ if (path->activated) {
+ tb_WARN(path->tb, "trying to activate already activated path\n");
+ return -EINVAL;
+ }
+
+ tb_dbg(path->tb,
+ "activating %s path from %llx:%x to %llx:%x\n",
+ path->name, tb_route(path->hops[0].in_port->sw),
+ path->hops[0].in_port->port,
+ tb_route(path->hops[path->path_length - 1].out_port->sw),
+ path->hops[path->path_length - 1].out_port->port);
+
+ /* Clear counters. */
+ for (i = path->path_length - 1; i >= 0; i--) {
+ if (path->hops[i].in_counter_index == -1)
+ continue;
+ res = tb_port_clear_counter(path->hops[i].in_port,
+ path->hops[i].in_counter_index);
+ if (res)
+ goto err;
+ }
+
+ /* Add non flow controlled credits. */
+ for (i = path->path_length - 1; i >= 0; i--) {
+ res = tb_port_add_nfc_credits(path->hops[i].in_port,
+ path->nfc_credits);
+ if (res) {
+ __tb_path_deallocate_nfc(path, i);
+ goto err;
+ }
+ }
+
+ /* Activate hops. */
+ for (i = path->path_length - 1; i >= 0; i--) {
+ struct tb_regs_hop hop = { 0 };
+
+ /* If it is left active deactivate it first */
+ __tb_path_deactivate_hop(path->hops[i].in_port,
+ path->hops[i].in_hop_index, path->clear_fc);
+
+ /* dword 0 */
+ hop.next_hop = path->hops[i].next_hop_index;
+ hop.out_port = path->hops[i].out_port->port;
+ hop.initial_credits = path->hops[i].initial_credits;
+ hop.unknown1 = 0;
+ hop.enable = 1;
+
+ /* dword 1 */
+ out_mask = (i == path->path_length - 1) ?
+ TB_PATH_DESTINATION : TB_PATH_INTERNAL;
+ in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
+ hop.weight = path->weight;
+ hop.unknown2 = 0;
+ hop.priority = path->priority;
+ hop.drop_packages = path->drop_packages;
+ hop.counter = path->hops[i].in_counter_index;
+ hop.counter_enable = path->hops[i].in_counter_index != -1;
+ hop.ingress_fc = path->ingress_fc_enable & in_mask;
+ hop.egress_fc = path->egress_fc_enable & out_mask;
+ hop.ingress_shared_buffer = path->ingress_shared_buffer
+ & in_mask;
+ hop.egress_shared_buffer = path->egress_shared_buffer
+ & out_mask;
+ hop.unknown3 = 0;
+
+ tb_port_dbg(path->hops[i].in_port, "Writing hop %d\n", i);
+ tb_dump_hop(&path->hops[i], &hop);
+ res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
+ 2 * path->hops[i].in_hop_index, 2);
+ if (res) {
+ __tb_path_deactivate_hops(path, i);
+ __tb_path_deallocate_nfc(path, 0);
+ goto err;
+ }
+ }
+ path->activated = true;
+ tb_dbg(path->tb, "path activation complete\n");
+ return 0;
+err:
+ tb_WARN(path->tb, "path activation failed\n");
+ return res;
+}
+
+/**
+ * tb_path_is_invalid() - check whether any ports on the path are invalid
+ * @path: Path to check
+ *
+ * Return: Returns true if the path is invalid, false otherwise.
+ */
+bool tb_path_is_invalid(struct tb_path *path)
+{
+ int i = 0;
+ for (i = 0; i < path->path_length; i++) {
+ if (path->hops[i].in_port->sw->is_unplugged)
+ return true;
+ if (path->hops[i].out_port->sw->is_unplugged)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * tb_path_port_on_path() - Does the path go through a certain port
+ * @path: Path to check
+ * @port: Port to check
+ *
+ * Goes over all hops on path and checks if @port is any of them.
+ * Direction does not matter.
+ */
+bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port)
+{
+ int i;
+
+ for (i = 0; i < path->path_length; i++) {
+ if (path->hops[i].in_port == port ||
+ path->hops[i].out_port == port)
+ return true;
+ }
+
+ return false;
+}
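A hedged sketch of the lifecycle these functions imply; the tunnel code
elsewhere in the driver drives paths in roughly this way, but the HopIDs
and per-path parameters below are placeholders and intentionally
incomplete:

    static int example_path_lifecycle(struct tb *tb, struct tb_port *src,
                                      struct tb_port *dst)
    {
            struct tb_path *path;
            int ret;

            path = tb_path_alloc(tb, src, 8, dst, 8, 0, "Example");
            if (!path)
                    return -ENOMEM;

            /* Callers fill in per-path parameters before activation */
            path->ingress_fc_enable = TB_PATH_ALL;
            path->egress_fc_enable = TB_PATH_ALL;
            path->priority = 1;
            path->weight = 1;

            ret = tb_path_activate(path);
            if (ret) {
                    tb_path_free(path);
                    return ret;
            }

            /* ... path carries traffic ... */

            tb_path_deactivate(path);
            tb_path_free(path);
            return 0;
    }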
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
new file mode 100644
index 000000000..d5b0cdb8f
--- /dev/null
+++ b/drivers/thunderbolt/property.c
@@ -0,0 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt XDomain property support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uuid.h>
+#include <linux/thunderbolt.h>
+
+struct tb_property_entry {
+ u32 key_hi;
+ u32 key_lo;
+ u16 length;
+ u8 reserved;
+ u8 type;
+ u32 value;
+};
+
+struct tb_property_rootdir_entry {
+ u32 magic;
+ u32 length;
+ struct tb_property_entry entries[];
+};
+
+struct tb_property_dir_entry {
+ u32 uuid[4];
+ struct tb_property_entry entries[];
+};
+
+#define TB_PROPERTY_ROOTDIR_MAGIC 0x55584401
+
+static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
+ size_t block_len, unsigned int dir_offset, size_t dir_len,
+ bool is_root);
+
+static inline void parse_dwdata(void *dst, const void *src, size_t dwords)
+{
+ be32_to_cpu_array(dst, src, dwords);
+}
+
+static inline void format_dwdata(void *dst, const void *src, size_t dwords)
+{
+ cpu_to_be32_array(dst, src, dwords);
+}
+
+static bool tb_property_entry_valid(const struct tb_property_entry *entry,
+ size_t block_len)
+{
+ switch (entry->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ case TB_PROPERTY_TYPE_DATA:
+ case TB_PROPERTY_TYPE_TEXT:
+ if (entry->length > block_len)
+ return false;
+ if (entry->value + entry->length > block_len)
+ return false;
+ break;
+
+ case TB_PROPERTY_TYPE_VALUE:
+ if (entry->length != 1)
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+static bool tb_property_key_valid(const char *key)
+{
+ return key && strlen(key) <= TB_PROPERTY_KEY_SIZE;
+}
+
+static struct tb_property *
+tb_property_alloc(const char *key, enum tb_property_type type)
+{
+ struct tb_property *property;
+
+ property = kzalloc(sizeof(*property), GFP_KERNEL);
+ if (!property)
+ return NULL;
+
+ strcpy(property->key, key);
+ property->type = type;
+ INIT_LIST_HEAD(&property->list);
+
+ return property;
+}
+
+static struct tb_property *tb_property_parse(const u32 *block, size_t block_len,
+ const struct tb_property_entry *entry)
+{
+ char key[TB_PROPERTY_KEY_SIZE + 1];
+ struct tb_property *property;
+ struct tb_property_dir *dir;
+
+ if (!tb_property_entry_valid(entry, block_len))
+ return NULL;
+
+ parse_dwdata(key, entry, 2);
+ key[TB_PROPERTY_KEY_SIZE] = '\0';
+
+ property = tb_property_alloc(key, entry->type);
+ if (!property)
+ return NULL;
+
+ property->length = entry->length;
+
+ switch (property->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ dir = __tb_property_parse_dir(block, block_len, entry->value,
+ entry->length, false);
+ if (!dir) {
+ kfree(property);
+ return NULL;
+ }
+ property->value.dir = dir;
+ break;
+
+ case TB_PROPERTY_TYPE_DATA:
+ property->value.data = kcalloc(property->length, sizeof(u32),
+ GFP_KERNEL);
+ if (!property->value.data) {
+ kfree(property);
+ return NULL;
+ }
+ parse_dwdata(property->value.data, block + entry->value,
+ entry->length);
+ break;
+
+ case TB_PROPERTY_TYPE_TEXT:
+ property->value.text = kcalloc(property->length, sizeof(u32),
+ GFP_KERNEL);
+ if (!property->value.text) {
+ kfree(property);
+ return NULL;
+ }
+ parse_dwdata(property->value.text, block + entry->value,
+ entry->length);
+ /* Force null termination */
+ property->value.text[property->length * 4 - 1] = '\0';
+ break;
+
+ case TB_PROPERTY_TYPE_VALUE:
+ property->value.immediate = entry->value;
+ break;
+
+ default:
+ property->type = TB_PROPERTY_TYPE_UNKNOWN;
+ break;
+ }
+
+ return property;
+}
+
+static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
+ size_t block_len, unsigned int dir_offset, size_t dir_len, bool is_root)
+{
+ const struct tb_property_entry *entries;
+ size_t i, content_len, nentries;
+ unsigned int content_offset;
+ struct tb_property_dir *dir;
+
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return NULL;
+
+ if (is_root) {
+ content_offset = dir_offset + 2;
+ content_len = dir_len;
+ } else {
+ dir->uuid = kmemdup(&block[dir_offset], sizeof(*dir->uuid),
+ GFP_KERNEL);
+ if (!dir->uuid) {
+ tb_property_free_dir(dir);
+ return NULL;
+ }
+ content_offset = dir_offset + 4;
+ content_len = dir_len - 4; /* Length includes UUID */
+ }
+
+ entries = (const struct tb_property_entry *)&block[content_offset];
+ nentries = content_len / (sizeof(*entries) / 4);
+
+ INIT_LIST_HEAD(&dir->properties);
+
+ for (i = 0; i < nentries; i++) {
+ struct tb_property *property;
+
+ property = tb_property_parse(block, block_len, &entries[i]);
+ if (!property) {
+ tb_property_free_dir(dir);
+ return NULL;
+ }
+
+ list_add_tail(&property->list, &dir->properties);
+ }
+
+ return dir;
+}
+
+/**
+ * tb_property_parse_dir() - Parses properties from given property block
+ * @block: Property block to parse
+ * @block_len: Number of dword elements in the property block
+ *
+ * This function parses the XDomain properties data block into format that
+ * can be traversed using the helper functions provided by this module.
+ * Upon success returns the parsed directory. In case of error returns
+ * %NULL. The resulting &struct tb_property_dir needs to be released by
+ * calling tb_property_free_dir() when not needed anymore.
+ *
+ * The @block is expected to be a root directory.
+ */
+struct tb_property_dir *tb_property_parse_dir(const u32 *block,
+ size_t block_len)
+{
+ const struct tb_property_rootdir_entry *rootdir =
+ (const struct tb_property_rootdir_entry *)block;
+
+ if (rootdir->magic != TB_PROPERTY_ROOTDIR_MAGIC)
+ return NULL;
+ if (rootdir->length > block_len)
+ return NULL;
+
+ return __tb_property_parse_dir(block, block_len, 0, rootdir->length,
+ true);
+}
+
+/**
+ * tb_property_create_dir() - Creates new property directory
+ * @uuid: UUID used to identify the particular directory
+ *
+ * Creates new, empty property directory. If @uuid is %NULL then the
+ * directory is assumed to be root directory.
+ */
+struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid)
+{
+ struct tb_property_dir *dir;
+
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+ if (!dir)
+ return NULL;
+
+ INIT_LIST_HEAD(&dir->properties);
+ if (uuid) {
+ dir->uuid = kmemdup(uuid, sizeof(*dir->uuid), GFP_KERNEL);
+ if (!dir->uuid) {
+ kfree(dir);
+ return NULL;
+ }
+ }
+
+ return dir;
+}
+EXPORT_SYMBOL_GPL(tb_property_create_dir);
+
+static void tb_property_free(struct tb_property *property)
+{
+ switch (property->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ tb_property_free_dir(property->value.dir);
+ break;
+
+ case TB_PROPERTY_TYPE_DATA:
+ kfree(property->value.data);
+ break;
+
+ case TB_PROPERTY_TYPE_TEXT:
+ kfree(property->value.text);
+ break;
+
+ default:
+ break;
+ }
+
+ kfree(property);
+}
+
+/**
+ * tb_property_free_dir() - Release memory allocated for property directory
+ * @dir: Directory to release
+ *
+ * This will release all the memory the directory occupies including all
+ * descendants. It is OK to pass %NULL as @dir, in which case the
+ * function does nothing.
+ */
+void tb_property_free_dir(struct tb_property_dir *dir)
+{
+ struct tb_property *property, *tmp;
+
+ if (!dir)
+ return;
+
+ list_for_each_entry_safe(property, tmp, &dir->properties, list) {
+ list_del(&property->list);
+ tb_property_free(property);
+ }
+ kfree(dir->uuid);
+ kfree(dir);
+}
+EXPORT_SYMBOL_GPL(tb_property_free_dir);
+
+static size_t tb_property_dir_length(const struct tb_property_dir *dir,
+ bool recurse, size_t *data_len)
+{
+ const struct tb_property *property;
+ size_t len = 0;
+
+ if (dir->uuid)
+ len += sizeof(*dir->uuid) / 4;
+ else
+ len += sizeof(struct tb_property_rootdir_entry) / 4;
+
+ list_for_each_entry(property, &dir->properties, list) {
+ len += sizeof(struct tb_property_entry) / 4;
+
+ switch (property->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ if (recurse) {
+ len += tb_property_dir_length(
+ property->value.dir, recurse, data_len);
+ }
+ /* Reserve dword padding after each directory */
+ if (data_len)
+ *data_len += 1;
+ break;
+
+ case TB_PROPERTY_TYPE_DATA:
+ case TB_PROPERTY_TYPE_TEXT:
+ if (data_len)
+ *data_len += property->length;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return len;
+}
+
+static ssize_t __tb_property_format_dir(const struct tb_property_dir *dir,
+ u32 *block, unsigned int start_offset, size_t block_len)
+{
+ unsigned int data_offset, dir_end;
+ const struct tb_property *property;
+ struct tb_property_entry *entry;
+ size_t dir_len, data_len = 0;
+ int ret;
+
+ /*
+ * The structure of property block looks like following. Leaf
+ * data/text is included right after the directory and each
+ * directory follows each other (even nested ones).
+ *
+ * +----------+ <-- start_offset
+ * | header | <-- root directory header
+ * +----------+ ---
+ * | entry 0 | -^--------------------.
+ * +----------+ | |
+ * | entry 1 | -|--------------------|--.
+ * +----------+ | | |
+ * | entry 2 | -|-----------------. | |
+ * +----------+ | | | |
+ * : : | dir_len | | |
+ * . . | | | |
+ * : : | | | |
+ * +----------+ | | | |
+ * | entry n | v | | |
+ * +----------+ <-- data_offset | | |
+ * | data 0 | <------------------|--' |
+ * +----------+ | |
+ * | data 1 | <------------------|-----'
+ * +----------+ |
+ * | 00000000 | padding |
+ * +----------+ <-- dir_end <------'
+ * | UUID | <-- directory UUID (child directory)
+ * +----------+
+ * | entry 0 |
+ * +----------+
+ * | entry 1 |
+ * +----------+
+ * : :
+ * . .
+ * : :
+ * +----------+
+ * | entry n |
+ * +----------+
+ * | data 0 |
+ * +----------+
+ *
+ * We use dir_end to hold pointer to the end of the directory. It
+ * will increase as we add directories and each directory should be
+ * added starting from previous dir_end.
+ */
+ dir_len = tb_property_dir_length(dir, false, &data_len);
+ data_offset = start_offset + dir_len;
+ dir_end = start_offset + data_len + dir_len;
+
+ if (data_offset > dir_end)
+ return -EINVAL;
+ if (dir_end > block_len)
+ return -EINVAL;
+
+ /* Write headers first */
+ if (dir->uuid) {
+ struct tb_property_dir_entry *pe;
+
+ pe = (struct tb_property_dir_entry *)&block[start_offset];
+ memcpy(pe->uuid, dir->uuid, sizeof(pe->uuid));
+ entry = pe->entries;
+ } else {
+ struct tb_property_rootdir_entry *re;
+
+ re = (struct tb_property_rootdir_entry *)&block[start_offset];
+ re->magic = TB_PROPERTY_ROOTDIR_MAGIC;
+ re->length = dir_len - sizeof(*re) / 4;
+ entry = re->entries;
+ }
+
+ list_for_each_entry(property, &dir->properties, list) {
+ const struct tb_property_dir *child;
+
+ format_dwdata(entry, property->key, 2);
+ entry->type = property->type;
+
+ switch (property->type) {
+ case TB_PROPERTY_TYPE_DIRECTORY:
+ child = property->value.dir;
+ ret = __tb_property_format_dir(child, block, dir_end,
+ block_len);
+ if (ret < 0)
+ return ret;
+ entry->length = tb_property_dir_length(child, false,
+ NULL);
+ entry->value = dir_end;
+ dir_end = ret;
+ break;
+
+ case TB_PROPERTY_TYPE_DATA:
+ format_dwdata(&block[data_offset], property->value.data,
+ property->length);
+ entry->length = property->length;
+ entry->value = data_offset;
+ data_offset += entry->length;
+ break;
+
+ case TB_PROPERTY_TYPE_TEXT:
+ format_dwdata(&block[data_offset], property->value.text,
+ property->length);
+ entry->length = property->length;
+ entry->value = data_offset;
+ data_offset += entry->length;
+ break;
+
+ case TB_PROPERTY_TYPE_VALUE:
+ entry->length = property->length;
+ entry->value = property->value.immediate;
+ break;
+
+ default:
+ break;
+ }
+
+ entry++;
+ }
+
+ return dir_end;
+}
+
+/**
+ * tb_property_format_dir() - Formats directory to the packed XDomain format
+ * @dir: Directory to format
+ * @block: Property block where the packed data is placed
+ * @block_len: Length of the property block
+ *
+ * This function formats the directory to the packed format that can then
+ * be sent over the Thunderbolt fabric to the receiving host. Returns %0 in
+ * case of success and negative errno on failure. Passing %NULL in @block
+ * returns the number of dword entries needed for the block.
+ */
+ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
+ size_t block_len)
+{
+ ssize_t ret;
+
+ if (!block) {
+ size_t dir_len, data_len = 0;
+
+ dir_len = tb_property_dir_length(dir, true, &data_len);
+ return dir_len + data_len;
+ }
+
+ ret = __tb_property_format_dir(dir, block, 0, block_len);
+ return ret < 0 ? ret : 0;
+}
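Since passing %NULL as @block reports the required size, formatting is
naturally a two-pass operation. A minimal sketch (error handling trimmed,
helper name made up):

    /* Size, allocate, then pack a property directory into a dword block. */
    static u32 *example_pack_dir(const struct tb_property_dir *dir,
                                 size_t *dwords)
    {
            ssize_t len;
            u32 *block;

            len = tb_property_format_dir(dir, NULL, 0);
            if (len < 0)
                    return NULL;

            block = kcalloc(len, sizeof(u32), GFP_KERNEL);
            if (!block)
                    return NULL;

            if (tb_property_format_dir(dir, block, len)) {
                    kfree(block);
                    return NULL;
            }

            *dwords = len;
            return block;
    }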
+
+/**
+ * tb_property_add_immediate() - Add immediate property to directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @value: Immediate value to store with the property
+ */
+int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
+ u32 value)
+{
+ struct tb_property *property;
+
+ if (!tb_property_key_valid(key))
+ return -EINVAL;
+
+ property = tb_property_alloc(key, TB_PROPERTY_TYPE_VALUE);
+ if (!property)
+ return -ENOMEM;
+
+ property->length = 1;
+ property->value.immediate = value;
+
+ list_add_tail(&property->list, &parent->properties);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_immediate);
+
+/**
+ * tb_property_add_data() - Adds arbitrary data property to directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @buf: Data buffer to add
+ * @buflen: Number of bytes in the data buffer
+ *
+ * Function takes a copy of @buf and adds it to the directory.
+ */
+int tb_property_add_data(struct tb_property_dir *parent, const char *key,
+ const void *buf, size_t buflen)
+{
+ /* Need to pad to dword boundary */
+ size_t size = round_up(buflen, 4);
+ struct tb_property *property;
+
+ if (!tb_property_key_valid(key))
+ return -EINVAL;
+
+ property = tb_property_alloc(key, TB_PROPERTY_TYPE_DATA);
+ if (!property)
+ return -ENOMEM;
+
+ property->length = size / 4;
+ property->value.data = kzalloc(size, GFP_KERNEL);
+ if (!property->value.data) {
+ kfree(property);
+ return -ENOMEM;
+ }
+
+ memcpy(property->value.data, buf, buflen);
+
+ list_add_tail(&property->list, &parent->properties);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_data);
+
+/**
+ * tb_property_add_text() - Adds string property to directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @text: String to add
+ *
+ * Function takes a copy of @text and adds it to the directory.
+ */
+int tb_property_add_text(struct tb_property_dir *parent, const char *key,
+ const char *text)
+{
+ /* Need to pad to dword boundary */
+ size_t size = round_up(strlen(text) + 1, 4);
+ struct tb_property *property;
+
+ if (!tb_property_key_valid(key))
+ return -EINVAL;
+
+ property = tb_property_alloc(key, TB_PROPERTY_TYPE_TEXT);
+ if (!property)
+ return -ENOMEM;
+
+ property->length = size / 4;
+ property->value.text = kzalloc(size, GFP_KERNEL);
+ if (!property->value.text) {
+ kfree(property);
+ return -ENOMEM;
+ }
+
+ strcpy(property->value.text, text);
+
+ list_add_tail(&property->list, &parent->properties);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_text);
+
+/**
+ * tb_property_add_dir() - Adds a directory to the parent directory
+ * @parent: Directory to add the property
+ * @key: Key for the property
+ * @dir: Directory to add
+ */
+int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
+ struct tb_property_dir *dir)
+{
+ struct tb_property *property;
+
+ if (!tb_property_key_valid(key))
+ return -EINVAL;
+
+ property = tb_property_alloc(key, TB_PROPERTY_TYPE_DIRECTORY);
+ if (!property)
+ return -ENOMEM;
+
+ property->value.dir = dir;
+
+ list_add_tail(&property->list, &parent->properties);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_property_add_dir);
+
+/**
+ * tb_property_remove() - Removes property from a parent directory
+ * @property: Property to remove
+ *
+ * Note that the memory for @property is released as well, so it is not
+ * allowed to touch the object after a call to this function.
+ */
+void tb_property_remove(struct tb_property *property)
+{
+ list_del(&property->list);
+ kfree(property);
+}
+EXPORT_SYMBOL_GPL(tb_property_remove);
+
+/**
+ * tb_property_find() - Find a property from a directory
+ * @dir: Directory where the property is searched
+ * @key: Key to look for
+ * @type: Type of the property
+ *
+ * Finds and returns property from the given directory. Does not recurse
+ * into sub-directories. Returns %NULL if the property was not found.
+ */
+struct tb_property *tb_property_find(struct tb_property_dir *dir,
+ const char *key, enum tb_property_type type)
+{
+ struct tb_property *property;
+
+ list_for_each_entry(property, &dir->properties, list) {
+ if (property->type == type && !strcmp(property->key, key))
+ return property;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(tb_property_find);
+
+/**
+ * tb_property_get_next() - Get next property from directory
+ * @dir: Directory holding properties
+ * @prev: Previous property in the directory (%NULL returns the first)
+ */
+struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
+ struct tb_property *prev)
+{
+ if (prev) {
+ if (list_is_last(&prev->list, &dir->properties))
+ return NULL;
+ return list_next_entry(prev, list);
+ }
+ return list_first_entry_or_null(&dir->properties, struct tb_property,
+ list);
+}
+EXPORT_SYMBOL_GPL(tb_property_get_next);
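A hedged sketch of building a small directory with the exported helpers
above; the keys mirror ones commonly used for XDomain properties, but the
values are made up:

    static struct tb_property_dir *example_build_dir(void)
    {
            struct tb_property_dir *dir;

            dir = tb_property_create_dir(NULL);     /* root directory */
            if (!dir)
                    return NULL;

            if (tb_property_add_immediate(dir, "prtcvers", 1) ||
                tb_property_add_text(dir, "deviceid", "example")) {
                    tb_property_free_dir(dir);
                    return NULL;
            }

            return dir;
    }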
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
new file mode 100644
index 000000000..57e2978a3
--- /dev/null
+++ b/drivers/thunderbolt/quirks.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt driver - quirks
+ *
+ * Copyright (c) 2020 Mario Limonciello <mario.limonciello@dell.com>
+ */
+
+#include "tb.h"
+
+static void quirk_force_power_link(struct tb_switch *sw)
+{
+ sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
+}
+
+struct tb_quirk {
+ u16 vendor;
+ u16 device;
+ void (*hook)(struct tb_switch *sw);
+};
+
+static const struct tb_quirk tb_quirks[] = {
+ /* Dell WD19TB supports self-authentication on unplug */
+ { 0x00d4, 0xb070, quirk_force_power_link },
+};
+
+/**
+ * tb_check_quirks() - Check for quirks to apply
+ * @sw: Thunderbolt switch
+ *
+ * Apply any quirks for the Thunderbolt controller.
+ */
+void tb_check_quirks(struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tb_quirks); i++) {
+ const struct tb_quirk *q = &tb_quirks[i];
+
+ if (sw->device == q->device && sw->vendor == q->vendor)
+ q->hook(sw);
+ }
+}
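Extending the table is a matter of adding a hook and one
{ vendor, device, hook } line to tb_quirks[] above; a sketch with
fictitious IDs:

    /* Illustration only: the IDs below are made up. */
    static void quirk_example(struct tb_switch *sw)
    {
            sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
    }

    /* Entry added inside tb_quirks[]: */
    /* { 0x1234, 0x5678, quirk_example }, */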
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
new file mode 100644
index 000000000..c44fad2b9
--- /dev/null
+++ b/drivers/thunderbolt/retimer.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt/USB4 retimer support.
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched/signal.h>
+
+#include "sb_regs.h"
+#include "tb.h"
+
+#define TB_MAX_RETIMER_INDEX 6
+
+static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct tb_nvm *nvm = priv;
+ struct tb_retimer *rt = tb_to_retimer(nvm->dev);
+ int ret;
+
+ pm_runtime_get_sync(&rt->dev);
+
+ if (!mutex_trylock(&rt->tb->lock)) {
+ ret = restart_syscall();
+ goto out;
+ }
+
+ ret = usb4_port_retimer_nvm_read(rt->port, rt->index, offset, val, bytes);
+ mutex_unlock(&rt->tb->lock);
+
+out:
+ pm_runtime_mark_last_busy(&rt->dev);
+ pm_runtime_put_autosuspend(&rt->dev);
+
+ return ret;
+}
+
+static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct tb_nvm *nvm = priv;
+ struct tb_retimer *rt = tb_to_retimer(nvm->dev);
+ int ret = 0;
+
+ if (!mutex_trylock(&rt->tb->lock))
+ return restart_syscall();
+
+ ret = tb_nvm_write_buf(nvm, offset, val, bytes);
+ mutex_unlock(&rt->tb->lock);
+
+ return ret;
+}
+
+static int tb_retimer_nvm_add(struct tb_retimer *rt)
+{
+ struct tb_nvm *nvm;
+ u32 val, nvm_size;
+ int ret;
+
+ nvm = tb_nvm_alloc(&rt->dev);
+ if (IS_ERR(nvm))
+ return PTR_ERR(nvm);
+
+ ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_VERSION, &val,
+ sizeof(val));
+ if (ret)
+ goto err_nvm;
+
+ nvm->major = val >> 16;
+ nvm->minor = val >> 8;
+
+ ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_FLASH_SIZE,
+ &val, sizeof(val));
+ if (ret)
+ goto err_nvm;
+
+ nvm_size = (SZ_1M << (val & 7)) / 8;
+ nvm_size = (nvm_size - SZ_16K) / 2;
+
+ ret = tb_nvm_add_active(nvm, nvm_size, tb_retimer_nvm_read);
+ if (ret)
+ goto err_nvm;
+
+ ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, tb_retimer_nvm_write);
+ if (ret)
+ goto err_nvm;
+
+ rt->nvm = nvm;
+ return 0;
+
+err_nvm:
+ tb_nvm_free(nvm);
+ return ret;
+}
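For reference, the size computation above appears to decode the low three
bits of NVM_FLASH_SIZE as a power-of-two flash size in Mbits and then
halves it, minus a 16 KB header area, to get one NVM partition. A worked
instance of that arithmetic, compile-time checked (the field value 3 is
just an example, not something read from hardware):

    /* (val & 7) == 3: 8 Mbit flash -> 1 MB -> 516096 bytes per half. */
    static_assert((SZ_1M << 3) / 8 == SZ_1M);
    static_assert(((SZ_1M << 3) / 8 - SZ_16K) / 2 == 516096);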
+
+static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
+{
+ unsigned int image_size, hdr_size;
+ const u8 *buf = rt->nvm->buf;
+ u16 ds_size, device;
+
+ image_size = rt->nvm->buf_data_size;
+ if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
+ return -EINVAL;
+
+ /*
+ * FARB pointer must point inside the image and must at least
+ * contain parts of the digital section we will be reading here.
+ */
+ hdr_size = (*(u32 *)buf) & 0xffffff;
+ if (hdr_size + NVM_DEVID + 2 >= image_size)
+ return -EINVAL;
+
+ /* Digital section start should be aligned to 4k page */
+ if (!IS_ALIGNED(hdr_size, SZ_4K))
+ return -EINVAL;
+
+ /*
+ * Read digital section size and check that it also fits inside
+ * the image.
+ */
+ ds_size = *(u16 *)(buf + hdr_size);
+ if (ds_size >= image_size)
+ return -EINVAL;
+
+ /*
+ * Make sure the device ID in the image matches the retimer
+ * hardware.
+ */
+ device = *(u16 *)(buf + hdr_size + NVM_DEVID);
+ if (device != rt->device)
+ return -EINVAL;
+
+ /* Skip headers in the image */
+ buf += hdr_size;
+ image_size -= hdr_size;
+
+ return usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
+ image_size);
+}
+
+static ssize_t device_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_retimer *rt = tb_to_retimer(dev);
+
+ return sprintf(buf, "%#x\n", rt->device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t nvm_authenticate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tb_retimer *rt = tb_to_retimer(dev);
+ int ret;
+
+ if (!mutex_trylock(&rt->tb->lock))
+ return restart_syscall();
+
+ if (!rt->nvm)
+ ret = -EAGAIN;
+ else
+ ret = sprintf(buf, "%#x\n", rt->auth_status);
+
+ mutex_unlock(&rt->tb->lock);
+
+ return ret;
+}
+
+static ssize_t nvm_authenticate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct tb_retimer *rt = tb_to_retimer(dev);
+ bool val;
+ int ret;
+
+ pm_runtime_get_sync(&rt->dev);
+
+ if (!mutex_trylock(&rt->tb->lock)) {
+ ret = restart_syscall();
+ goto exit_rpm;
+ }
+
+ if (!rt->nvm) {
+ ret = -EAGAIN;
+ goto exit_unlock;
+ }
+
+ ret = kstrtobool(buf, &val);
+ if (ret)
+ goto exit_unlock;
+
+ /* Always clear status */
+ rt->auth_status = 0;
+
+ if (val) {
+ if (!rt->nvm->buf) {
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ ret = tb_retimer_nvm_validate_and_write(rt);
+ if (ret)
+ goto exit_unlock;
+
+ ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
+ }
+
+exit_unlock:
+ mutex_unlock(&rt->tb->lock);
+exit_rpm:
+ pm_runtime_mark_last_busy(&rt->dev);
+ pm_runtime_put_autosuspend(&rt->dev);
+
+ if (ret)
+ return ret;
+ return count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate);
+
+static ssize_t nvm_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tb_retimer *rt = tb_to_retimer(dev);
+ int ret;
+
+ if (!mutex_trylock(&rt->tb->lock))
+ return restart_syscall();
+
+ if (!rt->nvm)
+ ret = -EAGAIN;
+ else
+ ret = sprintf(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
+
+ mutex_unlock(&rt->tb->lock);
+ return ret;
+}
+static DEVICE_ATTR_RO(nvm_version);
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_retimer *rt = tb_to_retimer(dev);
+
+ return sprintf(buf, "%#x\n", rt->vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static struct attribute *retimer_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_nvm_authenticate.attr,
+ &dev_attr_nvm_version.attr,
+ &dev_attr_vendor.attr,
+ NULL
+};
+
+static const struct attribute_group retimer_group = {
+ .attrs = retimer_attrs,
+};
+
+static const struct attribute_group *retimer_groups[] = {
+ &retimer_group,
+ NULL
+};
+
+static void tb_retimer_release(struct device *dev)
+{
+ struct tb_retimer *rt = tb_to_retimer(dev);
+
+ kfree(rt);
+}
+
+struct device_type tb_retimer_type = {
+ .name = "thunderbolt_retimer",
+ .groups = retimer_groups,
+ .release = tb_retimer_release,
+};
+
+static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
+{
+ struct tb_retimer *rt;
+ u32 vendor, device;
+ int ret;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
+ sizeof(vendor));
+ if (ret) {
+ if (ret != -ENODEV)
+ tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
+ return ret;
+ }
+
+ ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
+ sizeof(device));
+ if (ret) {
+ if (ret != -ENODEV)
+ tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
+ return ret;
+ }
+
+ if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
+ tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
+ vendor);
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * Check that it supports NVM operations. If not then don't add
+ * the device at all.
+ */
+ ret = usb4_port_retimer_nvm_sector_size(port, index);
+ if (ret < 0)
+ return ret;
+
+ rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+ if (!rt)
+ return -ENOMEM;
+
+ rt->index = index;
+ rt->vendor = vendor;
+ rt->device = device;
+ rt->auth_status = auth_status;
+ rt->port = port;
+ rt->tb = port->sw->tb;
+
+ rt->dev.parent = &port->sw->dev;
+ rt->dev.bus = &tb_bus_type;
+ rt->dev.type = &tb_retimer_type;
+ dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
+ port->port, index);
+
+ ret = device_register(&rt->dev);
+ if (ret) {
+ dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
+ put_device(&rt->dev);
+ return ret;
+ }
+
+ ret = tb_retimer_nvm_add(rt);
+ if (ret) {
+ dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
+ device_unregister(&rt->dev);
+ return ret;
+ }
+
+ dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
+ rt->vendor, rt->device);
+
+ pm_runtime_no_callbacks(&rt->dev);
+ pm_runtime_set_active(&rt->dev);
+ pm_runtime_enable(&rt->dev);
+ pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
+ pm_runtime_mark_last_busy(&rt->dev);
+ pm_runtime_use_autosuspend(&rt->dev);
+
+ return 0;
+}
+
+static void tb_retimer_remove(struct tb_retimer *rt)
+{
+ dev_info(&rt->dev, "retimer disconnected\n");
+ tb_nvm_free(rt->nvm);
+ device_unregister(&rt->dev);
+}
+
+struct tb_retimer_lookup {
+ const struct tb_port *port;
+ u8 index;
+};
+
+static int retimer_match(struct device *dev, void *data)
+{
+ const struct tb_retimer_lookup *lookup = data;
+ struct tb_retimer *rt = tb_to_retimer(dev);
+
+ return rt && rt->port == lookup->port && rt->index == lookup->index;
+}
+
+static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
+{
+ struct tb_retimer_lookup lookup = { .port = port, .index = index };
+ struct device *dev;
+
+ dev = device_find_child(&port->sw->dev, &lookup, retimer_match);
+ if (dev)
+ return tb_to_retimer(dev);
+
+ return NULL;
+}
+
+/**
+ * tb_retimer_scan() - Scan for on-board retimers under port
+ * @port: USB4 port to scan
+ *
+ * Tries to enumerate on-board retimers connected to @port. Found
+ * retimers are registered as children of @port. Does not scan for cable
+ * retimers for now.
+ */
+int tb_retimer_scan(struct tb_port *port)
+{
+ u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
+ int ret, i, last_idx = 0;
+
+ if (!port->cap_usb4)
+ return 0;
+
+ /*
+ * Send broadcast RT to make sure retimer indices facing this
+ * port are set.
+ */
+ ret = usb4_port_enumerate_retimers(port);
+ if (ret)
+ return ret;
+
+ /*
+ * Before doing anything else, read the authentication status.
+ * If the retimer has it set, store it for the new retimer
+ * device instance.
+ */
+ for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+ usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
+
+ for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
+ /*
+ * Last retimer is true only for the last on-board
+ * retimer (the one connected directly to the Type-C
+ * port).
+ */
+ ret = usb4_port_retimer_is_last(port, i);
+ if (ret > 0)
+ last_idx = i;
+ else if (ret < 0)
+ break;
+ }
+
+ if (!last_idx)
+ return 0;
+
+ /* Add on-board retimers if they do not exist already */
+ for (i = 1; i <= last_idx; i++) {
+ struct tb_retimer *rt;
+
+ rt = tb_port_find_retimer(port, i);
+ if (rt) {
+ put_device(&rt->dev);
+ } else {
+ ret = tb_retimer_add(port, i, status[i]);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int remove_retimer(struct device *dev, void *data)
+{
+ struct tb_retimer *rt = tb_to_retimer(dev);
+ struct tb_port *port = data;
+
+ if (rt && rt->port == port)
+ tb_retimer_remove(rt);
+ return 0;
+}
+
+/**
+ * tb_retimer_remove_all() - Remove all retimers under port
+ * @port: USB4 port whose retimers to remove
+ *
+ * This removes all previously added retimers under @port.
+ */
+void tb_retimer_remove_all(struct tb_port *port)
+{
+ if (port->cap_usb4)
+ device_for_each_child_reverse(&port->sw->dev, port,
+ remove_retimer);
+}
diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
new file mode 100644
index 000000000..9dafd6966
--- /dev/null
+++ b/drivers/thunderbolt/sb_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * USB4 port sideband registers found on routers and retimers
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#ifndef _SB_REGS
+#define _SB_REGS
+
+#define USB4_SB_VENDOR_ID 0x00
+#define USB4_SB_PRODUCT_ID 0x01
+#define USB4_SB_OPCODE 0x08
+
+enum usb4_sb_opcode {
+ USB4_SB_OPCODE_ERR = 0x20525245, /* "ERR " */
+ USB4_SB_OPCODE_ONS = 0x444d4321, /* "!CMD" */
+ USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45, /* "ENUM" */
+ USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c, /* "LAST" */
+ USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47, /* "GNSS" */
+ USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42, /* "BOPS" */
+ USB4_SB_OPCODE_NVM_BLOCK_WRITE = 0x574b4c42, /* "BLKW" */
+ USB4_SB_OPCODE_NVM_AUTH_WRITE = 0x48545541, /* "AUTH" */
+ USB4_SB_OPCODE_NVM_READ = 0x52524641, /* "AFRR" */
+};
+
+#define USB4_SB_METADATA 0x09
+#define USB4_SB_METADATA_NVM_AUTH_WRITE_MASK GENMASK(5, 0)
+#define USB4_SB_DATA 0x12
+
+#endif
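The opcode constants are the four ASCII characters from the comments packed
little-endian; a small sketch showing the relationship (the helper is
illustrative, not part of the driver):

    /* Illustrative: "ENUM" packed little-endian yields 0x4d554e45. */
    static inline u32 example_sb_opcode(const char name[4])
    {
            return (u8)name[0] | (u8)name[1] << 8 |
                   (u8)name[2] << 16 | (u32)(u8)name[3] << 24;
    }

    /* example_sb_opcode("ENUM") == USB4_SB_OPCODE_ENUMERATE_RETIMERS */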
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
new file mode 100644
index 000000000..22e5c4de3
--- /dev/null
+++ b/drivers/thunderbolt/switch.c
@@ -0,0 +1,2937 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt driver - switch/port utility functions
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/nvmem-provider.h>
+#include <linux/pm_runtime.h>
+#include <linux/sched/signal.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include "tb.h"
+
+/* Switch NVM support */
+
+#define NVM_CSS 0x10
+
+struct nvm_auth_status {
+ struct list_head list;
+ uuid_t uuid;
+ u32 status;
+};
+
+enum nvm_write_ops {
+ WRITE_AND_AUTHENTICATE = 1,
+ WRITE_ONLY = 2,
+};
+
+/*
+ * Hold NVM authentication failure status per switch. This information
+ * needs to stay around even when the switch gets power cycled so we
+ * keep it separately.
+ */
+static LIST_HEAD(nvm_auth_status_cache);
+static DEFINE_MUTEX(nvm_auth_status_lock);
+
+static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
+{
+ struct nvm_auth_status *st;
+
+ list_for_each_entry(st, &nvm_auth_status_cache, list) {
+ if (uuid_equal(&st->uuid, sw->uuid))
+ return st;
+ }
+
+ return NULL;
+}
+
+static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
+{
+ struct nvm_auth_status *st;
+
+ mutex_lock(&nvm_auth_status_lock);
+ st = __nvm_get_auth_status(sw);
+ mutex_unlock(&nvm_auth_status_lock);
+
+ *status = st ? st->status : 0;
+}
+
+static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
+{
+ struct nvm_auth_status *st;
+
+ if (WARN_ON(!sw->uuid))
+ return;
+
+ mutex_lock(&nvm_auth_status_lock);
+ st = __nvm_get_auth_status(sw);
+
+ if (!st) {
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto unlock;
+
+ memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
+ INIT_LIST_HEAD(&st->list);
+ list_add_tail(&st->list, &nvm_auth_status_cache);
+ }
+
+ st->status = status;
+unlock:
+ mutex_unlock(&nvm_auth_status_lock);
+}
+
+static void nvm_clear_auth_status(const struct tb_switch *sw)
+{
+ struct nvm_auth_status *st;
+
+ mutex_lock(&nvm_auth_status_lock);
+ st = __nvm_get_auth_status(sw);
+ if (st) {
+ list_del(&st->list);
+ kfree(st);
+ }
+ mutex_unlock(&nvm_auth_status_lock);
+}
+
+static int nvm_validate_and_write(struct tb_switch *sw)
+{
+ unsigned int image_size, hdr_size;
+ const u8 *buf = sw->nvm->buf;
+ u16 ds_size;
+ int ret;
+
+ if (!buf)
+ return -EINVAL;
+
+ image_size = sw->nvm->buf_data_size;
+ if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
+ return -EINVAL;
+
+ /*
+ * FARB pointer must point inside the image and must at least
+ * contain parts of the digital section we will be reading here.
+ */
+ hdr_size = (*(u32 *)buf) & 0xffffff;
+ if (hdr_size + NVM_DEVID + 2 >= image_size)
+ return -EINVAL;
+
+ /* Digital section start should be aligned to 4k page */
+ if (!IS_ALIGNED(hdr_size, SZ_4K))
+ return -EINVAL;
+
+ /*
+ * Read digital section size and check that it also fits inside
+ * the image.
+ */
+ ds_size = *(u16 *)(buf + hdr_size);
+ if (ds_size >= image_size)
+ return -EINVAL;
+
+ if (!sw->safe_mode) {
+ u16 device_id;
+
+ /*
+ * Make sure the device ID in the image matches the one
+ * we read from the switch config space.
+ */
+ device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
+ if (device_id != sw->config.device_id)
+ return -EINVAL;
+
+ if (sw->generation < 3) {
+ /* Write CSS headers first */
+ ret = dma_port_flash_write(sw->dma_port,
+ DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
+ DMA_PORT_CSS_MAX_SIZE);
+ if (ret)
+ return ret;
+ }
+
+ /* Skip headers in the image */
+ buf += hdr_size;
+ image_size -= hdr_size;
+ }
+
+ if (tb_switch_is_usb4(sw))
+ ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
+ else
+ ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
+ if (!ret)
+ sw->nvm->flushed = true;
+ return ret;
+}
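+
+/*
+ * Illustrative sketch (editor's addition, not part of the original
+ * patch): the NVM image layout that nvm_validate_and_write() above
+ * assumes, reconstructed from the checks it performs:
+ *
+ *   offset 0           FARB header; its low 24 bits point to the
+ *                      digital section (hdr_size, 4k aligned)
+ *   offset NVM_CSS     CSS headers, written separately through the DMA
+ *                      port when sw->generation < 3
+ *   offset hdr_size    digital section; its first 16 bits give ds_size
+ *                      and the device ID lives at hdr_size + NVM_DEVID
+ *
+ * When the router is not in safe mode only the data starting at
+ * hdr_size is flashed.
+ */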
+
+static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
+{
+ int ret = 0;
+
+ /*
+ * Root switch NVM upgrade requires that we disconnect the
+ * existing paths first (in case it is not in safe mode
+ * already).
+ */
+ if (!sw->safe_mode) {
+ u32 status;
+
+ ret = tb_domain_disconnect_all_paths(sw->tb);
+ if (ret)
+ return ret;
+ /*
+ * The host controller goes away pretty soon after this if
+ * everything goes well, so getting a timeout is expected.
+ */
+ ret = dma_port_flash_update_auth(sw->dma_port);
+ if (!ret || ret == -ETIMEDOUT)
+ return 0;
+
+ /*
+ * Any error from update auth operation requires power
+ * cycling of the host router.
+ */
+ tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
+ if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
+ nvm_set_auth_status(sw, status);
+ }
+
+ /*
+ * From safe mode we can get out by just power cycling the
+ * switch.
+ */
+ dma_port_power_cycle(sw->dma_port);
+ return ret;
+}
+
+static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
+{
+ int ret, retries = 10;
+
+ ret = dma_port_flash_update_auth(sw->dma_port);
+ switch (ret) {
+ case 0:
+ case -ETIMEDOUT:
+ case -EACCES:
+ case -EINVAL:
+ /* Power cycle is required */
+ break;
+ default:
+ return ret;
+ }
+
+ /*
+ * Poll here for the authentication status. It takes some time
+ * for the device to respond (we get a timeout for a while). Once
+ * we get a response the device needs to be power cycled in order
+ * for the new NVM to be taken into use.
+ */
+ do {
+ u32 status;
+
+ ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+ if (ret < 0 && ret != -ETIMEDOUT)
+ return ret;
+ if (ret > 0) {
+ if (status) {
+ tb_sw_warn(sw, "failed to authenticate NVM\n");
+ nvm_set_auth_status(sw, status);
+ }
+
+ tb_sw_info(sw, "power cycling the switch now\n");
+ dma_port_power_cycle(sw->dma_port);
+ return 0;
+ }
+
+ msleep(500);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
+
+static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
+{
+ struct pci_dev *root_port;
+
+ /*
+ * During host router NVM upgrade we should not allow root port to
+ * go into D3cold because some root ports cannot trigger PME
+ * themselves. To be on the safe side keep the root port in D0 during
+ * the whole upgrade process.
+ */
+ root_port = pcie_find_root_port(sw->tb->nhi->pdev);
+ if (root_port)
+ pm_runtime_get_noresume(&root_port->dev);
+}
+
+static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
+{
+ struct pci_dev *root_port;
+
+ root_port = pcie_find_root_port(sw->tb->nhi->pdev);
+ if (root_port)
+ pm_runtime_put(&root_port->dev);
+}
+
+static inline bool nvm_readable(struct tb_switch *sw)
+{
+ if (tb_switch_is_usb4(sw)) {
+ /*
+ * USB4 devices must support NVM operations but it is
+ * optional for hosts. Therefore we query the NVM sector
+ * size here and if it is supported, assume NVM
+ * operations are implemented.
+ */
+ return usb4_switch_nvm_sector_size(sw) > 0;
+ }
+
+ /* Thunderbolt 2 and 3 devices support NVM through DMA port */
+ return !!sw->dma_port;
+}
+
+static inline bool nvm_upgradeable(struct tb_switch *sw)
+{
+ if (sw->no_nvm_upgrade)
+ return false;
+ return nvm_readable(sw);
+}
+
+static inline int nvm_read(struct tb_switch *sw, unsigned int address,
+ void *buf, size_t size)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_read(sw, address, buf, size);
+ return dma_port_flash_read(sw->dma_port, address, buf, size);
+}
+
+static int nvm_authenticate(struct tb_switch *sw)
+{
+ int ret;
+
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_nvm_authenticate(sw);
+
+ if (!tb_route(sw)) {
+ nvm_authenticate_start_dma_port(sw);
+ ret = nvm_authenticate_host_dma_port(sw);
+ } else {
+ ret = nvm_authenticate_device_dma_port(sw);
+ }
+
+ return ret;
+}
+
+static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct tb_nvm *nvm = priv;
+ struct tb_switch *sw = tb_to_switch(nvm->dev);
+ int ret;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (!mutex_trylock(&sw->tb->lock)) {
+ ret = restart_syscall();
+ goto out;
+ }
+
+ ret = nvm_read(sw, offset, val, bytes);
+ mutex_unlock(&sw->tb->lock);
+
+out:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+
+static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct tb_nvm *nvm = priv;
+ struct tb_switch *sw = tb_to_switch(nvm->dev);
+ int ret;
+
+ if (!mutex_trylock(&sw->tb->lock))
+ return restart_syscall();
+
+ /*
+ * Since writing the NVM image might require some special steps,
+ * for example when CSS headers are written, we cache the image
+ * locally here and handle the special cases when the user asks
+ * us to authenticate the image.
+ */
+ ret = tb_nvm_write_buf(nvm, offset, val, bytes);
+ mutex_unlock(&sw->tb->lock);
+
+ return ret;
+}
+
+static int tb_switch_nvm_add(struct tb_switch *sw)
+{
+ struct tb_nvm *nvm;
+ u32 val;
+ int ret;
+
+ if (!nvm_readable(sw))
+ return 0;
+
+ /*
+ * The NVM format of non-Intel hardware is not known so we
+ * currently restrict NVM upgrade to Intel hardware. We may
+ * relax this in the future when we learn other NVM formats.
+ */
+ if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL &&
+ sw->config.vendor_id != 0x8087) {
+ dev_info(&sw->dev,
+ "NVM format of vendor %#x is not known, disabling NVM upgrade\n",
+ sw->config.vendor_id);
+ return 0;
+ }
+
+ nvm = tb_nvm_alloc(&sw->dev);
+ if (IS_ERR(nvm))
+ return PTR_ERR(nvm);
+
+ /*
+ * If the switch is in safe-mode the only accessible portion of
+ * the NVM is the non-active one where userspace is expected to
+ * write new functional NVM.
+ */
+ if (!sw->safe_mode) {
+ u32 nvm_size, hdr_size;
+
+ ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val));
+ if (ret)
+ goto err_nvm;
+
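+ /*
+ * Editor's note (interpretation, not from the original patch): the
+ * NVM_FLASH_SIZE field appears to encode the flash size as
+ * SZ_1M << (val & 7) bits, hence the division by 8 to get bytes.
+ * The flash holds the header plus two equally sized images (active
+ * and non-active), which is why the remaining space is halved.
+ */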
+ hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
+ nvm_size = (SZ_1M << (val & 7)) / 8;
+ nvm_size = (nvm_size - hdr_size) / 2;
+
+ ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val));
+ if (ret)
+ goto err_nvm;
+
+ nvm->major = val >> 16;
+ nvm->minor = val >> 8;
+
+ ret = tb_nvm_add_active(nvm, nvm_size, tb_switch_nvm_read);
+ if (ret)
+ goto err_nvm;
+ }
+
+ if (!sw->no_nvm_upgrade) {
+ ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE,
+ tb_switch_nvm_write);
+ if (ret)
+ goto err_nvm;
+ }
+
+ sw->nvm = nvm;
+ return 0;
+
+err_nvm:
+ tb_nvm_free(nvm);
+ return ret;
+}
+
+static void tb_switch_nvm_remove(struct tb_switch *sw)
+{
+ struct tb_nvm *nvm;
+
+ nvm = sw->nvm;
+ sw->nvm = NULL;
+
+ if (!nvm)
+ return;
+
+ /* Remove authentication status in case the switch is unplugged */
+ if (!nvm->authenticating)
+ nvm_clear_auth_status(sw);
+
+ tb_nvm_free(nvm);
+}
+
+/* port utility functions */
+
+static const char *tb_port_type(struct tb_regs_port_header *port)
+{
+ switch (port->type >> 16) {
+ case 0:
+ switch ((u8) port->type) {
+ case 0:
+ return "Inactive";
+ case 1:
+ return "Port";
+ case 2:
+ return "NHI";
+ default:
+ return "unknown";
+ }
+ case 0x2:
+ return "Ethernet";
+ case 0x8:
+ return "SATA";
+ case 0xe:
+ return "DP/HDMI";
+ case 0x10:
+ return "PCIe";
+ case 0x20:
+ return "USB";
+ default:
+ return "unknown";
+ }
+}
+
+static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
+{
+ tb_dbg(tb,
+ " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
+ port->port_number, port->vendor_id, port->device_id,
+ port->revision, port->thunderbolt_version, tb_port_type(port),
+ port->type);
+ tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
+ port->max_in_hop_id, port->max_out_hop_id);
+ tb_dbg(tb, " Max counters: %d\n", port->max_counters);
+ tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
+}
+
+/**
+ * tb_port_state() - get connectedness state of a port
+ *
+ * The port must have a TB_CAP_PHY (i.e. it should be a real port).
+ *
+ * Return: Returns an enum tb_port_state on success or an error code on failure.
+ */
+static int tb_port_state(struct tb_port *port)
+{
+ struct tb_cap_phy phy;
+ int res;
+ if (port->cap_phy == 0) {
+ tb_port_WARN(port, "does not have a PHY\n");
+ return -EINVAL;
+ }
+ res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
+ if (res)
+ return res;
+ return phy.state;
+}
+
+/**
+ * tb_wait_for_port() - wait for a port to become ready
+ *
+ * Wait up to 1 second for a port to reach state TB_PORT_UP. If
+ * wait_if_unplugged is set then we also wait if the port is in state
+ * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
+ * switch resume). Otherwise we only wait if a device is registered but the link
+ * has not yet been established.
+ *
+ * Return: Returns an error code on failure. Returns 0 if the port is not
+ * connected or failed to reach state TB_PORT_UP within one second. Returns 1
+ * if the port is connected and in state TB_PORT_UP.
+ */
+int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
+{
+ int retries = 10;
+ int state;
+ if (!port->cap_phy) {
+ tb_port_WARN(port, "does not have PHY\n");
+ return -EINVAL;
+ }
+ if (tb_is_upstream_port(port)) {
+ tb_port_WARN(port, "is the upstream port\n");
+ return -EINVAL;
+ }
+
+ while (retries--) {
+ state = tb_port_state(port);
+ if (state < 0)
+ return state;
+ if (state == TB_PORT_DISABLED) {
+ tb_port_dbg(port, "is disabled (state: 0)\n");
+ return 0;
+ }
+ if (state == TB_PORT_UNPLUGGED) {
+ if (wait_if_unplugged) {
+ /* used during resume */
+ tb_port_dbg(port,
+ "is unplugged (state: 7), retrying...\n");
+ msleep(100);
+ continue;
+ }
+ tb_port_dbg(port, "is unplugged (state: 7)\n");
+ return 0;
+ }
+ if (state == TB_PORT_UP) {
+ tb_port_dbg(port, "is connected, link is up (state: 2)\n");
+ return 1;
+ }
+
+ /*
+ * After plug-in the state is TB_PORT_CONNECTING. Give it some
+ * time.
+ */
+ tb_port_dbg(port,
+ "is connected, link is not up (state: %d), retrying...\n",
+ state);
+ msleep(100);
+ }
+ tb_port_warn(port,
+ "failed to reach state TB_PORT_UP. Ignoring port...\n");
+ return 0;
+}
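+
+/*
+ * Usage sketch (illustrative only, not part of the original code): a
+ * caller that must not proceed unless the link is actually up checks
+ * for the positive return value, for example:
+ *
+ *	if (tb_wait_for_port(port, false) <= 0)
+ *		return -ENOTCONN;
+ *	// port is connected and in TB_PORT_UP here
+ */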
+
+/**
+ * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
+ *
+ * Change the number of NFC credits allocated to @port by @credits. To remove
+ * NFC credits pass a negative amount of credits.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_port_add_nfc_credits(struct tb_port *port, int credits)
+{
+ u32 nfc_credits;
+
+ if (credits == 0 || port->sw->is_unplugged)
+ return 0;
+
+ /*
+ * USB4 restricts programming NFC buffers to lane adapters only
+ * so skip other ports.
+ */
+ if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
+ return 0;
+
+ nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
+ nfc_credits += credits;
+
+ tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
+ port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
+
+ port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
+ port->config.nfc_credits |= nfc_credits;
+
+ return tb_port_write(port, &port->config.nfc_credits,
+ TB_CFG_PORT, ADP_CS_4, 1);
+}
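+
+/*
+ * Illustrative example (not part of the original code): as the
+ * kernel-doc above notes, credits reserved with a positive value are
+ * returned later by passing the negated amount:
+ *
+ *	tb_port_add_nfc_credits(port, credits);		// reserve
+ *	...
+ *	tb_port_add_nfc_credits(port, -credits);	// release
+ */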
+
+/**
+ * tb_port_set_initial_credits() - Set initial port link credits allocated
+ * @port: Port to set the initial credits
+ * @credits: Number of credits to allocate
+ *
+ * Set initial credits value to be used for ingress shared buffering.
+ */
+int tb_port_set_initial_credits(struct tb_port *port, u32 credits)
+{
+ u32 data;
+ int ret;
+
+ ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
+ if (ret)
+ return ret;
+
+ data &= ~ADP_CS_5_LCA_MASK;
+ data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK;
+
+ return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1);
+}
+
+/**
+ * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_port_clear_counter(struct tb_port *port, int counter)
+{
+ u32 zero[3] = { 0, 0, 0 };
+ tb_port_dbg(port, "clearing counter %d\n", counter);
+ return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
+}
+
+/**
+ * tb_port_unlock() - Unlock downstream port
+ * @port: Port to unlock
+ *
+ * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
+ * downstream router accessible for CM.
+ */
+int tb_port_unlock(struct tb_port *port)
+{
+ if (tb_switch_is_icm(port->sw))
+ return 0;
+ if (!tb_port_is_null(port))
+ return -EINVAL;
+ if (tb_switch_is_usb4(port->sw))
+ return usb4_port_unlock(port);
+ return 0;
+}
+
+static int __tb_port_enable(struct tb_port *port, bool enable)
+{
+ int ret;
+ u32 phy;
+
+ if (!tb_port_is_null(port))
+ return -EINVAL;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (enable)
+ phy &= ~LANE_ADP_CS_1_LD;
+ else
+ phy |= LANE_ADP_CS_1_LD;
+
+ return tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+/**
+ * tb_port_enable() - Enable lane adapter
+ * @port: Port to enable (can be %NULL)
+ *
+ * This is used to enable a lane 0 or lane 1 adapter.
+ */
+int tb_port_enable(struct tb_port *port)
+{
+ return __tb_port_enable(port, true);
+}
+
+/**
+ * tb_port_disable() - Disable lane adapter
+ * @port: Port to disable (can be %NULL)
+ *
+ * This is used to disable a lane 0 or lane 1 adapter.
+ */
+int tb_port_disable(struct tb_port *port)
+{
+ return __tb_port_enable(port, false);
+}
+
+/**
+ * tb_init_port() - initialize a port
+ *
+ * This is a helper method for tb_switch_alloc. Does not check or initialize
+ * any downstream switches.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_init_port(struct tb_port *port)
+{
+ int res;
+ int cap;
+
+ res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
+ if (res) {
+ if (res == -ENODEV) {
+ tb_dbg(port->sw->tb, " Port %d: not implemented\n",
+ port->port);
+ port->disabled = true;
+ return 0;
+ }
+ return res;
+ }
+
+ /* Port 0 is the switch itself and has no PHY. */
+ if (port->config.type == TB_TYPE_PORT && port->port != 0) {
+ cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
+
+ if (cap > 0)
+ port->cap_phy = cap;
+ else
+ tb_port_WARN(port, "non switch port without a PHY\n");
+
+ cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
+ if (cap > 0)
+ port->cap_usb4 = cap;
+ } else if (port->port != 0) {
+ cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
+ if (cap > 0)
+ port->cap_adap = cap;
+ }
+
+ tb_dump_port(port->sw->tb, &port->config);
+
+ INIT_LIST_HEAD(&port->list);
+ return 0;
+}
+
+static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
+ int max_hopid)
+{
+ int port_max_hopid;
+ struct ida *ida;
+
+ if (in) {
+ port_max_hopid = port->config.max_in_hop_id;
+ ida = &port->in_hopids;
+ } else {
+ port_max_hopid = port->config.max_out_hop_id;
+ ida = &port->out_hopids;
+ }
+
+ /*
+ * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
+ * reserved.
+ */
+ if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
+ min_hopid = TB_PATH_MIN_HOPID;
+
+ if (max_hopid < 0 || max_hopid > port_max_hopid)
+ max_hopid = port_max_hopid;
+
+ return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
+}
+
+/**
+ * tb_port_alloc_in_hopid() - Allocate input HopID from port
+ * @port: Port to allocate HopID for
+ * @min_hopid: Minimum acceptable input HopID
+ * @max_hopid: Maximum acceptable input HopID
+ *
+ * Return: HopID between @min_hopid and @max_hopid or negative errno in
+ * case of error.
+ */
+int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
+{
+ return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
+}
+
+/**
+ * tb_port_alloc_out_hopid() - Allocate output HopID from port
+ * @port: Port to allocate HopID for
+ * @min_hopid: Minimum acceptable output HopID
+ * @max_hopid: Maximum acceptable output HopID
+ *
+ * Return: HopID between @min_hopid and @max_hopid or negative errno in
+ * case of error.
+ */
+int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
+{
+ return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
+}
+
+/**
+ * tb_port_release_in_hopid() - Release allocated input HopID from port
+ * @port: Port whose HopID to release
+ * @hopid: HopID to release
+ */
+void tb_port_release_in_hopid(struct tb_port *port, int hopid)
+{
+ ida_simple_remove(&port->in_hopids, hopid);
+}
+
+/**
+ * tb_port_release_out_hopid() - Release allocated output HopID from port
+ * @port: Port whose HopID to release
+ * @hopid: HopID to release
+ */
+void tb_port_release_out_hopid(struct tb_port *port, int hopid)
+{
+ ida_simple_remove(&port->out_hopids, hopid);
+}
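+
+/*
+ * Usage sketch (illustrative, not from the original patch): path setup
+ * typically allocates a HopID and releases it again on teardown:
+ *
+ *	int hopid = tb_port_alloc_in_hopid(port, TB_PATH_MIN_HOPID, -1);
+ *	if (hopid < 0)
+ *		return hopid;
+ *	...
+ *	tb_port_release_in_hopid(port, hopid);
+ *
+ * A negative @max_hopid means "up to the port maximum", as handled in
+ * tb_port_alloc_hopid() above.
+ */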
+
+static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
+ const struct tb_switch *sw)
+{
+ u64 mask = (1ULL << parent->config.depth * 8) - 1;
+ return (tb_route(parent) & mask) == (tb_route(sw) & mask);
+}
+
+/**
+ * tb_next_port_on_path() - Return next port for given port on a path
+ * @start: Start port of the walk
+ * @end: End port of the walk
+ * @prev: Previous port (%NULL if this is the first)
+ *
+ * This function can be used to walk from one port to another if they
+ * are connected through zero or more switches. If @prev is a dual
+ * link port, the function follows that link and returns the other end
+ * of that same link.
+ *
+ * If the @end port has been reached, return %NULL.
+ *
+ * Domain tb->lock must be held when this function is called.
+ */
+struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
+ struct tb_port *prev)
+{
+ struct tb_port *next;
+
+ if (!prev)
+ return start;
+
+ if (prev->sw == end->sw) {
+ if (prev == end)
+ return NULL;
+ return end;
+ }
+
+ if (tb_switch_is_reachable(prev->sw, end->sw)) {
+ next = tb_port_at(tb_route(end->sw), prev->sw);
+ /* Walk down the topology if next == prev */
+ if (prev->remote &&
+ (next == prev || next->dual_link_port == prev))
+ next = prev->remote;
+ } else {
+ if (tb_is_upstream_port(prev)) {
+ next = prev->remote;
+ } else {
+ next = tb_upstream_port(prev->sw);
+ /*
+ * Keep the same link if prev and next are both
+ * dual link ports.
+ */
+ if (next->dual_link_port &&
+ next->link_nr != prev->link_nr) {
+ next = next->dual_link_port;
+ }
+ }
+ }
+
+ return next != prev ? next : NULL;
+}
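+
+/*
+ * Illustrative walk (not part of the original code): starting with
+ * @prev == NULL and feeding the result back in visits each port on the
+ * path from @start to @end, stopping once %NULL is returned:
+ *
+ *	struct tb_port *p = NULL;
+ *
+ *	while ((p = tb_next_port_on_path(src, dst, p)))
+ *		tb_port_dbg(p, "on path\n");
+ */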
+
+/**
+ * tb_port_get_link_speed() - Get current link speed
+ * @port: Port to check (USB4 or CIO)
+ *
+ * Returns link speed in Gb/s or negative errno in case of failure.
+ */
+int tb_port_get_link_speed(struct tb_port *port)
+{
+ u32 val, speed;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
+ LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
+ return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10;
+}
+
+static int tb_port_get_link_width(struct tb_port *port)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
+ LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
+}
+
+static bool tb_port_is_width_supported(struct tb_port *port, int width)
+{
+ u32 phy, widths;
+ int ret;
+
+ if (!port->cap_phy)
+ return false;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, 1);
+ if (ret)
+ return false;
+
+ widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+ LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+
+ return !!(widths & width);
+}
+
+static int tb_port_set_link_width(struct tb_port *port, unsigned int width)
+{
+ u32 val;
+ int ret;
+
+ if (!port->cap_phy)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
+ switch (width) {
+ case 1:
+ val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+ break;
+ case 2:
+ val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val |= LANE_ADP_CS_1_LB;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_lane_bonding_enable(struct tb_port *port)
+{
+ int ret;
+
+ /*
+ * Enable lane bonding for both links if not already enabled by,
+ * for example, the boot firmware.
+ */
+ ret = tb_port_get_link_width(port);
+ if (ret == 1) {
+ ret = tb_port_set_link_width(port, 2);
+ if (ret)
+ return ret;
+ }
+
+ ret = tb_port_get_link_width(port->dual_link_port);
+ if (ret == 1) {
+ ret = tb_port_set_link_width(port->dual_link_port, 2);
+ if (ret) {
+ tb_port_set_link_width(port, 1);
+ return ret;
+ }
+ }
+
+ port->bonded = true;
+ port->dual_link_port->bonded = true;
+
+ return 0;
+}
+
+static void tb_port_lane_bonding_disable(struct tb_port *port)
+{
+ port->dual_link_port->bonded = false;
+ port->bonded = false;
+
+ tb_port_set_link_width(port->dual_link_port, 1);
+ tb_port_set_link_width(port, 1);
+}
+
+/**
+ * tb_port_is_enabled() - Is the adapter port enabled
+ * @port: Port to check
+ */
+bool tb_port_is_enabled(struct tb_port *port)
+{
+ switch (port->config.type) {
+ case TB_TYPE_PCIE_UP:
+ case TB_TYPE_PCIE_DOWN:
+ return tb_pci_port_is_enabled(port);
+
+ case TB_TYPE_DP_HDMI_IN:
+ case TB_TYPE_DP_HDMI_OUT:
+ return tb_dp_port_is_enabled(port);
+
+ case TB_TYPE_USB3_UP:
+ case TB_TYPE_USB3_DOWN:
+ return tb_usb3_port_is_enabled(port);
+
+ default:
+ return false;
+ }
+}
+
+/**
+ * tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
+ * @port: USB3 adapter port to check
+ */
+bool tb_usb3_port_is_enabled(struct tb_port *port)
+{
+ u32 data;
+
+ if (tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_0, 1))
+ return false;
+
+ return !!(data & ADP_USB3_CS_0_PE);
+}
+
+/**
+ * tb_usb3_port_enable() - Enable USB3 adapter port
+ * @port: USB3 adapter port to enable
+ * @enable: Enable/disable the USB3 adapter
+ */
+int tb_usb3_port_enable(struct tb_port *port, bool enable)
+{
+ u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
+ : ADP_USB3_CS_0_V;
+
+ if (!port->cap_adap)
+ return -ENXIO;
+ return tb_port_write(port, &word, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_0, 1);
+}
+
+/**
+ * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
+ * @port: PCIe port to check
+ */
+bool tb_pci_port_is_enabled(struct tb_port *port)
+{
+ u32 data;
+
+ if (tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_0, 1))
+ return false;
+
+ return !!(data & ADP_PCIE_CS_0_PE);
+}
+
+/**
+ * tb_pci_port_enable() - Enable PCIe adapter port
+ * @port: PCIe port to enable
+ * @enable: Enable/disable the PCIe adapter
+ */
+int tb_pci_port_enable(struct tb_port *port, bool enable)
+{
+ u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
+ if (!port->cap_adap)
+ return -ENXIO;
+ return tb_port_write(port, &word, TB_CFG_PORT,
+ port->cap_adap + ADP_PCIE_CS_0, 1);
+}
+
+/**
+ * tb_dp_port_hpd_is_active() - Is HPD already active
+ * @port: DP out port to check
+ *
+ * Checks if the DP OUT adapter port has the HDP bit already set.
+ */
+int tb_dp_port_hpd_is_active(struct tb_port *port)
+{
+ u32 data;
+ int ret;
+
+ ret = tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ return !!(data & ADP_DP_CS_2_HDP);
+}
+
+/**
+ * tb_dp_port_hpd_clear() - Clear HPD from DP IN port
+ * @port: Port to clear HPD
+ *
+ * If the DP IN port has HDP set, this function can be used to clear it.
+ */
+int tb_dp_port_hpd_clear(struct tb_port *port)
+{
+ u32 data;
+ int ret;
+
+ ret = tb_port_read(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_3, 1);
+ if (ret)
+ return ret;
+
+ data |= ADP_DP_CS_3_HDPC;
+ return tb_port_write(port, &data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_3, 1);
+}
+
+/**
+ * tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
+ * @port: DP IN/OUT port to set hops
+ * @video: Video Hop ID
+ * @aux_tx: AUX TX Hop ID
+ * @aux_rx: AUX RX Hop ID
+ *
+ * Programs specified Hop IDs for DP IN/OUT port.
+ */
+int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
+ unsigned int aux_tx, unsigned int aux_rx)
+{
+ u32 data[2];
+ int ret;
+
+ ret = tb_port_read(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
+ if (ret)
+ return ret;
+
+ data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+
+ data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
+ ADP_DP_CS_0_VIDEO_HOPID_MASK;
+ data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
+ data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
+ ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+
+ return tb_port_write(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
+}
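+
+/*
+ * Illustrative sequence (editor's addition; the HopID variables are
+ * placeholders): as the tb_dp_port_enable() kernel-doc below notes,
+ * the Hop IDs are programmed first and only then are the paths enabled:
+ *
+ *	ret = tb_dp_port_set_hops(port, video, aux_tx, aux_rx);
+ *	if (!ret)
+ *		ret = tb_dp_port_enable(port, true);
+ */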
+
+/**
+ * tb_dp_port_is_enabled() - Is DP adapter port enabled
+ * @port: DP adapter port to check
+ */
+bool tb_dp_port_is_enabled(struct tb_port *port)
+{
+ u32 data[2];
+
+ if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
+ ARRAY_SIZE(data)))
+ return false;
+
+ return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
+}
+
+/**
+ * tb_dp_port_enable() - Enables/disables DP paths of a port
+ * @port: DP IN/OUT port
+ * @enable: Enable/disable DP path
+ *
+ * Once Hop IDs are programmed DP paths can be enabled or disabled by
+ * calling this function.
+ */
+int tb_dp_port_enable(struct tb_port *port, bool enable)
+{
+ u32 data[2];
+ int ret;
+
+ ret = tb_port_read(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
+ if (ret)
+ return ret;
+
+ if (enable)
+ data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
+ else
+ data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
+
+ return tb_port_write(port, data, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
+}
+
+/* switch utility functions */
+
+static const char *tb_switch_generation_name(const struct tb_switch *sw)
+{
+ switch (sw->generation) {
+ case 1:
+ return "Thunderbolt 1";
+ case 2:
+ return "Thunderbolt 2";
+ case 3:
+ return "Thunderbolt 3";
+ case 4:
+ return "USB4";
+ default:
+ return "Unknown";
+ }
+}
+
+static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
+{
+ const struct tb_regs_switch_header *regs = &sw->config;
+
+ tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+ tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
+ regs->revision, regs->thunderbolt_version);
+ tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
+ tb_dbg(tb, " Config:\n");
+ tb_dbg(tb,
+ " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
+ regs->upstream_port_number, regs->depth,
+ (((u64) regs->route_hi) << 32) | regs->route_lo,
+ regs->enabled, regs->plug_events_delay);
+ tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
+ regs->__unknown1, regs->__unknown4);
+}
+
+/**
+ * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
+ * @sw: Switch to reset
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_switch_reset(struct tb_switch *sw)
+{
+ struct tb_cfg_result res;
+
+ if (sw->generation > 1)
+ return 0;
+
+ tb_sw_dbg(sw, "resetting switch\n");
+
+ res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
+ TB_CFG_SWITCH, 2, 2);
+ if (res.err)
+ return res.err;
+ res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT);
+ if (res.err > 0)
+ return -EIO;
+ return res.err;
+}
+
+/**
+ * tb_plug_events_active() - enable/disable plug events on a switch
+ *
+ * Also configures a sane plug_events_delay of 255ms.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+static int tb_plug_events_active(struct tb_switch *sw, bool active)
+{
+ u32 data;
+ int res;
+
+ if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
+ return 0;
+
+ sw->config.plug_events_delay = 0xff;
+ res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
+ if (res)
+ return res;
+
+ res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
+ if (res)
+ return res;
+
+ if (active) {
+ data = data & 0xFFFFFF83;
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+ case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
+ case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
+ break;
+ default:
+ data |= 4;
+ }
+ } else {
+ data = data | 0x7c;
+ }
+ return tb_sw_write(sw, &data, TB_CFG_SWITCH,
+ sw->cap_plug_events + 1, 1);
+}
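+
+/*
+ * Editor's note on the constants above (interpretation, not from the
+ * original patch): 0x7c sets bits 2..6 and 0xFFFFFF83 is its 32-bit
+ * complement. Enabling therefore clears bits 2..6 (and sets bit 2
+ * again except on the listed Light Ridge/Eagle Ridge/Port Ridge
+ * devices) while disabling sets all of bits 2..6, which suggests these
+ * are per-source mask bits in the plug events capability.
+ */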
+
+static ssize_t authorized_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u\n", sw->authorized);
+}
+
+static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
+{
+ int ret = -EINVAL;
+
+ if (!mutex_trylock(&sw->tb->lock))
+ return restart_syscall();
+
+ if (sw->authorized)
+ goto unlock;
+
+ switch (val) {
+ /* Approve switch */
+ case 1:
+ if (sw->key)
+ ret = tb_domain_approve_switch_key(sw->tb, sw);
+ else
+ ret = tb_domain_approve_switch(sw->tb, sw);
+ break;
+
+ /* Challenge switch */
+ case 2:
+ if (sw->key)
+ ret = tb_domain_challenge_switch_key(sw->tb, sw);
+ break;
+
+ default:
+ break;
+ }
+
+ if (!ret) {
+ sw->authorized = val;
+ /* Notify status change to the userspace */
+ kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+ }
+
+unlock:
+ mutex_unlock(&sw->tb->lock);
+ return ret;
+}
+
+static ssize_t authorized_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ unsigned int val;
+ ssize_t ret;
+
+ ret = kstrtouint(buf, 0, &val);
+ if (ret)
+ return ret;
+ if (val > 2)
+ return -EINVAL;
+
+ pm_runtime_get_sync(&sw->dev);
+ ret = tb_switch_set_authorized(sw, val);
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(authorized);
+
+static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u\n", sw->boot);
+}
+static DEVICE_ATTR_RO(boot);
+
+static ssize_t device_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%#x\n", sw->device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t
+device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : "");
+}
+static DEVICE_ATTR_RO(device_name);
+
+static ssize_t
+generation_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u\n", sw->generation);
+}
+static DEVICE_ATTR_RO(generation);
+
+static ssize_t key_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ ssize_t ret;
+
+ if (!mutex_trylock(&sw->tb->lock))
+ return restart_syscall();
+
+ if (sw->key)
+ ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
+ else
+ ret = sprintf(buf, "\n");
+
+ mutex_unlock(&sw->tb->lock);
+ return ret;
+}
+
+static ssize_t key_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ u8 key[TB_SWITCH_KEY_SIZE];
+ ssize_t ret = count;
+ bool clear = false;
+
+ if (!strcmp(buf, "\n"))
+ clear = true;
+ else if (hex2bin(key, buf, sizeof(key)))
+ return -EINVAL;
+
+ if (!mutex_trylock(&sw->tb->lock))
+ return restart_syscall();
+
+ if (sw->authorized) {
+ ret = -EBUSY;
+ } else {
+ kfree(sw->key);
+ if (clear) {
+ sw->key = NULL;
+ } else {
+ sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
+ if (!sw->key)
+ ret = -ENOMEM;
+ }
+ }
+
+ mutex_unlock(&sw->tb->lock);
+ return ret;
+}
+static DEVICE_ATTR(key, 0600, key_show, key_store);
+
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed);
+}
+
+/*
+ * Currently all lanes must run at the same speed but we expose here
+ * both directions to allow possible asymmetric links in the future.
+ */
+static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
+static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
+
+static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%u\n", sw->link_width);
+}
+
+/*
+ * Currently the link has the same number of lanes in both directions
+ * (1 or 2) but expose them separately to allow possible asymmetric
+ * links in the future.
+ */
+static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
+static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
+
+static ssize_t nvm_authenticate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ u32 status;
+
+ nvm_get_auth_status(sw, &status);
+ return sprintf(buf, "%#x\n", status);
+}
+
+static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
+ bool disconnect)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ int val;
+ int ret;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (!mutex_trylock(&sw->tb->lock)) {
+ ret = restart_syscall();
+ goto exit_rpm;
+ }
+
+ /* If NVMem devices are not yet added */
+ if (!sw->nvm) {
+ ret = -EAGAIN;
+ goto exit_unlock;
+ }
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret)
+ goto exit_unlock;
+
+ /* Always clear the authentication status */
+ nvm_clear_auth_status(sw);
+
+ if (val > 0) {
+ if (!sw->nvm->flushed) {
+ if (!sw->nvm->buf) {
+ ret = -EINVAL;
+ goto exit_unlock;
+ }
+
+ ret = nvm_validate_and_write(sw);
+ if (ret || val == WRITE_ONLY)
+ goto exit_unlock;
+ }
+ if (val == WRITE_AND_AUTHENTICATE) {
+ if (disconnect) {
+ ret = tb_lc_force_power(sw);
+ } else {
+ sw->nvm->authenticating = true;
+ ret = nvm_authenticate(sw);
+ }
+ }
+ }
+
+exit_unlock:
+ mutex_unlock(&sw->tb->lock);
+exit_rpm:
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+ return ret;
+}
+
+static ssize_t nvm_authenticate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret = nvm_authenticate_sysfs(dev, buf, false);
+ if (ret)
+ return ret;
+ return count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate);
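+
+/*
+ * Usage sketch (illustrative, not part of the original code): userspace
+ * first writes the new image to the non-active NVMem device created by
+ * tb_switch_nvm_add() and then writes a value to this attribute:
+ * 1 (WRITE_AND_AUTHENTICATE) flushes the cached image and starts
+ * authentication, 2 (WRITE_ONLY) only flushes the image, and 0 merely
+ * clears any stored authentication status.
+ */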
+
+static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return nvm_authenticate_show(dev, attr, buf);
+}
+
+static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+
+ ret = nvm_authenticate_sysfs(dev, buf, true);
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
+
+static ssize_t nvm_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ int ret;
+
+ if (!mutex_trylock(&sw->tb->lock))
+ return restart_syscall();
+
+ if (sw->safe_mode)
+ ret = -ENODATA;
+ else if (!sw->nvm)
+ ret = -EAGAIN;
+ else
+ ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
+
+ mutex_unlock(&sw->tb->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(nvm_version);
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%#x\n", sw->vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t
+vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : "");
+}
+static DEVICE_ATTR_RO(vendor_name);
+
+static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ return sprintf(buf, "%pUb\n", sw->uuid);
+}
+static DEVICE_ATTR_RO(unique_id);
+
+static struct attribute *switch_attrs[] = {
+ &dev_attr_authorized.attr,
+ &dev_attr_boot.attr,
+ &dev_attr_device.attr,
+ &dev_attr_device_name.attr,
+ &dev_attr_generation.attr,
+ &dev_attr_key.attr,
+ &dev_attr_nvm_authenticate.attr,
+ &dev_attr_nvm_authenticate_on_disconnect.attr,
+ &dev_attr_nvm_version.attr,
+ &dev_attr_rx_speed.attr,
+ &dev_attr_rx_lanes.attr,
+ &dev_attr_tx_speed.attr,
+ &dev_attr_tx_lanes.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_vendor_name.attr,
+ &dev_attr_unique_id.attr,
+ NULL,
+};
+
+static umode_t switch_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ if (attr == &dev_attr_device.attr) {
+ if (!sw->device)
+ return 0;
+ } else if (attr == &dev_attr_device_name.attr) {
+ if (!sw->device_name)
+ return 0;
+ } else if (attr == &dev_attr_vendor.attr) {
+ if (!sw->vendor)
+ return 0;
+ } else if (attr == &dev_attr_vendor_name.attr) {
+ if (!sw->vendor_name)
+ return 0;
+ } else if (attr == &dev_attr_key.attr) {
+ if (tb_route(sw) &&
+ sw->tb->security_level == TB_SECURITY_SECURE &&
+ sw->security_level == TB_SECURITY_SECURE)
+ return attr->mode;
+ return 0;
+ } else if (attr == &dev_attr_rx_speed.attr ||
+ attr == &dev_attr_rx_lanes.attr ||
+ attr == &dev_attr_tx_speed.attr ||
+ attr == &dev_attr_tx_lanes.attr) {
+ if (tb_route(sw))
+ return attr->mode;
+ return 0;
+ } else if (attr == &dev_attr_nvm_authenticate.attr) {
+ if (nvm_upgradeable(sw))
+ return attr->mode;
+ return 0;
+ } else if (attr == &dev_attr_nvm_version.attr) {
+ if (nvm_readable(sw))
+ return attr->mode;
+ return 0;
+ } else if (attr == &dev_attr_boot.attr) {
+ if (tb_route(sw))
+ return attr->mode;
+ return 0;
+ } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
+ if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
+ return attr->mode;
+ return 0;
+ }
+
+ return sw->safe_mode ? 0 : attr->mode;
+}
+
+static struct attribute_group switch_group = {
+ .is_visible = switch_attr_is_visible,
+ .attrs = switch_attrs,
+};
+
+static const struct attribute_group *switch_groups[] = {
+ &switch_group,
+ NULL,
+};
+
+static void tb_switch_release(struct device *dev)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ struct tb_port *port;
+
+ dma_port_free(sw->dma_port);
+
+ tb_switch_for_each_port(sw, port) {
+ ida_destroy(&port->in_hopids);
+ ida_destroy(&port->out_hopids);
+ }
+
+ kfree(sw->uuid);
+ kfree(sw->device_name);
+ kfree(sw->vendor_name);
+ kfree(sw->ports);
+ kfree(sw->drom);
+ kfree(sw->key);
+ kfree(sw);
+}
+
+/*
+ * Currently we only need to provide the callbacks. Everything else is handled
+ * in the connection manager.
+ */
+static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
+
+ if (cm_ops->runtime_suspend_switch)
+ return cm_ops->runtime_suspend_switch(sw);
+
+ return 0;
+}
+
+static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
+
+ if (cm_ops->runtime_resume_switch)
+ return cm_ops->runtime_resume_switch(sw);
+ return 0;
+}
+
+static const struct dev_pm_ops tb_switch_pm_ops = {
+ SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
+ NULL)
+};
+
+struct device_type tb_switch_type = {
+ .name = "thunderbolt_device",
+ .release = tb_switch_release,
+ .pm = &tb_switch_pm_ops,
+};
+
+static int tb_switch_get_generation(struct tb_switch *sw)
+{
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+ case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
+ case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+ case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
+ case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
+ return 1;
+
+ case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
+ return 2;
+
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI1:
+ return 3;
+
+ default:
+ if (tb_switch_is_usb4(sw))
+ return 4;
+
+ /*
+ * For unknown switches assume generation to be 1 to be
+ * on the safe side.
+ */
+ tb_sw_warn(sw, "unsupported switch device id %#x\n",
+ sw->config.device_id);
+ return 1;
+ }
+}
+
+static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
+{
+ int max_depth;
+
+ if (tb_switch_is_usb4(sw) ||
+ (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
+ max_depth = USB4_SWITCH_MAX_DEPTH;
+ else
+ max_depth = TB_SWITCH_MAX_DEPTH;
+
+ return depth > max_depth;
+}
+
+/**
+ * tb_switch_alloc() - allocate a switch
+ * @tb: Pointer to the owning domain
+ * @parent: Parent device for this switch
+ * @route: Route string for this switch
+ *
+ * Allocates and initializes a switch. Will not upload configuration to
+ * the switch. For that you need to call tb_switch_configure()
+ * separately. The returned switch should be released by calling
+ * tb_switch_put().
+ *
+ * Return: Pointer to the allocated switch or ERR_PTR() in case of
+ * failure.
+ */
+struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
+ u64 route)
+{
+ struct tb_switch *sw;
+ int upstream_port;
+ int i, ret, depth;
+
+ /* Unlock the downstream port so we can access the switch below */
+ if (route) {
+ struct tb_switch *parent_sw = tb_to_switch(parent);
+ struct tb_port *down;
+
+ down = tb_port_at(route, parent_sw);
+ tb_port_unlock(down);
+ }
+
+ depth = tb_route_length(route);
+
+ upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
+ if (upstream_port < 0)
+ return ERR_PTR(upstream_port);
+
+ sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return ERR_PTR(-ENOMEM);
+
+ sw->tb = tb;
+ ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
+ if (ret)
+ goto err_free_sw_ports;
+
+ sw->generation = tb_switch_get_generation(sw);
+
+ tb_dbg(tb, "current switch config:\n");
+ tb_dump_switch(tb, sw);
+
+ /* configure switch */
+ sw->config.upstream_port_number = upstream_port;
+ sw->config.depth = depth;
+ sw->config.route_hi = upper_32_bits(route);
+ sw->config.route_lo = lower_32_bits(route);
+ sw->config.enabled = 0;
+
+ /* Make sure we do not exceed maximum topology limit */
+ if (tb_switch_exceeds_max_depth(sw, depth)) {
+ ret = -EADDRNOTAVAIL;
+ goto err_free_sw_ports;
+ }
+
+ /* initialize ports */
+ sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
+ GFP_KERNEL);
+ if (!sw->ports) {
+ ret = -ENOMEM;
+ goto err_free_sw_ports;
+ }
+
+ for (i = 0; i <= sw->config.max_port_number; i++) {
+ /* minimum setup for tb_find_cap and tb_drom_read to work */
+ sw->ports[i].sw = sw;
+ sw->ports[i].port = i;
+
+ /* Control port does not need HopID allocation */
+ if (i) {
+ ida_init(&sw->ports[i].in_hopids);
+ ida_init(&sw->ports[i].out_hopids);
+ }
+ }
+
+ ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
+ if (ret > 0)
+ sw->cap_plug_events = ret;
+
+ ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
+ if (ret > 0)
+ sw->cap_lc = ret;
+
+ /* Root switch is always authorized */
+ if (!route)
+ sw->authorized = true;
+
+ device_initialize(&sw->dev);
+ sw->dev.parent = parent;
+ sw->dev.bus = &tb_bus_type;
+ sw->dev.type = &tb_switch_type;
+ sw->dev.groups = switch_groups;
+ dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
+
+ return sw;
+
+err_free_sw_ports:
+ kfree(sw->ports);
+ kfree(sw);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
+ * @tb: Pointer to the owning domain
+ * @parent: Parent device for this switch
+ * @route: Route string for this switch
+ *
+ * This creates a switch in safe mode. This means the switch pretty much
+ * lacks all capabilities except the DMA configuration port until it is
+ * flashed with valid NVM firmware.
+ *
+ * The returned switch must be released by calling tb_switch_put().
+ *
+ * Return: Pointer to the allocated switch or ERR_PTR() in case of failure
+ */
+struct tb_switch *
+tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
+{
+ struct tb_switch *sw;
+
+ sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return ERR_PTR(-ENOMEM);
+
+ sw->tb = tb;
+ sw->config.depth = tb_route_length(route);
+ sw->config.route_hi = upper_32_bits(route);
+ sw->config.route_lo = lower_32_bits(route);
+ sw->safe_mode = true;
+
+ device_initialize(&sw->dev);
+ sw->dev.parent = parent;
+ sw->dev.bus = &tb_bus_type;
+ sw->dev.type = &tb_switch_type;
+ sw->dev.groups = switch_groups;
+ dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
+
+ return sw;
+}
+
+/**
+ * tb_switch_configure() - Uploads configuration to the switch
+ * @sw: Switch to configure
+ *
+ * Call this function before the switch is added to the system. It will
+ * upload the configuration to the switch and make it available for the
+ * connection manager to use. Can be called for the switch again after
+ * resume from low power states to re-initialize it.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_switch_configure(struct tb_switch *sw)
+{
+ struct tb *tb = sw->tb;
+ u64 route;
+ int ret;
+
+ route = tb_route(sw);
+
+ tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
+ sw->config.enabled ? "restoring" : "initializing", route,
+ tb_route_length(route), sw->config.upstream_port_number);
+
+ sw->config.enabled = 1;
+
+ if (tb_switch_is_usb4(sw)) {
+ /*
+ * For USB4 devices, we need to program the CM version
+ * accordingly so that it knows to expose all the
+ * additional capabilities.
+ */
+ sw->config.cmuv = USB4_VERSION_1_0;
+ sw->config.plug_events_delay = 0xa;
+
+ /* Enumerate the switch */
+ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
+ ROUTER_CS_1, 4);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_setup(sw);
+ } else {
+ if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
+ tb_sw_warn(sw, "unknown switch vendor id %#x\n",
+ sw->config.vendor_id);
+
+ if (!sw->cap_plug_events) {
+ tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
+ return -ENODEV;
+ }
+
+ /* Enumerate the switch */
+ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
+ ROUTER_CS_1, 3);
+ }
+ if (ret)
+ return ret;
+
+ return tb_plug_events_active(sw, true);
+}
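+
+/*
+ * Lifecycle sketch (illustrative, based on the kernel-doc in this file
+ * rather than on any particular caller): a connection manager typically
+ * does
+ *
+ *	sw = tb_switch_alloc(tb, parent, route);
+ *	if (IS_ERR(sw))
+ *		return PTR_ERR(sw);
+ *	if (tb_switch_configure(sw) || tb_switch_add(sw))
+ *		tb_switch_put(sw);
+ *
+ * and later tears the device down with tb_switch_remove().
+ */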
+
+static int tb_switch_set_uuid(struct tb_switch *sw)
+{
+ bool uid = false;
+ u32 uuid[4];
+ int ret;
+
+ if (sw->uuid)
+ return 0;
+
+ if (tb_switch_is_usb4(sw)) {
+ ret = usb4_switch_read_uid(sw, &sw->uid);
+ if (ret)
+ return ret;
+ uid = true;
+ } else {
+ /*
+ * The newer controllers include fused UUID as part of
+ * link controller specific registers
+ */
+ ret = tb_lc_read_uuid(sw, uuid);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+ uid = true;
+ }
+ }
+
+ if (uid) {
+ /*
+ * ICM generates UUID based on UID and fills the upper
+ * two words with ones. This is not strictly following
+ * UUID format but we want to be compatible with it so
+ * we do the same here.
+ */
+ uuid[0] = sw->uid & 0xffffffff;
+ uuid[1] = (sw->uid >> 32) & 0xffffffff;
+ uuid[2] = 0xffffffff;
+ uuid[3] = 0xffffffff;
+ }
+
+ sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
+ if (!sw->uuid)
+ return -ENOMEM;
+ return 0;
+}
+
+static int tb_switch_add_dma_port(struct tb_switch *sw)
+{
+ u32 status;
+ int ret;
+
+ switch (sw->generation) {
+ case 2:
+ /* Only root switch can be upgraded */
+ if (tb_route(sw))
+ return 0;
+
+ fallthrough;
+ case 3:
+ ret = tb_switch_set_uuid(sw);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ /*
+ * DMA port is the only thing available when the switch
+ * is in safe mode.
+ */
+ if (!sw->safe_mode)
+ return 0;
+ break;
+ }
+
+ /* Root switch DMA port requires running firmware */
+ if (!tb_route(sw) && !tb_switch_is_icm(sw))
+ return 0;
+
+ sw->dma_port = dma_port_alloc(sw);
+ if (!sw->dma_port)
+ return 0;
+
+ if (sw->no_nvm_upgrade)
+ return 0;
+
+ /*
+ * If there is a status already set then authentication failed
+ * when dma_port_flash_update_auth() returned. Power cycling
+ * is not needed (it was done already) so the only thing we do here
+ * is to unblock runtime PM of the root port.
+ */
+ nvm_get_auth_status(sw, &status);
+ if (status) {
+ if (!tb_route(sw))
+ nvm_authenticate_complete_dma_port(sw);
+ return 0;
+ }
+
+ /*
+ * Check status of the previous flash authentication. If there
+ * is one we need to power cycle the switch in any case to make
+ * it functional again.
+ */
+ ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+ if (ret <= 0)
+ return ret;
+
+ /* Now we can allow root port to suspend again */
+ if (!tb_route(sw))
+ nvm_authenticate_complete_dma_port(sw);
+
+ if (status) {
+ tb_sw_info(sw, "switch flash authentication failed\n");
+ nvm_set_auth_status(sw, status);
+ }
+
+ tb_sw_info(sw, "power cycling the switch now\n");
+ dma_port_power_cycle(sw->dma_port);
+
+ /*
+ * We return an error here which causes adding the switch to fail.
+ * It should appear back after the power cycle is complete.
+ */
+ return -ESHUTDOWN;
+}
+
+static void tb_switch_default_link_ports(struct tb_switch *sw)
+{
+ int i;
+
+ for (i = 1; i <= sw->config.max_port_number; i++) {
+ struct tb_port *port = &sw->ports[i];
+ struct tb_port *subordinate;
+
+ if (!tb_port_is_null(port))
+ continue;
+
+ /* Check for the subordinate port */
+ if (i == sw->config.max_port_number ||
+ !tb_port_is_null(&sw->ports[i + 1]))
+ continue;
+
+ /* Link them if not already done so (by DROM) */
+ subordinate = &sw->ports[i + 1];
+ if (!port->dual_link_port && !subordinate->dual_link_port) {
+ port->link_nr = 0;
+ port->dual_link_port = subordinate;
+ subordinate->link_nr = 1;
+ subordinate->dual_link_port = port;
+
+ tb_sw_dbg(sw, "linked ports %d <-> %d\n",
+ port->port, subordinate->port);
+ }
+ }
+}
+
+static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
+{
+ const struct tb_port *up = tb_upstream_port(sw);
+
+ if (!up->dual_link_port || !up->dual_link_port->remote)
+ return false;
+
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_lane_bonding_possible(sw);
+ return tb_lc_lane_bonding_possible(sw);
+}
+
+static int tb_switch_update_link_attributes(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ bool change = false;
+ int ret;
+
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+
+ ret = tb_port_get_link_speed(up);
+ if (ret < 0)
+ return ret;
+ if (sw->link_speed != ret)
+ change = true;
+ sw->link_speed = ret;
+
+ ret = tb_port_get_link_width(up);
+ if (ret < 0)
+ return ret;
+ if (sw->link_width != ret)
+ change = true;
+ sw->link_width = ret;
+
+ /* Notify userspace that there is possible link attribute change */
+ if (device_is_registered(&sw->dev) && change)
+ kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+/**
+ * tb_switch_lane_bonding_enable() - Enable lane bonding
+ * @sw: Switch to enable lane bonding
+ *
+ * Connection manager can call this function to enable lane bonding of a
+ * switch. If conditions are correct and both switches support the feature,
+ * lanes are bonded. It is safe to call this for any switch.
+ */
+int tb_switch_lane_bonding_enable(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_to_switch(sw->dev.parent);
+ struct tb_port *up, *down;
+ u64 route = tb_route(sw);
+ int ret;
+
+ if (!route)
+ return 0;
+
+ if (!tb_switch_lane_bonding_possible(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(route, parent);
+
+ if (!tb_port_is_width_supported(up, 2) ||
+ !tb_port_is_width_supported(down, 2))
+ return 0;
+
+ /*
+ * Both lanes need to be in CL0. Here we assume lane 0 is already in
+ * CL0 and check just for lane 1.
+ */
+ if (tb_wait_for_port(down->dual_link_port, false) <= 0)
+ return -ENOTCONN;
+
+ ret = tb_port_lane_bonding_enable(up);
+ if (ret) {
+ tb_port_warn(up, "failed to enable lane bonding\n");
+ return ret;
+ }
+
+ ret = tb_port_lane_bonding_enable(down);
+ if (ret) {
+ tb_port_warn(down, "failed to enable lane bonding\n");
+ tb_port_lane_bonding_disable(up);
+ return ret;
+ }
+
+ tb_switch_update_link_attributes(sw);
+
+ tb_sw_dbg(sw, "lane bonding enabled\n");
+ return ret;
+}
+
+/**
+ * tb_switch_lane_bonding_disable() - Disable lane bonding
+ * @sw: Switch whose lane bonding to disable
+ *
+ * Disables lane bonding between @sw and parent. This can be called even
+ * if lanes were not bonded originally.
+ */
+void tb_switch_lane_bonding_disable(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_to_switch(sw->dev.parent);
+ struct tb_port *up, *down;
+
+ if (!tb_route(sw))
+ return;
+
+ up = tb_upstream_port(sw);
+ if (!up->bonded)
+ return;
+
+ down = tb_port_at(tb_route(sw), parent);
+
+ tb_port_lane_bonding_disable(up);
+ tb_port_lane_bonding_disable(down);
+
+ tb_switch_update_link_attributes(sw);
+ tb_sw_dbg(sw, "lane bonding disabled\n");
+}
+
+/**
+ * tb_switch_configure_link() - Set link configured
+ * @sw: Switch whose link is configured
+ *
+ * Sets the link upstream from @sw configured (from both ends) so that
+ * it will not be disconnected when the domain exits sleep. Can be
+ * called for any switch.
+ *
+ * It is recommended that this is called after lane bonding is enabled.
+ *
+ * Returns %0 on success and negative errno in case of error.
+ */
+int tb_switch_configure_link(struct tb_switch *sw)
+{
+ struct tb_port *up, *down;
+ int ret;
+
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ if (tb_switch_is_usb4(up->sw))
+ ret = usb4_port_configure(up);
+ else
+ ret = tb_lc_configure_port(up);
+ if (ret)
+ return ret;
+
+ down = up->remote;
+ if (tb_switch_is_usb4(down->sw))
+ return usb4_port_configure(down);
+ return tb_lc_configure_port(down);
+}
+
+/**
+ * tb_switch_unconfigure_link() - Unconfigure link
+ * @sw: Switch whose link is unconfigured
+ *
+ * Sets the link unconfigured so the @sw will be disconnected if the
+ * domain exits sleep.
+ */
+void tb_switch_unconfigure_link(struct tb_switch *sw)
+{
+ struct tb_port *up, *down;
+
+ if (sw->is_unplugged)
+ return;
+ if (!tb_route(sw) || tb_switch_is_icm(sw))
+ return;
+
+ up = tb_upstream_port(sw);
+ if (tb_switch_is_usb4(up->sw))
+ usb4_port_unconfigure(up);
+ else
+ tb_lc_unconfigure_port(up);
+
+ down = up->remote;
+ if (tb_switch_is_usb4(down->sw))
+ usb4_port_unconfigure(down);
+ else
+ tb_lc_unconfigure_port(down);
+}
+
+static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ if (tb_switch_is_icm(sw))
+ return 0;
+
+ tb_switch_for_each_port(sw, port) {
+ int res;
+
+ if (!port->cap_usb4)
+ continue;
+
+ res = usb4_port_hotplug_enable(port);
+ if (res)
+ return res;
+ }
+ return 0;
+}
+
+/**
+ * tb_switch_add() - Add a switch to the domain
+ * @sw: Switch to add
+ *
+ * This is the last step in adding a switch to the domain. It will read
+ * identification information from the DROM and initialize ports so that
+ * they can be used to connect other switches. The switch will be
+ * exposed to the userspace when this function successfully returns. To
+ * remove and release the switch, call tb_switch_remove().
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_switch_add(struct tb_switch *sw)
+{
+ int i, ret;
+
+ /*
+ * Initialize the DMA control port now, before we read the DROM.
+ * Recent host controllers have a more complete DROM in NVM that
+ * includes vendor and model identification strings, which we then
+ * expose to userspace. The NVM can be accessed through the DMA
+ * configuration based mailbox.
+ */
+ ret = tb_switch_add_dma_port(sw);
+ if (ret) {
+ dev_err(&sw->dev, "failed to add DMA port\n");
+ return ret;
+ }
+
+ if (!sw->safe_mode) {
+ /* read drom */
+ ret = tb_drom_read(sw);
+ if (ret) {
+ dev_err(&sw->dev, "reading DROM failed\n");
+ return ret;
+ }
+ tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
+
+ ret = tb_switch_set_uuid(sw);
+ if (ret) {
+ dev_err(&sw->dev, "failed to set UUID\n");
+ return ret;
+ }
+
+ for (i = 0; i <= sw->config.max_port_number; i++) {
+ if (sw->ports[i].disabled) {
+ tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
+ continue;
+ }
+ ret = tb_init_port(&sw->ports[i]);
+ if (ret) {
+ dev_err(&sw->dev, "failed to initialize port %d\n", i);
+ return ret;
+ }
+ }
+
+ tb_switch_default_link_ports(sw);
+
+ ret = tb_switch_update_link_attributes(sw);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_tmu_init(sw);
+ if (ret)
+ return ret;
+ }
+
+ ret = tb_switch_port_hotplug_enable(sw);
+ if (ret)
+ return ret;
+
+ ret = device_add(&sw->dev);
+ if (ret) {
+ dev_err(&sw->dev, "failed to add device: %d\n", ret);
+ return ret;
+ }
+
+ if (tb_route(sw)) {
+ dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
+ sw->vendor, sw->device);
+ if (sw->vendor_name && sw->device_name)
+ dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
+ sw->device_name);
+ }
+
+ ret = tb_switch_nvm_add(sw);
+ if (ret) {
+ dev_err(&sw->dev, "failed to add NVM devices\n");
+ device_del(&sw->dev);
+ return ret;
+ }
+
+ /*
+ * Thunderbolt routers do not generate wakeups themselves but
+ * they forward wakeups from tunneled protocols, so enable it
+ * here.
+ */
+ device_init_wakeup(&sw->dev, true);
+
+ pm_runtime_set_active(&sw->dev);
+ if (sw->rpm) {
+ pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&sw->dev);
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_enable(&sw->dev);
+ pm_request_autosuspend(&sw->dev);
+ }
+
+ tb_switch_debugfs_init(sw);
+ return 0;
+}
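+
+/*
+ * Illustrative sketch (not part of the driver logic): the lifecycle pairing
+ * implied by the kernel-doc above, roughly as the connection manager uses it
+ * when scanning ports. The route value here is hypothetical.
+ *
+ *	sw = tb_switch_alloc(tb, parent_dev, route);
+ *	if (IS_ERR(sw))
+ *		return PTR_ERR(sw);
+ *	if (tb_switch_configure(sw) || tb_switch_add(sw)) {
+ *		tb_switch_put(sw);
+ *		return -ENODEV;
+ *	}
+ *	...
+ *	tb_switch_remove(sw);	(on teardown)
+ */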
+
+/**
+ * tb_switch_remove() - Remove and release a switch
+ * @sw: Switch to remove
+ *
+ * This will remove the switch from the domain and release it once the
+ * last reference to it is dropped. If there are switches connected below
+ * this switch, they will be removed as well.
+ */
+void tb_switch_remove(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_debugfs_remove(sw);
+
+ if (sw->rpm) {
+ pm_runtime_get_sync(&sw->dev);
+ pm_runtime_disable(&sw->dev);
+ }
+
+ /* port 0 is the switch itself and never has a remote */
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port)) {
+ tb_switch_remove(port->remote->sw);
+ port->remote = NULL;
+ } else if (port->xdomain) {
+ tb_xdomain_remove(port->xdomain);
+ port->xdomain = NULL;
+ }
+
+ /* Remove any downstream retimers */
+ tb_retimer_remove_all(port);
+ }
+
+ if (!sw->is_unplugged)
+ tb_plug_events_active(sw, false);
+
+ tb_switch_nvm_remove(sw);
+
+ if (tb_route(sw))
+ dev_info(&sw->dev, "device disconnected\n");
+ device_unregister(&sw->dev);
+}
+
+/**
+ * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
+ */
+void tb_sw_set_unplugged(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ if (sw == sw->tb->root_switch) {
+ tb_sw_WARN(sw, "cannot unplug root switch\n");
+ return;
+ }
+ if (sw->is_unplugged) {
+ tb_sw_WARN(sw, "is_unplugged already set\n");
+ return;
+ }
+ sw->is_unplugged = true;
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_sw_set_unplugged(port->remote->sw);
+ else if (port->xdomain)
+ port->xdomain->is_unplugged = true;
+ }
+}
+
+static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+{
+ if (flags)
+ tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
+ else
+ tb_sw_dbg(sw, "disabling wakeup\n");
+
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_set_wake(sw, flags);
+ return tb_lc_set_wake(sw, flags);
+}
+
+int tb_switch_resume(struct tb_switch *sw)
+{
+ struct tb_port *port;
+ int err;
+
+ tb_sw_dbg(sw, "resuming switch\n");
+
+ /*
+ * Check the UID of the connected switches, except for the root
+ * switch which we assume cannot be removed.
+ */
+ if (tb_route(sw)) {
+ u64 uid;
+
+ /*
+ * Check first that we can still read the switch config
+ * space. It may be that there is now another domain
+ * connected.
+ */
+ err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
+ if (err < 0) {
+ tb_sw_info(sw, "switch not present anymore\n");
+ return err;
+ }
+
+ if (tb_switch_is_usb4(sw))
+ err = usb4_switch_read_uid(sw, &uid);
+ else
+ err = tb_drom_read_uid_only(sw, &uid);
+ if (err) {
+ tb_sw_warn(sw, "uid read failed\n");
+ return err;
+ }
+ if (sw->uid != uid) {
+ tb_sw_info(sw,
+ "changed while suspended (uid %#llx -> %#llx)\n",
+ sw->uid, uid);
+ return -ENODEV;
+ }
+ }
+
+ err = tb_switch_configure(sw);
+ if (err)
+ return err;
+
+ /* Disable wakes */
+ tb_switch_set_wake(sw, 0);
+
+ err = tb_switch_tmu_init(sw);
+ if (err)
+ return err;
+
+ /* check for surviving downstream switches */
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port) && !port->xdomain)
+ continue;
+
+ if (tb_wait_for_port(port, true) <= 0) {
+ tb_port_warn(port,
+ "lost during suspend, disconnecting\n");
+ if (tb_port_has_remote(port))
+ tb_sw_set_unplugged(port->remote->sw);
+ else if (port->xdomain)
+ port->xdomain->is_unplugged = true;
+ } else if (tb_port_has_remote(port) || port->xdomain) {
+ /*
+ * Always unlock the port so the downstream
+ * switch/domain is accessible.
+ */
+ if (tb_port_unlock(port))
+ tb_port_warn(port, "failed to unlock port\n");
+ if (port->remote && tb_switch_resume(port->remote->sw)) {
+ tb_port_warn(port,
+ "lost during suspend, disconnecting\n");
+ tb_sw_set_unplugged(port->remote->sw);
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * tb_switch_suspend() - Put a switch to sleep
+ * @sw: Switch to suspend
+ * @runtime: Is this runtime suspend or system sleep
+ *
+ * Suspends the router and all its children. Enables wakes according to
+ * the value of @runtime and then sets the sleep bit for the router. If
+ * @sw is the host router, the domain is ready to go to sleep once this
+ * function returns.
+ */
+void tb_switch_suspend(struct tb_switch *sw, bool runtime)
+{
+ unsigned int flags = 0;
+ struct tb_port *port;
+ int err;
+
+ tb_sw_dbg(sw, "suspending switch\n");
+
+ err = tb_plug_events_active(sw, false);
+ if (err)
+ return;
+
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_switch_suspend(port->remote->sw, runtime);
+ }
+
+ if (runtime) {
+ /* Trigger wake when something is plugged in/out */
+ flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
+ flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
+ } else if (device_may_wakeup(&sw->dev)) {
+ flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
+ }
+
+ tb_switch_set_wake(sw, flags);
+
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_set_sleep(sw);
+ else
+ tb_lc_set_sleep(sw);
+}
+
+/**
+ * tb_switch_query_dp_resource() - Query availability of DP resource
+ * @sw: Switch whose DP resource is queried
+ * @in: DP IN port
+ *
+ * Queries availability of DP resource for DP tunneling using switch
+ * specific means. Returns %true if resource is available.
+ */
+bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_query_dp_resource(sw, in);
+ return tb_lc_dp_sink_query(sw, in);
+}
+
+/**
+ * tb_switch_alloc_dp_resource() - Allocate available DP resource
+ * @sw: Switch whose DP resource is allocated
+ * @in: DP IN port
+ *
+ * Allocates DP resource for DP tunneling. The resource must be
+ * available for this to succeed (see tb_switch_query_dp_resource()).
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ if (tb_switch_is_usb4(sw))
+ return usb4_switch_alloc_dp_resource(sw, in);
+ return tb_lc_dp_sink_alloc(sw, in);
+}
+
+/**
+ * tb_switch_dealloc_dp_resource() - De-allocate DP resource
+ * @sw: Switch whose DP resource is de-allocated
+ * @in: DP IN port
+ *
+ * De-allocates DP resource that was previously allocated for DP
+ * tunneling.
+ */
+void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ int ret;
+
+ if (tb_switch_is_usb4(sw))
+ ret = usb4_switch_dealloc_dp_resource(sw, in);
+ else
+ ret = tb_lc_dp_sink_dealloc(sw, in);
+
+ if (ret)
+ tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
+ in->port);
+}
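+
+/*
+ * Illustrative sketch (not part of the driver logic): the three DP resource
+ * helpers above are used together, roughly as the software connection
+ * manager does when it sets up and later tears down a DP tunnel:
+ *
+ *	if (!tb_switch_query_dp_resource(sw, in))
+ *		return;				(no resource available)
+ *	if (tb_switch_alloc_dp_resource(sw, in))
+ *		return;				(allocation failed)
+ *	...establish and later tear down the DP tunnel...
+ *	tb_switch_dealloc_dp_resource(sw, in);
+ */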
+
+struct tb_sw_lookup {
+ struct tb *tb;
+ u8 link;
+ u8 depth;
+ const uuid_t *uuid;
+ u64 route;
+};
+
+static int tb_switch_match(struct device *dev, const void *data)
+{
+ struct tb_switch *sw = tb_to_switch(dev);
+ const struct tb_sw_lookup *lookup = data;
+
+ if (!sw)
+ return 0;
+ if (sw->tb != lookup->tb)
+ return 0;
+
+ if (lookup->uuid)
+ return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
+
+ if (lookup->route) {
+ return sw->config.route_lo == lower_32_bits(lookup->route) &&
+ sw->config.route_hi == upper_32_bits(lookup->route);
+ }
+
+ /* Root switch is matched only by depth */
+ if (!lookup->depth)
+ return !sw->depth;
+
+ return sw->link == lookup->link && sw->depth == lookup->depth;
+}
+
+/**
+ * tb_switch_find_by_link_depth() - Find switch by link and depth
+ * @tb: Domain the switch belongs
+ * @link: Link number the switch is connected to
+ * @depth: Depth of the switch in the link
+ *
+ * Returned switch has reference count increased so the caller needs to
+ * call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
+{
+ struct tb_sw_lookup lookup;
+ struct device *dev;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.tb = tb;
+ lookup.link = link;
+ lookup.depth = depth;
+
+ dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+ if (dev)
+ return tb_to_switch(dev);
+
+ return NULL;
+}
+
+/**
+ * tb_switch_find_by_uuid() - Find switch by UUID
+ * @tb: Domain the switch belongs
+ * @uuid: UUID to look for
+ *
+ * Returned switch has reference count increased so the caller needs to
+ * call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
+{
+ struct tb_sw_lookup lookup;
+ struct device *dev;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.tb = tb;
+ lookup.uuid = uuid;
+
+ dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+ if (dev)
+ return tb_to_switch(dev);
+
+ return NULL;
+}
+
+/**
+ * tb_switch_find_by_route() - Find switch by route string
+ * @tb: Domain the switch belongs
+ * @route: Route string to look for
+ *
+ * Returned switch has reference count increased so the caller needs to
+ * call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
+{
+ struct tb_sw_lookup lookup;
+ struct device *dev;
+
+ if (!route)
+ return tb_switch_get(tb->root_switch);
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.tb = tb;
+ lookup.route = route;
+
+ dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+ if (dev)
+ return tb_to_switch(dev);
+
+ return NULL;
+}
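+
+/*
+ * Illustrative sketch (not part of the driver logic): the
+ * tb_switch_find_by_*() lookups above return a referenced switch, so the
+ * lookup must be balanced with tb_switch_put(), e.g.:
+ *
+ *	sw = tb_switch_find_by_route(tb, route);
+ *	if (sw) {
+ *		...use sw...
+ *		tb_switch_put(sw);
+ *	}
+ */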
+
+/**
+ * tb_switch_find_port() - return the first port of @type on @sw or NULL
+ * @sw: Switch to find the port from
+ * @type: Port type to look for
+ */
+struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+ enum tb_port_type type)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (port->config.type == type)
+ return port;
+ }
+
+ return NULL;
+}
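+
+/*
+ * Illustrative sketch (not part of the driver logic): a typical use of
+ * tb_switch_find_port() is locating the host interface (NHI) adapter of the
+ * root router when setting up DMA paths. Unlike the domain-wide lookups
+ * above, no reference is taken on the returned port:
+ *
+ *	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
+ */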
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
new file mode 100644
index 000000000..a56ea540a
--- /dev/null
+++ b/drivers/thunderbolt/tb.c
@@ -0,0 +1,1542 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt driver - bus logic (NHI independent)
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+
+#include "tb.h"
+#include "tb_regs.h"
+#include "tunnel.h"
+
+/**
+ * struct tb_cm - Simple Thunderbolt connection manager
+ * @tunnel_list: List of active tunnels
+ * @dp_resources: List of available DP resources for DP tunneling
+ * @hotplug_active: tb_handle_hotplug will stop progressing plug
+ * events and exit if this is not set (it needs to
+ * acquire the lock one more time). Used to drain wq
+ * after cfg has been paused.
+ * @remove_work: Work used to remove any unplugged routers after
+ * runtime resume
+ */
+struct tb_cm {
+ struct list_head tunnel_list;
+ struct list_head dp_resources;
+ bool hotplug_active;
+ struct delayed_work remove_work;
+};
+
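+/*
+ * The connection manager private data is allocated by tb_domain_alloc()
+ * right after struct tb, so the owning domain can be recovered from the
+ * private pointer with simple pointer arithmetic.
+ */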
+static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
+{
+ return ((void *)tcm - sizeof(struct tb));
+}
+
+struct tb_hotplug_event {
+ struct work_struct work;
+ struct tb *tb;
+ u64 route;
+ u8 port;
+ bool unplug;
+};
+
+static void tb_handle_hotplug(struct work_struct *work);
+
+static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
+{
+ struct tb_hotplug_event *ev;
+
+ ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return;
+
+ ev->tb = tb;
+ ev->route = route;
+ ev->port = port;
+ ev->unplug = unplug;
+ INIT_WORK(&ev->work, tb_handle_hotplug);
+ queue_work(tb->wq, &ev->work);
+}
+
+/* enumeration & hot plug handling */
+
+static void tb_add_dp_resources(struct tb_switch *sw)
+{
+ struct tb_cm *tcm = tb_priv(sw->tb);
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (!tb_switch_query_dp_resource(sw, port))
+ continue;
+
+ list_add_tail(&port->list, &tcm->dp_resources);
+ tb_port_dbg(port, "DP IN resource available\n");
+ }
+}
+
+static void tb_remove_dp_resources(struct tb_switch *sw)
+{
+ struct tb_cm *tcm = tb_priv(sw->tb);
+ struct tb_port *port, *tmp;
+
+ /* Clear children resources first */
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_remove_dp_resources(port->remote->sw);
+ }
+
+ list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
+ if (port->sw == sw) {
+ tb_port_dbg(port, "DP OUT resource unavailable\n");
+ list_del_init(&port->list);
+ }
+ }
+}
+
+static void tb_discover_tunnels(struct tb_switch *sw)
+{
+ struct tb *tb = sw->tb;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ struct tb_tunnel *tunnel = NULL;
+
+ switch (port->config.type) {
+ case TB_TYPE_DP_HDMI_IN:
+ tunnel = tb_tunnel_discover_dp(tb, port);
+ break;
+
+ case TB_TYPE_PCIE_DOWN:
+ tunnel = tb_tunnel_discover_pci(tb, port);
+ break;
+
+ case TB_TYPE_USB3_DOWN:
+ tunnel = tb_tunnel_discover_usb3(tb, port);
+ break;
+
+ default:
+ break;
+ }
+
+ if (!tunnel)
+ continue;
+
+ if (tb_tunnel_is_pci(tunnel)) {
+ struct tb_switch *parent = tunnel->dst_port->sw;
+
+ while (parent != tunnel->src_port->sw) {
+ parent->boot = true;
+ parent = tb_switch_parent(parent);
+ }
+ } else if (tb_tunnel_is_dp(tunnel)) {
+ /* Keep the domain from powering down */
+ pm_runtime_get_sync(&tunnel->src_port->sw->dev);
+ pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
+ }
+
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ }
+
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port))
+ tb_discover_tunnels(port->remote->sw);
+ }
+}
+
+static int tb_port_configure_xdomain(struct tb_port *port)
+{
+ /*
+ * XDomain paths currently only support single lane so we must
+ * disable the other lane according to USB4 spec.
+ */
+ tb_port_disable(port->dual_link_port);
+
+ if (tb_switch_is_usb4(port->sw))
+ return usb4_port_configure_xdomain(port);
+ return tb_lc_configure_xdomain(port);
+}
+
+static void tb_port_unconfigure_xdomain(struct tb_port *port)
+{
+ if (tb_switch_is_usb4(port->sw))
+ usb4_port_unconfigure_xdomain(port);
+ else
+ tb_lc_unconfigure_xdomain(port);
+
+ tb_port_enable(port->dual_link_port);
+}
+
+static void tb_scan_xdomain(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ struct tb *tb = sw->tb;
+ struct tb_xdomain *xd;
+ u64 route;
+
+ route = tb_downstream_route(port);
+ xd = tb_xdomain_find_by_route(tb, route);
+ if (xd) {
+ tb_xdomain_put(xd);
+ return;
+ }
+
+ xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
+ NULL);
+ if (xd) {
+ tb_port_at(route, sw)->xdomain = xd;
+ tb_port_configure_xdomain(port);
+ tb_xdomain_add(xd);
+ }
+}
+
+static int tb_enable_tmu(struct tb_switch *sw)
+{
+ int ret;
+
+ /* If it is already enabled in correct mode, don't touch it */
+ if (tb_switch_tmu_is_enabled(sw))
+ return 0;
+
+ ret = tb_switch_tmu_disable(sw);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_tmu_post_time(sw);
+ if (ret)
+ return ret;
+
+ return tb_switch_tmu_enable(sw);
+}
+
+/**
+ * tb_find_unused_port() - return the first inactive port on @sw
+ * @sw: Switch to find the port on
+ * @type: Port type to look for
+ */
+static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
+ enum tb_port_type type)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (tb_is_upstream_port(port))
+ continue;
+ if (port->config.type != type)
+ continue;
+ if (!port->cap_adap)
+ continue;
+ if (tb_port_is_enabled(port))
+ continue;
+ return port;
+ }
+ return NULL;
+}
+
+static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ struct tb_port *down;
+
+ down = usb4_switch_map_usb3_down(sw, port);
+ if (down && !tb_usb3_port_is_enabled(down))
+ return down;
+ return NULL;
+}
+
+static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
+ struct tb_port *src_port,
+ struct tb_port *dst_port)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tunnel->type == type &&
+ ((src_port && src_port == tunnel->src_port) ||
+ (dst_port && dst_port == tunnel->dst_port))) {
+ return tunnel;
+ }
+ }
+
+ return NULL;
+}
+
+static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
+ struct tb_port *src_port,
+ struct tb_port *dst_port)
+{
+ struct tb_port *port, *usb3_down;
+ struct tb_switch *sw;
+
+ /* Pick the router that is deepest in the topology */
+ if (dst_port->sw->config.depth > src_port->sw->config.depth)
+ sw = dst_port->sw;
+ else
+ sw = src_port->sw;
+
+ /* Can't be the host router */
+ if (sw == tb->root_switch)
+ return NULL;
+
+ /* Find the downstream USB4 port that leads to this router */
+ port = tb_port_at(tb_route(sw), tb->root_switch);
+ /* Find the corresponding host router USB3 downstream port */
+ usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
+ if (!usb3_down)
+ return NULL;
+
+ return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
+}
+
+static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
+ struct tb_port *dst_port, int *available_up, int *available_down)
+{
+ int usb3_consumed_up, usb3_consumed_down, ret;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+ struct tb_port *port;
+
+ tb_port_dbg(dst_port, "calculating available bandwidth\n");
+
+ tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+ if (tunnel) {
+ ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
+ &usb3_consumed_down);
+ if (ret)
+ return ret;
+ } else {
+ usb3_consumed_up = 0;
+ usb3_consumed_down = 0;
+ }
+
+ *available_up = *available_down = 40000;
+
+ /* Find the minimum available bandwidth over all links */
+ tb_for_each_port_on_path(src_port, dst_port, port) {
+ int link_speed, link_width, up_bw, down_bw;
+
+ if (!tb_port_is_null(port))
+ continue;
+
+ if (tb_is_upstream_port(port)) {
+ link_speed = port->sw->link_speed;
+ } else {
+ link_speed = tb_port_get_link_speed(port);
+ if (link_speed < 0)
+ return link_speed;
+ }
+
+ link_width = port->bonded ? 2 : 1;
+
+ up_bw = link_speed * link_width * 1000; /* Mb/s */
+ /* Leave 10% guard band */
+ up_bw -= up_bw / 10;
+ down_bw = up_bw;
+
+ tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
+
+ /*
+ * Find all DP tunnels that cross the port and reduce
+ * their consumed bandwidth from the available.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ int dp_consumed_up, dp_consumed_down;
+
+ if (!tb_tunnel_is_dp(tunnel))
+ continue;
+
+ if (!tb_tunnel_port_on_path(tunnel, port))
+ continue;
+
+ ret = tb_tunnel_consumed_bandwidth(tunnel,
+ &dp_consumed_up,
+ &dp_consumed_down);
+ if (ret)
+ return ret;
+
+ up_bw -= dp_consumed_up;
+ down_bw -= dp_consumed_down;
+ }
+
+ /*
+ * If USB3 is tunneled from the host router down to the
+ * branch leading to the port, we need to take USB3 consumed
+ * bandwidth into account regardless of whether it actually
+ * crosses the port.
+ */
+ up_bw -= usb3_consumed_up;
+ down_bw -= usb3_consumed_down;
+
+ if (up_bw < *available_up)
+ *available_up = up_bw;
+ if (down_bw < *available_down)
+ *available_down = down_bw;
+ }
+
+ if (*available_up < 0)
+ *available_up = 0;
+ if (*available_down < 0)
+ *available_down = 0;
+
+ return 0;
+}
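+
+/*
+ * Worked example (not part of the driver logic): for a link running at
+ * 20 Gb/s with two bonded lanes the per-direction budget starts at
+ * 20 * 2 * 1000 = 40000 Mb/s, reduced by the 10% guard band to 36000 Mb/s.
+ * The consumed bandwidth of any DP tunnel crossing the link, and of the
+ * first-hop USB3 tunnel where applicable, is then subtracted from that.
+ */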
+
+static int tb_release_unused_usb3_bandwidth(struct tb *tb,
+ struct tb_port *src_port,
+ struct tb_port *dst_port)
+{
+ struct tb_tunnel *tunnel;
+
+ tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+ return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
+}
+
+static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
+ struct tb_port *dst_port)
+{
+ int ret, available_up, available_down;
+ struct tb_tunnel *tunnel;
+
+ tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
+ if (!tunnel)
+ return;
+
+ tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
+
+ /*
+ * Calculate available bandwidth for the first hop USB3 tunnel.
+ * That determines the whole USB3 bandwidth for this branch.
+ */
+ ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
+ &available_up, &available_down);
+ if (ret) {
+ tb_warn(tb, "failed to calculate available bandwidth\n");
+ return;
+ }
+
+ tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
+ available_up, available_down);
+
+ tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
+}
+
+static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ int ret, available_up, available_down;
+ struct tb_port *up, *down, *port;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
+ if (!up)
+ return 0;
+
+ if (!sw->link_usb4)
+ return 0;
+
+ /*
+ * Look up an available down port. Since we are chaining, it should
+ * be found right above this switch.
+ */
+ port = tb_port_at(tb_route(sw), parent);
+ down = tb_find_usb3_down(parent, port);
+ if (!down)
+ return 0;
+
+ if (tb_route(parent)) {
+ struct tb_port *parent_up;
+ /*
+ * Check first that the parent switch has its upstream USB3
+ * port enabled. Otherwise the chain is not complete and
+ * there is no point setting up a new tunnel.
+ */
+ parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
+ if (!parent_up || !tb_port_is_enabled(parent_up))
+ return 0;
+
+ /* Make all unused bandwidth available for the new tunnel */
+ ret = tb_release_unused_usb3_bandwidth(tb, down, up);
+ if (ret)
+ return ret;
+ }
+
+ ret = tb_available_bandwidth(tb, down, up, &available_up,
+ &available_down);
+ if (ret)
+ goto err_reclaim;
+
+ tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
+ available_up, available_down);
+
+ tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
+ available_down);
+ if (!tunnel) {
+ ret = -ENOMEM;
+ goto err_reclaim;
+ }
+
+ if (tb_tunnel_activate(tunnel)) {
+ tb_port_info(up,
+ "USB3 tunnel activation failed, aborting\n");
+ ret = -EIO;
+ goto err_free;
+ }
+
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ if (tb_route(parent))
+ tb_reclaim_usb3_bandwidth(tb, down, up);
+
+ return 0;
+
+err_free:
+ tb_tunnel_free(tunnel);
+err_reclaim:
+ if (tb_route(parent))
+ tb_reclaim_usb3_bandwidth(tb, down, up);
+
+ return ret;
+}
+
+static int tb_create_usb3_tunnels(struct tb_switch *sw)
+{
+ struct tb_port *port;
+ int ret;
+
+ if (tb_route(sw)) {
+ ret = tb_tunnel_usb3(sw->tb, sw);
+ if (ret)
+ return ret;
+ }
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port))
+ continue;
+ ret = tb_create_usb3_tunnels(port->remote->sw);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void tb_scan_port(struct tb_port *port);
+
+/**
+ * tb_scan_switch() - scan for and initialize downstream switches
+ */
+static void tb_scan_switch(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ pm_runtime_get_sync(&sw->dev);
+
+ tb_switch_for_each_port(sw, port)
+ tb_scan_port(port);
+
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+}
+
+/**
+ * tb_scan_port() - check for and initialize switches below port
+ */
+static void tb_scan_port(struct tb_port *port)
+{
+ struct tb_cm *tcm = tb_priv(port->sw->tb);
+ struct tb_port *upstream_port;
+ struct tb_switch *sw;
+
+ if (tb_is_upstream_port(port))
+ return;
+
+ if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
+ !tb_dp_port_is_enabled(port)) {
+ tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
+ tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
+ false);
+ return;
+ }
+
+ if (port->config.type != TB_TYPE_PORT)
+ return;
+ if (port->dual_link_port && port->link_nr)
+ return; /*
+ * Downstream switch is reachable through two ports.
+ * Only scan on the primary port (link_nr == 0).
+ */
+ if (tb_wait_for_port(port, false) <= 0)
+ return;
+ if (port->remote) {
+ tb_port_dbg(port, "port already has a remote\n");
+ return;
+ }
+
+ tb_retimer_scan(port);
+
+ sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
+ tb_downstream_route(port));
+ if (IS_ERR(sw)) {
+ /*
+ * If there is an error accessing the connected switch
+ * it may be connected to another domain. Also we allow
+ * the other domain to be connected to a max depth switch.
+ */
+ if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
+ tb_scan_xdomain(port);
+ return;
+ }
+
+ if (tb_switch_configure(sw)) {
+ tb_switch_put(sw);
+ return;
+ }
+
+ /*
+ * If there was previously another domain connected remove it
+ * first.
+ */
+ if (port->xdomain) {
+ tb_xdomain_remove(port->xdomain);
+ tb_port_unconfigure_xdomain(port);
+ port->xdomain = NULL;
+ }
+
+ /*
+ * Do not send uevents until we have discovered all existing
+ * tunnels and know which switches were authorized already by
+ * the boot firmware.
+ */
+ if (!tcm->hotplug_active)
+ dev_set_uevent_suppress(&sw->dev, true);
+
+ /*
+ * At the moment runtime PM is supported only for Thunderbolt 2
+ * and beyond (devices with LC).
+ */
+ sw->rpm = sw->generation > 1;
+
+ if (tb_switch_add(sw)) {
+ tb_switch_put(sw);
+ return;
+ }
+
+ /* Link the switches using both links if available */
+ upstream_port = tb_upstream_port(sw);
+ port->remote = upstream_port;
+ upstream_port->remote = port;
+ if (port->dual_link_port && upstream_port->dual_link_port) {
+ port->dual_link_port->remote = upstream_port->dual_link_port;
+ upstream_port->dual_link_port->remote = port->dual_link_port;
+ }
+
+ /* Enable lane bonding if supported */
+ tb_switch_lane_bonding_enable(sw);
+ /* Set the link configured */
+ tb_switch_configure_link(sw);
+
+ if (tb_enable_tmu(sw))
+ tb_sw_warn(sw, "failed to enable TMU\n");
+
+ /* Scan upstream retimers */
+ tb_retimer_scan(upstream_port);
+
+ /*
+ * Create USB 3.x tunnels only when the switch is plugged into the
+ * domain. This is because we also scan the domain during discovery
+ * and want to discover existing USB 3.x tunnels before we create
+ * any new ones.
+ */
+ if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
+ tb_sw_warn(sw, "USB3 tunnel creation failed\n");
+
+ tb_add_dp_resources(sw);
+ tb_scan_switch(sw);
+}
+
+static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
+{
+ struct tb_port *src_port, *dst_port;
+ struct tb *tb;
+
+ if (!tunnel)
+ return;
+
+ tb_tunnel_deactivate(tunnel);
+ list_del(&tunnel->list);
+
+ tb = tunnel->tb;
+ src_port = tunnel->src_port;
+ dst_port = tunnel->dst_port;
+
+ switch (tunnel->type) {
+ case TB_TUNNEL_DP:
+ /*
+ * In case of DP tunnel make sure the DP IN resource is
+ * deallocated properly.
+ */
+ tb_switch_dealloc_dp_resource(src_port->sw, src_port);
+ /* Now we can allow the domain to runtime suspend again */
+ pm_runtime_mark_last_busy(&dst_port->sw->dev);
+ pm_runtime_put_autosuspend(&dst_port->sw->dev);
+ pm_runtime_mark_last_busy(&src_port->sw->dev);
+ pm_runtime_put_autosuspend(&src_port->sw->dev);
+ fallthrough;
+
+ case TB_TUNNEL_USB3:
+ tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
+ break;
+
+ default:
+ /*
+ * PCIe and DMA tunnels do not consume guaranteed
+ * bandwidth.
+ */
+ break;
+ }
+
+ tb_tunnel_free(tunnel);
+}
+
+/**
+ * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
+ */
+static void tb_free_invalid_tunnels(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+ struct tb_tunnel *n;
+
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_invalid(tunnel))
+ tb_deactivate_and_free_tunnel(tunnel);
+ }
+}
+
+/**
+ * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
+ */
+static void tb_free_unplugged_children(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port))
+ continue;
+
+ if (port->remote->sw->is_unplugged) {
+ tb_retimer_remove_all(port);
+ tb_remove_dp_resources(port->remote->sw);
+ tb_switch_unconfigure_link(port->remote->sw);
+ tb_switch_lane_bonding_disable(port->remote->sw);
+ tb_switch_remove(port->remote->sw);
+ port->remote = NULL;
+ if (port->dual_link_port)
+ port->dual_link_port->remote = NULL;
+ } else {
+ tb_free_unplugged_children(port->remote->sw);
+ }
+ }
+}
+
+static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ struct tb_port *down = NULL;
+
+ /*
+ * To keep plugging devices consistently in the same PCIe
+ * hierarchy, do mapping here for switch downstream PCIe ports.
+ */
+ if (tb_switch_is_usb4(sw)) {
+ down = usb4_switch_map_pcie_down(sw, port);
+ } else if (!tb_route(sw)) {
+ int phy_port = tb_phy_port_from_link(port->port);
+ int index;
+
+ /*
+ * Hard-coded Thunderbolt port to PCIe down port mapping
+ * per controller.
+ */
+ if (tb_switch_is_cactus_ridge(sw) ||
+ tb_switch_is_alpine_ridge(sw))
+ index = !phy_port ? 6 : 7;
+ else if (tb_switch_is_falcon_ridge(sw))
+ index = !phy_port ? 6 : 8;
+ else if (tb_switch_is_titan_ridge(sw))
+ index = !phy_port ? 8 : 9;
+ else
+ goto out;
+
+ /* Validate the hard-coding */
+ if (WARN_ON(index > sw->config.max_port_number))
+ goto out;
+
+ down = &sw->ports[index];
+ }
+
+ if (down) {
+ if (WARN_ON(!tb_port_is_pcie_down(down)))
+ goto out;
+ if (tb_pci_port_is_enabled(down))
+ goto out;
+
+ return down;
+ }
+
+out:
+ return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
+}
+
+static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
+{
+ struct tb_port *host_port, *port;
+ struct tb_cm *tcm = tb_priv(tb);
+
+ host_port = tb_route(in->sw) ?
+ tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
+
+ list_for_each_entry(port, &tcm->dp_resources, list) {
+ if (!tb_port_is_dpout(port))
+ continue;
+
+ if (tb_port_is_enabled(port)) {
+ tb_port_dbg(port, "in use\n");
+ continue;
+ }
+
+ tb_port_dbg(port, "DP OUT available\n");
+
+ /*
+ * Keep the DP tunnel under the topology starting from
+ * the same host router downstream port.
+ */
+ if (host_port && tb_route(port->sw)) {
+ struct tb_port *p;
+
+ p = tb_port_at(tb_route(port->sw), tb->root_switch);
+ if (p != host_port)
+ continue;
+ }
+
+ return port;
+ }
+
+ return NULL;
+}
+
+static void tb_tunnel_dp(struct tb *tb)
+{
+ int available_up, available_down, ret;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port, *in, *out;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Find pair of inactive DP IN and DP OUT adapters and then
+ * establish a DP tunnel between them.
+ */
+ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+
+ in = NULL;
+ out = NULL;
+ list_for_each_entry(port, &tcm->dp_resources, list) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (tb_port_is_enabled(port)) {
+ tb_port_dbg(port, "in use\n");
+ continue;
+ }
+
+ tb_port_dbg(port, "DP IN available\n");
+
+ out = tb_find_dp_out(tb, port);
+ if (out) {
+ in = port;
+ break;
+ }
+ }
+
+ if (!in) {
+ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+ return;
+ }
+ if (!out) {
+ tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
+ return;
+ }
+
+ /*
+ * The DP stream needs the domain to be active, so runtime resume
+ * both ends of the tunnel.
+ *
+ * This should bring the routers in the middle active as well
+ * and keep the domain from runtime suspending while the DP
+ * tunnel is active.
+ */
+ pm_runtime_get_sync(&in->sw->dev);
+ pm_runtime_get_sync(&out->sw->dev);
+
+ if (tb_switch_alloc_dp_resource(in->sw, in)) {
+ tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
+ goto err_rpm_put;
+ }
+
+ /* Make all unused USB3 bandwidth available for the new DP tunnel */
+ ret = tb_release_unused_usb3_bandwidth(tb, in, out);
+ if (ret) {
+ tb_warn(tb, "failed to release unused bandwidth\n");
+ goto err_dealloc_dp;
+ }
+
+ ret = tb_available_bandwidth(tb, in, out, &available_up,
+ &available_down);
+ if (ret)
+ goto err_reclaim;
+
+ tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
+ available_up, available_down);
+
+ tunnel = tb_tunnel_alloc_dp(tb, in, out, available_up, available_down);
+ if (!tunnel) {
+ tb_port_dbg(out, "could not allocate DP tunnel\n");
+ goto err_reclaim;
+ }
+
+ if (tb_tunnel_activate(tunnel)) {
+ tb_port_info(out, "DP tunnel activation failed, aborting\n");
+ goto err_free;
+ }
+
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ tb_reclaim_usb3_bandwidth(tb, in, out);
+ return;
+
+err_free:
+ tb_tunnel_free(tunnel);
+err_reclaim:
+ tb_reclaim_usb3_bandwidth(tb, in, out);
+err_dealloc_dp:
+ tb_switch_dealloc_dp_resource(in->sw, in);
+err_rpm_put:
+ pm_runtime_mark_last_busy(&out->sw->dev);
+ pm_runtime_put_autosuspend(&out->sw->dev);
+ pm_runtime_mark_last_busy(&in->sw->dev);
+ pm_runtime_put_autosuspend(&in->sw->dev);
+}
+
+static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
+{
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+
+ if (tb_port_is_dpin(port)) {
+ tb_port_dbg(port, "DP IN resource unavailable\n");
+ in = port;
+ out = NULL;
+ } else {
+ tb_port_dbg(port, "DP OUT resource unavailable\n");
+ in = NULL;
+ out = port;
+ }
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
+ tb_deactivate_and_free_tunnel(tunnel);
+ list_del_init(&port->list);
+
+ /*
+ * See if there is another DP OUT port that can be used to
+ * create another tunnel.
+ */
+ tb_tunnel_dp(tb);
+}
+
+static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *p;
+
+ if (tb_port_is_enabled(port))
+ return;
+
+ list_for_each_entry(p, &tcm->dp_resources, list) {
+ if (p == port)
+ return;
+ }
+
+ tb_port_dbg(port, "DP %s resource available\n",
+ tb_port_is_dpin(port) ? "IN" : "OUT");
+ list_add_tail(&port->list, &tcm->dp_resources);
+
+ /* Look for suitable DP IN <-> DP OUT pairs now */
+ tb_tunnel_dp(tb);
+}
+
+static void tb_disconnect_and_release_dp(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel, *n;
+
+ /*
+ * Tear down all DP tunnels and release their resources. They
+ * will be re-established after resume based on plug events.
+ */
+ list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_dp(tunnel))
+ tb_deactivate_and_free_tunnel(tunnel);
+ }
+
+ while (!list_empty(&tcm->dp_resources)) {
+ struct tb_port *port;
+
+ port = list_first_entry(&tcm->dp_resources,
+ struct tb_port, list);
+ list_del_init(&port->list);
+ }
+}
+
+static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
+{
+ struct tb_port *up, *down, *port;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_switch *parent_sw;
+ struct tb_tunnel *tunnel;
+
+ up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
+ if (!up)
+ return 0;
+
+ /*
+ * Look up an available down port. Since we are chaining, it should
+ * be found right above this switch.
+ */
+ parent_sw = tb_to_switch(sw->dev.parent);
+ port = tb_port_at(tb_route(sw), parent_sw);
+ down = tb_find_pcie_down(parent_sw, port);
+ if (!down)
+ return 0;
+
+ tunnel = tb_tunnel_alloc_pci(tb, up, down);
+ if (!tunnel)
+ return -ENOMEM;
+
+ if (tb_tunnel_activate(tunnel)) {
+ tb_port_info(up,
+ "PCIe tunnel activation failed, aborting\n");
+ tb_tunnel_free(tunnel);
+ return -EIO;
+ }
+
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ return 0;
+}
+
+static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *nhi_port, *dst_port;
+ struct tb_tunnel *tunnel;
+ struct tb_switch *sw;
+
+ sw = tb_to_switch(xd->dev.parent);
+ dst_port = tb_port_at(xd->route, sw);
+ nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
+
+ mutex_lock(&tb->lock);
+ tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
+ xd->transmit_path, xd->receive_ring,
+ xd->receive_path);
+ if (!tunnel) {
+ mutex_unlock(&tb->lock);
+ return -ENOMEM;
+ }
+
+ if (tb_tunnel_activate(tunnel)) {
+ tb_port_info(nhi_port,
+ "DMA tunnel activation failed, aborting\n");
+ tb_tunnel_free(tunnel);
+ mutex_unlock(&tb->lock);
+ return -EIO;
+ }
+
+ list_add_tail(&tunnel->list, &tcm->tunnel_list);
+ mutex_unlock(&tb->lock);
+ return 0;
+}
+
+static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ struct tb_port *dst_port;
+ struct tb_tunnel *tunnel;
+ struct tb_switch *sw;
+
+ sw = tb_to_switch(xd->dev.parent);
+ dst_port = tb_port_at(xd->route, sw);
+
+ /*
+ * It is possible that the tunnel was already torn down (in
+ * case of cable disconnect) so it is fine if we cannot find it
+ * here anymore.
+ */
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+ tb_deactivate_and_free_tunnel(tunnel);
+}
+
+static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+ if (!xd->is_unplugged) {
+ mutex_lock(&tb->lock);
+ __tb_disconnect_xdomain_paths(tb, xd);
+ mutex_unlock(&tb->lock);
+ }
+ return 0;
+}
+
+/* hotplug handling */
+
+/**
+ * tb_handle_hotplug() - handle hotplug event
+ *
+ * Executes on tb->wq.
+ */
+static void tb_handle_hotplug(struct work_struct *work)
+{
+ struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+ struct tb *tb = ev->tb;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_switch *sw;
+ struct tb_port *port;
+
+ /* Bring the domain back from sleep if it was suspended */
+ pm_runtime_get_sync(&tb->dev);
+
+ mutex_lock(&tb->lock);
+ if (!tcm->hotplug_active)
+ goto out; /* during init, suspend or shutdown */
+
+ sw = tb_switch_find_by_route(tb, ev->route);
+ if (!sw) {
+ tb_warn(tb,
+ "hotplug event from non existent switch %llx:%x (unplug: %d)\n",
+ ev->route, ev->port, ev->unplug);
+ goto out;
+ }
+ if (ev->port > sw->config.max_port_number) {
+ tb_warn(tb,
+ "hotplug event from non existent port %llx:%x (unplug: %d)\n",
+ ev->route, ev->port, ev->unplug);
+ goto put_sw;
+ }
+ port = &sw->ports[ev->port];
+ if (tb_is_upstream_port(port)) {
+ tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
+ ev->route, ev->port, ev->unplug);
+ goto put_sw;
+ }
+
+ pm_runtime_get_sync(&sw->dev);
+
+ if (ev->unplug) {
+ tb_retimer_remove_all(port);
+
+ if (tb_port_has_remote(port)) {
+ tb_port_dbg(port, "switch unplugged\n");
+ tb_sw_set_unplugged(port->remote->sw);
+ tb_free_invalid_tunnels(tb);
+ tb_remove_dp_resources(port->remote->sw);
+ tb_switch_tmu_disable(port->remote->sw);
+ tb_switch_unconfigure_link(port->remote->sw);
+ tb_switch_lane_bonding_disable(port->remote->sw);
+ tb_switch_remove(port->remote->sw);
+ port->remote = NULL;
+ if (port->dual_link_port)
+ port->dual_link_port->remote = NULL;
+ /* Maybe we can create another DP tunnel */
+ tb_tunnel_dp(tb);
+ } else if (port->xdomain) {
+ struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
+
+ tb_port_dbg(port, "xdomain unplugged\n");
+ /*
+ * Service drivers are unbound during
+ * tb_xdomain_remove() so setting XDomain as
+ * unplugged here prevents deadlock if they call
+ * tb_xdomain_disable_paths(). We will tear down
+ * the path below.
+ */
+ xd->is_unplugged = true;
+ tb_xdomain_remove(xd);
+ port->xdomain = NULL;
+ __tb_disconnect_xdomain_paths(tb, xd);
+ tb_xdomain_put(xd);
+ tb_port_unconfigure_xdomain(port);
+ } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+ tb_dp_resource_unavailable(tb, port);
+ } else {
+ tb_port_dbg(port,
+ "got unplug event for disconnected port, ignoring\n");
+ }
+ } else if (port->remote) {
+ tb_port_dbg(port, "got plug event for connected port, ignoring\n");
+ } else {
+ if (tb_port_is_null(port)) {
+ tb_port_dbg(port, "hotplug: scanning\n");
+ tb_scan_port(port);
+ if (!port->remote)
+ tb_port_dbg(port, "hotplug: no switch found\n");
+ } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
+ tb_dp_resource_available(tb, port);
+ }
+ }
+
+ pm_runtime_mark_last_busy(&sw->dev);
+ pm_runtime_put_autosuspend(&sw->dev);
+
+put_sw:
+ tb_switch_put(sw);
+out:
+ mutex_unlock(&tb->lock);
+
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
+
+ kfree(ev);
+}
+
+/**
+ * tb_handle_event() - callback function for the control channel
+ *
+ * Delegates to tb_handle_hotplug.
+ */
+static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size)
+{
+ const struct cfg_event_pkg *pkg = buf;
+ u64 route;
+
+ if (type != TB_CFG_PKG_EVENT) {
+ tb_warn(tb, "unexpected event %#x, ignoring\n", type);
+ return;
+ }
+
+ route = tb_cfg_get_route(&pkg->header);
+
+ if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
+ tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
+ pkg->port);
+ }
+
+ tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
+}
+
+static void tb_stop(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+ struct tb_tunnel *n;
+
+ cancel_delayed_work(&tcm->remove_work);
+ /* tunnels are only present after everything has been initialized */
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
+ /*
+ * DMA tunnels require the driver to be functional so we
+ * tear them down. Other protocol tunnels can be left
+ * intact.
+ */
+ if (tb_tunnel_is_dma(tunnel))
+ tb_tunnel_deactivate(tunnel);
+ tb_tunnel_free(tunnel);
+ }
+ tb_switch_remove(tb->root_switch);
+ tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
+}
+
+static int tb_scan_finalize_switch(struct device *dev, void *data)
+{
+ if (tb_is_switch(dev)) {
+ struct tb_switch *sw = tb_to_switch(dev);
+
+ /*
+ * If we found that the switch was already setup by the
+ * boot firmware, mark it as authorized now before we
+ * send uevent to userspace.
+ */
+ if (sw->boot)
+ sw->authorized = 1;
+
+ dev_set_uevent_suppress(dev, false);
+ kobject_uevent(&dev->kobj, KOBJ_ADD);
+ device_for_each_child(dev, NULL, tb_scan_finalize_switch);
+ }
+
+ return 0;
+}
+
+static int tb_start(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int ret;
+
+ tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
+ if (IS_ERR(tb->root_switch))
+ return PTR_ERR(tb->root_switch);
+
+ /*
+ * ICM firmware upgrade needs running firmware, which is not
+ * available in native mode, so disable firmware upgrade of the
+ * root switch.
+ */
+ tb->root_switch->no_nvm_upgrade = true;
+ /* All USB4 routers support runtime PM */
+ tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
+
+ ret = tb_switch_configure(tb->root_switch);
+ if (ret) {
+ tb_switch_put(tb->root_switch);
+ return ret;
+ }
+
+ /* Announce the switch to the world */
+ ret = tb_switch_add(tb->root_switch);
+ if (ret) {
+ tb_switch_put(tb->root_switch);
+ return ret;
+ }
+
+ /* Enable TMU if it is off */
+ tb_switch_tmu_enable(tb->root_switch);
+ /* Full scan to discover devices added before the driver was loaded. */
+ tb_scan_switch(tb->root_switch);
+ /* Find out tunnels created by the boot firmware */
+ tb_discover_tunnels(tb->root_switch);
+ /*
+ * If the boot firmware did not create USB 3.x tunnels, create
+ * them now for the whole topology.
+ */
+ tb_create_usb3_tunnels(tb->root_switch);
+ /* Add DP IN resources for the root switch */
+ tb_add_dp_resources(tb->root_switch);
+ /* Make the discovered switches available to the userspace */
+ device_for_each_child(&tb->root_switch->dev, NULL,
+ tb_scan_finalize_switch);
+
+ /* Allow tb_handle_hotplug to progress events */
+ tcm->hotplug_active = true;
+ return 0;
+}
+
+static int tb_suspend_noirq(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+
+ tb_dbg(tb, "suspending...\n");
+ tb_disconnect_and_release_dp(tb);
+ tb_switch_suspend(tb->root_switch, false);
+ tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
+ tb_dbg(tb, "suspend finished\n");
+
+ return 0;
+}
+
+static void tb_restore_children(struct tb_switch *sw)
+{
+ struct tb_port *port;
+
+ /* No need to restore if the router is already unplugged */
+ if (sw->is_unplugged)
+ return;
+
+ if (tb_enable_tmu(sw))
+ tb_sw_warn(sw, "failed to restore TMU configuration\n");
+
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port) && !port->xdomain)
+ continue;
+
+ if (port->remote) {
+ tb_switch_lane_bonding_enable(port->remote->sw);
+ tb_switch_configure_link(port->remote->sw);
+
+ tb_restore_children(port->remote->sw);
+ } else if (port->xdomain) {
+ tb_port_configure_xdomain(port);
+ }
+ }
+}
+
+static int tb_resume_noirq(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel, *n;
+
+ tb_dbg(tb, "resuming...\n");
+
+ /* Remove any PCI devices the firmware might have set up */
+ tb_switch_reset(tb->root_switch);
+
+ tb_switch_resume(tb->root_switch);
+ tb_free_invalid_tunnels(tb);
+ tb_free_unplugged_children(tb->root_switch);
+ tb_restore_children(tb->root_switch);
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+ tb_tunnel_restart(tunnel);
+ if (!list_empty(&tcm->tunnel_list)) {
+ /*
+ * The PCIe links need some time to come back up after the
+ * tunnels are restarted; 100ms has proven sufficient.
+ */
+ tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
+ msleep(100);
+ }
+ /* Allow tb_handle_hotplug to progress events */
+ tcm->hotplug_active = true;
+ tb_dbg(tb, "resume finished\n");
+
+ return 0;
+}
+
+static int tb_free_unplugged_xdomains(struct tb_switch *sw)
+{
+ struct tb_port *port;
+ int ret = 0;
+
+ tb_switch_for_each_port(sw, port) {
+ if (tb_is_upstream_port(port))
+ continue;
+ if (port->xdomain && port->xdomain->is_unplugged) {
+ tb_retimer_remove_all(port);
+ tb_xdomain_remove(port->xdomain);
+ tb_port_unconfigure_xdomain(port);
+ port->xdomain = NULL;
+ ret++;
+ } else if (port->remote) {
+ ret += tb_free_unplugged_xdomains(port->remote->sw);
+ }
+ }
+
+ return ret;
+}
+
+static int tb_freeze_noirq(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+
+ tcm->hotplug_active = false;
+ return 0;
+}
+
+static int tb_thaw_noirq(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+
+ tcm->hotplug_active = true;
+ return 0;
+}
+
+static void tb_complete(struct tb *tb)
+{
+ /*
+ * Release any unplugged XDomains. If another domain has been
+ * swapped in place of an unplugged XDomain, we need to run
+ * another rescan.
+ */
+ mutex_lock(&tb->lock);
+ if (tb_free_unplugged_xdomains(tb->root_switch))
+ tb_scan_switch(tb->root_switch);
+ mutex_unlock(&tb->lock);
+}
+
+static int tb_runtime_suspend(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+
+ mutex_lock(&tb->lock);
+ tb_switch_suspend(tb->root_switch, true);
+ tcm->hotplug_active = false;
+ mutex_unlock(&tb->lock);
+
+ return 0;
+}
+
+static void tb_remove_work(struct work_struct *work)
+{
+ struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
+ struct tb *tb = tcm_to_tb(tcm);
+
+ mutex_lock(&tb->lock);
+ if (tb->root_switch) {
+ tb_free_unplugged_children(tb->root_switch);
+ tb_free_unplugged_xdomains(tb->root_switch);
+ }
+ mutex_unlock(&tb->lock);
+}
+
+static int tb_runtime_resume(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel, *n;
+
+ mutex_lock(&tb->lock);
+ tb_switch_resume(tb->root_switch);
+ tb_free_invalid_tunnels(tb);
+ tb_restore_children(tb->root_switch);
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+ tb_tunnel_restart(tunnel);
+ tcm->hotplug_active = true;
+ mutex_unlock(&tb->lock);
+
+ /*
+ * Schedule cleanup of any unplugged devices. Run this in a
+ * separate thread to avoid possible deadlock if the device
+ * removal runtime resumes the unplugged device.
+ */
+ queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
+ return 0;
+}
+
+static const struct tb_cm_ops tb_cm_ops = {
+ .start = tb_start,
+ .stop = tb_stop,
+ .suspend_noirq = tb_suspend_noirq,
+ .resume_noirq = tb_resume_noirq,
+ .freeze_noirq = tb_freeze_noirq,
+ .thaw_noirq = tb_thaw_noirq,
+ .complete = tb_complete,
+ .runtime_suspend = tb_runtime_suspend,
+ .runtime_resume = tb_runtime_resume,
+ .handle_event = tb_handle_event,
+ .approve_switch = tb_tunnel_pci,
+ .approve_xdomain_paths = tb_approve_xdomain_paths,
+ .disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
+};
+
+struct tb *tb_probe(struct tb_nhi *nhi)
+{
+ struct tb_cm *tcm;
+ struct tb *tb;
+
+ tb = tb_domain_alloc(nhi, sizeof(*tcm));
+ if (!tb)
+ return NULL;
+
+ tb->security_level = TB_SECURITY_USER;
+ tb->cm_ops = &tb_cm_ops;
+
+ tcm = tb_priv(tb);
+ INIT_LIST_HEAD(&tcm->tunnel_list);
+ INIT_LIST_HEAD(&tcm->dp_resources);
+ INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
+
+ return tb;
+}
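+
+/*
+ * Illustrative note (assumed call flow, see nhi.c and domain.c): tb_probe()
+ * is the entry point of this software connection manager. The NHI driver is
+ * expected to fall back to it when the ICM firmware connection manager is
+ * not available and then register the returned domain.
+ */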
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
new file mode 100644
index 000000000..266f3bf8f
--- /dev/null
+++ b/drivers/thunderbolt/tb.h
@@ -0,0 +1,1044 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt driver - bus logic (NHI independent)
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
+ */
+
+#ifndef TB_H_
+#define TB_H_
+
+#include <linux/nvmem-provider.h>
+#include <linux/pci.h>
+#include <linux/thunderbolt.h>
+#include <linux/uuid.h>
+
+#include "tb_regs.h"
+#include "ctl.h"
+#include "dma_port.h"
+
+#define NVM_MIN_SIZE SZ_32K
+#define NVM_MAX_SIZE SZ_512K
+
+/* Intel specific NVM offsets */
+#define NVM_DEVID 0x05
+#define NVM_VERSION 0x08
+#define NVM_FLASH_SIZE 0x45
+
+/**
+ * struct tb_nvm - Structure holding NVM information
+ * @dev: Owner of the NVM
+ * @major: Major version number of the active NVM portion
+ * @minor: Minor version number of the active NVM portion
+ * @id: Identifier used with both NVM portions
+ * @active: Active portion NVMem device
+ * @non_active: Non-active portion NVMem device
+ * @buf: Buffer where the NVM image is stored before it is written to
+ * the actual NVM flash device
+ * @buf_data_size: Number of bytes actually consumed by the new NVM
+ * image
+ * @authenticating: The device is authenticating the new NVM
+ * @flushed: The image has been flushed to the storage area
+ *
+ * The user of this structure needs to handle serialization of possible
+ * concurrent access.
+ */
+struct tb_nvm {
+ struct device *dev;
+ u8 major;
+ u8 minor;
+ int id;
+ struct nvmem_device *active;
+ struct nvmem_device *non_active;
+ void *buf;
+ size_t buf_data_size;
+ bool authenticating;
+ bool flushed;
+};
+
+#define TB_SWITCH_KEY_SIZE 32
+#define TB_SWITCH_MAX_DEPTH 6
+#define USB4_SWITCH_MAX_DEPTH 5
+
+/**
+ * enum tb_switch_tmu_rate - TMU refresh rate
+ * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake)
+ * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive
+ * transmission of the Delay Request TSNOS
+ * (Time Sync Notification Ordered Set) on a Link
+ * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive
+ * transmission of the Delay Request TSNOS on
+ * a Link
+ */
+enum tb_switch_tmu_rate {
+ TB_SWITCH_TMU_RATE_OFF = 0,
+ TB_SWITCH_TMU_RATE_HIFI = 16,
+ TB_SWITCH_TMU_RATE_NORMAL = 1000,
+};
+
+/**
+ * struct tb_switch_tmu - Structure holding switch TMU configuration
+ * @cap: Offset to the TMU capability (%0 if not found)
+ * @has_ucap: Does the switch support uni-directional mode
+ * @rate: TMU refresh rate related to upstream switch. In case of root
+ * switch this holds the domain rate.
+ * @unidirectional: Is the TMU in uni-directional or bi-directional mode
+ * related to upstream switch. Not applicable to the root switch.
+ */
+struct tb_switch_tmu {
+ int cap;
+ bool has_ucap;
+ enum tb_switch_tmu_rate rate;
+ bool unidirectional;
+};
+
+/**
+ * struct tb_switch - a thunderbolt switch
+ * @dev: Device for the switch
+ * @config: Switch configuration
+ * @ports: Ports in this switch
+ * @dma_port: If the switch has port supporting DMA configuration based
+ * mailbox this will hold the pointer to that (%NULL
+ * otherwise). If set it also means the switch has
+ * upgradeable NVM.
+ * @tmu: The switch TMU configuration
+ * @tb: Pointer to the domain the switch belongs to
+ * @uid: Unique ID of the switch
+ * @uuid: UUID of the switch (or %NULL if not supported)
+ * @vendor: Vendor ID of the switch
+ * @device: Device ID of the switch
+ * @vendor_name: Name of the vendor (or %NULL if not known)
+ * @device_name: Name of the device (or %NULL if not known)
+ * @link_speed: Speed of the link in Gb/s
+ * @link_width: Width of the link (1 or 2)
+ * @link_usb4: Upstream link is USB4
+ * @generation: Switch Thunderbolt generation
+ * @cap_plug_events: Offset to the plug events capability (%0 if not found)
+ * @cap_lc: Offset to the link controller capability (%0 if not found)
+ * @is_unplugged: The switch is going away
+ * @drom: DROM of the switch (%NULL if not found)
+ * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
+ * @no_nvm_upgrade: Prevent NVM upgrade of this switch
+ * @safe_mode: The switch is in safe-mode
+ * @boot: Whether the switch was already authorized on boot or not
+ * @rpm: The switch supports runtime PM
+ * @authorized: Whether the switch is authorized by user or policy
+ * @security_level: Switch supported security level
+ * @debugfs_dir: Pointer to the debugfs structure
+ * @key: Contains the key used to challenge the device or %NULL if not
+ * supported. Size of the key is %TB_SWITCH_KEY_SIZE.
+ * @connection_id: Connection ID used with ICM messaging
+ * @connection_key: Connection key used with ICM messaging
+ * @link: Root switch link this switch is connected to (ICM only)
+ * @depth: Depth in the chain this switch is connected at (ICM only)
+ * @rpm_complete: Completion used to wait for runtime resume to
+ * complete (ICM only)
+ * @quirks: Quirks used for this Thunderbolt switch
+ *
+ * When the switch is being added to or removed from the domain (other
+ * switches) you need to have the domain lock held.
+ */
+struct tb_switch {
+ struct device dev;
+ struct tb_regs_switch_header config;
+ struct tb_port *ports;
+ struct tb_dma_port *dma_port;
+ struct tb_switch_tmu tmu;
+ struct tb *tb;
+ u64 uid;
+ uuid_t *uuid;
+ u16 vendor;
+ u16 device;
+ const char *vendor_name;
+ const char *device_name;
+ unsigned int link_speed;
+ unsigned int link_width;
+ bool link_usb4;
+ unsigned int generation;
+ int cap_plug_events;
+ int cap_lc;
+ bool is_unplugged;
+ u8 *drom;
+ struct tb_nvm *nvm;
+ bool no_nvm_upgrade;
+ bool safe_mode;
+ bool boot;
+ bool rpm;
+ unsigned int authorized;
+ enum tb_security_level security_level;
+ struct dentry *debugfs_dir;
+ u8 *key;
+ u8 connection_id;
+ u8 connection_key;
+ u8 link;
+ u8 depth;
+ struct completion rpm_complete;
+ unsigned long quirks;
+};
+
+/**
+ * struct tb_port - a thunderbolt port, part of a tb_switch
+ * @config: Cached port configuration read from registers
+ * @sw: Switch the port belongs to
+ * @remote: Remote port (%NULL if not connected)
+ * @xdomain: Remote host (%NULL if not connected)
+ * @cap_phy: Offset to the lane adapter (PHY) capability (%0 if not found)
+ * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present)
+ * @cap_adap: Offset of the adapter specific capability (%0 if not present)
+ * @cap_usb4: Offset to the USB4 port capability (%0 if not present)
+ * @port: Port number on switch
+ * @disabled: Disabled by eeprom or enabled but not implemented
+ * @bonded: true if the port is bonded (two lanes combined as one)
+ * @dual_link_port: If the switch is connected using two ports, points
+ * to the other port.
+ * @link_nr: Is this primary or secondary port on the dual_link.
+ * @in_hopids: Currently allocated input HopIDs
+ * @out_hopids: Currently allocated output HopIDs
+ * @list: Used to link ports to DP resources list
+ */
+struct tb_port {
+ struct tb_regs_port_header config;
+ struct tb_switch *sw;
+ struct tb_port *remote;
+ struct tb_xdomain *xdomain;
+ int cap_phy;
+ int cap_tmu;
+ int cap_adap;
+ int cap_usb4;
+ u8 port;
+ bool disabled;
+ bool bonded;
+ struct tb_port *dual_link_port;
+ u8 link_nr:1;
+ struct ida in_hopids;
+ struct ida out_hopids;
+ struct list_head list;
+};
+
+/**
+ * struct tb_retimer - Thunderbolt retimer
+ * @dev: Device for the retimer
+ * @tb: Pointer to the domain the retimer belongs to
+ * @index: Retimer index facing the router USB4 port
+ * @vendor: Vendor ID of the retimer
+ * @device: Device ID of the retimer
+ * @port: Pointer to the lane 0 adapter
+ * @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise)
+ * @auth_status: Status of last NVM authentication
+ */
+struct tb_retimer {
+ struct device dev;
+ struct tb *tb;
+ u8 index;
+ u32 vendor;
+ u32 device;
+ struct tb_port *port;
+ struct tb_nvm *nvm;
+ u32 auth_status;
+};
+
+/**
+ * struct tb_path_hop - routing information for a tb_path
+ * @in_port: Ingress port of a switch
+ * @out_port: Egress port of a switch where the packet is routed out
+ *	       (must be on the same switch as @in_port)
+ * @in_hop_index: HopID where the path configuration entry is placed in
+ * the path config space of @in_port.
+ * @in_counter_index: Used counter index (not used in the driver
+ * currently, %-1 to disable)
+ * @next_hop_index: HopID of the packet when it is routed out from @out_port
+ * @initial_credits: Number of initial flow control credits allocated for
+ * the path
+ *
+ * Hop configuration is always done on the IN port of a switch.
+ * in_port and out_port have to be on the same switch. Packets arriving on
+ * in_port with "hop" = in_hop_index will get routed out through out_port. The
+ * next hop to take (on out_port->remote) is determined by
+ * next_hop_index. When routing a packet to another switch (out_port->remote
+ * is set) the @next_hop_index must match the @in_hop_index of that next
+ * hop to make routing possible.
+ *
+ * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in
+ * port.
+ */
+struct tb_path_hop {
+ struct tb_port *in_port;
+ struct tb_port *out_port;
+ int in_hop_index;
+ int in_counter_index;
+ int next_hop_index;
+ unsigned int initial_credits;
+};
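+
+/*
+ * Illustrative sketch only (hypothetical helper, not used by the driver):
+ * shows how two consecutive hops chain together as described above. The
+ * next_hop_index programmed on the first switch must equal the
+ * in_hop_index used on the next one. HopID values (8 and 9) and the port
+ * arguments are made up for the example.
+ */
+static inline void tb_example_chain_hops(struct tb_path_hop hops[2],
+					 struct tb_port *a_in,
+					 struct tb_port *a_out,
+					 struct tb_port *b_in,
+					 struct tb_port *b_out)
+{
+	/* First switch: ingress a_in, egress a_out towards the next switch */
+	hops[0].in_port = a_in;
+	hops[0].out_port = a_out;
+	hops[0].in_hop_index = 8;	/* first non-reserved HopID */
+	hops[0].in_counter_index = -1;	/* counters not used */
+	hops[0].next_hop_index = 9;	/* HopID used on the next switch */
+	hops[0].initial_credits = 1;
+
+	/* Next switch (b_in == a_out->remote): must consume HopID 9 */
+	hops[1].in_port = b_in;
+	hops[1].out_port = b_out;
+	hops[1].in_hop_index = 9;	/* matches hops[0].next_hop_index */
+	hops[1].in_counter_index = -1;
+	hops[1].next_hop_index = 8;
+	hops[1].initial_credits = 1;
+}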
+
+/**
+ * enum tb_path_port - path options mask
+ * @TB_PATH_NONE: Do not activate on any hop on path
+ * @TB_PATH_SOURCE: Activate on the first hop (out of src)
+ * @TB_PATH_INTERNAL: Activate on the intermediate hops (not the first/last)
+ * @TB_PATH_DESTINATION: Activate on the last hop (into dst)
+ * @TB_PATH_ALL: Activate on all hops on the path
+ */
+enum tb_path_port {
+ TB_PATH_NONE = 0,
+ TB_PATH_SOURCE = 1,
+ TB_PATH_INTERNAL = 2,
+ TB_PATH_DESTINATION = 4,
+ TB_PATH_ALL = 7,
+};
+
+/**
+ * struct tb_path - a unidirectional path between two ports
+ * @tb: Pointer to the domain structure
+ * @name: Name of the path (used for debugging)
+ * @nfc_credits: Number of non flow controlled credits allocated for the path
+ * @ingress_shared_buffer: Shared buffering used for ingress ports on the path
+ * @egress_shared_buffer: Shared buffering used for egress ports on the path
+ * @ingress_fc_enable: Flow control for ingress ports on the path
+ * @egress_fc_enable: Flow control for egress ports on the path
+ * @priority: Priority group of the path
+ * @weight: Weight of the path inside the priority group
+ * @drop_packages: Drop packages from queue tail or head
+ * @activated: Is the path active
+ * @clear_fc: Clear all flow control from the path config space entries
+ * when deactivating this path
+ * @hops: Path hops
+ * @path_length: How many hops the path uses
+ *
+ * A path consists of a number of hops (see &struct tb_path_hop). To
+ * establish a PCIe tunnel two paths have to be created between the two
+ * PCIe ports.
+ */
+struct tb_path {
+ struct tb *tb;
+ const char *name;
+ int nfc_credits;
+ enum tb_path_port ingress_shared_buffer;
+ enum tb_path_port egress_shared_buffer;
+ enum tb_path_port ingress_fc_enable;
+ enum tb_path_port egress_fc_enable;
+
+ unsigned int priority:3;
+ int weight:4;
+ bool drop_packages;
+ bool activated;
+ bool clear_fc;
+ struct tb_path_hop *hops;
+ int path_length;
+};
+
+/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
+#define TB_PATH_MIN_HOPID 8
+/*
+ * Support paths from the farthest (depth 6) router to the host and back
+ * to the same level (not necessarily to the same router).
+ */
+#define TB_PATH_MAX_HOPS (7 * 2)
+
+/* Possible wake types */
+#define TB_WAKE_ON_CONNECT BIT(0)
+#define TB_WAKE_ON_DISCONNECT BIT(1)
+#define TB_WAKE_ON_USB4 BIT(2)
+#define TB_WAKE_ON_USB3 BIT(3)
+#define TB_WAKE_ON_PCIE BIT(4)
+
+/**
+ * struct tb_cm_ops - Connection manager specific operations vector
+ * @driver_ready: Called right after control channel is started. Used by
+ * ICM to send driver ready message to the firmware.
+ * @start: Starts the domain
+ * @stop: Stops the domain
+ * @suspend_noirq: Connection manager specific suspend_noirq
+ * @resume_noirq: Connection manager specific resume_noirq
+ * @suspend: Connection manager specific suspend
+ * @freeze_noirq: Connection manager specific freeze_noirq
+ * @thaw_noirq: Connection manager specific thaw_noirq
+ * @complete: Connection manager specific complete
+ * @runtime_suspend: Connection manager specific runtime_suspend
+ * @runtime_resume: Connection manager specific runtime_resume
+ * @runtime_suspend_switch: Runtime suspend a switch
+ * @runtime_resume_switch: Runtime resume a switch
+ * @handle_event: Handle thunderbolt event
+ * @get_boot_acl: Get boot ACL list
+ * @set_boot_acl: Set boot ACL list
+ * @approve_switch: Approve switch
+ * @add_switch_key: Add key to switch
+ * @challenge_switch_key: Challenge switch using key
+ * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
+ * @approve_xdomain_paths: Approve (establish) XDomain DMA paths
+ * @disconnect_xdomain_paths: Disconnect XDomain DMA paths
+ */
+struct tb_cm_ops {
+ int (*driver_ready)(struct tb *tb);
+ int (*start)(struct tb *tb);
+ void (*stop)(struct tb *tb);
+ int (*suspend_noirq)(struct tb *tb);
+ int (*resume_noirq)(struct tb *tb);
+ int (*suspend)(struct tb *tb);
+ int (*freeze_noirq)(struct tb *tb);
+ int (*thaw_noirq)(struct tb *tb);
+ void (*complete)(struct tb *tb);
+ int (*runtime_suspend)(struct tb *tb);
+ int (*runtime_resume)(struct tb *tb);
+ int (*runtime_suspend_switch)(struct tb_switch *sw);
+ int (*runtime_resume_switch)(struct tb_switch *sw);
+ void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
+ const void *buf, size_t size);
+ int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids);
+ int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids);
+ int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
+ int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
+ int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
+ const u8 *challenge, u8 *response);
+ int (*disconnect_pcie_paths)(struct tb *tb);
+ int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
+ int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
+};
+
+static inline void *tb_priv(struct tb *tb)
+{
+ return (void *)tb->privdata;
+}
+
+#define TB_AUTOSUSPEND_DELAY 15000 /* ms */
+
+/* helper functions & macros */
+
+/**
+ * tb_upstream_port() - return the upstream port of a switch
+ * @sw: Switch to query
+ *
+ * Every switch has an upstream port (for the root switch it is the NHI).
+ *
+ * During switch alloc/init tb_upstream_port()->remote may be NULL, even for
+ * non root switches (on the NHI port remote is always NULL).
+ *
+ * Return: Returns the upstream port of the switch.
+ */
+static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
+{
+ return &sw->ports[sw->config.upstream_port_number];
+}
+
+/**
+ * tb_is_upstream_port() - Is the port upstream facing
+ * @port: Port to check
+ *
+ * Returns true if @port is an upstream facing port. For dual link ports
+ * this returns true for both ports.
+ */
+static inline bool tb_is_upstream_port(const struct tb_port *port)
+{
+ const struct tb_port *upstream_port = tb_upstream_port(port->sw);
+ return port == upstream_port || port->dual_link_port == upstream_port;
+}
+
+static inline u64 tb_route(const struct tb_switch *sw)
+{
+ return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
+}
+
+static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
+{
+ u8 port;
+
+ port = route >> (sw->config.depth * 8);
+ if (WARN_ON(port > sw->config.max_port_number))
+ return NULL;
+ return &sw->ports[port];
+}
+
+/**
+ * tb_port_has_remote() - Does the port have a switch connected downstream
+ * @port: Port to check
+ *
+ * Returns true only when the port is the primary port and has a remote set.
+ */
+static inline bool tb_port_has_remote(const struct tb_port *port)
+{
+ if (tb_is_upstream_port(port))
+ return false;
+ if (!port->remote)
+ return false;
+ if (port->dual_link_port && port->link_nr)
+ return false;
+
+ return true;
+}
+
+static inline bool tb_port_is_null(const struct tb_port *port)
+{
+ return port && port->port && port->config.type == TB_TYPE_PORT;
+}
+
+static inline bool tb_port_is_nhi(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_NHI;
+}
+
+static inline bool tb_port_is_pcie_down(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_PCIE_DOWN;
+}
+
+static inline bool tb_port_is_pcie_up(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_PCIE_UP;
+}
+
+static inline bool tb_port_is_dpin(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_DP_HDMI_IN;
+}
+
+static inline bool tb_port_is_dpout(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_DP_HDMI_OUT;
+}
+
+static inline bool tb_port_is_usb3_down(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_USB3_DOWN;
+}
+
+static inline bool tb_port_is_usb3_up(const struct tb_port *port)
+{
+ return port && port->config.type == TB_TYPE_USB3_UP;
+}
+
+static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
+ enum tb_cfg_space space, u32 offset, u32 length)
+{
+ if (sw->is_unplugged)
+ return -ENODEV;
+ return tb_cfg_read(sw->tb->ctl,
+ buffer,
+ tb_route(sw),
+ 0,
+ space,
+ offset,
+ length);
+}
+
+static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
+ enum tb_cfg_space space, u32 offset, u32 length)
+{
+ if (sw->is_unplugged)
+ return -ENODEV;
+ return tb_cfg_write(sw->tb->ctl,
+ buffer,
+ tb_route(sw),
+ 0,
+ space,
+ offset,
+ length);
+}
+
+static inline int tb_port_read(struct tb_port *port, void *buffer,
+ enum tb_cfg_space space, u32 offset, u32 length)
+{
+ if (port->sw->is_unplugged)
+ return -ENODEV;
+ return tb_cfg_read(port->sw->tb->ctl,
+ buffer,
+ tb_route(port->sw),
+ port->port,
+ space,
+ offset,
+ length);
+}
+
+static inline int tb_port_write(struct tb_port *port, const void *buffer,
+ enum tb_cfg_space space, u32 offset, u32 length)
+{
+ if (port->sw->is_unplugged)
+ return -ENODEV;
+ return tb_cfg_write(port->sw->tb->ctl,
+ buffer,
+ tb_route(port->sw),
+ port->port,
+ space,
+ offset,
+ length);
+}
+
+#define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg)
+
+#define __TB_SW_PRINT(level, sw, fmt, arg...) \
+ do { \
+ const struct tb_switch *__sw = (sw); \
+ level(__sw->tb, "%llx: " fmt, \
+ tb_route(__sw), ## arg); \
+ } while (0)
+#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
+#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
+#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
+#define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg)
+
+#define __TB_PORT_PRINT(level, _port, fmt, arg...) \
+ do { \
+ const struct tb_port *__port = (_port); \
+ level(__port->sw->tb, "%llx:%x: " fmt, \
+ tb_route(__port->sw), __port->port, ## arg); \
+ } while (0)
+#define tb_port_WARN(port, fmt, arg...) \
+ __TB_PORT_PRINT(tb_WARN, port, fmt, ##arg)
+#define tb_port_warn(port, fmt, arg...) \
+ __TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
+#define tb_port_info(port, fmt, arg...) \
+ __TB_PORT_PRINT(tb_info, port, fmt, ##arg)
+#define tb_port_dbg(port, fmt, arg...) \
+ __TB_PORT_PRINT(tb_dbg, port, fmt, ##arg)
+
+struct tb *icm_probe(struct tb_nhi *nhi);
+struct tb *tb_probe(struct tb_nhi *nhi);
+
+extern struct device_type tb_domain_type;
+extern struct device_type tb_retimer_type;
+extern struct device_type tb_switch_type;
+
+int tb_domain_init(void);
+void tb_domain_exit(void);
+int tb_xdomain_init(void);
+void tb_xdomain_exit(void);
+
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
+int tb_domain_add(struct tb *tb);
+void tb_domain_remove(struct tb *tb);
+int tb_domain_suspend_noirq(struct tb *tb);
+int tb_domain_resume_noirq(struct tb *tb);
+int tb_domain_suspend(struct tb *tb);
+int tb_domain_freeze_noirq(struct tb *tb);
+int tb_domain_thaw_noirq(struct tb *tb);
+void tb_domain_complete(struct tb *tb);
+int tb_domain_runtime_suspend(struct tb *tb);
+int tb_domain_runtime_resume(struct tb *tb);
+int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
+int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
+int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
+int tb_domain_disconnect_pcie_paths(struct tb *tb);
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
+int tb_domain_disconnect_all_paths(struct tb *tb);
+
+static inline struct tb *tb_domain_get(struct tb *tb)
+{
+ if (tb)
+ get_device(&tb->dev);
+ return tb;
+}
+
+static inline void tb_domain_put(struct tb *tb)
+{
+ put_device(&tb->dev);
+}
+
+struct tb_nvm *tb_nvm_alloc(struct device *dev);
+int tb_nvm_add_active(struct tb_nvm *nvm, size_t size, nvmem_reg_read_t reg_read);
+int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
+ size_t bytes);
+int tb_nvm_add_non_active(struct tb_nvm *nvm, size_t size,
+ nvmem_reg_write_t reg_write);
+void tb_nvm_free(struct tb_nvm *nvm);
+void tb_nvm_exit(void);
+
+struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
+ u64 route);
+struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
+ struct device *parent, u64 route);
+int tb_switch_configure(struct tb_switch *sw);
+int tb_switch_add(struct tb_switch *sw);
+void tb_switch_remove(struct tb_switch *sw);
+void tb_switch_suspend(struct tb_switch *sw, bool runtime);
+int tb_switch_resume(struct tb_switch *sw);
+int tb_switch_reset(struct tb_switch *sw);
+void tb_sw_set_unplugged(struct tb_switch *sw);
+struct tb_port *tb_switch_find_port(struct tb_switch *sw,
+ enum tb_port_type type);
+struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
+ u8 depth);
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid);
+struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route);
+
+/**
+ * tb_switch_for_each_port() - Iterate over each switch port
+ * @sw: Switch whose ports to iterate
+ * @p: Port used as iterator
+ *
+ * Iterates over each switch port skipping the control port (port %0).
+ */
+#define tb_switch_for_each_port(sw, p) \
+ for ((p) = &(sw)->ports[1]; \
+ (p) <= &(sw)->ports[(sw)->config.max_port_number]; (p)++)
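+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of the driver):
+ * shows typical usage of tb_switch_for_each_port() together with one of
+ * the tb_port_is_*() predicates above.
+ */
+static inline unsigned int tb_example_count_null_ports(struct tb_switch *sw)
+{
+	struct tb_port *port;
+	unsigned int count = 0;
+
+	tb_switch_for_each_port(sw, port) {
+		if (tb_port_is_null(port))
+			count++;
+	}
+
+	return count;
+}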
+
+static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
+{
+ if (sw)
+ get_device(&sw->dev);
+ return sw;
+}
+
+static inline void tb_switch_put(struct tb_switch *sw)
+{
+ put_device(&sw->dev);
+}
+
+static inline bool tb_is_switch(const struct device *dev)
+{
+ return dev->type == &tb_switch_type;
+}
+
+static inline struct tb_switch *tb_to_switch(struct device *dev)
+{
+ if (tb_is_switch(dev))
+ return container_of(dev, struct tb_switch, dev);
+ return NULL;
+}
+
+static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
+{
+ return tb_to_switch(sw->dev.parent);
+}
+
+static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw)
+{
+ return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
+ sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE;
+}
+
+static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw)
+{
+ return sw->config.vendor_id == PCI_VENDOR_ID_INTEL &&
+ sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE;
+}
+
+static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw)
+{
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw)
+{
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw)
+{
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
+{
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline bool tb_switch_is_ice_lake(const struct tb_switch *sw)
+{
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI1:
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
+{
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_TGL_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_NHI1:
+ case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * tb_switch_is_usb4() - Is the switch USB4 compliant
+ * @sw: Switch to check
+ *
+ * Returns true if @sw is a USB4 compliant router, false otherwise.
+ */
+static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
+{
+ return sw->config.thunderbolt_version == USB4_VERSION_1_0;
+}
+
+/**
+ * tb_switch_is_icm() - Is the switch handled by ICM firmware
+ * @sw: Switch to check
+ *
+ * In case there is a need to differentiate whether ICM firmware or SW CM
+ * is handling @sw this function can be called. It is valid to call this
+ * after tb_switch_alloc() and tb_switch_configure() have been called
+ * (the latter only in the SW CM case).
+ */
+static inline bool tb_switch_is_icm(const struct tb_switch *sw)
+{
+ return !sw->config.enabled;
+}
+
+int tb_switch_lane_bonding_enable(struct tb_switch *sw);
+void tb_switch_lane_bonding_disable(struct tb_switch *sw);
+int tb_switch_configure_link(struct tb_switch *sw);
+void tb_switch_unconfigure_link(struct tb_switch *sw);
+
+bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+
+int tb_switch_tmu_init(struct tb_switch *sw);
+int tb_switch_tmu_post_time(struct tb_switch *sw);
+int tb_switch_tmu_disable(struct tb_switch *sw);
+int tb_switch_tmu_enable(struct tb_switch *sw);
+
+static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
+{
+ return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI &&
+ !sw->tmu.unidirectional;
+}
+
+int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
+int tb_port_add_nfc_credits(struct tb_port *port, int credits);
+int tb_port_set_initial_credits(struct tb_port *port, u32 credits);
+int tb_port_clear_counter(struct tb_port *port, int counter);
+int tb_port_unlock(struct tb_port *port);
+int tb_port_enable(struct tb_port *port);
+int tb_port_disable(struct tb_port *port);
+int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid);
+void tb_port_release_in_hopid(struct tb_port *port, int hopid);
+int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid);
+void tb_port_release_out_hopid(struct tb_port *port, int hopid);
+struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
+ struct tb_port *prev);
+
+/**
+ * tb_for_each_port_on_path() - Iterate over each port on path
+ * @src: Source port
+ * @dst: Destination port
+ * @p: Port used as iterator
+ *
+ * Walks over each port on path from @src to @dst.
+ */
+#define tb_for_each_port_on_path(src, dst, p) \
+ for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \
+ (p) = tb_next_port_on_path((src), (dst), (p)))
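+
+/*
+ * Illustrative sketch only (hypothetical helper, not part of the driver):
+ * walks the ports between @src and @dst with tb_for_each_port_on_path()
+ * and simply counts them.
+ */
+static inline int tb_example_path_port_count(struct tb_port *src,
+					     struct tb_port *dst)
+{
+	struct tb_port *p;
+	int count = 0;
+
+	tb_for_each_port_on_path(src, dst, p)
+		count++;
+
+	return count;
+}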
+
+int tb_port_get_link_speed(struct tb_port *port);
+
+int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
+int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
+int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset);
+int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap);
+int tb_port_next_cap(struct tb_port *port, unsigned int offset);
+bool tb_port_is_enabled(struct tb_port *port);
+
+bool tb_usb3_port_is_enabled(struct tb_port *port);
+int tb_usb3_port_enable(struct tb_port *port, bool enable);
+
+bool tb_pci_port_is_enabled(struct tb_port *port);
+int tb_pci_port_enable(struct tb_port *port, bool enable);
+
+int tb_dp_port_hpd_is_active(struct tb_port *port);
+int tb_dp_port_hpd_clear(struct tb_port *port);
+int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
+ unsigned int aux_tx, unsigned int aux_rx);
+bool tb_dp_port_is_enabled(struct tb_port *port);
+int tb_dp_port_enable(struct tb_port *port, bool enable);
+
+struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
+ struct tb_port *dst, int dst_hopid,
+ struct tb_port **last, const char *name);
+struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
+ struct tb_port *dst, int dst_hopid, int link_nr,
+ const char *name);
+void tb_path_free(struct tb_path *path);
+int tb_path_activate(struct tb_path *path);
+void tb_path_deactivate(struct tb_path *path);
+bool tb_path_is_invalid(struct tb_path *path);
+bool tb_path_port_on_path(const struct tb_path *path,
+ const struct tb_port *port);
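+
+/*
+ * Illustrative sketch only (hypothetical helper and argument values, not
+ * part of the driver): one plausible allocate/activate/free flow using the
+ * prototypes above, assuming the usual 0-on-success convention for
+ * tb_path_activate().
+ */
+static inline struct tb_path *tb_example_build_path(struct tb *tb,
+						    struct tb_port *src,
+						    struct tb_port *dst)
+{
+	struct tb_path *path;
+
+	/* HopIDs below TB_PATH_MIN_HOPID are reserved by the protocol */
+	path = tb_path_alloc(tb, src, TB_PATH_MIN_HOPID, dst,
+			     TB_PATH_MIN_HOPID, 0, "Example");
+	if (!path)
+		return NULL;
+
+	if (tb_path_activate(path)) {
+		tb_path_free(path);
+		return NULL;
+	}
+
+	return path;
+}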
+
+int tb_drom_read(struct tb_switch *sw);
+int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
+
+int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
+int tb_lc_configure_port(struct tb_port *port);
+void tb_lc_unconfigure_port(struct tb_port *port);
+int tb_lc_configure_xdomain(struct tb_port *port);
+void tb_lc_unconfigure_xdomain(struct tb_port *port);
+int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
+int tb_lc_set_sleep(struct tb_switch *sw);
+bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
+bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in);
+int tb_lc_force_power(struct tb_switch *sw);
+
+static inline int tb_route_length(u64 route)
+{
+ return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
+}
+
+/**
+ * tb_downstream_route() - get route to downstream switch
+ * @port: Parent switch port that leads to the downstream switch
+ *
+ * Port must not be the upstream port (otherwise a loop is created).
+ *
+ * Return: Returns a route to the switch behind @port.
+ */
+static inline u64 tb_downstream_route(struct tb_port *port)
+{
+ return tb_route(port->sw)
+ | ((u64) port->port << (port->sw->config.depth * 8));
+}
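+
+/*
+ * Worked example of the route encoding used above (each hop consumes
+ * TB_ROUTE_SHIFT = 8 bits): for a device attached to port 3 of a depth 1
+ * router that itself hangs off host port 1, tb_route() of the depth 1
+ * router is 0x1 and tb_downstream_route() of its port 3 yields
+ * 0x1 | (3 << 8) = 0x301. Conversely tb_port_at(0x301, host) returns host
+ * port 1 and tb_route_length(0x301) is 2.
+ */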
+
+bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size);
+struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
+ u64 route, const uuid_t *local_uuid,
+ const uuid_t *remote_uuid);
+void tb_xdomain_add(struct tb_xdomain *xd);
+void tb_xdomain_remove(struct tb_xdomain *xd);
+struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
+ u8 depth);
+
+int tb_retimer_scan(struct tb_port *port);
+void tb_retimer_remove_all(struct tb_port *port);
+
+static inline bool tb_is_retimer(const struct device *dev)
+{
+ return dev->type == &tb_retimer_type;
+}
+
+static inline struct tb_retimer *tb_to_retimer(struct device *dev)
+{
+ if (tb_is_retimer(dev))
+ return container_of(dev, struct tb_retimer, dev);
+ return NULL;
+}
+
+int usb4_switch_setup(struct tb_switch *sw);
+int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
+int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size);
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
+int usb4_switch_set_sleep(struct tb_switch *sw);
+int usb4_switch_nvm_sector_size(struct tb_switch *sw);
+int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size);
+int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
+ const void *buf, size_t size);
+int usb4_switch_nvm_authenticate(struct tb_switch *sw);
+bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in);
+struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
+ const struct tb_port *port);
+struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
+ const struct tb_port *port);
+
+int usb4_port_unlock(struct tb_port *port);
+int usb4_port_hotplug_enable(struct tb_port *port);
+int usb4_port_configure(struct tb_port *port);
+void usb4_port_unconfigure(struct tb_port *port);
+int usb4_port_configure_xdomain(struct tb_port *port);
+void usb4_port_unconfigure_xdomain(struct tb_port *port);
+int usb4_port_enumerate_retimers(struct tb_port *port);
+
+int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
+ u8 size);
+int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
+ const void *buf, u8 size);
+int usb4_port_retimer_is_last(struct tb_port *port, u8 index);
+int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index);
+int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index,
+ unsigned int address, const void *buf,
+ size_t size);
+int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index);
+int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
+ u32 *status);
+int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
+ unsigned int address, void *buf, size_t size);
+
+int usb4_usb3_port_max_link_rate(struct tb_port *port);
+int usb4_usb3_port_actual_link_rate(struct tb_port *port);
+int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
+ int *downstream_bw);
+int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
+ int *downstream_bw);
+int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
+ int *downstream_bw);
+
+/* Keep link controller awake during update */
+#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
+
+void tb_check_quirks(struct tb_switch *sw);
+
+#ifdef CONFIG_ACPI
+void tb_acpi_add_links(struct tb_nhi *nhi);
+#else
+static inline void tb_acpi_add_links(struct tb_nhi *nhi) { }
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void tb_debugfs_init(void);
+void tb_debugfs_exit(void);
+void tb_switch_debugfs_init(struct tb_switch *sw);
+void tb_switch_debugfs_remove(struct tb_switch *sw);
+#else
+static inline void tb_debugfs_init(void) { }
+static inline void tb_debugfs_exit(void) { }
+static inline void tb_switch_debugfs_init(struct tb_switch *sw) { }
+static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { }
+#endif
+
+#ifdef CONFIG_USB4_KUNIT_TEST
+int tb_test_init(void);
+void tb_test_exit(void);
+#else
+static inline int tb_test_init(void) { return 0; }
+static inline void tb_test_exit(void) { }
+#endif
+
+#endif
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
new file mode 100644
index 000000000..0e01dbc63
--- /dev/null
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -0,0 +1,571 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt control channel messages
+ *
+ * Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2017, Intel Corporation
+ */
+
+#ifndef _TB_MSGS
+#define _TB_MSGS
+
+#include <linux/types.h>
+#include <linux/uuid.h>
+
+enum tb_cfg_space {
+ TB_CFG_HOPS = 0,
+ TB_CFG_PORT = 1,
+ TB_CFG_SWITCH = 2,
+ TB_CFG_COUNTERS = 3,
+};
+
+enum tb_cfg_error {
+ TB_CFG_ERROR_PORT_NOT_CONNECTED = 0,
+ TB_CFG_ERROR_LINK_ERROR = 1,
+ TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2,
+ TB_CFG_ERROR_NO_SUCH_PORT = 4,
+ TB_CFG_ERROR_ACK_PLUG_EVENT = 7, /* send as reply to TB_CFG_PKG_EVENT */
+ TB_CFG_ERROR_LOOP = 8,
+ TB_CFG_ERROR_HEC_ERROR_DETECTED = 12,
+ TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
+ TB_CFG_ERROR_LOCK = 15,
+};
+
+/* common header */
+struct tb_cfg_header {
+ u32 route_hi:22;
+ u32 unknown:10; /* highest order bit is set on replies */
+ u32 route_lo;
+} __packed;
+
+/* additional header for read/write packets */
+struct tb_cfg_address {
+ u32 offset:13; /* in dwords */
+ u32 length:6; /* in dwords */
+ u32 port:6;
+ enum tb_cfg_space space:2;
+ u32 seq:2; /* sequence number */
+ u32 zero:3;
+} __packed;
+
+/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */
+struct cfg_read_pkg {
+ struct tb_cfg_header header;
+ struct tb_cfg_address addr;
+} __packed;
+
+/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */
+struct cfg_write_pkg {
+ struct tb_cfg_header header;
+ struct tb_cfg_address addr;
+ u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */
+} __packed;
+
+/* TB_CFG_PKG_ERROR */
+struct cfg_error_pkg {
+ struct tb_cfg_header header;
+ enum tb_cfg_error error:4;
+ u32 zero1:4;
+ u32 port:6;
+ u32 zero2:2; /* Both should be zero, still they are different fields. */
+ u32 zero3:14;
+ u32 pg:2;
+} __packed;
+
+#define TB_CFG_ERROR_PG_HOT_PLUG 0x2
+#define TB_CFG_ERROR_PG_HOT_UNPLUG 0x3
+
+/* TB_CFG_PKG_EVENT */
+struct cfg_event_pkg {
+ struct tb_cfg_header header;
+ u32 port:6;
+ u32 zero:25;
+ bool unplug:1;
+} __packed;
+
+/* TB_CFG_PKG_RESET */
+struct cfg_reset_pkg {
+ struct tb_cfg_header header;
+} __packed;
+
+/* TB_CFG_PKG_PREPARE_TO_SLEEP */
+struct cfg_pts_pkg {
+ struct tb_cfg_header header;
+ u32 data;
+} __packed;
+
+/* ICM messages */
+
+enum icm_pkg_code {
+ ICM_GET_TOPOLOGY = 0x1,
+ ICM_DRIVER_READY = 0x3,
+ ICM_APPROVE_DEVICE = 0x4,
+ ICM_CHALLENGE_DEVICE = 0x5,
+ ICM_ADD_DEVICE_KEY = 0x6,
+ ICM_GET_ROUTE = 0xa,
+ ICM_APPROVE_XDOMAIN = 0x10,
+ ICM_DISCONNECT_XDOMAIN = 0x11,
+ ICM_PREBOOT_ACL = 0x18,
+};
+
+enum icm_event_code {
+ ICM_EVENT_DEVICE_CONNECTED = 0x3,
+ ICM_EVENT_DEVICE_DISCONNECTED = 0x4,
+ ICM_EVENT_XDOMAIN_CONNECTED = 0x6,
+ ICM_EVENT_XDOMAIN_DISCONNECTED = 0x7,
+ ICM_EVENT_RTD3_VETO = 0xa,
+};
+
+struct icm_pkg_header {
+ u8 code;
+ u8 flags;
+ u8 packet_id;
+ u8 total_packets;
+};
+
+#define ICM_FLAGS_ERROR BIT(0)
+#define ICM_FLAGS_NO_KEY BIT(1)
+#define ICM_FLAGS_SLEVEL_SHIFT 3
+#define ICM_FLAGS_SLEVEL_MASK GENMASK(4, 3)
+#define ICM_FLAGS_DUAL_LANE BIT(5)
+#define ICM_FLAGS_SPEED_GEN3 BIT(7)
+#define ICM_FLAGS_WRITE BIT(7)
+
+struct icm_pkg_driver_ready {
+ struct icm_pkg_header hdr;
+};
+
+/* Falcon Ridge only messages */
+
+struct icm_fr_pkg_driver_ready_response {
+ struct icm_pkg_header hdr;
+ u8 romver;
+ u8 ramver;
+ u16 security_level;
+};
+
+#define ICM_FR_SLEVEL_MASK 0xf
+
+/* Falcon Ridge & Alpine Ridge common messages */
+
+struct icm_fr_pkg_get_topology {
+ struct icm_pkg_header hdr;
+};
+
+#define ICM_GET_TOPOLOGY_PACKETS 14
+
+struct icm_fr_pkg_get_topology_response {
+ struct icm_pkg_header hdr;
+ u32 route_lo;
+ u32 route_hi;
+ u8 first_data;
+ u8 second_data;
+ u8 drom_i2c_address_index;
+ u8 switch_index;
+ u32 reserved[2];
+ u32 ports[16];
+ u32 port_hop_info[16];
+};
+
+#define ICM_SWITCH_USED BIT(0)
+#define ICM_SWITCH_UPSTREAM_PORT_MASK GENMASK(7, 1)
+#define ICM_SWITCH_UPSTREAM_PORT_SHIFT 1
+
+#define ICM_PORT_TYPE_MASK GENMASK(23, 0)
+#define ICM_PORT_INDEX_SHIFT 24
+#define ICM_PORT_INDEX_MASK GENMASK(31, 24)
+
+struct icm_fr_event_device_connected {
+ struct icm_pkg_header hdr;
+ uuid_t ep_uuid;
+ u8 connection_key;
+ u8 connection_id;
+ u16 link_info;
+ u32 ep_name[55];
+};
+
+#define ICM_LINK_INFO_LINK_MASK 0x7
+#define ICM_LINK_INFO_DEPTH_SHIFT 4
+#define ICM_LINK_INFO_DEPTH_MASK GENMASK(7, 4)
+#define ICM_LINK_INFO_APPROVED BIT(8)
+#define ICM_LINK_INFO_REJECTED BIT(9)
+#define ICM_LINK_INFO_BOOT BIT(10)
+
+struct icm_fr_pkg_approve_device {
+ struct icm_pkg_header hdr;
+ uuid_t ep_uuid;
+ u8 connection_key;
+ u8 connection_id;
+ u16 reserved;
+};
+
+struct icm_fr_event_device_disconnected {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+};
+
+struct icm_fr_event_xdomain_connected {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+ uuid_t local_uuid;
+ u32 local_route_hi;
+ u32 local_route_lo;
+ u32 remote_route_hi;
+ u32 remote_route_lo;
+};
+
+struct icm_fr_event_xdomain_disconnected {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+};
+
+struct icm_fr_pkg_add_device_key {
+ struct icm_pkg_header hdr;
+ uuid_t ep_uuid;
+ u8 connection_key;
+ u8 connection_id;
+ u16 reserved;
+ u32 key[8];
+};
+
+struct icm_fr_pkg_add_device_key_response {
+ struct icm_pkg_header hdr;
+ uuid_t ep_uuid;
+ u8 connection_key;
+ u8 connection_id;
+ u16 reserved;
+};
+
+struct icm_fr_pkg_challenge_device {
+ struct icm_pkg_header hdr;
+ uuid_t ep_uuid;
+ u8 connection_key;
+ u8 connection_id;
+ u16 reserved;
+ u32 challenge[8];
+};
+
+struct icm_fr_pkg_challenge_device_response {
+ struct icm_pkg_header hdr;
+ uuid_t ep_uuid;
+ u8 connection_key;
+ u8 connection_id;
+ u16 reserved;
+ u32 challenge[8];
+ u32 response[8];
+};
+
+struct icm_fr_pkg_approve_xdomain {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+ u16 transmit_path;
+ u16 transmit_ring;
+ u16 receive_path;
+ u16 receive_ring;
+};
+
+struct icm_fr_pkg_approve_xdomain_response {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+ u16 transmit_path;
+ u16 transmit_ring;
+ u16 receive_path;
+ u16 receive_ring;
+};
+
+/* Alpine Ridge only messages */
+
+struct icm_ar_pkg_driver_ready_response {
+ struct icm_pkg_header hdr;
+ u8 romver;
+ u8 ramver;
+ u16 info;
+};
+
+#define ICM_AR_FLAGS_RTD3 BIT(6)
+
+#define ICM_AR_INFO_SLEVEL_MASK GENMASK(3, 0)
+#define ICM_AR_INFO_BOOT_ACL_SHIFT 7
+#define ICM_AR_INFO_BOOT_ACL_MASK GENMASK(11, 7)
+#define ICM_AR_INFO_BOOT_ACL_SUPPORTED BIT(13)
+
+struct icm_ar_pkg_get_route {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+};
+
+struct icm_ar_pkg_get_route_response {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ u32 route_hi;
+ u32 route_lo;
+};
+
+struct icm_ar_boot_acl_entry {
+ u32 uuid_lo;
+ u32 uuid_hi;
+};
+
+#define ICM_AR_PREBOOT_ACL_ENTRIES 16
+
+struct icm_ar_pkg_preboot_acl {
+ struct icm_pkg_header hdr;
+ struct icm_ar_boot_acl_entry acl[ICM_AR_PREBOOT_ACL_ENTRIES];
+};
+
+struct icm_ar_pkg_preboot_acl_response {
+ struct icm_pkg_header hdr;
+ struct icm_ar_boot_acl_entry acl[ICM_AR_PREBOOT_ACL_ENTRIES];
+};
+
+/* Titan Ridge messages */
+
+struct icm_tr_pkg_driver_ready_response {
+ struct icm_pkg_header hdr;
+ u16 reserved1;
+ u16 info;
+ u32 nvm_version;
+ u16 device_id;
+ u16 reserved2;
+};
+
+#define ICM_TR_FLAGS_RTD3 BIT(6)
+
+#define ICM_TR_INFO_SLEVEL_MASK GENMASK(2, 0)
+#define ICM_TR_INFO_BOOT_ACL_SHIFT 7
+#define ICM_TR_INFO_BOOT_ACL_MASK GENMASK(12, 7)
+
+struct icm_tr_event_device_connected {
+ struct icm_pkg_header hdr;
+ uuid_t ep_uuid;
+ u32 route_hi;
+ u32 route_lo;
+ u8 connection_id;
+ u8 reserved;
+ u16 link_info;
+ u32 ep_name[55];
+};
+
+struct icm_tr_event_device_disconnected {
+ struct icm_pkg_header hdr;
+ u32 route_hi;
+ u32 route_lo;
+};
+
+struct icm_tr_event_xdomain_connected {
+ struct icm_pkg_header hdr;
+ u16 reserved;
+ u16 link_info;
+ uuid_t remote_uuid;
+ uuid_t local_uuid;
+ u32 local_route_hi;
+ u32 local_route_lo;
+ u32 remote_route_hi;
+ u32 remote_route_lo;
+};
+
+struct icm_tr_event_xdomain_disconnected {
+ struct icm_pkg_header hdr;
+ u32 route_hi;
+ u32 route_lo;
+ uuid_t remote_uuid;
+};
+
+struct icm_tr_pkg_approve_device {
+ struct icm_pkg_header hdr;
+ uuid_t ep_uuid;
+ u32 route_hi;
+ u32 route_lo;
+ u8 connection_id;
+ u8 reserved1[3];
+};
+
+struct icm_tr_pkg_add_device_key {
+ struct icm_pkg_header hdr;
+ uuid_t ep_uuid;
+ u32 route_hi;
+ u32 route_lo;
+ u8 connection_id;
+ u8 reserved[3];
+ u32 key[8];
+};
+
+struct icm_tr_pkg_challenge_device {
+ struct icm_pkg_header hdr;
+ uuid_t ep_uuid;
+ u32 route_hi;
+ u32 route_lo;
+ u8 connection_id;
+ u8 reserved[3];
+ u32 challenge[8];
+};
+
+struct icm_tr_pkg_approve_xdomain {
+ struct icm_pkg_header hdr;
+ u32 route_hi;
+ u32 route_lo;
+ uuid_t remote_uuid;
+ u16 transmit_path;
+ u16 transmit_ring;
+ u16 receive_path;
+ u16 receive_ring;
+};
+
+struct icm_tr_pkg_disconnect_xdomain {
+ struct icm_pkg_header hdr;
+ u8 stage;
+ u8 reserved[3];
+ u32 route_hi;
+ u32 route_lo;
+ uuid_t remote_uuid;
+};
+
+struct icm_tr_pkg_challenge_device_response {
+ struct icm_pkg_header hdr;
+ uuid_t ep_uuid;
+ u32 route_hi;
+ u32 route_lo;
+ u8 connection_id;
+ u8 reserved[3];
+ u32 challenge[8];
+ u32 response[8];
+};
+
+struct icm_tr_pkg_add_device_key_response {
+ struct icm_pkg_header hdr;
+ uuid_t ep_uuid;
+ u32 route_hi;
+ u32 route_lo;
+ u8 connection_id;
+ u8 reserved[3];
+};
+
+struct icm_tr_pkg_approve_xdomain_response {
+ struct icm_pkg_header hdr;
+ u32 route_hi;
+ u32 route_lo;
+ uuid_t remote_uuid;
+ u16 transmit_path;
+ u16 transmit_ring;
+ u16 receive_path;
+ u16 receive_ring;
+};
+
+struct icm_tr_pkg_disconnect_xdomain_response {
+ struct icm_pkg_header hdr;
+ u8 stage;
+ u8 reserved[3];
+ u32 route_hi;
+ u32 route_lo;
+ uuid_t remote_uuid;
+};
+
+/* Ice Lake messages */
+
+struct icm_icl_event_rtd3_veto {
+ struct icm_pkg_header hdr;
+ u32 veto_reason;
+};
+
+/* XDomain messages */
+
+struct tb_xdomain_header {
+ u32 route_hi;
+ u32 route_lo;
+ u32 length_sn;
+};
+
+#define TB_XDOMAIN_LENGTH_MASK GENMASK(5, 0)
+#define TB_XDOMAIN_SN_MASK GENMASK(28, 27)
+#define TB_XDOMAIN_SN_SHIFT 27
+
+enum tb_xdp_type {
+ UUID_REQUEST_OLD = 1,
+ UUID_RESPONSE = 2,
+ PROPERTIES_REQUEST,
+ PROPERTIES_RESPONSE,
+ PROPERTIES_CHANGED_REQUEST,
+ PROPERTIES_CHANGED_RESPONSE,
+ ERROR_RESPONSE,
+ UUID_REQUEST = 12,
+};
+
+struct tb_xdp_header {
+ struct tb_xdomain_header xd_hdr;
+ uuid_t uuid;
+ u32 type;
+};
+
+struct tb_xdp_uuid {
+ struct tb_xdp_header hdr;
+};
+
+struct tb_xdp_uuid_response {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ u32 src_route_hi;
+ u32 src_route_lo;
+};
+
+struct tb_xdp_properties {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ uuid_t dst_uuid;
+ u16 offset;
+ u16 reserved;
+};
+
+struct tb_xdp_properties_response {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ uuid_t dst_uuid;
+ u16 offset;
+ u16 data_length;
+ u32 generation;
+ u32 data[0];
+};
+
+/*
+ * Maximum length of the data array a single XDomain property response is
+ * allowed to carry.
+ */
+#define TB_XDP_PROPERTIES_MAX_DATA_LENGTH \
+ (((256 - 4 - sizeof(struct tb_xdp_properties_response))) / 4)
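+
+/*
+ * Worked example: assuming the natural (unpacked) layout of struct
+ * tb_xdp_properties_response is 72 bytes, the expression above evaluates
+ * to (256 - 4 - 72) / 4 = 45 dwords of property data per response packet.
+ */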
+
+/* Maximum size of the total property block in dwords we allow */
+#define TB_XDP_PROPERTIES_MAX_LENGTH 500
+
+struct tb_xdp_properties_changed {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+};
+
+struct tb_xdp_properties_changed_response {
+ struct tb_xdp_header hdr;
+};
+
+enum tb_xdp_error {
+ ERROR_SUCCESS,
+ ERROR_UNKNOWN_PACKET,
+ ERROR_UNKNOWN_DOMAIN,
+ ERROR_NOT_SUPPORTED,
+ ERROR_NOT_READY,
+};
+
+struct tb_xdp_error_response {
+ struct tb_xdp_header hdr;
+ u32 error;
+};
+
+#endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
new file mode 100644
index 000000000..26868e2f9
--- /dev/null
+++ b/drivers/thunderbolt/tb_regs.h
@@ -0,0 +1,457 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt driver - Port/Switch config area registers
+ *
+ * Every thunderbolt device consists (logically) of a switch with multiple
+ * ports. Every port contains up to four config regions (HOPS, PORT, SWITCH,
+ * COUNTERS) which are used to configure the device.
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
+ */
+
+#ifndef _TB_REGS
+#define _TB_REGS
+
+#include <linux/types.h>
+
+
+#define TB_ROUTE_SHIFT 8 /* number of bits in a port entry of a route */
+
+
+/*
+ * TODO: should be 63? But we do not know how to receive frames larger than 256
+ * bytes at the frame level. (header + checksum = 16, 60*4 = 240)
+ */
+#define TB_MAX_CONFIG_RW_LENGTH 60
+
+enum tb_switch_cap {
+ TB_SWITCH_CAP_TMU = 0x03,
+ TB_SWITCH_CAP_VSE = 0x05,
+};
+
+enum tb_switch_vse_cap {
+ TB_VSE_CAP_PLUG_EVENTS = 0x01, /* also EEPROM */
+ TB_VSE_CAP_TIME2 = 0x03,
+ TB_VSE_CAP_IECS = 0x04,
+ TB_VSE_CAP_LINK_CONTROLLER = 0x06, /* also IECS */
+};
+
+enum tb_port_cap {
+ TB_PORT_CAP_PHY = 0x01,
+ TB_PORT_CAP_POWER = 0x02,
+ TB_PORT_CAP_TIME1 = 0x03,
+ TB_PORT_CAP_ADAP = 0x04,
+ TB_PORT_CAP_VSE = 0x05,
+ TB_PORT_CAP_USB4 = 0x06,
+};
+
+enum tb_port_state {
+ TB_PORT_DISABLED = 0, /* tb_cap_phy.disable == 1 */
+ TB_PORT_CONNECTING = 1, /* retry */
+ TB_PORT_UP = 2,
+ TB_PORT_UNPLUGGED = 7,
+};
+
+/* capability headers */
+
+struct tb_cap_basic {
+ u8 next;
+ /* enum tb_cap cap:8; prevent "narrower than values of its type" */
+	u8 cap; /* if cap == 0x05 then we have an extended capability */
+} __packed;
+
+/**
+ * struct tb_cap_extended_short - Switch extended short capability
+ * @next: Pointer to the next capability. If @next and @length are zero
+ * then we have a long cap.
+ * @cap: Base capability ID (see &enum tb_switch_cap)
+ * @vsec_id: Vendor specific capability ID (see &enum tb_switch_vse_cap)
+ * @length: Length of this capability
+ */
+struct tb_cap_extended_short {
+ u8 next;
+ u8 cap;
+ u8 vsec_id;
+ u8 length;
+} __packed;
+
+/**
+ * struct tb_cap_extended_long - Switch extended long capability
+ * @zero1: This field should be zero
+ * @cap: Base capability ID (see &enum tb_switch_cap)
+ * @vsec_id: Vendor specific capability ID (see &enum tb_switch_vse_cap)
+ * @zero2: This field should be zero
+ * @next: Pointer to the next capability
+ * @length: Length of this capability
+ */
+struct tb_cap_extended_long {
+ u8 zero1;
+ u8 cap;
+ u8 vsec_id;
+ u8 zero2;
+ u16 next;
+ u16 length;
+} __packed;
+
+/**
+ * struct tb_cap_any - Structure capable of holding every capability
+ * @basic: Basic capability
+ * @extended_short: Vendor specific capability
+ * @extended_long: Vendor specific extended capability
+ */
+struct tb_cap_any {
+ union {
+ struct tb_cap_basic basic;
+ struct tb_cap_extended_short extended_short;
+ struct tb_cap_extended_long extended_long;
+ };
+} __packed;
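+
+/*
+ * Illustrative sketch only (hypothetical helper, not used by the driver):
+ * encodes the rule documented for struct tb_cap_extended_short above, i.e.
+ * a capability whose @next and @length are both zero is really the start
+ * of a long extended capability.
+ */
+static inline bool tb_cap_is_extended_long(const struct tb_cap_any *cap)
+{
+	return cap->extended_short.next == 0 &&
+	       cap->extended_short.length == 0;
+}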
+
+/* capabilities */
+
+struct tb_cap_link_controller {
+ struct tb_cap_extended_long cap_header;
+ u32 count:4; /* number of link controllers */
+ u32 unknown1:4;
+ u32 base_offset:8; /*
+ * offset (into this capability) of the configuration
+ * area of the first link controller
+ */
+ u32 length:12; /* link controller configuration area length */
+ u32 unknown2:4; /* TODO check that length is correct */
+} __packed;
+
+struct tb_cap_phy {
+ struct tb_cap_basic cap_header;
+ u32 unknown1:16;
+ u32 unknown2:14;
+ bool disable:1;
+ u32 unknown3:11;
+ enum tb_port_state state:4;
+ u32 unknown4:2;
+} __packed;
+
+struct tb_eeprom_ctl {
+ bool clock:1; /* send pulse to transfer one bit */
+ bool access_low:1; /* set to 0 before access */
+ bool data_out:1; /* to eeprom */
+ bool data_in:1; /* from eeprom */
+ bool access_high:1; /* set to 1 before access */
+ bool not_present:1; /* should be 0 */
+ bool unknown1:1;
+ bool present:1; /* should be 1 */
+ u32 unknown2:24;
+} __packed;
+
+struct tb_cap_plug_events {
+ struct tb_cap_extended_short cap_header;
+ u32 __unknown1:2;
+ u32 plug_events:5;
+ u32 __unknown2:25;
+ u32 __unknown3;
+ u32 __unknown4;
+ struct tb_eeprom_ctl eeprom_ctl;
+ u32 __unknown5[7];
+ u32 drom_offset; /* 32 bit register, but eeprom addresses are 16 bit */
+} __packed;
+
+/* device headers */
+
+/* Present on port 0 in TB_CFG_SWITCH at address zero. */
+struct tb_regs_switch_header {
+ /* DWORD 0 */
+ u16 vendor_id;
+ u16 device_id;
+ /* DWORD 1 */
+ u32 first_cap_offset:8;
+ u32 upstream_port_number:6;
+ u32 max_port_number:6;
+ u32 depth:3;
+ u32 __unknown1:1;
+ u32 revision:8;
+ /* DWORD 2 */
+ u32 route_lo;
+ /* DWORD 3 */
+ u32 route_hi:31;
+ bool enabled:1;
+ /* DWORD 4 */
+ u32 plug_events_delay:8; /*
+ * RW, pause between plug events in
+ * milliseconds. Writing 0x00 is interpreted
+ * as 255ms.
+ */
+ u32 cmuv:8;
+ u32 __unknown4:8;
+ u32 thunderbolt_version:8;
+} __packed;
+
+/* USB4 version 1.0 */
+#define USB4_VERSION_1_0 0x20
+
+#define ROUTER_CS_1 0x01
+#define ROUTER_CS_4 0x04
+#define ROUTER_CS_5 0x05
+#define ROUTER_CS_5_SLP BIT(0)
+#define ROUTER_CS_5_WOP BIT(1)
+#define ROUTER_CS_5_WOU BIT(2)
+#define ROUTER_CS_5_C3S BIT(23)
+#define ROUTER_CS_5_PTO BIT(24)
+#define ROUTER_CS_5_UTO BIT(25)
+#define ROUTER_CS_5_HCO BIT(26)
+#define ROUTER_CS_5_CV BIT(31)
+#define ROUTER_CS_6 0x06
+#define ROUTER_CS_6_SLPR BIT(0)
+#define ROUTER_CS_6_TNS BIT(1)
+#define ROUTER_CS_6_WOPS BIT(2)
+#define ROUTER_CS_6_WOUS BIT(3)
+#define ROUTER_CS_6_HCI BIT(18)
+#define ROUTER_CS_6_CR BIT(25)
+#define ROUTER_CS_7 0x07
+#define ROUTER_CS_9 0x09
+#define ROUTER_CS_25 0x19
+#define ROUTER_CS_26 0x1a
+#define ROUTER_CS_26_STATUS_MASK GENMASK(29, 24)
+#define ROUTER_CS_26_STATUS_SHIFT 24
+#define ROUTER_CS_26_ONS BIT(30)
+#define ROUTER_CS_26_OV BIT(31)
+
+/* Router TMU configuration */
+#define TMU_RTR_CS_0 0x00
+#define TMU_RTR_CS_0_TD BIT(27)
+#define TMU_RTR_CS_0_UCAP BIT(30)
+#define TMU_RTR_CS_1 0x01
+#define TMU_RTR_CS_1_LOCAL_TIME_NS_MASK GENMASK(31, 16)
+#define TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT 16
+#define TMU_RTR_CS_2 0x02
+#define TMU_RTR_CS_3 0x03
+#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0)
+#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16)
+#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16
+#define TMU_RTR_CS_22 0x16
+#define TMU_RTR_CS_24 0x18
+
+enum tb_port_type {
+ TB_TYPE_INACTIVE = 0x000000,
+ TB_TYPE_PORT = 0x000001,
+ TB_TYPE_NHI = 0x000002,
+ /* TB_TYPE_ETHERNET = 0x020000, lower order bits are not known */
+ /* TB_TYPE_SATA = 0x080000, lower order bits are not known */
+ TB_TYPE_DP_HDMI_IN = 0x0e0101,
+ TB_TYPE_DP_HDMI_OUT = 0x0e0102,
+ TB_TYPE_PCIE_DOWN = 0x100101,
+ TB_TYPE_PCIE_UP = 0x100102,
+ TB_TYPE_USB3_DOWN = 0x200101,
+ TB_TYPE_USB3_UP = 0x200102,
+};
+
+/* Present on every port in TB_CFG_PORT at address zero. */
+struct tb_regs_port_header {
+ /* DWORD 0 */
+ u16 vendor_id;
+ u16 device_id;
+ /* DWORD 1 */
+ u32 first_cap_offset:8;
+ u32 max_counters:11;
+ u32 counters_support:1;
+ u32 __unknown1:4;
+ u32 revision:8;
+ /* DWORD 2 */
+ enum tb_port_type type:24;
+ u32 thunderbolt_version:8;
+ /* DWORD 3 */
+ u32 __unknown2:20;
+ u32 port_number:6;
+ u32 __unknown3:6;
+ /* DWORD 4 */
+ u32 nfc_credits;
+ /* DWORD 5 */
+ u32 max_in_hop_id:11;
+ u32 max_out_hop_id:11;
+ u32 __unknown4:10;
+ /* DWORD 6 */
+ u32 __unknown5;
+ /* DWORD 7 */
+ u32 __unknown6;
+
+} __packed;
+
+/* Basic adapter configuration registers */
+#define ADP_CS_4 0x04
+#define ADP_CS_4_NFC_BUFFERS_MASK GENMASK(9, 0)
+#define ADP_CS_4_TOTAL_BUFFERS_MASK GENMASK(29, 20)
+#define ADP_CS_4_TOTAL_BUFFERS_SHIFT 20
+#define ADP_CS_4_LCK BIT(31)
+#define ADP_CS_5 0x05
+#define ADP_CS_5_LCA_MASK GENMASK(28, 22)
+#define ADP_CS_5_LCA_SHIFT 22
+#define ADP_CS_5_DHP BIT(31)
+
+/* TMU adapter registers */
+#define TMU_ADP_CS_3 0x03
+#define TMU_ADP_CS_3_UDM BIT(29)
+
+/* Lane adapter registers */
+#define LANE_ADP_CS_0 0x00
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
+#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20
+#define LANE_ADP_CS_1 0x01
+#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
+#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
+#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
+#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
+#define LANE_ADP_CS_1_LD BIT(14)
+#define LANE_ADP_CS_1_LB BIT(15)
+#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16)
+#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8
+#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4
+#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
+#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
+
+/* USB4 port registers */
+#define PORT_CS_1 0x01
+#define PORT_CS_1_LENGTH_SHIFT 8
+#define PORT_CS_1_TARGET_MASK GENMASK(18, 16)
+#define PORT_CS_1_TARGET_SHIFT 16
+#define PORT_CS_1_RETIMER_INDEX_SHIFT 20
+#define PORT_CS_1_WNR_WRITE BIT(24)
+#define PORT_CS_1_NR BIT(25)
+#define PORT_CS_1_RC BIT(26)
+#define PORT_CS_1_PND BIT(31)
+#define PORT_CS_2 0x02
+#define PORT_CS_18 0x12
+#define PORT_CS_18_BE BIT(8)
+#define PORT_CS_18_TCM BIT(9)
+#define PORT_CS_18_WOU4S BIT(18)
+#define PORT_CS_19 0x13
+#define PORT_CS_19_PC BIT(3)
+#define PORT_CS_19_PID BIT(4)
+#define PORT_CS_19_WOC BIT(16)
+#define PORT_CS_19_WOD BIT(17)
+#define PORT_CS_19_WOU4 BIT(18)
+
+/* Display Port adapter registers */
+#define ADP_DP_CS_0 0x00
+#define ADP_DP_CS_0_VIDEO_HOPID_MASK GENMASK(26, 16)
+#define ADP_DP_CS_0_VIDEO_HOPID_SHIFT 16
+#define ADP_DP_CS_0_AE BIT(30)
+#define ADP_DP_CS_0_VE BIT(31)
+#define ADP_DP_CS_1_AUX_TX_HOPID_MASK GENMASK(10, 0)
+#define ADP_DP_CS_1_AUX_RX_HOPID_MASK GENMASK(21, 11)
+#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11
+#define ADP_DP_CS_2 0x02
+#define ADP_DP_CS_2_HDP BIT(6)
+#define ADP_DP_CS_3 0x03
+#define ADP_DP_CS_3_HDPC BIT(9)
+#define DP_LOCAL_CAP 0x04
+#define DP_REMOTE_CAP 0x05
+#define DP_STATUS_CTRL 0x06
+#define DP_STATUS_CTRL_CMHS BIT(25)
+#define DP_STATUS_CTRL_UF BIT(26)
+#define DP_COMMON_CAP 0x07
+/*
+ * DP_COMMON_CAP offsets work also for DP_LOCAL_CAP and DP_REMOTE_CAP
+ * with the exception of DPRX done.
+ */
+#define DP_COMMON_CAP_RATE_MASK GENMASK(11, 8)
+#define DP_COMMON_CAP_RATE_SHIFT 8
+#define DP_COMMON_CAP_RATE_RBR 0x0
+#define DP_COMMON_CAP_RATE_HBR 0x1
+#define DP_COMMON_CAP_RATE_HBR2 0x2
+#define DP_COMMON_CAP_RATE_HBR3 0x3
+#define DP_COMMON_CAP_LANES_MASK GENMASK(14, 12)
+#define DP_COMMON_CAP_LANES_SHIFT 12
+#define DP_COMMON_CAP_1_LANE 0x0
+#define DP_COMMON_CAP_2_LANES 0x1
+#define DP_COMMON_CAP_4_LANES 0x2
+#define DP_COMMON_CAP_DPRX_DONE BIT(31)
+
+/* PCIe adapter registers */
+#define ADP_PCIE_CS_0 0x00
+#define ADP_PCIE_CS_0_PE BIT(31)
+
+/* USB adapter registers */
+#define ADP_USB3_CS_0 0x00
+#define ADP_USB3_CS_0_V BIT(30)
+#define ADP_USB3_CS_0_PE BIT(31)
+#define ADP_USB3_CS_1 0x01
+#define ADP_USB3_CS_1_CUBW_MASK GENMASK(11, 0)
+#define ADP_USB3_CS_1_CDBW_MASK GENMASK(23, 12)
+#define ADP_USB3_CS_1_CDBW_SHIFT 12
+#define ADP_USB3_CS_1_HCA BIT(31)
+#define ADP_USB3_CS_2 0x02
+#define ADP_USB3_CS_2_AUBW_MASK GENMASK(11, 0)
+#define ADP_USB3_CS_2_ADBW_MASK GENMASK(23, 12)
+#define ADP_USB3_CS_2_ADBW_SHIFT 12
+#define ADP_USB3_CS_2_CMR BIT(31)
+#define ADP_USB3_CS_3 0x03
+#define ADP_USB3_CS_3_SCALE_MASK GENMASK(5, 0)
+#define ADP_USB3_CS_4 0x04
+#define ADP_USB3_CS_4_ALR_MASK GENMASK(6, 0)
+#define ADP_USB3_CS_4_ALR_20G 0x1
+#define ADP_USB3_CS_4_ULV BIT(7)
+#define ADP_USB3_CS_4_MSLR_MASK GENMASK(18, 12)
+#define ADP_USB3_CS_4_MSLR_SHIFT 12
+#define ADP_USB3_CS_4_MSLR_20G 0x1
+
+/* Hop register from TB_CFG_HOPS. 8 bytes per entry. */
+struct tb_regs_hop {
+ /* DWORD 0 */
+ u32 next_hop:11; /*
+ * hop to take after sending the packet through
+ * out_port (on the incoming port of the next switch)
+ */
+ u32 out_port:6; /* next port of the path (on the same switch) */
+ u32 initial_credits:8;
+ u32 unknown1:6; /* set to zero */
+ bool enable:1;
+
+ /* DWORD 1 */
+ u32 weight:4;
+ u32 unknown2:4; /* set to zero */
+ u32 priority:3;
+ bool drop_packages:1;
+ u32 counter:11; /* index into TB_CFG_COUNTERS on this port */
+ bool counter_enable:1;
+ bool ingress_fc:1;
+ bool egress_fc:1;
+ bool ingress_shared_buffer:1;
+ bool egress_shared_buffer:1;
+ bool pending:1;
+ u32 unknown3:3; /* set to zero */
+} __packed;
+
+/* Common link controller registers */
+#define TB_LC_DESC 0x02
+#define TB_LC_DESC_NLC_MASK GENMASK(3, 0)
+#define TB_LC_DESC_SIZE_SHIFT 8
+#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8)
+#define TB_LC_DESC_PORT_SIZE_SHIFT 16
+#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16)
+#define TB_LC_FUSE 0x03
+#define TB_LC_SNK_ALLOCATION 0x10
+#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0)
+#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1
+#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4
+#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4)
+#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1
+#define TB_LC_POWER 0x740
+
+/* Link controller registers */
+#define TB_LC_PORT_ATTR 0x8d
+#define TB_LC_PORT_ATTR_BE BIT(12)
+
+#define TB_LC_SX_CTRL 0x96
+#define TB_LC_SX_CTRL_WOC BIT(1)
+#define TB_LC_SX_CTRL_WOD BIT(2)
+#define TB_LC_SX_CTRL_WOU4 BIT(5)
+#define TB_LC_SX_CTRL_WOP BIT(6)
+#define TB_LC_SX_CTRL_L1C BIT(16)
+#define TB_LC_SX_CTRL_L1D BIT(17)
+#define TB_LC_SX_CTRL_L2C BIT(20)
+#define TB_LC_SX_CTRL_L2D BIT(21)
+#define TB_LC_SX_CTRL_UPSTREAM BIT(30)
+#define TB_LC_SX_CTRL_SLP BIT(31)
+
+#endif
diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c
new file mode 100644
index 000000000..e254f8c37
--- /dev/null
+++ b/drivers/thunderbolt/test.c
@@ -0,0 +1,1637 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit tests
+ *
+ * Copyright (C) 2020, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/idr.h>
+
+#include "tb.h"
+#include "tunnel.h"
+
+static int __ida_init(struct kunit_resource *res, void *context)
+{
+ struct ida *ida = context;
+
+ ida_init(ida);
+ res->data = ida;
+ return 0;
+}
+
+static void __ida_destroy(struct kunit_resource *res)
+{
+ struct ida *ida = res->data;
+
+ ida_destroy(ida);
+}
+
+static void kunit_ida_init(struct kunit *test, struct ida *ida)
+{
+ kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
+}
+
+static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
+ u8 upstream_port, u8 max_port_number)
+{
+ struct tb_switch *sw;
+ size_t size;
+ int i;
+
+ sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return NULL;
+
+ sw->config.upstream_port_number = upstream_port;
+ sw->config.depth = tb_route_length(route);
+ sw->config.route_hi = upper_32_bits(route);
+ sw->config.route_lo = lower_32_bits(route);
+ sw->config.enabled = 0;
+ sw->config.max_port_number = max_port_number;
+
+ size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
+ sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
+ if (!sw->ports)
+ return NULL;
+
+ for (i = 0; i <= sw->config.max_port_number; i++) {
+ sw->ports[i].sw = sw;
+ sw->ports[i].port = i;
+ sw->ports[i].config.port_number = i;
+ if (i) {
+ kunit_ida_init(test, &sw->ports[i].in_hopids);
+ kunit_ida_init(test, &sw->ports[i].out_hopids);
+ }
+ }
+
+ return sw;
+}
+
+static struct tb_switch *alloc_host(struct kunit *test)
+{
+ struct tb_switch *sw;
+
+ sw = alloc_switch(test, 0, 7, 13);
+ if (!sw)
+ return NULL;
+
+ sw->config.vendor_id = 0x8086;
+ sw->config.device_id = 0x9a1b;
+
+ sw->ports[0].config.type = TB_TYPE_PORT;
+ sw->ports[0].config.max_in_hop_id = 7;
+ sw->ports[0].config.max_out_hop_id = 7;
+
+ sw->ports[1].config.type = TB_TYPE_PORT;
+ sw->ports[1].config.max_in_hop_id = 19;
+ sw->ports[1].config.max_out_hop_id = 19;
+ sw->ports[1].dual_link_port = &sw->ports[2];
+
+ sw->ports[2].config.type = TB_TYPE_PORT;
+ sw->ports[2].config.max_in_hop_id = 19;
+ sw->ports[2].config.max_out_hop_id = 19;
+ sw->ports[2].dual_link_port = &sw->ports[1];
+ sw->ports[2].link_nr = 1;
+
+ sw->ports[3].config.type = TB_TYPE_PORT;
+ sw->ports[3].config.max_in_hop_id = 19;
+ sw->ports[3].config.max_out_hop_id = 19;
+ sw->ports[3].dual_link_port = &sw->ports[4];
+
+ sw->ports[4].config.type = TB_TYPE_PORT;
+ sw->ports[4].config.max_in_hop_id = 19;
+ sw->ports[4].config.max_out_hop_id = 19;
+ sw->ports[4].dual_link_port = &sw->ports[3];
+ sw->ports[4].link_nr = 1;
+
+ sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
+ sw->ports[5].config.max_in_hop_id = 9;
+ sw->ports[5].config.max_out_hop_id = 9;
+ sw->ports[5].cap_adap = -1;
+
+ sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
+ sw->ports[6].config.max_in_hop_id = 9;
+ sw->ports[6].config.max_out_hop_id = 9;
+ sw->ports[6].cap_adap = -1;
+
+ sw->ports[7].config.type = TB_TYPE_NHI;
+ sw->ports[7].config.max_in_hop_id = 11;
+ sw->ports[7].config.max_out_hop_id = 11;
+
+ sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
+ sw->ports[8].config.max_in_hop_id = 8;
+ sw->ports[8].config.max_out_hop_id = 8;
+
+ sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
+ sw->ports[9].config.max_in_hop_id = 8;
+ sw->ports[9].config.max_out_hop_id = 8;
+
+ sw->ports[10].disabled = true;
+ sw->ports[11].disabled = true;
+
+ sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
+ sw->ports[12].config.max_in_hop_id = 8;
+ sw->ports[12].config.max_out_hop_id = 8;
+
+ sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
+ sw->ports[13].config.max_in_hop_id = 8;
+ sw->ports[13].config.max_out_hop_id = 8;
+
+ return sw;
+}
+
+static struct tb_switch *alloc_dev_default(struct kunit *test,
+ struct tb_switch *parent,
+ u64 route, bool bonded)
+{
+ struct tb_port *port, *upstream_port;
+ struct tb_switch *sw;
+
+ sw = alloc_switch(test, route, 1, 19);
+ if (!sw)
+ return NULL;
+
+ sw->config.vendor_id = 0x8086;
+ sw->config.device_id = 0x15ef;
+
+ sw->ports[0].config.type = TB_TYPE_PORT;
+ sw->ports[0].config.max_in_hop_id = 8;
+ sw->ports[0].config.max_out_hop_id = 8;
+
+ sw->ports[1].config.type = TB_TYPE_PORT;
+ sw->ports[1].config.max_in_hop_id = 19;
+ sw->ports[1].config.max_out_hop_id = 19;
+ sw->ports[1].dual_link_port = &sw->ports[2];
+
+ sw->ports[2].config.type = TB_TYPE_PORT;
+ sw->ports[2].config.max_in_hop_id = 19;
+ sw->ports[2].config.max_out_hop_id = 19;
+ sw->ports[2].dual_link_port = &sw->ports[1];
+ sw->ports[2].link_nr = 1;
+
+ sw->ports[3].config.type = TB_TYPE_PORT;
+ sw->ports[3].config.max_in_hop_id = 19;
+ sw->ports[3].config.max_out_hop_id = 19;
+ sw->ports[3].dual_link_port = &sw->ports[4];
+
+ sw->ports[4].config.type = TB_TYPE_PORT;
+ sw->ports[4].config.max_in_hop_id = 19;
+ sw->ports[4].config.max_out_hop_id = 19;
+ sw->ports[4].dual_link_port = &sw->ports[3];
+ sw->ports[4].link_nr = 1;
+
+ sw->ports[5].config.type = TB_TYPE_PORT;
+ sw->ports[5].config.max_in_hop_id = 19;
+ sw->ports[5].config.max_out_hop_id = 19;
+ sw->ports[5].dual_link_port = &sw->ports[6];
+
+ sw->ports[6].config.type = TB_TYPE_PORT;
+ sw->ports[6].config.max_in_hop_id = 19;
+ sw->ports[6].config.max_out_hop_id = 19;
+ sw->ports[6].dual_link_port = &sw->ports[5];
+ sw->ports[6].link_nr = 1;
+
+ sw->ports[7].config.type = TB_TYPE_PORT;
+ sw->ports[7].config.max_in_hop_id = 19;
+ sw->ports[7].config.max_out_hop_id = 19;
+ sw->ports[7].dual_link_port = &sw->ports[8];
+
+ sw->ports[8].config.type = TB_TYPE_PORT;
+ sw->ports[8].config.max_in_hop_id = 19;
+ sw->ports[8].config.max_out_hop_id = 19;
+ sw->ports[8].dual_link_port = &sw->ports[7];
+ sw->ports[8].link_nr = 1;
+
+ sw->ports[9].config.type = TB_TYPE_PCIE_UP;
+ sw->ports[9].config.max_in_hop_id = 8;
+ sw->ports[9].config.max_out_hop_id = 8;
+
+ sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
+ sw->ports[10].config.max_in_hop_id = 8;
+ sw->ports[10].config.max_out_hop_id = 8;
+
+ sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
+ sw->ports[11].config.max_in_hop_id = 8;
+ sw->ports[11].config.max_out_hop_id = 8;
+
+ sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
+ sw->ports[12].config.max_in_hop_id = 8;
+ sw->ports[12].config.max_out_hop_id = 8;
+
+ sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
+ sw->ports[13].config.max_in_hop_id = 9;
+ sw->ports[13].config.max_out_hop_id = 9;
+ sw->ports[13].cap_adap = -1;
+
+ sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
+ sw->ports[14].config.max_in_hop_id = 9;
+ sw->ports[14].config.max_out_hop_id = 9;
+ sw->ports[14].cap_adap = -1;
+
+ sw->ports[15].disabled = true;
+
+ sw->ports[16].config.type = TB_TYPE_USB3_UP;
+ sw->ports[16].config.max_in_hop_id = 8;
+ sw->ports[16].config.max_out_hop_id = 8;
+
+ sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
+ sw->ports[17].config.max_in_hop_id = 8;
+ sw->ports[17].config.max_out_hop_id = 8;
+
+ sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
+ sw->ports[18].config.max_in_hop_id = 8;
+ sw->ports[18].config.max_out_hop_id = 8;
+
+ sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
+ sw->ports[19].config.max_in_hop_id = 8;
+ sw->ports[19].config.max_out_hop_id = 8;
+
+ if (!parent)
+ return sw;
+
+ /* Link them */
+ upstream_port = tb_upstream_port(sw);
+ port = tb_port_at(route, parent);
+ port->remote = upstream_port;
+ upstream_port->remote = port;
+ if (port->dual_link_port && upstream_port->dual_link_port) {
+ port->dual_link_port->remote = upstream_port->dual_link_port;
+ upstream_port->dual_link_port->remote = port->dual_link_port;
+
+ if (bonded) {
+ /* Bonding is used */
+ port->bonded = true;
+ port->dual_link_port->bonded = true;
+ upstream_port->bonded = true;
+ upstream_port->dual_link_port->bonded = true;
+ }
+ }
+
+ return sw;
+}
+
+static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
+ struct tb_switch *parent,
+ u64 route, bool bonded)
+{
+ struct tb_switch *sw;
+
+ sw = alloc_dev_default(test, parent, route, bonded);
+ if (!sw)
+ return NULL;
+
+ sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
+ sw->ports[13].config.max_in_hop_id = 9;
+ sw->ports[13].config.max_out_hop_id = 9;
+
+ sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
+ sw->ports[14].config.max_in_hop_id = 9;
+ sw->ports[14].config.max_out_hop_id = 9;
+
+ return sw;
+}
+
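+/*
+ * The routes used by the tests below follow the Thunderbolt route string
+ * convention: each byte selects the downstream port taken at the next
+ * depth, starting from the host. Route 0x1 is the device behind host
+ * port 1, and route 0x301 is reached through port 3 of that device,
+ * matching the topology diagrams in the individual tests.
+ */
+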
+static void tb_test_path_basic(struct kunit *test)
+{
+ struct tb_port *src_port, *dst_port, *p;
+ struct tb_switch *host;
+
+ host = alloc_host(test);
+
+ src_port = &host->ports[5];
+ dst_port = src_port;
+
+ p = tb_next_port_on_path(src_port, dst_port, NULL);
+ KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
+
+ p = tb_next_port_on_path(src_port, dst_port, p);
+ KUNIT_EXPECT_TRUE(test, !p);
+}
+
+static void tb_test_path_not_connected_walk(struct kunit *test)
+{
+ struct tb_port *src_port, *dst_port, *p;
+ struct tb_switch *host, *dev;
+
+ host = alloc_host(test);
+ /* No connection between host and dev */
+ dev = alloc_dev_default(test, NULL, 3, true);
+
+ src_port = &host->ports[12];
+ dst_port = &dev->ports[16];
+
+ p = tb_next_port_on_path(src_port, dst_port, NULL);
+ KUNIT_EXPECT_PTR_EQ(test, p, src_port);
+
+ p = tb_next_port_on_path(src_port, dst_port, p);
+ KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);
+
+ p = tb_next_port_on_path(src_port, dst_port, p);
+ KUNIT_EXPECT_TRUE(test, !p);
+
+ /* Other direction */
+
+ p = tb_next_port_on_path(dst_port, src_port, NULL);
+ KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
+
+ p = tb_next_port_on_path(dst_port, src_port, p);
+ KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);
+
+ p = tb_next_port_on_path(dst_port, src_port, p);
+ KUNIT_EXPECT_TRUE(test, !p);
+}
+
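+/*
+ * Expected switch (identified by its route), port number and adapter
+ * type at each step of a path walk.
+ */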
+struct port_expectation {
+ u64 route;
+ u8 port;
+ enum tb_port_type type;
+};
+
+static void tb_test_path_single_hop_walk(struct kunit *test)
+{
+ /*
+ * Walks from Host PCIe downstream port to Device #1 PCIe
+ * upstream port.
+ *
+ * [Host]
+ * 1 |
+ * 1 |
+ * [Device]
+ */
+ static const struct port_expectation test_data[] = {
+ { .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
+ { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
+ };
+ struct tb_port *src_port, *dst_port, *p;
+ struct tb_switch *host, *dev;
+ int i;
+
+ host = alloc_host(test);
+ dev = alloc_dev_default(test, host, 1, true);
+
+ src_port = &host->ports[8];
+ dst_port = &dev->ports[9];
+
+ /* Walk both directions */
+
+ i = 0;
+ tb_for_each_port_on_path(src_port, dst_port, p) {
+ KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+ test_data[i].type);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+
+ i = ARRAY_SIZE(test_data) - 1;
+ tb_for_each_port_on_path(dst_port, src_port, p) {
+ KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+ test_data[i].type);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void tb_test_path_daisy_chain_walk(struct kunit *test)
+{
+ /*
+ * Walks from Host DP IN to Device #2 DP OUT.
+ *
+ * [Host]
+ * 1 |
+ * 1 |
+ * [Device #1]
+ * 3 /
+ * 1 /
+ * [Device #2]
+ */
+ static const struct port_expectation test_data[] = {
+ { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
+ { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
+ };
+ struct tb_port *src_port, *dst_port, *p;
+ struct tb_switch *host, *dev1, *dev2;
+ int i;
+
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x1, true);
+ dev2 = alloc_dev_default(test, dev1, 0x301, true);
+
+ src_port = &host->ports[5];
+ dst_port = &dev2->ports[13];
+
+ /* Walk both directions */
+
+ i = 0;
+ tb_for_each_port_on_path(src_port, dst_port, p) {
+ KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+ test_data[i].type);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+
+ i = ARRAY_SIZE(test_data) - 1;
+ tb_for_each_port_on_path(dst_port, src_port, p) {
+ KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+ test_data[i].type);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void tb_test_path_simple_tree_walk(struct kunit *test)
+{
+ /*
+ * Walks from Host DP IN to Device #3 DP OUT.
+ *
+ * [Host]
+ * 1 |
+ * 1 |
+ * [Device #1]
+ * 3 / | 5 \ 7
+ * 1 / | \ 1
+ * [Device #2] | [Device #4]
+ * | 1
+ * [Device #3]
+ */
+ static const struct port_expectation test_data[] = {
+ { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
+ { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
+ { .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
+ };
+ struct tb_port *src_port, *dst_port, *p;
+ struct tb_switch *host, *dev1, *dev3;
+ int i;
+
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x1, true);
+ alloc_dev_default(test, dev1, 0x301, true);
+ dev3 = alloc_dev_default(test, dev1, 0x501, true);
+ alloc_dev_default(test, dev1, 0x701, true);
+
+ src_port = &host->ports[5];
+ dst_port = &dev3->ports[13];
+
+ /* Walk both directions */
+
+ i = 0;
+ tb_for_each_port_on_path(src_port, dst_port, p) {
+ KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+ test_data[i].type);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+
+ i = ARRAY_SIZE(test_data) - 1;
+ tb_for_each_port_on_path(dst_port, src_port, p) {
+ KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+ test_data[i].type);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void tb_test_path_complex_tree_walk(struct kunit *test)
+{
+ /*
+ * Walks from Device #3 DP IN to Device #9 DP OUT.
+ *
+ * [Host]
+ * 1 |
+ * 1 |
+ * [Device #1]
+ * 3 / | 5 \ 7
+ * 1 / | \ 1
+ * [Device #2] | [Device #5]
+ * 5 | | 1 \ 7
+ * 1 | [Device #4] \ 1
+ * [Device #3] [Device #6]
+ * 3 /
+ * 1 /
+ * [Device #7]
+ * 3 / | 5
+ * 1 / |
+ * [Device #8] | 1
+ * [Device #9]
+ */
+ static const struct port_expectation test_data[] = {
+ { .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
+ { .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
+ { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
+ { .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
+ { .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
+ { .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
+ };
+ struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
+ struct tb_port *src_port, *dst_port, *p;
+ int i;
+
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x1, true);
+ dev2 = alloc_dev_default(test, dev1, 0x301, true);
+ dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
+ alloc_dev_default(test, dev1, 0x501, true);
+ dev5 = alloc_dev_default(test, dev1, 0x701, true);
+ dev6 = alloc_dev_default(test, dev5, 0x70701, true);
+ dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
+ alloc_dev_default(test, dev7, 0x303070701, true);
+ dev9 = alloc_dev_default(test, dev7, 0x503070701, true);
+
+ src_port = &dev3->ports[13];
+ dst_port = &dev9->ports[14];
+
+ /* Walk both directions */
+
+ i = 0;
+ tb_for_each_port_on_path(src_port, dst_port, p) {
+ KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+ test_data[i].type);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+
+ i = ARRAY_SIZE(test_data) - 1;
+ tb_for_each_port_on_path(dst_port, src_port, p) {
+ KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+ test_data[i].type);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void tb_test_path_max_length_walk(struct kunit *test)
+{
+ struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
+ struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
+ struct tb_port *src_port, *dst_port, *p;
+ int i;
+
+ /*
+ * Walks from Device #6 DP IN to Device #12 DP OUT.
+ *
+ * [Host]
+ * 1 / \ 3
+ * 1 / \ 1
+ * [Device #1] [Device #7]
+ * 3 | | 3
+ * 1 | | 1
+ * [Device #2] [Device #8]
+ * 3 | | 3
+ * 1 | | 1
+ * [Device #3] [Device #9]
+ * 3 | | 3
+ * 1 | | 1
+ * [Device #4] [Device #10]
+ * 3 | | 3
+ * 1 | | 1
+ * [Device #5] [Device #11]
+ * 3 | | 3
+ * 1 | | 1
+ * [Device #6] [Device #12]
+ */
+ static const struct port_expectation test_data[] = {
+ { .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
+ { .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
+ { .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
+ { .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
+ };
+
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x1, true);
+ dev2 = alloc_dev_default(test, dev1, 0x301, true);
+ dev3 = alloc_dev_default(test, dev2, 0x30301, true);
+ dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
+ dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
+ dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
+ dev7 = alloc_dev_default(test, host, 0x3, true);
+ dev8 = alloc_dev_default(test, dev7, 0x303, true);
+ dev9 = alloc_dev_default(test, dev8, 0x30303, true);
+ dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
+ dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
+ dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
+
+ src_port = &dev6->ports[13];
+ dst_port = &dev12->ports[13];
+
+ /* Walk both directions */
+
+ i = 0;
+ tb_for_each_port_on_path(src_port, dst_port, p) {
+ KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+ test_data[i].type);
+ i++;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
+
+ i = ARRAY_SIZE(test_data) - 1;
+ tb_for_each_port_on_path(dst_port, src_port, p) {
+ KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
+ KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
+ test_data[i].type);
+ i--;
+ }
+
+ KUNIT_EXPECT_EQ(test, i, -1);
+}
+
+static void tb_test_path_not_connected(struct kunit *test)
+{
+ struct tb_switch *host, *dev1, *dev2;
+ struct tb_port *down, *up;
+ struct tb_path *path;
+
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x3, false);
+ /* Not connected to anything */
+ dev2 = alloc_dev_default(test, NULL, 0x303, false);
+
+ down = &dev1->ports[10];
+ up = &dev2->ports[9];
+
+ path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
+ KUNIT_ASSERT_TRUE(test, path == NULL);
+ path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
+ KUNIT_ASSERT_TRUE(test, path == NULL);
+}
+
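+/*
+ * Expected ingress and egress adapter (port number and type) for each
+ * hop of an allocated path, together with the route of the switch the
+ * hop runs through.
+ */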
+struct hop_expectation {
+ u64 route;
+ u8 in_port;
+ enum tb_port_type in_type;
+ u8 out_port;
+ enum tb_port_type out_type;
+};
+
+static void tb_test_path_not_bonded_lane0(struct kunit *test)
+{
+ /*
+ * PCIe path from host to device using lane 0.
+ *
+ * [Host]
+ * 3 |: 4
+ * 1 |: 2
+ * [Device]
+ */
+ static const struct hop_expectation test_data[] = {
+ {
+ .route = 0x0,
+ .in_port = 9,
+ .in_type = TB_TYPE_PCIE_DOWN,
+ .out_port = 3,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x3,
+ .in_port = 1,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 9,
+ .out_type = TB_TYPE_PCIE_UP,
+ },
+ };
+ struct tb_switch *host, *dev;
+ struct tb_port *down, *up;
+ struct tb_path *path;
+ int i;
+
+ host = alloc_host(test);
+ dev = alloc_dev_default(test, host, 0x3, false);
+
+ down = &host->ports[9];
+ up = &dev->ports[9];
+
+ path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
+ KUNIT_ASSERT_TRUE(test, path != NULL);
+ KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+ for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+ const struct tb_port *in_port, *out_port;
+
+ in_port = path->hops[i].in_port;
+ out_port = path->hops[i].out_port;
+
+ KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
+ test_data[i].in_type);
+ KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
+ test_data[i].out_type);
+ }
+ tb_path_free(path);
+}
+
+static void tb_test_path_not_bonded_lane1(struct kunit *test)
+{
+ /*
+ * DP Video path from host to device using lane 1. Paths like
+ * these are only used with Thunderbolt 1 devices where lane
+ * bonding is not possible. USB4 specifically does not allow
+ * paths like this (you either use lane 0 where lane 1 is
+ * disabled or both lanes are bonded).
+ *
+ * [Host]
+ * 1 :| 2
+ * 1 :| 2
+ * [Device]
+ */
+ static const struct hop_expectation test_data[] = {
+ {
+ .route = 0x0,
+ .in_port = 5,
+ .in_type = TB_TYPE_DP_HDMI_IN,
+ .out_port = 2,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x1,
+ .in_port = 2,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 13,
+ .out_type = TB_TYPE_DP_HDMI_OUT,
+ },
+ };
+ struct tb_switch *host, *dev;
+ struct tb_port *in, *out;
+ struct tb_path *path;
+ int i;
+
+ host = alloc_host(test);
+ dev = alloc_dev_default(test, host, 0x1, false);
+
+ in = &host->ports[5];
+ out = &dev->ports[13];
+
+ path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
+ KUNIT_ASSERT_TRUE(test, path != NULL);
+ KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+ for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+ const struct tb_port *in_port, *out_port;
+
+ in_port = path->hops[i].in_port;
+ out_port = path->hops[i].out_port;
+
+ KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
+ test_data[i].in_type);
+ KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
+ test_data[i].out_type);
+ }
+ tb_path_free(path);
+}
+
+static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
+{
+ /*
+ * DP Video path from host to device 3 using lane 1.
+ *
+ * [Host]
+ * 1 :| 2
+ * 1 :| 2
+ * [Device #1]
+ * 7 :| 8
+ * 1 :| 2
+ * [Device #2]
+ * 5 :| 6
+ * 1 :| 2
+ * [Device #3]
+ */
+ static const struct hop_expectation test_data[] = {
+ {
+ .route = 0x0,
+ .in_port = 5,
+ .in_type = TB_TYPE_DP_HDMI_IN,
+ .out_port = 2,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x1,
+ .in_port = 2,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 8,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x701,
+ .in_port = 2,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 6,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x50701,
+ .in_port = 2,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 13,
+ .out_type = TB_TYPE_DP_HDMI_OUT,
+ },
+ };
+ struct tb_switch *host, *dev1, *dev2, *dev3;
+ struct tb_port *in, *out;
+ struct tb_path *path;
+ int i;
+
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x1, false);
+ dev2 = alloc_dev_default(test, dev1, 0x701, false);
+ dev3 = alloc_dev_default(test, dev2, 0x50701, false);
+
+ in = &host->ports[5];
+ out = &dev3->ports[13];
+
+ path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
+ KUNIT_ASSERT_TRUE(test, path != NULL);
+ KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+ for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+ const struct tb_port *in_port, *out_port;
+
+ in_port = path->hops[i].in_port;
+ out_port = path->hops[i].out_port;
+
+ KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
+ test_data[i].in_type);
+ KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
+ test_data[i].out_type);
+ }
+ tb_path_free(path);
+}
+
+static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
+{
+ /*
+ * DP Video path from device 3 to host using lane 1.
+ *
+ * [Host]
+ * 1 :| 2
+ * 1 :| 2
+ * [Device #1]
+ * 7 :| 8
+ * 1 :| 2
+ * [Device #2]
+ * 5 :| 6
+ * 1 :| 2
+ * [Device #3]
+ */
+ static const struct hop_expectation test_data[] = {
+ {
+ .route = 0x50701,
+ .in_port = 13,
+ .in_type = TB_TYPE_DP_HDMI_IN,
+ .out_port = 2,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x701,
+ .in_port = 6,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 2,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x1,
+ .in_port = 8,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 2,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x0,
+ .in_port = 2,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 5,
+ .out_type = TB_TYPE_DP_HDMI_IN,
+ },
+ };
+ struct tb_switch *host, *dev1, *dev2, *dev3;
+ struct tb_port *in, *out;
+ struct tb_path *path;
+ int i;
+
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x1, false);
+ dev2 = alloc_dev_default(test, dev1, 0x701, false);
+ dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);
+
+ in = &dev3->ports[13];
+ out = &host->ports[5];
+
+ path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
+ KUNIT_ASSERT_TRUE(test, path != NULL);
+ KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+ for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+ const struct tb_port *in_port, *out_port;
+
+ in_port = path->hops[i].in_port;
+ out_port = path->hops[i].out_port;
+
+ KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
+ test_data[i].in_type);
+ KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
+ test_data[i].out_type);
+ }
+ tb_path_free(path);
+}
+
+static void tb_test_path_mixed_chain(struct kunit *test)
+{
+ /*
+	 * DP Video path from host to device 4 where the first and last
+	 * links are bonded.
+ *
+ * [Host]
+ * 1 |
+ * 1 |
+ * [Device #1]
+ * 7 :| 8
+ * 1 :| 2
+ * [Device #2]
+ * 5 :| 6
+ * 1 :| 2
+ * [Device #3]
+ * 3 |
+ * 1 |
+ * [Device #4]
+ */
+ static const struct hop_expectation test_data[] = {
+ {
+ .route = 0x0,
+ .in_port = 5,
+ .in_type = TB_TYPE_DP_HDMI_IN,
+ .out_port = 1,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x1,
+ .in_port = 1,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 8,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x701,
+ .in_port = 2,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 6,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x50701,
+ .in_port = 2,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 3,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x3050701,
+ .in_port = 1,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 13,
+ .out_type = TB_TYPE_DP_HDMI_OUT,
+ },
+ };
+ struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
+ struct tb_port *in, *out;
+ struct tb_path *path;
+ int i;
+
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x1, true);
+ dev2 = alloc_dev_default(test, dev1, 0x701, false);
+ dev3 = alloc_dev_default(test, dev2, 0x50701, false);
+ dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
+
+ in = &host->ports[5];
+ out = &dev4->ports[13];
+
+ path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
+ KUNIT_ASSERT_TRUE(test, path != NULL);
+ KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+ for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+ const struct tb_port *in_port, *out_port;
+
+ in_port = path->hops[i].in_port;
+ out_port = path->hops[i].out_port;
+
+ KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
+ test_data[i].in_type);
+ KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
+ test_data[i].out_type);
+ }
+ tb_path_free(path);
+}
+
+static void tb_test_path_mixed_chain_reverse(struct kunit *test)
+{
+ /*
+	 * DP Video path from device 4 to host where the first and last
+	 * links are bonded.
+ *
+ * [Host]
+ * 1 |
+ * 1 |
+ * [Device #1]
+ * 7 :| 8
+ * 1 :| 2
+ * [Device #2]
+ * 5 :| 6
+ * 1 :| 2
+ * [Device #3]
+ * 3 |
+ * 1 |
+ * [Device #4]
+ */
+ static const struct hop_expectation test_data[] = {
+ {
+ .route = 0x3050701,
+ .in_port = 13,
+ .in_type = TB_TYPE_DP_HDMI_OUT,
+ .out_port = 1,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x50701,
+ .in_port = 3,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 2,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x701,
+ .in_port = 6,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 2,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x1,
+ .in_port = 8,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 1,
+ .out_type = TB_TYPE_PORT,
+ },
+ {
+ .route = 0x0,
+ .in_port = 1,
+ .in_type = TB_TYPE_PORT,
+ .out_port = 5,
+ .out_type = TB_TYPE_DP_HDMI_IN,
+ },
+ };
+ struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
+ struct tb_port *in, *out;
+ struct tb_path *path;
+ int i;
+
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x1, true);
+ dev2 = alloc_dev_default(test, dev1, 0x701, false);
+ dev3 = alloc_dev_default(test, dev2, 0x50701, false);
+ dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
+
+ in = &dev4->ports[13];
+ out = &host->ports[5];
+
+ path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
+ KUNIT_ASSERT_TRUE(test, path != NULL);
+ KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
+ for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+ const struct tb_port *in_port, *out_port;
+
+ in_port = path->hops[i].in_port;
+ out_port = path->hops[i].out_port;
+
+ KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
+ test_data[i].in_type);
+ KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
+ KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
+ KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
+ test_data[i].out_type);
+ }
+ tb_path_free(path);
+}
+
+static void tb_test_tunnel_pcie(struct kunit *test)
+{
+ struct tb_switch *host, *dev1, *dev2;
+ struct tb_tunnel *tunnel1, *tunnel2;
+ struct tb_port *down, *up;
+
+ /*
+ * Create PCIe tunnel between host and two devices.
+ *
+ * [Host]
+ * 1 |
+ * 1 |
+ * [Device #1]
+ * 5 |
+ * 1 |
+ * [Device #2]
+ */
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x1, true);
+ dev2 = alloc_dev_default(test, dev1, 0x501, true);
+
+ down = &host->ports[8];
+ up = &dev1->ports[9];
+ tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
+ KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
+ KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
+ KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
+ KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
+ KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
+
+ down = &dev1->ports[10];
+ up = &dev2->ports[9];
+ tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
+ KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
+ KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
+ KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
+ KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
+ KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
+
+ tb_tunnel_free(tunnel2);
+ tb_tunnel_free(tunnel1);
+}
+
+static void tb_test_tunnel_dp(struct kunit *test)
+{
+ struct tb_switch *host, *dev;
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Create DP tunnel between Host and Device
+ *
+ * [Host]
+ * 1 |
+ * 1 |
+ * [Device]
+ */
+ host = alloc_host(test);
+ dev = alloc_dev_default(test, host, 0x3, true);
+
+ in = &host->ports[5];
+ out = &dev->ports[13];
+
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+ KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+ KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
+ KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
+ KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
+ tb_tunnel_free(tunnel);
+}
+
+static void tb_test_tunnel_dp_chain(struct kunit *test)
+{
+ struct tb_switch *host, *dev1, *dev4;
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Create DP tunnel from Host DP IN to Device #4 DP OUT.
+ *
+ * [Host]
+ * 1 |
+ * 1 |
+ * [Device #1]
+ * 3 / | 5 \ 7
+ * 1 / | \ 1
+ * [Device #2] | [Device #4]
+ * | 1
+ * [Device #3]
+ */
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x1, true);
+ alloc_dev_default(test, dev1, 0x301, true);
+ alloc_dev_default(test, dev1, 0x501, true);
+ dev4 = alloc_dev_default(test, dev1, 0x701, true);
+
+ in = &host->ports[5];
+ out = &dev4->ports[14];
+
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+ KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+ KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
+ KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
+ KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
+ tb_tunnel_free(tunnel);
+}
+
+static void tb_test_tunnel_dp_tree(struct kunit *test)
+{
+ struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
+ *
+ * [Host]
+ * 3 |
+ * 1 |
+ * [Device #1]
+ * 3 / | 5 \ 7
+ * 1 / | \ 1
+ * [Device #2] | [Device #4]
+ * | 1
+ * [Device #3]
+ * | 5
+ * | 1
+ * [Device #5]
+ */
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x3, true);
+ dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
+ dev3 = alloc_dev_default(test, dev1, 0x503, true);
+ alloc_dev_default(test, dev1, 0x703, true);
+ dev5 = alloc_dev_default(test, dev3, 0x50503, true);
+
+ in = &dev2->ports[13];
+ out = &dev5->ports[13];
+
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+ KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+ KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
+ KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
+ KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
+ tb_tunnel_free(tunnel);
+}
+
+static void tb_test_tunnel_dp_max_length(struct kunit *test)
+{
+ struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
+ struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Creates DP tunnel from Device #6 to Device #12.
+ *
+ * [Host]
+ * 1 / \ 3
+ * 1 / \ 1
+ * [Device #1] [Device #7]
+ * 3 | | 3
+ * 1 | | 1
+ * [Device #2] [Device #8]
+ * 3 | | 3
+ * 1 | | 1
+ * [Device #3] [Device #9]
+ * 3 | | 3
+ * 1 | | 1
+ * [Device #4] [Device #10]
+ * 3 | | 3
+ * 1 | | 1
+ * [Device #5] [Device #11]
+ * 3 | | 3
+ * 1 | | 1
+ * [Device #6] [Device #12]
+ */
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x1, true);
+ dev2 = alloc_dev_default(test, dev1, 0x301, true);
+ dev3 = alloc_dev_default(test, dev2, 0x30301, true);
+ dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
+ dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
+ dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
+ dev7 = alloc_dev_default(test, host, 0x3, true);
+ dev8 = alloc_dev_default(test, dev7, 0x303, true);
+ dev9 = alloc_dev_default(test, dev8, 0x30303, true);
+ dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
+ dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
+ dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
+
+ in = &dev6->ports[13];
+ out = &dev12->ports[13];
+
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ KUNIT_ASSERT_TRUE(test, tunnel != NULL);
+ KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+ KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
+ /* First hop */
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
+ /* Middle */
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
+ &host->ports[1]);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
+ &host->ports[3]);
+ /* Last */
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
+ KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
+ &host->ports[1]);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
+ &host->ports[3]);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
+ KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
+ &host->ports[3]);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
+ &host->ports[1]);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
+ tb_tunnel_free(tunnel);
+}
+
+static void tb_test_tunnel_usb3(struct kunit *test)
+{
+ struct tb_switch *host, *dev1, *dev2;
+ struct tb_tunnel *tunnel1, *tunnel2;
+ struct tb_port *down, *up;
+
+ /*
+ * Create USB3 tunnel between host and two devices.
+ *
+ * [Host]
+ * 1 |
+ * 1 |
+ * [Device #1]
+ * \ 7
+ * \ 1
+ * [Device #2]
+ */
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x1, true);
+ dev2 = alloc_dev_default(test, dev1, 0x701, true);
+
+ down = &host->ports[12];
+ up = &dev1->ports[16];
+ tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
+ KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
+ KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
+ KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
+ KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
+ KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
+
+ down = &dev1->ports[17];
+ up = &dev2->ports[16];
+ tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
+ KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
+ KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
+ KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
+ KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
+ KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
+ KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
+
+ tb_tunnel_free(tunnel2);
+ tb_tunnel_free(tunnel1);
+}
+
+static void tb_test_tunnel_port_on_path(struct kunit *test)
+{
+ struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
+ struct tb_port *in, *out, *port;
+ struct tb_tunnel *dp_tunnel;
+
+ /*
+ * [Host]
+ * 3 |
+ * 1 |
+ * [Device #1]
+ * 3 / | 5 \ 7
+ * 1 / | \ 1
+ * [Device #2] | [Device #4]
+ * | 1
+ * [Device #3]
+ * | 5
+ * | 1
+ * [Device #5]
+ */
+ host = alloc_host(test);
+ dev1 = alloc_dev_default(test, host, 0x3, true);
+ dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
+ dev3 = alloc_dev_default(test, dev1, 0x503, true);
+ dev4 = alloc_dev_default(test, dev1, 0x703, true);
+ dev5 = alloc_dev_default(test, dev3, 0x50503, true);
+
+ in = &dev2->ports[13];
+ out = &dev5->ports[13];
+
+ dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
+ KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL);
+
+ KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
+ KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
+
+ port = &host->ports[8];
+ KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+ port = &host->ports[3];
+ KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+ port = &dev1->ports[1];
+ KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+ port = &dev1->ports[3];
+ KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+ port = &dev1->ports[5];
+ KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+ port = &dev1->ports[7];
+ KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+ port = &dev3->ports[1];
+ KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+ port = &dev5->ports[1];
+ KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+ port = &dev4->ports[1];
+ KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
+
+ tb_tunnel_free(dp_tunnel);
+}
+
+static struct kunit_case tb_test_cases[] = {
+ KUNIT_CASE(tb_test_path_basic),
+ KUNIT_CASE(tb_test_path_not_connected_walk),
+ KUNIT_CASE(tb_test_path_single_hop_walk),
+ KUNIT_CASE(tb_test_path_daisy_chain_walk),
+ KUNIT_CASE(tb_test_path_simple_tree_walk),
+ KUNIT_CASE(tb_test_path_complex_tree_walk),
+ KUNIT_CASE(tb_test_path_max_length_walk),
+ KUNIT_CASE(tb_test_path_not_connected),
+ KUNIT_CASE(tb_test_path_not_bonded_lane0),
+ KUNIT_CASE(tb_test_path_not_bonded_lane1),
+ KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
+ KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
+ KUNIT_CASE(tb_test_path_mixed_chain),
+ KUNIT_CASE(tb_test_path_mixed_chain_reverse),
+ KUNIT_CASE(tb_test_tunnel_pcie),
+ KUNIT_CASE(tb_test_tunnel_dp),
+ KUNIT_CASE(tb_test_tunnel_dp_chain),
+ KUNIT_CASE(tb_test_tunnel_dp_tree),
+ KUNIT_CASE(tb_test_tunnel_dp_max_length),
+ KUNIT_CASE(tb_test_tunnel_port_on_path),
+ KUNIT_CASE(tb_test_tunnel_usb3),
+ { }
+};
+
+static struct kunit_suite tb_test_suite = {
+ .name = "thunderbolt",
+ .test_cases = tb_test_cases,
+};
+
+static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };
+
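+/*
+ * CONFIG_USB4_KUNIT_TEST is a bool, so these tests are built into the
+ * thunderbolt module itself. The helpers below register and unregister
+ * the suite from the driver's own init and exit code instead of a
+ * separate KUnit test module.
+ */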
+int tb_test_init(void)
+{
+ return __kunit_test_suites_init(tb_test_suites);
+}
+
+void tb_test_exit(void)
+{
+ return __kunit_test_suites_exit(tb_test_suites);
+}
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
new file mode 100644
index 000000000..039c42a06
--- /dev/null
+++ b/drivers/thunderbolt/tmu.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt Time Management Unit (TMU) support
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/delay.h>
+
+#include "tb.h"
+
+static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
+{
+ bool root_switch = !tb_route(sw);
+
+ switch (sw->tmu.rate) {
+ case TB_SWITCH_TMU_RATE_OFF:
+ return "off";
+
+ case TB_SWITCH_TMU_RATE_HIFI:
+ /* Root switch does not have upstream directionality */
+ if (root_switch)
+ return "HiFi";
+ if (sw->tmu.unidirectional)
+ return "uni-directional, HiFi";
+ return "bi-directional, HiFi";
+
+ case TB_SWITCH_TMU_RATE_NORMAL:
+ if (root_switch)
+ return "normal";
+ return "uni-directional, normal";
+
+ default:
+ return "unknown";
+ }
+}
+
+static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+ if (ret)
+ return false;
+
+ return !!(val & TMU_RTR_CS_0_UCAP);
+}
+
+static int tb_switch_tmu_rate_read(struct tb_switch *sw)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_3, 1);
+ if (ret)
+ return ret;
+
+ val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
+ return val;
+}
+
+static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_3, 1);
+ if (ret)
+ return ret;
+
+ val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
+ val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_3, 1);
+}
+
+static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
+ u32 value)
+{
+ u32 data;
+ int ret;
+
+ ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
+ if (ret)
+ return ret;
+
+ data &= ~mask;
+ data |= value;
+
+ return tb_port_write(port, &data, TB_CFG_PORT,
+ port->cap_tmu + offset, 1);
+}
+
+static int tb_port_tmu_set_unidirectional(struct tb_port *port,
+ bool unidirectional)
+{
+ u32 val;
+
+ if (!port->sw->tmu.has_ucap)
+ return 0;
+
+ val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
+ return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
+}
+
+static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
+{
+ return tb_port_tmu_set_unidirectional(port, false);
+}
+
+static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_tmu + TMU_ADP_CS_3, 1);
+ if (ret)
+ return false;
+
+ return val & TMU_ADP_CS_3_UDM;
+}
+
+static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+ if (ret)
+ return ret;
+
+ if (set)
+ val |= TMU_RTR_CS_0_TD;
+ else
+ val &= ~TMU_RTR_CS_0_TD;
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->tmu.cap + TMU_RTR_CS_0, 1);
+}
+
+/**
+ * tb_switch_tmu_init() - Initialize switch TMU structures
+ * @sw: Switch to initialize
+ *
+ * This function must be called before other TMU related functions to
+ * make sure the internal structures are filled in correctly. Does not
+ * change any hardware configuration.
+ */
+int tb_switch_tmu_init(struct tb_switch *sw)
+{
+ struct tb_port *port;
+ int ret;
+
+ if (tb_switch_is_icm(sw))
+ return 0;
+
+ ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
+ if (ret > 0)
+ sw->tmu.cap = ret;
+
+ tb_switch_for_each_port(sw, port) {
+ int cap;
+
+ cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
+ if (cap > 0)
+ port->cap_tmu = cap;
+ }
+
+ ret = tb_switch_tmu_rate_read(sw);
+ if (ret < 0)
+ return ret;
+
+ sw->tmu.rate = ret;
+
+ sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
+ if (sw->tmu.has_ucap) {
+ tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
+
+ if (tb_route(sw)) {
+ struct tb_port *up = tb_upstream_port(sw);
+
+ sw->tmu.unidirectional =
+ tb_port_tmu_is_unidirectional(up);
+ }
+ } else {
+ sw->tmu.unidirectional = false;
+ }
+
+ tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
+ return 0;
+}
+
+/**
+ * tb_switch_tmu_post_time() - Update switch local time
+ * @sw: Switch whose time to update
+ *
+ * Updates switch local time using the time posting procedure.
+ */
+int tb_switch_tmu_post_time(struct tb_switch *sw)
+{
+ unsigned int post_local_time_offset, post_time_offset;
+ struct tb_switch *root_switch = sw->tb->root_switch;
+ u64 hi, mid, lo, local_time, post_time;
+ int i, ret, retries = 100;
+ u32 gm_local_time[3];
+
+ if (!tb_route(sw))
+ return 0;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+	/* Need to be able to read the grandmaster time */
+ if (!root_switch->tmu.cap)
+ return 0;
+
+ ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
+ root_switch->tmu.cap + TMU_RTR_CS_1,
+ ARRAY_SIZE(gm_local_time));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
+ tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
+ gm_local_time[i]);
+
+ /* Convert to nanoseconds (drop fractional part) */
+ hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
+ mid = gm_local_time[1];
+ lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
+ TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
+ local_time = hi << 48 | mid << 16 | lo;
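+	/*
+	 * local_time now holds the grandmaster time in nanoseconds: the
+	 * bits masked out of the first register form the lowest 16 bits,
+	 * the second register provides the middle 32 bits and the masked
+	 * part of the third one the topmost 16 bits.
+	 */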
+
+ /* Tell the switch that time sync is disrupted for a while */
+ ret = tb_switch_tmu_set_time_disruption(sw, true);
+ if (ret)
+ return ret;
+
+ post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
+ post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
+
+ /*
+ * Write the Grandmaster time to the Post Local Time registers
+ * of the new switch.
+ */
+ ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
+ post_local_time_offset, 2);
+ if (ret)
+ goto out;
+
+ /*
+	 * Have the new switch update its local time by writing 1 to the
+	 * post_time registers, then poll until they read back 0, which
+	 * means the time has converged properly.
+ */
+ post_time = 1;
+
+ ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
+ if (ret)
+ goto out;
+
+ do {
+ usleep_range(5, 10);
+ ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
+ post_time_offset, 2);
+ if (ret)
+ goto out;
+ } while (--retries && post_time);
+
+ if (!retries) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);
+
+out:
+ tb_switch_tmu_set_time_disruption(sw, false);
+ return ret;
+}
+
+/**
+ * tb_switch_tmu_disable() - Disable TMU of a switch
+ * @sw: Switch whose TMU to disable
+ *
+ * Turns off TMU of @sw if it is enabled. If not enabled, does nothing.
+ */
+int tb_switch_tmu_disable(struct tb_switch *sw)
+{
+ int ret;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ /* Already disabled? */
+ if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
+ return 0;
+
+ if (sw->tmu.unidirectional) {
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+
+ /* The switch may be unplugged so ignore any errors */
+ tb_port_tmu_unidirectional_disable(up);
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ return ret;
+ }
+
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+ sw->tmu.unidirectional = false;
+ sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;
+
+ tb_sw_dbg(sw, "TMU: disabled\n");
+ return 0;
+}
+
+/**
+ * tb_switch_tmu_enable() - Enable TMU on a switch
+ * @sw: Switch whose TMU to enable
+ *
+ * Enables TMU of the switch in bi-directional, HiFi mode. In this mode
+ * all tunneling should work.
+ */
+int tb_switch_tmu_enable(struct tb_switch *sw)
+{
+ int ret;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ if (tb_switch_tmu_is_enabled(sw))
+ return 0;
+
+ ret = tb_switch_tmu_set_time_disruption(sw, true);
+ if (ret)
+ return ret;
+
+ /* Change mode to bi-directional */
+ if (tb_route(sw) && sw->tmu.unidirectional) {
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ return ret;
+
+ ret = tb_port_tmu_unidirectional_disable(up);
+ if (ret)
+ return ret;
+ } else {
+ ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ return ret;
+ }
+
+ sw->tmu.unidirectional = false;
+ sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;
+ tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
+
+ return tb_switch_tmu_set_time_disruption(sw, false);
+}
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
new file mode 100644
index 000000000..011ab5fed
--- /dev/null
+++ b/drivers/thunderbolt/tunnel.c
@@ -0,0 +1,1460 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt driver - Tunneling support
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+
+#include "tunnel.h"
+#include "tb.h"
+
+/* PCIe adapters always use HopID 8 for both directions */
+#define TB_PCI_HOPID 8
+
+#define TB_PCI_PATH_DOWN 0
+#define TB_PCI_PATH_UP 1
+
+/* USB3 adapters always use HopID 8 for both directions */
+#define TB_USB3_HOPID 8
+
+#define TB_USB3_PATH_DOWN 0
+#define TB_USB3_PATH_UP 1
+
+/* DP adapters use HopID 8 for AUX and 9 for Video */
+#define TB_DP_AUX_TX_HOPID 8
+#define TB_DP_AUX_RX_HOPID 8
+#define TB_DP_VIDEO_HOPID 9
+
+#define TB_DP_VIDEO_PATH_OUT 0
+#define TB_DP_AUX_PATH_OUT 1
+#define TB_DP_AUX_PATH_IN 2
+
+#define TB_DMA_PATH_OUT 0
+#define TB_DMA_PATH_IN 1
+
+static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
+
+#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
+ do { \
+ struct tb_tunnel *__tunnel = (tunnel); \
+ level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt, \
+ tb_route(__tunnel->src_port->sw), \
+ __tunnel->src_port->port, \
+ tb_route(__tunnel->dst_port->sw), \
+ __tunnel->dst_port->port, \
+ tb_tunnel_names[__tunnel->type], \
+ ## arg); \
+ } while (0)
+
+#define tb_tunnel_WARN(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
+#define tb_tunnel_warn(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
+#define tb_tunnel_info(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
+#define tb_tunnel_dbg(tunnel, fmt, arg...) \
+ __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
+
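+/*
+ * Allocate a tunnel with room for @npaths paths. The paths themselves
+ * are created and filled in by the callers; everything is released
+ * again with tb_tunnel_free().
+ */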
+static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
+ enum tb_tunnel_type type)
+{
+ struct tb_tunnel *tunnel;
+
+ tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
+ if (!tunnel->paths) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&tunnel->list);
+ tunnel->tb = tb;
+ tunnel->npaths = npaths;
+ tunnel->type = type;
+
+ return tunnel;
+}
+
+static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
+{
+ int res;
+
+ res = tb_pci_port_enable(tunnel->src_port, activate);
+ if (res)
+ return res;
+
+ if (tb_port_is_pcie_up(tunnel->dst_port))
+ return tb_pci_port_enable(tunnel->dst_port, activate);
+
+ return 0;
+}
+
+static int tb_initial_credits(const struct tb_switch *sw)
+{
+ /* If the path is complete sw is not NULL */
+ if (sw) {
+ /* More credits for faster link */
+ switch (sw->link_speed * sw->link_width) {
+ case 40:
+ return 32;
+ case 20:
+ return 24;
+ }
+ }
+
+ return 16;
+}
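+
+/*
+ * Illustrative example (assuming link_speed is the per-lane speed in
+ * Gb/s and link_width the number of bonded lanes): a bonded 20 Gb/s
+ * link gives 20 * 2 = 40 and gets 32 initial credits, a single
+ * 20 Gb/s lane gives 20 and gets 24, and anything else (including an
+ * incomplete path where @sw is NULL) falls back to 16.
+ */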
+
+static void tb_pci_init_path(struct tb_path *path)
+{
+ path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+ path->priority = 3;
+ path->weight = 1;
+ path->drop_packages = 0;
+ path->nfc_credits = 0;
+ path->hops[0].initial_credits = 7;
+ if (path->path_length > 1)
+ path->hops[1].initial_credits =
+ tb_initial_credits(path->hops[1].in_port->sw);
+}
+
+/**
+ * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
+ * @tb: Pointer to the domain structure
+ * @down: PCIe downstream adapter
+ *
+ * If @down adapter is active, follows the tunnel to the PCIe upstream
+ * adapter and back. Returns the discovered tunnel or %NULL if there was
+ * no tunnel.
+ */
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ if (!tb_pci_port_is_enabled(down))
+ return NULL;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_pci_activate;
+ tunnel->src_port = down;
+
+ /*
+ * Discover both paths even if they are not complete. We will
+ * clean them up by calling tb_tunnel_deactivate() below in that
+ * case.
+ */
+ path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
+ &tunnel->dst_port, "PCIe Up");
+ if (!path) {
+ /* Just disable the downstream port */
+ tb_pci_port_enable(down, false);
+ goto err_free;
+ }
+ tunnel->paths[TB_PCI_PATH_UP] = path;
+ tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);
+
+ path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
+ "PCIe Down");
+ if (!path)
+ goto err_deactivate;
+ tunnel->paths[TB_PCI_PATH_DOWN] = path;
+ tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);
+
+ /* Validate that the tunnel is complete */
+ if (!tb_port_is_pcie_up(tunnel->dst_port)) {
+ tb_port_warn(tunnel->dst_port,
+ "path does not end on a PCIe adapter, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (down != tunnel->src_port) {
+ tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
+ tb_tunnel_warn(tunnel,
+ "tunnel is not fully activated, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ tb_tunnel_dbg(tunnel, "discovered\n");
+ return tunnel;
+
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
+err_free:
+ tb_tunnel_free(tunnel);
+
+ return NULL;
+}
+
+/**
+ * tb_tunnel_alloc_pci() - allocate a pci tunnel
+ * @tb: Pointer to the domain structure
+ * @up: PCIe upstream adapter port
+ * @down: PCIe downstream adapter port
+ *
+ * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
+ * TB_TYPE_PCIE_DOWN.
+ *
+ * Return: Returns a tb_tunnel on success or NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
+ struct tb_port *down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_pci_activate;
+ tunnel->src_port = down;
+ tunnel->dst_port = up;
+
+ path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
+ "PCIe Down");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_pci_init_path(path);
+ tunnel->paths[TB_PCI_PATH_DOWN] = path;
+
+ path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
+ "PCIe Up");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_pci_init_path(path);
+ tunnel->paths[TB_PCI_PATH_UP] = path;
+
+ return tunnel;
+}
+
+static bool tb_dp_is_usb4(const struct tb_switch *sw)
+{
+ /* Titan Ridge DP adapters need the same treatment as USB4 */
+ return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
+static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
+{
+ int timeout = 10;
+ u32 val;
+ int ret;
+
+ /* Both ends need to support this */
+ if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
+ return 0;
+
+ ret = tb_port_read(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+
+ val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
+
+ ret = tb_port_write(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+
+ do {
+ ret = tb_port_read(out, &val, TB_CFG_PORT,
+ out->cap_adap + DP_STATUS_CTRL, 1);
+ if (ret)
+ return ret;
+ if (!(val & DP_STATUS_CTRL_CMHS))
+ return 0;
+ usleep_range(10, 100);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
+static inline u32 tb_dp_cap_get_rate(u32 val)
+{
+ u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
+
+ switch (rate) {
+ case DP_COMMON_CAP_RATE_RBR:
+ return 1620;
+ case DP_COMMON_CAP_RATE_HBR:
+ return 2700;
+ case DP_COMMON_CAP_RATE_HBR2:
+ return 5400;
+ case DP_COMMON_CAP_RATE_HBR3:
+ return 8100;
+ default:
+ return 0;
+ }
+}
+
+static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
+{
+ val &= ~DP_COMMON_CAP_RATE_MASK;
+ switch (rate) {
+ default:
+ WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
+ fallthrough;
+ case 1620:
+ val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 2700:
+ val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 5400:
+ val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ case 8100:
+ val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
+ break;
+ }
+ return val;
+}
+
+static inline u32 tb_dp_cap_get_lanes(u32 val)
+{
+ u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
+
+ switch (lanes) {
+ case DP_COMMON_CAP_1_LANE:
+ return 1;
+ case DP_COMMON_CAP_2_LANES:
+ return 2;
+ case DP_COMMON_CAP_4_LANES:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
+{
+ val &= ~DP_COMMON_CAP_LANES_MASK;
+ switch (lanes) {
+ default:
+ WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
+ lanes);
+ fallthrough;
+ case 1:
+ val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ case 2:
+ val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ case 4:
+ val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
+ break;
+ }
+ return val;
+}
+
+static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
+{
+ /* Tunneling removes the DP 8b/10b encoding */
+ return rate * lanes * 8 / 10;
+}
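+
+/*
+ * Worked example: for HBR2 x4 the raw link carries
+ * 5400 Mb/s * 4 lanes = 21600 Mb/s; dropping the 8b/10b overhead
+ * leaves 21600 * 8 / 10 = 17280 Mb/s, matching the corresponding
+ * entry in the dp_bw[] table below.
+ */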
+
+static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
+ u32 out_rate, u32 out_lanes, u32 *new_rate,
+ u32 *new_lanes)
+{
+ static const u32 dp_bw[][2] = {
+ /* Mb/s, lanes */
+ { 8100, 4 }, /* 25920 Mb/s */
+ { 5400, 4 }, /* 17280 Mb/s */
+ { 8100, 2 }, /* 12960 Mb/s */
+ { 2700, 4 }, /* 8640 Mb/s */
+ { 5400, 2 }, /* 8640 Mb/s */
+ { 8100, 1 }, /* 6480 Mb/s */
+ { 1620, 4 }, /* 5184 Mb/s */
+ { 5400, 1 }, /* 4320 Mb/s */
+ { 2700, 2 }, /* 4320 Mb/s */
+ { 1620, 2 }, /* 2592 Mb/s */
+ { 2700, 1 }, /* 2160 Mb/s */
+ { 1620, 1 }, /* 1296 Mb/s */
+ };
+ unsigned int i;
+
+ /*
+ * Find a combination that can fit into max_bw and does not
+ * exceed the maximum rate and lanes supported by the DP OUT and
+ * DP IN adapters.
+ */
+ for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
+ if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
+ continue;
+
+ if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
+ continue;
+
+ if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
+ *new_rate = dp_bw[i][0];
+ *new_lanes = dp_bw[i][1];
+ return 0;
+ }
+ }
+
+ return -ENOSR;
+}
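+
+/*
+ * Worked example: with max_bw = 9000 Mb/s and both adapters capable of
+ * at most HBR2 x4 (5400 Mb/s, 4 lanes), the walk over dp_bw[] skips the
+ * 8100 Mb/s entries (rate above what the adapters support), passes over
+ * 5400 x4 (17280 Mb/s > 9000) and settles on 2700 x4, i.e. 8640 Mb/s.
+ */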
+
+static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
+{
+ u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
+ struct tb_port *out = tunnel->dst_port;
+ struct tb_port *in = tunnel->src_port;
+ int ret, max_bw;
+
+ /*
+ * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
+ * newer generation hardware.
+ */
+ if (in->sw->generation < 2 || out->sw->generation < 2)
+ return 0;
+
+ /*
+ * Perform connection manager handshake between IN and OUT ports
+ * before capabilities exchange can take place.
+ */
+ ret = tb_dp_cm_handshake(in, out);
+ if (ret)
+ return ret;
+
+ /* Read both DP_LOCAL_CAP registers */
+ ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
+ in->cap_adap + DP_LOCAL_CAP, 1);
+ if (ret)
+ return ret;
+
+ ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
+ out->cap_adap + DP_LOCAL_CAP, 1);
+ if (ret)
+ return ret;
+
+ /* Write IN local caps to OUT remote caps */
+ ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
+ out->cap_adap + DP_REMOTE_CAP, 1);
+ if (ret)
+ return ret;
+
+ in_rate = tb_dp_cap_get_rate(in_dp_cap);
+ in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
+ tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
+
+ /*
+ * If the tunnel bandwidth is limited (max_bw is set) then see
+ * if we need to reduce bandwidth to fit there.
+ */
+ out_rate = tb_dp_cap_get_rate(out_dp_cap);
+ out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
+ bw = tb_dp_bandwidth(out_rate, out_lanes);
+ tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ out_rate, out_lanes, bw);
+
+ if (in->sw->config.depth < out->sw->config.depth)
+ max_bw = tunnel->max_down;
+ else
+ max_bw = tunnel->max_up;
+
+ if (max_bw && bw > max_bw) {
+ u32 new_rate, new_lanes, new_bw;
+
+ ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
+ out_rate, out_lanes, &new_rate,
+ &new_lanes);
+ if (ret) {
+ tb_port_info(out, "not enough bandwidth for DP tunnel\n");
+ return ret;
+ }
+
+ new_bw = tb_dp_bandwidth(new_rate, new_lanes);
+ tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
+ new_rate, new_lanes, new_bw);
+
+ /*
+ * Set new rate and number of lanes before writing it to
+ * the IN port remote caps.
+ */
+ out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
+ out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
+ }
+
+ return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
+ in->cap_adap + DP_REMOTE_CAP, 1);
+}
+
+static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
+{
+ int ret;
+
+ if (active) {
+ struct tb_path **paths;
+ int last;
+
+ paths = tunnel->paths;
+ last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
+
+ tb_dp_port_set_hops(tunnel->src_port,
+ paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
+ paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
+ paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
+
+ tb_dp_port_set_hops(tunnel->dst_port,
+ paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
+ paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
+ paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
+ } else {
+ tb_dp_port_hpd_clear(tunnel->src_port);
+ tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
+ if (tb_port_is_dpout(tunnel->dst_port))
+ tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
+ }
+
+ ret = tb_dp_port_enable(tunnel->src_port, active);
+ if (ret)
+ return ret;
+
+ if (tb_port_is_dpout(tunnel->dst_port))
+ return tb_dp_port_enable(tunnel->dst_port, active);
+
+ return 0;
+}
+
+static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
+ int *consumed_down)
+{
+ struct tb_port *in = tunnel->src_port;
+ const struct tb_switch *sw = in->sw;
+ u32 val, rate = 0, lanes = 0;
+ int ret;
+
+ if (tb_dp_is_usb4(sw)) {
+ int timeout = 20;
+
+ /*
+ * Wait for DPRX done. Normally it should already be set
+ * for an active tunnel.
+ */
+ do {
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_COMMON_CAP, 1);
+ if (ret)
+ return ret;
+
+ if (val & DP_COMMON_CAP_DPRX_DONE) {
+ rate = tb_dp_cap_get_rate(val);
+ lanes = tb_dp_cap_get_lanes(val);
+ break;
+ }
+ msleep(250);
+ } while (timeout--);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+ } else if (sw->generation >= 2) {
+ /*
+ * Read from the copied remote cap so that we take into
+ * account if capabilities were reduced during exchange.
+ */
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_REMOTE_CAP, 1);
+ if (ret)
+ return ret;
+
+ rate = tb_dp_cap_get_rate(val);
+ lanes = tb_dp_cap_get_lanes(val);
+ } else {
+ /* No bandwidth management for legacy devices */
+ *consumed_up = 0;
+ *consumed_down = 0;
+ return 0;
+ }
+
+ if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
+ *consumed_up = 0;
+ *consumed_down = tb_dp_bandwidth(rate, lanes);
+ } else {
+ *consumed_up = tb_dp_bandwidth(rate, lanes);
+ *consumed_down = 0;
+ }
+
+ return 0;
+}
+
+static void tb_dp_init_aux_path(struct tb_path *path)
+{
+ int i;
+
+ path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+ path->priority = 2;
+ path->weight = 1;
+
+ for (i = 0; i < path->path_length; i++)
+ path->hops[i].initial_credits = 1;
+}
+
+static void tb_dp_init_video_path(struct tb_path *path, bool discover)
+{
+ u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;
+
+ path->egress_fc_enable = TB_PATH_NONE;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_NONE;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+ path->priority = 1;
+ path->weight = 1;
+
+ if (discover) {
+ path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
+ } else {
+ u32 max_credits;
+
+ max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+ ADP_CS_4_TOTAL_BUFFERS_SHIFT;
+ /* Leave some credits for AUX path */
+ path->nfc_credits = min(max_credits - 2, 12U);
+ }
+}
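+
+/*
+ * Worked example: if the DP IN adapter reports 16 total buffers in
+ * ADP_CS_4, a newly allocated video path gets min(16 - 2, 12) = 12
+ * non-flow-controlled credits, leaving a couple of buffers free for
+ * the AUX paths.
+ */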
+
+/**
+ * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
+ * @tb: Pointer to the domain structure
+ * @in: DP in adapter
+ *
+ * If @in adapter is active, follows the tunnel to the DP out adapter
+ * and back. Returns the discovered tunnel or %NULL if there was no
+ * tunnel.
+ */
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_port *port;
+ struct tb_path *path;
+
+ if (!tb_dp_port_is_enabled(in))
+ return NULL;
+
+ tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->init = tb_dp_xchg_caps;
+ tunnel->activate = tb_dp_activate;
+ tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
+ tunnel->src_port = in;
+
+ path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
+ &tunnel->dst_port, "Video");
+ if (!path) {
+ /* Just disable the DP IN port */
+ tb_dp_port_enable(in, false);
+ goto err_free;
+ }
+ tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
+ tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);
+
+ path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
+ if (!path)
+ goto err_deactivate;
+ tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
+ tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
+
+ path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
+ &port, "AUX RX");
+ if (!path)
+ goto err_deactivate;
+ tunnel->paths[TB_DP_AUX_PATH_IN] = path;
+ tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
+
+ /* Validate that the tunnel is complete */
+ if (!tb_port_is_dpout(tunnel->dst_port)) {
+ tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (!tb_dp_port_is_enabled(tunnel->dst_port))
+ goto err_deactivate;
+
+ if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
+ goto err_deactivate;
+
+ if (port != tunnel->src_port) {
+ tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ tb_tunnel_dbg(tunnel, "discovered\n");
+ return tunnel;
+
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
+err_free:
+ tb_tunnel_free(tunnel);
+
+ return NULL;
+}
+
+/**
+ * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
+ * @tb: Pointer to the domain structure
+ * @in: DP in adapter port
+ * @out: DP out adapter port
+ * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
+ * if not limited)
+ * @max_down: Maximum available downstream bandwidth for the DP tunnel
+ * (%0 if not limited)
+ *
+ * Allocates a tunnel between @in and @out that is capable of tunneling
+ * Display Port traffic.
+ *
+ * Return: Returns a tb_tunnel on success or NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
+ struct tb_port *out, int max_up,
+ int max_down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path **paths;
+ struct tb_path *path;
+
+ if (WARN_ON(!in->cap_adap || !out->cap_adap))
+ return NULL;
+
+ tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->init = tb_dp_xchg_caps;
+ tunnel->activate = tb_dp_activate;
+ tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
+ tunnel->src_port = in;
+ tunnel->dst_port = out;
+ tunnel->max_up = max_up;
+ tunnel->max_down = max_down;
+
+ paths = tunnel->paths;
+
+ path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
+ 1, "Video");
+ if (!path)
+ goto err_free;
+ tb_dp_init_video_path(path, false);
+ paths[TB_DP_VIDEO_PATH_OUT] = path;
+
+ path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
+ TB_DP_AUX_TX_HOPID, 1, "AUX TX");
+ if (!path)
+ goto err_free;
+ tb_dp_init_aux_path(path);
+ paths[TB_DP_AUX_PATH_OUT] = path;
+
+ path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
+ TB_DP_AUX_RX_HOPID, 1, "AUX RX");
+ if (!path)
+ goto err_free;
+ tb_dp_init_aux_path(path);
+ paths[TB_DP_AUX_PATH_IN] = path;
+
+ return tunnel;
+
+err_free:
+ tb_tunnel_free(tunnel);
+ return NULL;
+}
+
+static u32 tb_dma_credits(struct tb_port *nhi)
+{
+ u32 max_credits;
+
+ max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+ ADP_CS_4_TOTAL_BUFFERS_SHIFT;
+ return min(max_credits, 13U);
+}
+
+static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
+{
+ struct tb_port *nhi = tunnel->src_port;
+ u32 credits;
+
+ credits = active ? tb_dma_credits(nhi) : 0;
+ return tb_port_set_initial_credits(nhi, credits);
+}
+
+static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
+ unsigned int efc, u32 credits)
+{
+ int i;
+
+ path->egress_fc_enable = efc;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_shared_buffer = isb;
+ path->priority = 5;
+ path->weight = 1;
+ path->clear_fc = true;
+
+ for (i = 0; i < path->path_length; i++)
+ path->hops[i].initial_credits = credits;
+}
+
+/**
+ * tb_tunnel_alloc_dma() - allocate a DMA tunnel
+ * @tb: Pointer to the domain structure
+ * @nhi: Host controller port
+ * @dst: Destination null port which the other domain is connected to
+ * @transmit_ring: NHI ring number used to send packets towards the
+ * other domain
+ * @transmit_path: HopID used for transmitting packets
+ * @receive_ring: NHI ring number used to receive packets from the
+ * other domain
+ * @receive_path: HopID used for receiving packets
+ *
+ * Return: Returns a tb_tunnel on success or NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
+ struct tb_port *dst, int transmit_ring,
+ int transmit_path, int receive_ring,
+ int receive_path)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+ u32 credits;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_dma_activate;
+ tunnel->src_port = nhi;
+ tunnel->dst_port = dst;
+
+ credits = tb_dma_credits(nhi);
+
+ path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
+ credits);
+ tunnel->paths[TB_DMA_PATH_IN] = path;
+
+ path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
+ tunnel->paths[TB_DMA_PATH_OUT] = path;
+
+ return tunnel;
+}
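+
+/*
+ * Illustrative sketch only (the ring and HopID numbers below are
+ * placeholders, not values mandated by the driver): a connection
+ * manager setting up an XDomain link could do roughly
+ *
+ *	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, xd_port, 1, 8, 1, 8);
+ *	if (tunnel && tb_tunnel_activate(tunnel)) {
+ *		tb_tunnel_free(tunnel);
+ *		tunnel = NULL;
+ *	}
+ *
+ * with the real ring numbers and HopIDs coming from the XDomain
+ * protocol negotiation.
+ */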
+
+static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
+{
+ int ret, up_max_rate, down_max_rate;
+
+ ret = usb4_usb3_port_max_link_rate(up);
+ if (ret < 0)
+ return ret;
+ up_max_rate = ret;
+
+ ret = usb4_usb3_port_max_link_rate(down);
+ if (ret < 0)
+ return ret;
+ down_max_rate = ret;
+
+ return min(up_max_rate, down_max_rate);
+}
+
+static int tb_usb3_init(struct tb_tunnel *tunnel)
+{
+ tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
+ tunnel->allocated_up, tunnel->allocated_down);
+
+ return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
+ &tunnel->allocated_up,
+ &tunnel->allocated_down);
+}
+
+static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
+{
+ int res;
+
+ res = tb_usb3_port_enable(tunnel->src_port, activate);
+ if (res)
+ return res;
+
+ if (tb_port_is_usb3_up(tunnel->dst_port))
+ return tb_usb3_port_enable(tunnel->dst_port, activate);
+
+ return 0;
+}
+
+static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
+ int *consumed_up, int *consumed_down)
+{
+ /*
+ * PCIe tunneling affects the USB3 bandwidth so take that
+ * into account here.
+ */
+ *consumed_up = tunnel->allocated_up * (3 + 1) / 3;
+ *consumed_down = tunnel->allocated_down * (3 + 1) / 3;
+ return 0;
+}
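+
+/*
+ * Worked example: with 900 Mb/s allocated in each direction the
+ * reported consumption is 900 * (3 + 1) / 3 = 1200 Mb/s per direction,
+ * i.e. the allocation plus one third to account for PCIe traffic
+ * sharing the link, as noted above.
+ */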
+
+static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
+{
+ int ret;
+
+ ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
+ &tunnel->allocated_up,
+ &tunnel->allocated_down);
+ if (ret)
+ return ret;
+
+ tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
+ tunnel->allocated_up, tunnel->allocated_down);
+ return 0;
+}
+
+static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
+ int *available_up,
+ int *available_down)
+{
+ int ret, max_rate, allocate_up, allocate_down;
+
+ ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
+ if (ret < 0) {
+ tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
+ return;
+ } else if (!ret) {
+ /* Use maximum link rate if the link valid is not set */
+ ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
+ if (ret < 0) {
+ tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
+ return;
+ }
+ }
+
+ /*
+ * 90% of the max rate can be allocated for isochronous
+ * transfers.
+ */
+ max_rate = ret * 90 / 100;
+
+ /* No need to reclaim if already at maximum */
+ if (tunnel->allocated_up >= max_rate &&
+ tunnel->allocated_down >= max_rate)
+ return;
+
+ /* Don't go lower than what is already allocated */
+ allocate_up = min(max_rate, *available_up);
+ if (allocate_up < tunnel->allocated_up)
+ allocate_up = tunnel->allocated_up;
+
+ allocate_down = min(max_rate, *available_down);
+ if (allocate_down < tunnel->allocated_down)
+ allocate_down = tunnel->allocated_down;
+
+ /* If no changes no need to do more */
+ if (allocate_up == tunnel->allocated_up &&
+ allocate_down == tunnel->allocated_down)
+ return;
+
+ ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
+ &allocate_down);
+ if (ret) {
+ tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
+ return;
+ }
+
+ tunnel->allocated_up = allocate_up;
+ *available_up -= tunnel->allocated_up;
+
+ tunnel->allocated_down = allocate_down;
+ *available_down -= tunnel->allocated_down;
+
+ tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
+ tunnel->allocated_up, tunnel->allocated_down);
+}
+
+static void tb_usb3_init_path(struct tb_path *path)
+{
+ path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
+ path->egress_shared_buffer = TB_PATH_NONE;
+ path->ingress_fc_enable = TB_PATH_ALL;
+ path->ingress_shared_buffer = TB_PATH_NONE;
+ path->priority = 3;
+ path->weight = 3;
+ path->drop_packages = 0;
+ path->nfc_credits = 0;
+ path->hops[0].initial_credits = 7;
+ if (path->path_length > 1)
+ path->hops[1].initial_credits =
+ tb_initial_credits(path->hops[1].in_port->sw);
+}
+
+/**
+ * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
+ * @tb: Pointer to the domain structure
+ * @down: USB3 downstream adapter
+ *
+ * If @down adapter is active, follows the tunnel to the USB3 upstream
+ * adapter and back. Returns the discovered tunnel or %NULL if there was
+ * no tunnel.
+ */
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ if (!tb_usb3_port_is_enabled(down))
+ return NULL;
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_usb3_activate;
+ tunnel->src_port = down;
+
+ /*
+ * Discover both paths even if they are not complete. We will
+ * clean them up by calling tb_tunnel_deactivate() below in that
+ * case.
+ */
+ path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
+ &tunnel->dst_port, "USB3 Down");
+ if (!path) {
+ /* Just disable the downstream port */
+ tb_usb3_port_enable(down, false);
+ goto err_free;
+ }
+ tunnel->paths[TB_USB3_PATH_DOWN] = path;
+ tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
+
+ path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
+ "USB3 Up");
+ if (!path)
+ goto err_deactivate;
+ tunnel->paths[TB_USB3_PATH_UP] = path;
+ tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
+
+ /* Validate that the tunnel is complete */
+ if (!tb_port_is_usb3_up(tunnel->dst_port)) {
+ tb_port_warn(tunnel->dst_port,
+ "path does not end on an USB3 adapter, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (down != tunnel->src_port) {
+ tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
+ tb_tunnel_warn(tunnel,
+ "tunnel is not fully activated, cleaning up\n");
+ goto err_deactivate;
+ }
+
+ if (!tb_route(down->sw)) {
+ int ret;
+
+ /*
+ * Read the initial bandwidth allocation for the first
+ * hop tunnel.
+ */
+ ret = usb4_usb3_port_allocated_bandwidth(down,
+ &tunnel->allocated_up, &tunnel->allocated_down);
+ if (ret)
+ goto err_deactivate;
+
+ tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
+ tunnel->allocated_up, tunnel->allocated_down);
+
+ tunnel->init = tb_usb3_init;
+ tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
+ tunnel->release_unused_bandwidth =
+ tb_usb3_release_unused_bandwidth;
+ tunnel->reclaim_available_bandwidth =
+ tb_usb3_reclaim_available_bandwidth;
+ }
+
+ tb_tunnel_dbg(tunnel, "discovered\n");
+ return tunnel;
+
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
+err_free:
+ tb_tunnel_free(tunnel);
+
+ return NULL;
+}
+
+/**
+ * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
+ * @tb: Pointer to the domain structure
+ * @up: USB3 upstream adapter port
+ * @down: USB3 downstream adapter port
+ * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
+ * if not limited).
+ * @max_down: Maximum available downstream bandwidth for the USB3 tunnel
+ * (%0 if not limited).
+ *
+ * Allocate a USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
+ * @TB_TYPE_USB3_DOWN.
+ *
+ * Return: Returns a tb_tunnel on success or %NULL on failure.
+ */
+struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
+ struct tb_port *down, int max_up,
+ int max_down)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+ int max_rate = 0;
+
+ /*
+ * Check that we have enough bandwidth available for the new
+ * USB3 tunnel.
+ */
+ if (max_up > 0 || max_down > 0) {
+ max_rate = tb_usb3_max_link_rate(down, up);
+ if (max_rate < 0)
+ return NULL;
+
+ /* Only 90% can be allocated for USB3 isochronous transfers */
+ max_rate = max_rate * 90 / 100;
+ tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
+ max_rate);
+
+ if (max_rate > max_up || max_rate > max_down) {
+ tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
+ return NULL;
+ }
+ }
+
+ tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
+ if (!tunnel)
+ return NULL;
+
+ tunnel->activate = tb_usb3_activate;
+ tunnel->src_port = down;
+ tunnel->dst_port = up;
+ tunnel->max_up = max_up;
+ tunnel->max_down = max_down;
+
+ path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
+ "USB3 Down");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_usb3_init_path(path);
+ tunnel->paths[TB_USB3_PATH_DOWN] = path;
+
+ path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
+ "USB3 Up");
+ if (!path) {
+ tb_tunnel_free(tunnel);
+ return NULL;
+ }
+ tb_usb3_init_path(path);
+ tunnel->paths[TB_USB3_PATH_UP] = path;
+
+ if (!tb_route(down->sw)) {
+ tunnel->allocated_up = max_rate;
+ tunnel->allocated_down = max_rate;
+
+ tunnel->init = tb_usb3_init;
+ tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
+ tunnel->release_unused_bandwidth =
+ tb_usb3_release_unused_bandwidth;
+ tunnel->reclaim_available_bandwidth =
+ tb_usb3_reclaim_available_bandwidth;
+ }
+
+ return tunnel;
+}
+
+/**
+ * tb_tunnel_free() - free a tunnel
+ * @tunnel: Tunnel to be freed
+ *
+ * Frees a tunnel. The tunnel does not need to be deactivated.
+ */
+void tb_tunnel_free(struct tb_tunnel *tunnel)
+{
+ int i;
+
+ if (!tunnel)
+ return;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i])
+ tb_path_free(tunnel->paths[i]);
+ }
+
+ kfree(tunnel->paths);
+ kfree(tunnel);
+}
+
+/**
+ * tb_tunnel_is_invalid() - Check whether an activated tunnel is still valid
+ * @tunnel: Tunnel to check
+ */
+bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ WARN_ON(!tunnel->paths[i]->activated);
+ if (tb_path_is_invalid(tunnel->paths[i]))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * tb_tunnel_restart() - activate a tunnel after a hardware reset
+ * @tunnel: Tunnel to restart
+ *
+ * Return: 0 on success and negative errno in case of failure
+ */
+int tb_tunnel_restart(struct tb_tunnel *tunnel)
+{
+ int res, i;
+
+ tb_tunnel_dbg(tunnel, "activating\n");
+
+ /*
+ * Make sure all paths are properly disabled before enabling
+ * them again.
+ */
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i]->activated) {
+ tb_path_deactivate(tunnel->paths[i]);
+ tunnel->paths[i]->activated = false;
+ }
+ }
+
+ if (tunnel->init) {
+ res = tunnel->init(tunnel);
+ if (res)
+ return res;
+ }
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ res = tb_path_activate(tunnel->paths[i]);
+ if (res)
+ goto err;
+ }
+
+ if (tunnel->activate) {
+ res = tunnel->activate(tunnel, true);
+ if (res)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ tb_tunnel_warn(tunnel, "activation failed\n");
+ tb_tunnel_deactivate(tunnel);
+ return res;
+}
+
+/**
+ * tb_tunnel_activate() - activate a tunnel
+ * @tunnel: Tunnel to activate
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_tunnel_activate(struct tb_tunnel *tunnel)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i]->activated) {
+ tb_tunnel_WARN(tunnel,
+ "trying to activate an already activated tunnel\n");
+ return -EINVAL;
+ }
+ }
+
+ return tb_tunnel_restart(tunnel);
+}
+
+/**
+ * tb_tunnel_deactivate() - deactivate a tunnel
+ * @tunnel: Tunnel to deactivate
+ */
+void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
+{
+ int i;
+
+ tb_tunnel_dbg(tunnel, "deactivating\n");
+
+ if (tunnel->activate)
+ tunnel->activate(tunnel, false);
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (tunnel->paths[i] && tunnel->paths[i]->activated)
+ tb_path_deactivate(tunnel->paths[i]);
+ }
+}
+
+/**
+ * tb_tunnel_port_on_path() - Does the tunnel go through port
+ * @tunnel: Tunnel to check
+ * @port: Port to check
+ *
+ * Returns true if @tunnel goes through @port (direction does not matter),
+ * false otherwise.
+ */
+bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
+ const struct tb_port *port)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (!tunnel->paths[i])
+ continue;
+
+ if (tb_path_port_on_path(tunnel->paths[i], port))
+ return true;
+ }
+
+ return false;
+}
+
+static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
+{
+ int i;
+
+ for (i = 0; i < tunnel->npaths; i++) {
+ if (!tunnel->paths[i])
+ return false;
+ if (!tunnel->paths[i]->activated)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
+ * @tunnel: Tunnel to check
+ * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
+ * Can be %NULL.
+ * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
+ * Can be %NULL.
+ *
+ * Stores the amount of isochronous bandwidth @tunnel consumes in
+ * @consumed_up and @consumed_down. In case of success returns %0,
+ * negative errno otherwise.
+ */
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
+ int *consumed_down)
+{
+ int up_bw = 0, down_bw = 0;
+
+ if (!tb_tunnel_is_active(tunnel))
+ goto out;
+
+ if (tunnel->consumed_bandwidth) {
+ int ret;
+
+ ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
+ if (ret)
+ return ret;
+
+ tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
+ down_bw);
+ }
+
+out:
+ if (consumed_up)
+ *consumed_up = up_bw;
+ if (consumed_down)
+ *consumed_down = down_bw;
+
+ return 0;
+}
+
+/**
+ * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
+ * @tunnel: Tunnel whose unused bandwidth to release
+ *
+ * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
+ * the moment) this function makes it release all the unused bandwidth.
+ *
+ * Returns %0 in case of success and negative errno otherwise.
+ */
+int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
+{
+ if (!tb_tunnel_is_active(tunnel))
+ return 0;
+
+ if (tunnel->release_unused_bandwidth) {
+ int ret;
+
+ ret = tunnel->release_unused_bandwidth(tunnel);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
+ * @tunnel: Tunnel reclaiming available bandwidth
+ * @available_up: Available upstream bandwidth (in Mb/s)
+ * @available_down: Available downstream bandwidth (in Mb/s)
+ *
+ * Reclaims bandwidth from @available_up and @available_down and updates
+ * the variables accordingly (e.g. decreases both according to what was
+ * reclaimed by the tunnel). If nothing was reclaimed the values are
+ * kept as is.
+ */
+void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
+ int *available_up,
+ int *available_down)
+{
+ if (!tb_tunnel_is_active(tunnel))
+ return;
+
+ if (tunnel->reclaim_available_bandwidth)
+ tunnel->reclaim_available_bandwidth(tunnel, available_up,
+ available_down);
+}
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
new file mode 100644
index 000000000..1d2a64eb0
--- /dev/null
+++ b/drivers/thunderbolt/tunnel.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt driver - Tunneling support
+ *
+ * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2019, Intel Corporation
+ */
+
+#ifndef TB_TUNNEL_H_
+#define TB_TUNNEL_H_
+
+#include "tb.h"
+
+enum tb_tunnel_type {
+ TB_TUNNEL_PCI,
+ TB_TUNNEL_DP,
+ TB_TUNNEL_DMA,
+ TB_TUNNEL_USB3,
+};
+
+/**
+ * struct tb_tunnel - Tunnel between two ports
+ * @tb: Pointer to the domain
+ * @src_port: Source port of the tunnel
+ * @dst_port: Destination port of the tunnel. For discovered incomplete
+ * tunnels this may be %NULL or a null adapter port instead.
+ * @paths: All paths required by the tunnel
+ * @npaths: Number of paths in @paths
+ * @init: Optional tunnel specific initialization
+ * @activate: Optional tunnel specific activation/deactivation
+ * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
+ * @release_unused_bandwidth: Release all unused bandwidth
+ * @reclaim_available_bandwidth: Reclaim back available bandwidth
+ * @list: Tunnels are linked using this field
+ * @type: Type of the tunnel
+ * @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
+ * Only set if the bandwidth needs to be limited.
+ * @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
+ * Only set if the bandwidth needs to be limited.
+ * @allocated_up: Allocated upstream bandwidth (only for USB3)
+ * @allocated_down: Allocated downstream bandwidth (only for USB3)
+ */
+struct tb_tunnel {
+ struct tb *tb;
+ struct tb_port *src_port;
+ struct tb_port *dst_port;
+ struct tb_path **paths;
+ size_t npaths;
+ int (*init)(struct tb_tunnel *tunnel);
+ int (*activate)(struct tb_tunnel *tunnel, bool activate);
+ int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
+ int *consumed_down);
+ int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
+ void (*reclaim_available_bandwidth)(struct tb_tunnel *tunnel,
+ int *available_up,
+ int *available_down);
+ struct list_head list;
+ enum tb_tunnel_type type;
+ int max_up;
+ int max_down;
+ int allocated_up;
+ int allocated_down;
+};
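+
+/*
+ * Typical lifecycle (illustrative sketch, not a required calling
+ * sequence): the connection manager allocates a tunnel, activates it
+ * and frees it once the ports go away, e.g.
+ *
+ *	tunnel = tb_tunnel_alloc_pci(tb, up, down);
+ *	if (tunnel && !tb_tunnel_activate(tunnel)) {
+ *		... tunnel carries traffic ...
+ *		tb_tunnel_deactivate(tunnel);
+ *	}
+ *	tb_tunnel_free(tunnel);
+ */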
+
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
+ struct tb_port *down);
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
+struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
+ struct tb_port *out, int max_up,
+ int max_down);
+struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
+ struct tb_port *dst, int transmit_ring,
+ int transmit_path, int receive_ring,
+ int receive_path);
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
+ struct tb_port *down, int max_up,
+ int max_down);
+
+void tb_tunnel_free(struct tb_tunnel *tunnel);
+int tb_tunnel_activate(struct tb_tunnel *tunnel);
+int tb_tunnel_restart(struct tb_tunnel *tunnel);
+void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
+bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
+bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
+ const struct tb_port *port);
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
+ int *consumed_down);
+int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);
+void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
+ int *available_up,
+ int *available_down);
+
+static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
+{
+ return tunnel->type == TB_TUNNEL_PCI;
+}
+
+static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel)
+{
+ return tunnel->type == TB_TUNNEL_DP;
+}
+
+static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
+{
+ return tunnel->type == TB_TUNNEL_DMA;
+}
+
+static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
+{
+ return tunnel->type == TB_TUNNEL_USB3;
+}
+
+#endif
+
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
new file mode 100644
index 000000000..5b45c45e7
--- /dev/null
+++ b/drivers/thunderbolt/usb4.c
@@ -0,0 +1,1769 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB4 specific functionality
+ *
+ * Copyright (C) 2019, Intel Corporation
+ * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Rajmohan Mani <rajmohan.mani@intel.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/ktime.h>
+
+#include "sb_regs.h"
+#include "tb.h"
+
+#define USB4_DATA_DWORDS 16
+#define USB4_DATA_RETRIES 3
+
+enum usb4_switch_op {
+ USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10,
+ USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11,
+ USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12,
+ USB4_SWITCH_OP_NVM_WRITE = 0x20,
+ USB4_SWITCH_OP_NVM_AUTH = 0x21,
+ USB4_SWITCH_OP_NVM_READ = 0x22,
+ USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23,
+ USB4_SWITCH_OP_DROM_READ = 0x24,
+ USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25,
+};
+
+enum usb4_sb_target {
+ USB4_SB_TARGET_ROUTER,
+ USB4_SB_TARGET_PARTNER,
+ USB4_SB_TARGET_RETIMER,
+};
+
+#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
+#define USB4_NVM_READ_OFFSET_SHIFT 2
+#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
+#define USB4_NVM_READ_LENGTH_SHIFT 24
+
+#define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
+#define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
+
+#define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
+#define USB4_DROM_ADDRESS_SHIFT 2
+#define USB4_DROM_SIZE_MASK GENMASK(19, 15)
+#define USB4_DROM_SIZE_SHIFT 15
+
+#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
+
+typedef int (*read_block_fn)(void *, unsigned int, void *, size_t);
+typedef int (*write_block_fn)(void *, const void *, size_t);
+
+static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+ u32 value, int timeout_msec)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if ((val & bit) == value)
+ return 0;
+
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
+
+static int usb4_switch_op_read_data(struct tb_switch *sw, void *data,
+ size_t dwords)
+{
+ if (dwords > USB4_DATA_DWORDS)
+ return -EINVAL;
+
+ return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
+}
+
+static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data,
+ size_t dwords)
+{
+ if (dwords > USB4_DATA_DWORDS)
+ return -EINVAL;
+
+ return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords);
+}
+
+static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata)
+{
+ return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+}
+
+static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata)
+{
+ return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
+}
+
+static int usb4_do_read_data(u16 address, void *buf, size_t size,
+ read_block_fn read_block, void *read_block_data)
+{
+ unsigned int retries = USB4_DATA_RETRIES;
+ unsigned int offset;
+
+ do {
+ unsigned int dwaddress, dwords;
+ u8 data[USB4_DATA_DWORDS * 4];
+ size_t nbytes;
+ int ret;
+
+ offset = address & 3;
+ nbytes = min_t(size_t, size + offset, USB4_DATA_DWORDS * 4);
+
+ dwaddress = address / 4;
+ dwords = ALIGN(nbytes, 4) / 4;
+
+ ret = read_block(read_block_data, dwaddress, data, dwords);
+ if (ret) {
+ if (ret != -ENODEV && retries--)
+ continue;
+ return ret;
+ }
+
+ nbytes -= offset;
+ memcpy(buf, data + offset, nbytes);
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
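+
+/*
+ * Worked example: a 10-byte read starting at byte address 6 becomes
+ * offset = 2, dwaddress = 1, nbytes = 12 and dwords = 3; the block
+ * read fetches dwords 1..3 (bytes 4..15) and the copy skips the first
+ * two bytes, so the caller sees exactly bytes 6..15 of the source.
+ */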
+
+static int usb4_do_write_data(unsigned int address, const void *buf, size_t size,
+ write_block_fn write_next_block, void *write_block_data)
+{
+ unsigned int retries = USB4_DATA_RETRIES;
+ unsigned int offset;
+
+ offset = address & 3;
+ address = address & ~3;
+
+ do {
+ u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4);
+ u8 data[USB4_DATA_DWORDS * 4];
+ int ret;
+
+ memcpy(data + offset, buf, nbytes);
+
+ ret = write_next_block(write_block_data, data, nbytes / 4);
+ if (ret) {
+ if (ret == -ETIMEDOUT) {
+ if (retries--)
+ continue;
+ ret = -EIO;
+ }
+ return ret;
+ }
+
+ size -= nbytes;
+ address += nbytes;
+ buf += nbytes;
+ } while (size > 0);
+
+ return 0;
+}
+
+static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
+{
+ u32 val;
+ int ret;
+
+ val = opcode | ROUTER_CS_26_OV;
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+ if (ret)
+ return ret;
+
+ if (val & ROUTER_CS_26_ONS)
+ return -EOPNOTSUPP;
+
+ *status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT;
+ return 0;
+}
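+
+/*
+ * The callers below all follow the same pattern (summarized here for
+ * readability, not an additional requirement): write any input to the
+ * metadata/data registers, issue the opcode with usb4_switch_op(),
+ * check the returned status and then read back metadata or data as
+ * the particular operation defines.
+ */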
+
+static void usb4_switch_check_wakes(struct tb_switch *sw)
+{
+ struct tb_port *port;
+ bool wakeup = false;
+ u32 val;
+
+ if (!device_may_wakeup(&sw->dev))
+ return;
+
+ if (tb_route(sw)) {
+ if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
+ return;
+
+ tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
+ (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
+ (val & ROUTER_CS_6_WOUS) ? "yes" : "no");
+
+ wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
+ }
+
+ /* Check for any connected downstream ports for USB4 wake */
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_has_remote(port))
+ continue;
+
+ if (tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_18, 1))
+ break;
+
+ tb_port_dbg(port, "USB4 wake: %s\n",
+ (val & PORT_CS_18_WOU4S) ? "yes" : "no");
+
+ if (val & PORT_CS_18_WOU4S)
+ wakeup = true;
+ }
+
+ if (wakeup)
+ pm_wakeup_event(&sw->dev, 0);
+}
+
+static bool link_is_usb4(struct tb_port *port)
+{
+ u32 val;
+
+ if (!port->cap_usb4)
+ return false;
+
+ if (tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_18, 1))
+ return false;
+
+ return !(val & PORT_CS_18_TCM);
+}
+
+/**
+ * usb4_switch_setup() - Additional setup for USB4 device
+ * @sw: USB4 router to setup
+ *
+ * USB4 routers need additional settings in order to enable all the
+ * tunneling. This function enables USB and PCIe tunneling if they can
+ * be enabled (e.g. the parent switch also supports them). If USB
+ * tunneling is not available for some reason (for example, there is a
+ * Thunderbolt 3 switch upstream) then the internal xHCI controller is
+ * enabled instead.
+ */
+int usb4_switch_setup(struct tb_switch *sw)
+{
+ struct tb_port *downstream_port;
+ struct tb_switch *parent;
+ bool tbt3, xhci;
+ u32 val = 0;
+ int ret;
+
+ usb4_switch_check_wakes(sw);
+
+ if (!tb_route(sw))
+ return 0;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
+ if (ret)
+ return ret;
+
+ parent = tb_switch_parent(sw);
+ downstream_port = tb_port_at(tb_route(sw), parent);
+ sw->link_usb4 = link_is_usb4(downstream_port);
+ tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT3");
+
+ xhci = val & ROUTER_CS_6_HCI;
+ tbt3 = !(val & ROUTER_CS_6_TNS);
+
+ tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
+ tbt3 ? "yes" : "no", xhci ? "yes" : "no");
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ if (sw->link_usb4 && tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
+ val |= ROUTER_CS_5_UTO;
+ xhci = false;
+ }
+
+ /* Only enable PCIe tunneling if the parent router supports it */
+ if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
+ val |= ROUTER_CS_5_PTO;
+ /*
+ * xHCI can be enabled if PCIe tunneling is supported
+ * and the parent does not have any USB3 downstream
+ * adapters (so we cannot do USB 3.x tunneling).
+ */
+ if (xhci)
+ val |= ROUTER_CS_5_HCO;
+ }
+
+ /* TBT3 supported by the CM */
+ val |= ROUTER_CS_5_C3S;
+ /* Tunneling configuration is ready now */
+ val |= ROUTER_CS_5_CV;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
+ ROUTER_CS_6_CR, 50);
+}
+
+/**
+ * usb4_switch_read_uid() - Read UID from USB4 router
+ * @sw: USB4 router
+ * @uid: UID is stored here
+ *
+ * Reads 64-bit UID from USB4 router config space.
+ */
+int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
+{
+ return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
+}
+
+static int usb4_switch_drom_read_block(void *data,
+ unsigned int dwaddress, void *buf,
+ size_t dwords)
+{
+ struct tb_switch *sw = data;
+ u8 status = 0;
+ u32 metadata;
+ int ret;
+
+ metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
+ metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
+ USB4_DROM_ADDRESS_MASK;
+
+ ret = usb4_switch_op_write_metadata(sw, metadata);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
+ if (ret)
+ return ret;
+
+ if (status)
+ return -EIO;
+
+ return usb4_switch_op_read_data(sw, buf, dwords);
+}
+
+/**
+ * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
+ * @sw: USB4 router
+ * @address: Byte address inside DROM to start reading
+ * @buf: Buffer where the DROM content is stored
+ * @size: Number of bytes to read from DROM
+ *
+ * Uses USB4 router operations to read router DROM. For devices this
+ * should always work but for hosts it may return %-EOPNOTSUPP in which
+ * case the host router does not have DROM.
+ */
+int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size)
+{
+ return usb4_do_read_data(address, buf, size,
+ usb4_switch_drom_read_block, sw);
+}
+
+/**
+ * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
+ * @sw: USB4 router
+ *
+ * Checks whether conditions are met so that lane bonding can be
+ * established with the upstream router. Call only for device routers.
+ */
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
+{
+ struct tb_port *up;
+ int ret;
+ u32 val;
+
+ up = tb_upstream_port(sw);
+ ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
+ if (ret)
+ return false;
+
+ return !!(val & PORT_CS_18_BE);
+}
+
+/**
+ * usb4_switch_set_wake() - Enable/disable wake
+ * @sw: USB4 router
+ * @flags: Wakeup flags (%0 to disable)
+ *
+ * Enables/disables router to wake up from sleep.
+ */
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+{
+ struct tb_port *port;
+ u64 route = tb_route(sw);
+ u32 val;
+ int ret;
+
+ /*
+ * Enable wakes coming from all USB4 downstream ports (from
+ * child routers). For device routers, also do this for the
+ * upstream USB4 port.
+ */
+ tb_switch_for_each_port(sw, port) {
+ if (!tb_port_is_null(port))
+ continue;
+ if (!route && tb_is_upstream_port(port))
+ continue;
+ if (!port->cap_usb4)
+ continue;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
+
+ if (flags & TB_WAKE_ON_CONNECT)
+ val |= PORT_CS_19_WOC;
+ if (flags & TB_WAKE_ON_DISCONNECT)
+ val |= PORT_CS_19_WOD;
+ if (flags & TB_WAKE_ON_USB4)
+ val |= PORT_CS_19_WOU4;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Enable wakes from PCIe and USB 3.x on this router. Only
+ * needed for device routers.
+ */
+ if (route) {
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU);
+ if (flags & TB_WAKE_ON_USB3)
+ val |= ROUTER_CS_5_WOU;
+ if (flags & TB_WAKE_ON_PCIE)
+ val |= ROUTER_CS_5_WOP;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * usb4_switch_set_sleep() - Prepare the router to enter sleep
+ * @sw: USB4 router
+ *
+ * Sets sleep bit for the router. Returns when the router sleep ready
+ * bit has been asserted.
+ */
+int usb4_switch_set_sleep(struct tb_switch *sw)
+{
+ int ret;
+ u32 val;
+
+ /* Set sleep bit and wait for sleep ready to be asserted */
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ val |= ROUTER_CS_5_SLP;
+
+ ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
+ if (ret)
+ return ret;
+
+ return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
+ ROUTER_CS_6_SLPR, 500);
+}
+
+/**
+ * usb4_switch_nvm_sector_size() - Return router NVM sector size
+ * @sw: USB4 router
+ *
+ * If the router supports NVM operations this function returns the NVM
+ * sector size in bytes. If NVM operations are not supported returns
+ * %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_sector_size(struct tb_switch *sw)
+{
+ u32 metadata;
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status);
+ if (ret)
+ return ret;
+
+ if (status)
+ return status == 0x2 ? -EOPNOTSUPP : -EIO;
+
+ ret = usb4_switch_op_read_metadata(sw, &metadata);
+ if (ret)
+ return ret;
+
+ return metadata & USB4_NVM_SECTOR_SIZE_MASK;
+}
+
+static int usb4_switch_nvm_read_block(void *data,
+ unsigned int dwaddress, void *buf, size_t dwords)
+{
+ struct tb_switch *sw = data;
+ u8 status = 0;
+ u32 metadata;
+ int ret;
+
+ metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
+ USB4_NVM_READ_LENGTH_MASK;
+ metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
+ USB4_NVM_READ_OFFSET_MASK;
+
+ ret = usb4_switch_op_write_metadata(sw, metadata);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status);
+ if (ret)
+ return ret;
+
+ if (status)
+ return -EIO;
+
+ return usb4_switch_op_read_data(sw, buf, dwords);
+}
+
+/**
+ * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
+ * @sw: USB4 router
+ * @address: Starting address in bytes
+ * @buf: Read data is placed here
+ * @size: How many bytes to read
+ *
+ * Reads NVM contents of the router. If NVM is not supported returns
+ * %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
+ size_t size)
+{
+ return usb4_do_read_data(address, buf, size,
+ usb4_switch_nvm_read_block, sw);
+}
+
+static int usb4_switch_nvm_set_offset(struct tb_switch *sw,
+ unsigned int address)
+{
+ u32 metadata, dwaddress;
+ u8 status = 0;
+ int ret;
+
+ dwaddress = address / 4;
+ metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
+ USB4_NVM_SET_OFFSET_MASK;
+
+ ret = usb4_switch_op_write_metadata(sw, metadata);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status);
+ if (ret)
+ return ret;
+
+ return status ? -EIO : 0;
+}
+
+static int usb4_switch_nvm_write_next_block(void *data, const void *buf,
+ size_t dwords)
+{
+ struct tb_switch *sw = data;
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_data(sw, buf, dwords);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status);
+ if (ret)
+ return ret;
+
+ return status ? -EIO : 0;
+}
+
+/**
+ * usb4_switch_nvm_write() - Write to the router NVM
+ * @sw: USB4 router
+ * @address: Start address where to write in bytes
+ * @buf: Pointer to the data to write
+ * @size: Size of @buf in bytes
+ *
+ * Writes @buf to the router NVM using USB4 router operations. If NVM
+ * write is not supported returns %-EOPNOTSUPP.
+ */
+int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
+ const void *buf, size_t size)
+{
+ int ret;
+
+ ret = usb4_switch_nvm_set_offset(sw, address);
+ if (ret)
+ return ret;
+
+ return usb4_do_write_data(address, buf, size,
+ usb4_switch_nvm_write_next_block, sw);
+}
+
+/**
+ * usb4_switch_nvm_authenticate() - Authenticate new NVM
+ * @sw: USB4 router
+ *
+ * After the new NVM has been written via usb4_switch_nvm_write(), this
+ * function triggers NVM authentication process. If the authentication
+ * is successful the router is power cycled and the new NVM starts
+ * running. In case of failure returns negative errno.
+ */
+int usb4_switch_nvm_authenticate(struct tb_switch *sw)
+{
+ u8 status = 0;
+ int ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case 0x0:
+ tb_sw_dbg(sw, "NVM authentication successful\n");
+ return 0;
+ case 0x1:
+ return -EINVAL;
+ case 0x2:
+ return -EAGAIN;
+ case 0x3:
+ return -EOPNOTSUPP;
+ default:
+ return -EIO;
+ }
+}
+
+/**
+ * usb4_switch_query_dp_resource() - Query availability of DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * For DP tunneling this function can be used to query availability of
+ * DP IN resource. Returns true if the resource is available for DP
+ * tunneling, false otherwise.
+ */
+bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_metadata(sw, in->port);
+ if (ret)
+ return false;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status);
+ /*
+ * If DP resource allocation is not supported assume it is
+ * always available.
+ */
+ if (ret == -EOPNOTSUPP)
+ return true;
+ else if (ret)
+ return false;
+
+ return !status;
+}
+
+/**
+ * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * Allocates DP IN resource for DP tunneling using USB4 router
+ * operations. If the resource was allocated returns %0. Otherwise
+ * returns negative errno, in particular %-EBUSY if the resource is
+ * already allocated.
+ */
+int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_metadata(sw, in->port);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status);
+ if (ret == -EOPNOTSUPP)
+ return 0;
+ else if (ret)
+ return ret;
+
+ return status ? -EBUSY : 0;
+}
+
+/**
+ * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
+ * @sw: USB4 router
+ * @in: DP IN adapter
+ *
+ * Releases the previously allocated DP IN resource.
+ */
+int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
+{
+ u8 status;
+ int ret;
+
+ ret = usb4_switch_op_write_metadata(sw, in->port);
+ if (ret)
+ return ret;
+
+ ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status);
+ if (ret == -EOPNOTSUPP)
+ return 0;
+ else if (ret)
+ return ret;
+
+ return status ? -EIO : 0;
+}
+
+static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
+{
+ struct tb_port *p;
+ int usb4_idx = 0;
+
+ /* Assume port is primary */
+ tb_switch_for_each_port(sw, p) {
+ if (!tb_port_is_null(p))
+ continue;
+ if (tb_is_upstream_port(p))
+ continue;
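+		/* Count only lane 0 adapters of each dual-lane USB4 port */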
+ if (!p->link_nr) {
+ if (p == port)
+ break;
+ usb4_idx++;
+ }
+ }
+
+ return usb4_idx;
+}
+
+/**
+ * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
+ * @sw: USB4 router
+ * @port: USB4 port
+ *
+ * USB4 routers have direct mapping between USB4 ports and PCIe
+ * downstream adapters where the PCIe topology is extended. This
+ * function returns the corresponding downstream PCIe adapter or %NULL
+ * if no such mapping was possible.
+ */
+struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ int usb4_idx = usb4_port_idx(sw, port);
+ struct tb_port *p;
+ int pcie_idx = 0;
+
+ /* Find PCIe down port matching usb4_port */
+ tb_switch_for_each_port(sw, p) {
+ if (!tb_port_is_pcie_down(p))
+ continue;
+
+ if (pcie_idx == usb4_idx)
+ return p;
+
+ pcie_idx++;
+ }
+
+ return NULL;
+}
+
+/**
+ * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
+ * @sw: USB4 router
+ * @port: USB4 port
+ *
+ * USB4 routers have direct mapping between USB4 ports and USB 3.x
+ * downstream adapters where the USB 3.x topology is extended. This
+ * function returns the corresponding downstream USB 3.x adapter or
+ * %NULL if no such mapping was possible.
+ */
+struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
+ const struct tb_port *port)
+{
+ int usb4_idx = usb4_port_idx(sw, port);
+ struct tb_port *p;
+ int usb_idx = 0;
+
+ /* Find USB3 down port matching usb4_port */
+ tb_switch_for_each_port(sw, p) {
+ if (!tb_port_is_usb3_down(p))
+ continue;
+
+ if (usb_idx == usb4_idx)
+ return p;
+
+ usb_idx++;
+ }
+
+ return NULL;
+}
+
+/**
+ * usb4_port_unlock() - Unlock USB4 downstream port
+ * @port: USB4 port to unlock
+ *
+ * Unlocks USB4 downstream port so that the connection manager can
+ * access the router below this port.
+ */
+int usb4_port_unlock(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_CS_4_LCK;
+ return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
+}
+
+/**
+ * usb4_port_hotplug_enable() - Enables hotplug for a port
+ * @port: USB4 port to operate on
+ *
+ * Enables hot plug events on a given port. This is only intended
+ * to be used on lane, DP-IN, and DP-OUT adapters.
+ */
+int usb4_port_hotplug_enable(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_CS_5_DHP;
+ return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
+}
+
+static int usb4_port_set_configured(struct tb_port *port, bool configured)
+{
+ int ret;
+ u32 val;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ if (configured)
+ val |= PORT_CS_19_PC;
+ else
+ val &= ~PORT_CS_19_PC;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
+
+/**
+ * usb4_port_configure() - Set USB4 port configured
+ * @port: USB4 port
+ *
+ * Sets the USB4 link to be configured for power management purposes.
+ */
+int usb4_port_configure(struct tb_port *port)
+{
+ return usb4_port_set_configured(port, true);
+}
+
+/**
+ * usb4_port_unconfigure() - Set USB4 port unconfigured
+ * @port: USB4 port
+ *
+ * Sets the USB4 link to be unconfigured for power management purposes.
+ */
+void usb4_port_unconfigure(struct tb_port *port)
+{
+ usb4_port_set_configured(port, false);
+}
+
+static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
+{
+ int ret;
+ u32 val;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ if (configured)
+ val |= PORT_CS_19_PID;
+ else
+ val &= ~PORT_CS_19_PID;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
+
+/**
+ * usb4_port_configure_xdomain() - Configure port for XDomain
+ * @port: USB4 port connected to another host
+ *
+ * Marks the USB4 port as being connected to another host. Returns %0 in
+ * case of success and negative errno in case of failure.
+ */
+int usb4_port_configure_xdomain(struct tb_port *port)
+{
+ return usb4_set_xdomain_configured(port, true);
+}
+
+/**
+ * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
+ * @port: USB4 port that was connected to another host
+ *
+ * Clears USB4 port from being marked as XDomain.
+ */
+void usb4_port_unconfigure_xdomain(struct tb_port *port)
+{
+ usb4_set_xdomain_configured(port, false);
+}
+
+static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
+ u32 value, int timeout_msec)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
+ if (ret)
+ return ret;
+
+ if ((val & bit) == value)
+ return 0;
+
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
+
+static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
+{
+ if (dwords > USB4_DATA_DWORDS)
+ return -EINVAL;
+
+ return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
+ dwords);
+}
+
+static int usb4_port_write_data(struct tb_port *port, const void *data,
+ size_t dwords)
+{
+ if (dwords > USB4_DATA_DWORDS)
+ return -EINVAL;
+
+ return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
+ dwords);
+}
+
+static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u8 reg, void *buf, u8 size)
+{
+ size_t dwords = DIV_ROUND_UP(size, 4);
+ int ret;
+ u32 val;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ val = reg;
+ val |= size << PORT_CS_1_LENGTH_SHIFT;
+ val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
+ if (target == USB4_SB_TARGET_RETIMER)
+ val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
+ val |= PORT_CS_1_PND;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_1, 1);
+ if (ret)
+ return ret;
+
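+	/* Wait for the router to clear the pending bit */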
+ ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
+ PORT_CS_1_PND, 0, 500);
+ if (ret)
+ return ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (val & PORT_CS_1_NR)
+ return -ENODEV;
+ if (val & PORT_CS_1_RC)
+ return -EIO;
+
+ return buf ? usb4_port_read_data(port, buf, dwords) : 0;
+}
+
+static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, u8 reg, const void *buf, u8 size)
+{
+ size_t dwords = DIV_ROUND_UP(size, 4);
+ int ret;
+ u32 val;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ if (buf) {
+ ret = usb4_port_write_data(port, buf, dwords);
+ if (ret)
+ return ret;
+ }
+
+ val = reg;
+ val |= size << PORT_CS_1_LENGTH_SHIFT;
+ val |= PORT_CS_1_WNR_WRITE;
+ val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
+ if (target == USB4_SB_TARGET_RETIMER)
+ val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
+ val |= PORT_CS_1_PND;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_1, 1);
+ if (ret)
+ return ret;
+
+ ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
+ PORT_CS_1_PND, 0, 500);
+ if (ret)
+ return ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (val & PORT_CS_1_NR)
+ return -ENODEV;
+ if (val & PORT_CS_1_RC)
+ return -EIO;
+
+ return 0;
+}
+
+static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
+ u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
+{
+ ktime_t timeout;
+ u32 val;
+ int ret;
+
+ val = opcode;
+ ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
+ sizeof(val));
+ if (ret)
+ return ret;
+
+ timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+ do {
+ /* Check results */
+ ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
+ &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0:
+ return 0;
+
+ case USB4_SB_OPCODE_ERR:
+ return -EAGAIN;
+
+ case USB4_SB_OPCODE_ONS:
+ return -EOPNOTSUPP;
+
+ default:
+ if (val != opcode)
+ return -EIO;
+ break;
+ }
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * usb4_port_enumerate_retimers() - Send RT broadcast transaction
+ * @port: USB4 port
+ *
+ * This forces the USB4 port to send a broadcast RT transaction which
+ * makes the retimers on the link assign indices to themselves. Returns
+ * %0 in case of success and negative errno if there was an error.
+ */
+int usb4_port_enumerate_retimers(struct tb_port *port)
+{
+ u32 val;
+
+ val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
+ return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
+ USB4_SB_OPCODE, &val, sizeof(val));
+}
+
+static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
+ enum usb4_sb_opcode opcode,
+ int timeout_msec)
+{
+ return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
+ timeout_msec);
+}
+
+/**
+ * usb4_port_retimer_read() - Read from retimer sideband registers
+ * @port: USB4 port
+ * @index: Retimer index
+ * @reg: Sideband register to read
+ * @buf: Data from @reg is stored here
+ * @size: Number of bytes to read
+ *
+ * Function reads retimer sideband registers starting from @reg. The
+ * retimer is connected to @port at @index. Returns %0 in case of
+ * success, and read data is copied to @buf. If there is no retimer
+ * present at the given @index returns %-ENODEV. In case of any other
+ * failure returns negative errno.
+ */
+int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
+ u8 size)
+{
+ return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
+ size);
+}
+
+/**
+ * usb4_port_retimer_write() - Write to retimer sideband registers
+ * @port: USB4 port
+ * @index: Retimer index
+ * @reg: Sideband register to write
+ * @buf: Data that is written starting from @reg
+ * @size: Number of bytes to write
+ *
+ * Writes retimer sideband registers starting from @reg. The retimer is
+ * connected to @port at @index. Returns %0 in case of success. If there
+ * is no retimer present at the given @index returns %-ENODEV. In case
+ * of any other failure returns negative errno.
+ */
+int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
+ const void *buf, u8 size)
+{
+ return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
+ size);
+}
+
+/**
+ * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * If the retimer at @index is the last one (connected directly to the
+ * Type-C port) this function returns %1, otherwise it returns %0. If
+ * there is no retimer at @index it returns %-ENODEV. In case of any
+ * other failure returns negative errno.
+ */
+int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
+{
+ u32 metadata;
+ int ret;
+
+ ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
+ 500);
+ if (ret)
+ return ret;
+
+ ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
+ sizeof(metadata));
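+	/* Bit 0 of the metadata tells whether this is the last retimer */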
+ return ret ? ret : metadata & 1;
+}
+
+/**
+ * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * Reads NVM sector size (in bytes) of a retimer at @index. This
+ * operation can be used to determine whether the retimer supports NVM
+ * upgrade for example. Returns sector size in bytes or negative errno
+ * in case of error. Specifically returns %-ENODEV if there is no
+ * retimer at @index.
+ */
+int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
+{
+ u32 metadata;
+ int ret;
+
+ ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
+ 500);
+ if (ret)
+ return ret;
+
+ ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
+ sizeof(metadata));
+ return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
+}
+
+static int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
+ unsigned int address)
+{
+ u32 metadata, dwaddress;
+ int ret;
+
+ dwaddress = address / 4;
+ metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
+ USB4_NVM_SET_OFFSET_MASK;
+
+ ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
+ sizeof(metadata));
+ if (ret)
+ return ret;
+
+ return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
+ 500);
+}
+
+struct retimer_info {
+ struct tb_port *port;
+ u8 index;
+};
+
+static int usb4_port_retimer_nvm_write_next_block(void *data, const void *buf,
+ size_t dwords)
+
+{
+ const struct retimer_info *info = data;
+ struct tb_port *port = info->port;
+ u8 index = info->index;
+ int ret;
+
+ ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
+ buf, dwords * 4);
+ if (ret)
+ return ret;
+
+ return usb4_port_retimer_op(port, index,
+ USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
+}
+
+/**
+ * usb4_port_retimer_nvm_write() - Write to retimer NVM
+ * @port: USB4 port
+ * @index: Retimer index
+ * @address: Byte address where to start the write
+ * @buf: Data to write
+ * @size: Number of bytes to write
+ *
+ * Writes @size bytes from @buf to the retimer NVM. Used for NVM
+ * upgrade. Returns %0 if the data was written successfully and negative
+ * errno in case of failure. Specifically returns %-ENODEV if there is
+ * no retimer at @index.
+ */
+int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
+ const void *buf, size_t size)
+{
+ struct retimer_info info = { .port = port, .index = index };
+ int ret;
+
+ ret = usb4_port_retimer_nvm_set_offset(port, index, address);
+ if (ret)
+ return ret;
+
+ return usb4_do_write_data(address, buf, size,
+ usb4_port_retimer_nvm_write_next_block, &info);
+}
+
+/**
+ * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * After the new NVM image has been written via usb4_port_retimer_nvm_write()
+ * this function can be used to trigger the NVM upgrade process. If
+ * successful the retimer restarts with the new NVM and may not have the
+ * index set so one needs to call usb4_port_enumerate_retimers() to
+ * force an index to be assigned.
+ */
+int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
+{
+ u32 val;
+
+ /*
+ * We need to use the raw operation here because once the
+ * authentication completes the retimer index is not set anymore
+ * so we do not get back the status now.
+ */
+ val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
+ return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
+ USB4_SB_OPCODE, &val, sizeof(val));
+}
+
+/**
+ * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
+ * @port: USB4 port
+ * @index: Retimer index
+ * @status: Raw status code read from metadata
+ *
+ * This can be called after usb4_port_retimer_nvm_authenticate() and
+ * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
+ *
+ * Returns %0 if the authentication status was successfully read. The
+ * completion metadata (the result) is then stored into @status. If
+ * reading the status fails, returns negative errno.
+ */
+int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
+ u32 *status)
+{
+ u32 metadata, val;
+ int ret;
+
+ ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
+ sizeof(val));
+ if (ret)
+ return ret;
+
+ switch (val) {
+ case 0:
+ *status = 0;
+ return 0;
+
+ case USB4_SB_OPCODE_ERR:
+ ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
+ &metadata, sizeof(metadata));
+ if (ret)
+ return ret;
+
+ *status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
+ return 0;
+
+ case USB4_SB_OPCODE_ONS:
+ return -EOPNOTSUPP;
+
+ default:
+ return -EIO;
+ }
+}
+
+static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
+ void *buf, size_t dwords)
+{
+ const struct retimer_info *info = data;
+ struct tb_port *port = info->port;
+ u8 index = info->index;
+ u32 metadata;
+ int ret;
+
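+	/* Read offset and length are passed in the metadata register */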
+ metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
+ if (dwords < USB4_DATA_DWORDS)
+ metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
+
+ ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
+ sizeof(metadata));
+ if (ret)
+ return ret;
+
+ ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
+ if (ret)
+ return ret;
+
+ return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
+ dwords * 4);
+}
+
+/**
+ * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
+ * @port: USB4 port
+ * @index: Retimer index
+ * @address: NVM address (in bytes) to start reading
+ * @buf: Data read from NVM is stored here
+ * @size: Number of bytes to read
+ *
+ * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
+ * read was successful and negative errno in case of failure.
+ * Specifically returns %-ENODEV if there is no retimer at @index.
+ */
+int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
+ unsigned int address, void *buf, size_t size)
+{
+ struct retimer_info info = { .port = port, .index = index };
+
+ return usb4_do_read_data(address, buf, size,
+ usb4_port_retimer_nvm_read_block, &info);
+}
+
+/**
+ * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
+ * @port: USB3 adapter port
+ *
+ * Returns the maximum supported link rate of a USB3 adapter in Mb/s,
+ * or negative errno in case of error.
+ */
+int usb4_usb3_port_max_link_rate(struct tb_port *port)
+{
+ int ret, lr;
+ u32 val;
+
+ if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_4, 1);
+ if (ret)
+ return ret;
+
+ lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
+ return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
+}
+
+/**
+ * usb4_usb3_port_actual_link_rate() - Established USB3 link rate
+ * @port: USB3 adapter port
+ *
+ * Returns the actual established link rate of a USB3 adapter in Mb/s.
+ * If the link is not up returns %0, and negative errno in case of
+ * failure.
+ */
+int usb4_usb3_port_actual_link_rate(struct tb_port *port)
+{
+ int ret, lr;
+ u32 val;
+
+ if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_4, 1);
+ if (ret)
+ return ret;
+
+ if (!(val & ADP_USB3_CS_4_ULV))
+ return 0;
+
+ lr = val & ADP_USB3_CS_4_ALR_MASK;
+ return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
+}
+
+static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
+{
+ int ret;
+ u32 val;
+
+ if (!tb_port_is_usb3_down(port))
+ return -EINVAL;
+ if (tb_route(port->sw))
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_2, 1);
+ if (ret)
+ return ret;
+
+ if (request)
+ val |= ADP_USB3_CS_2_CMR;
+ else
+ val &= ~ADP_USB3_CS_2_CMR;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_2, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * We can use val here directly as the CMR bit is in the same place
+ * as HCA. Just mask out others.
+ */
+ val &= ADP_USB3_CS_2_CMR;
+ return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
+ ADP_USB3_CS_1_HCA, val, 1500);
+}
+
+static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
+{
+ return usb4_usb3_port_cm_request(port, true);
+}
+
+static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
+{
+ return usb4_usb3_port_cm_request(port, false);
+}
+
+static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
+{
+ unsigned long uframes;
+
+ uframes = bw * 512UL << scale;
+ return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000);
+}
+
+static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
+{
+ unsigned long uframes;
+
+ /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
+ uframes = ((unsigned long)mbps * 1000 * 1000) / 8000;
+ return DIV_ROUND_UP(uframes, 512UL << scale);
+}
+
+static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
+ int *upstream_bw,
+ int *downstream_bw)
+{
+ u32 val, bw, scale;
+ int ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_2, 1);
+ if (ret)
+ return ret;
+
+ ret = tb_port_read(port, &scale, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_3, 1);
+ if (ret)
+ return ret;
+
+ scale &= ADP_USB3_CS_3_SCALE_MASK;
+
+ bw = val & ADP_USB3_CS_2_AUBW_MASK;
+ *upstream_bw = usb3_bw_to_mbps(bw, scale);
+
+ bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
+ *downstream_bw = usb3_bw_to_mbps(bw, scale);
+
+ return 0;
+}
+
+/**
+ * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
+ * @port: USB3 adapter port
+ * @upstream_bw: Allocated upstream bandwidth is stored here
+ * @downstream_bw: Allocated downstream bandwidth is stored here
+ *
+ * Stores currently allocated USB3 bandwidth into @upstream_bw and
+ * @downstream_bw in Mb/s. Returns %0 in case of success and negative
+ * errno in failure.
+ */
+int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
+ int *downstream_bw)
+{
+ int ret;
+
+ ret = usb4_usb3_port_set_cm_request(port);
+ if (ret)
+ return ret;
+
+ ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
+ downstream_bw);
+ usb4_usb3_port_clear_cm_request(port);
+
+ return ret;
+}
+
+static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
+ int *upstream_bw,
+ int *downstream_bw)
+{
+ u32 val, bw, scale;
+ int ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_1, 1);
+ if (ret)
+ return ret;
+
+ ret = tb_port_read(port, &scale, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_3, 1);
+ if (ret)
+ return ret;
+
+ scale &= ADP_USB3_CS_3_SCALE_MASK;
+
+ bw = val & ADP_USB3_CS_1_CUBW_MASK;
+ *upstream_bw = usb3_bw_to_mbps(bw, scale);
+
+ bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
+ *downstream_bw = usb3_bw_to_mbps(bw, scale);
+
+ return 0;
+}
+
+static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
+ int upstream_bw,
+ int downstream_bw)
+{
+ u32 val, ubw, dbw, scale;
+ int ret, max_bw;
+
+ /* Figure out suitable scale */
+ scale = 0;
+ max_bw = max(upstream_bw, downstream_bw);
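+	/* Increase the scale until the value fits into the register field */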
+ while (scale < 64) {
+ if (mbps_to_usb3_bw(max_bw, scale) < 4096)
+ break;
+ scale++;
+ }
+
+ if (WARN_ON(scale >= 64))
+ return -EINVAL;
+
+ ret = tb_port_write(port, &scale, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_3, 1);
+ if (ret)
+ return ret;
+
+ ubw = mbps_to_usb3_bw(upstream_bw, scale);
+ dbw = mbps_to_usb3_bw(downstream_bw, scale);
+
+ tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
+ val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
+ val |= ubw;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_2, 1);
+}
+
+/**
+ * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
+ * @port: USB3 adapter port
+ * @upstream_bw: New upstream bandwidth
+ * @downstream_bw: New downstream bandwidth
+ *
+ * This can be used to set how much bandwidth is allocated for the USB3
+ * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
+ * new values programmed to the USB3 adapter allocation registers. If
+ * the values are lower than what is currently consumed the allocation
+ * is set to what is currently consumed instead (consumed bandwidth
+ * cannot be taken away by CM). The actual new values are returned in
+ * @upstream_bw and @downstream_bw.
+ *
+ * Returns %0 in case of success and negative errno if there was a
+ * failure.
+ */
+int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
+ int *downstream_bw)
+{
+ int ret, consumed_up, consumed_down, allocate_up, allocate_down;
+
+ ret = usb4_usb3_port_set_cm_request(port);
+ if (ret)
+ return ret;
+
+ ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
+ &consumed_down);
+ if (ret)
+ goto err_request;
+
+ /* Don't allow it go lower than what is consumed */
+ allocate_up = max(*upstream_bw, consumed_up);
+ allocate_down = max(*downstream_bw, consumed_down);
+
+ ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
+ allocate_down);
+ if (ret)
+ goto err_request;
+
+ *upstream_bw = allocate_up;
+ *downstream_bw = allocate_down;
+
+err_request:
+ usb4_usb3_port_clear_cm_request(port);
+ return ret;
+}
+
+/**
+ * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
+ * @port: USB3 adapter port
+ * @upstream_bw: New allocated upstream bandwidth
+ * @downstream_bw: New allocated downstream bandwidth
+ *
+ * Releases USB3 allocated bandwidth down to what is actually consumed.
+ * The new bandwidth is returned in @upstream_bw and @downstream_bw.
+ *
+ * Returns %0 in case of success and negative errno in case of failure.
+ */
+int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
+ int *downstream_bw)
+{
+ int ret, consumed_up, consumed_down;
+
+ ret = usb4_usb3_port_set_cm_request(port);
+ if (ret)
+ return ret;
+
+ ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
+ &consumed_down);
+ if (ret)
+ goto err_request;
+
+ /*
+ * Always keep 1000 Mb/s to make sure xHCI has at least some
+ * bandwidth available for isochronous traffic.
+ */
+ if (consumed_up < 1000)
+ consumed_up = 1000;
+ if (consumed_down < 1000)
+ consumed_down = 1000;
+
+ ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
+ consumed_down);
+ if (ret)
+ goto err_request;
+
+ *upstream_bw = consumed_up;
+ *downstream_bw = consumed_down;
+
+err_request:
+ usb4_usb3_port_clear_cm_request(port);
+ return ret;
+}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
new file mode 100644
index 000000000..c00ad8170
--- /dev/null
+++ b/drivers/thunderbolt/xdomain.c
@@ -0,0 +1,1752 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Thunderbolt XDomain discovery protocol support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ * Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/utsname.h>
+#include <linux/uuid.h>
+#include <linux/workqueue.h>
+
+#include "tb.h"
+
+#define XDOMAIN_DEFAULT_TIMEOUT 5000 /* ms */
+#define XDOMAIN_UUID_RETRIES 10
+#define XDOMAIN_PROPERTIES_RETRIES 60
+#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10
+
+struct xdomain_request_work {
+ struct work_struct work;
+ struct tb_xdp_header *pkg;
+ struct tb *tb;
+};
+
+/* Serializes access to the properties and protocol handlers below */
+static DEFINE_MUTEX(xdomain_lock);
+
+/* Properties exposed to the remote domains */
+static struct tb_property_dir *xdomain_property_dir;
+static u32 *xdomain_property_block;
+static u32 xdomain_property_block_len;
+static u32 xdomain_property_block_gen;
+
+/* Additional protocol handlers */
+static LIST_HEAD(protocol_handlers);
+
+/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
+static const uuid_t tb_xdp_uuid =
+ UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
+ 0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
+
+static bool tb_xdomain_match(const struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg)
+{
+ switch (pkg->frame.eof) {
+ case TB_CFG_PKG_ERROR:
+ return true;
+
+ case TB_CFG_PKG_XDOMAIN_RESP: {
+ const struct tb_xdp_header *res_hdr = pkg->buffer;
+ const struct tb_xdp_header *req_hdr = req->request;
+
+ if (pkg->frame.size < req->response_size / 4)
+ return false;
+
+ /* Make sure route matches */
+ if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
+ req_hdr->xd_hdr.route_hi)
+ return false;
+ if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
+ return false;
+
+ /* Check that the XDomain protocol matches */
+ if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
+ return false;
+
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
+
+static bool tb_xdomain_copy(struct tb_cfg_request *req,
+ const struct ctl_pkg *pkg)
+{
+ memcpy(req->response, pkg->buffer, req->response_size);
+ req->result.err = 0;
+ return true;
+}
+
+static void response_ready(void *data)
+{
+ tb_cfg_request_put(data);
+}
+
+static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
+ size_t size, enum tb_cfg_pkg_type type)
+{
+ struct tb_cfg_request *req;
+
+ req = tb_cfg_request_alloc();
+ if (!req)
+ return -ENOMEM;
+
+ req->match = tb_xdomain_match;
+ req->copy = tb_xdomain_copy;
+ req->request = response;
+ req->request_size = size;
+ req->request_type = type;
+
+ return tb_cfg_request(ctl, req, response_ready, req);
+}
+
+/**
+ * tb_xdomain_response() - Send an XDomain response message
+ * @xd: XDomain to send the message
+ * @response: Response to send
+ * @size: Size of the response
+ * @type: PDF type of the response
+ *
+ * This can be used to send an XDomain response message to the other
+ * domain. No response for the message is expected.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
+ size_t size, enum tb_cfg_pkg_type type)
+{
+ return __tb_xdomain_response(xd->tb->ctl, response, size, type);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_response);
+
+static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
+ size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
+ size_t response_size, enum tb_cfg_pkg_type response_type,
+ unsigned int timeout_msec)
+{
+ struct tb_cfg_request *req;
+ struct tb_cfg_result res;
+
+ req = tb_cfg_request_alloc();
+ if (!req)
+ return -ENOMEM;
+
+ req->match = tb_xdomain_match;
+ req->copy = tb_xdomain_copy;
+ req->request = request;
+ req->request_size = request_size;
+ req->request_type = request_type;
+ req->response = response;
+ req->response_size = response_size;
+ req->response_type = response_type;
+
+ res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+ tb_cfg_request_put(req);
+
+ return res.err == 1 ? -EIO : res.err;
+}
+
+/**
+ * tb_xdomain_request() - Send an XDomain request
+ * @xd: XDomain to send the request
+ * @request: Request to send
+ * @request_size: Size of the request in bytes
+ * @request_type: PDF type of the request
+ * @response: Response is copied here
+ * @response_size: Expected size of the response in bytes
+ * @response_type: Expected PDF type of the response
+ * @timeout_msec: Timeout in milliseconds to wait for the response
+ *
+ * This function can be used to send XDomain control channel messages to
+ * the other domain. The function waits until the response is received
+ * or the timeout triggers, whichever comes first.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
+ size_t request_size, enum tb_cfg_pkg_type request_type,
+ void *response, size_t response_size,
+ enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
+{
+ return __tb_xdomain_request(xd->tb->ctl, request, request_size,
+ request_type, response, response_size,
+ response_type, timeout_msec);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_request);
+
+static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
+ u8 sequence, enum tb_xdp_type type, size_t size)
+{
+ u32 length_sn;
+
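+	/* Length field is in dwords and excludes the basic XDomain header */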
+ length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
+ length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;
+
+ hdr->xd_hdr.route_hi = upper_32_bits(route);
+ hdr->xd_hdr.route_lo = lower_32_bits(route);
+ hdr->xd_hdr.length_sn = length_sn;
+ hdr->type = type;
+ memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
+}
+
+static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
+{
+ const struct tb_xdp_error_response *error;
+
+ if (hdr->type != ERROR_RESPONSE)
+ return 0;
+
+ error = (const struct tb_xdp_error_response *)hdr;
+
+ switch (error->error) {
+ case ERROR_UNKNOWN_PACKET:
+ case ERROR_UNKNOWN_DOMAIN:
+ return -EIO;
+ case ERROR_NOT_SUPPORTED:
+ return -ENOTSUPP;
+ case ERROR_NOT_READY:
+ return -EAGAIN;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
+ uuid_t *uuid)
+{
+ struct tb_xdp_uuid_response res;
+ struct tb_xdp_uuid req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
+ sizeof(req));
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+ TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = tb_xdp_handle_error(&res.hdr);
+ if (ret)
+ return ret;
+
+ uuid_copy(uuid, &res.src_uuid);
+ return 0;
+}
+
+static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
+ const uuid_t *uuid)
+{
+ struct tb_xdp_uuid_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
+ sizeof(res));
+
+ uuid_copy(&res.src_uuid, uuid);
+ res.src_route_hi = upper_32_bits(route);
+ res.src_route_lo = lower_32_bits(route);
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
+ enum tb_xdp_error error)
+{
+ struct tb_xdp_error_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
+ sizeof(res));
+ res.error = error;
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
+ const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
+ u32 **block, u32 *generation)
+{
+ struct tb_xdp_properties_response *res;
+ struct tb_xdp_properties req;
+ u16 data_len, len;
+ size_t total_size;
+ u32 *data = NULL;
+ int ret;
+
+ total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
+ res = kzalloc(total_size, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
+ sizeof(req));
+ memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
+ memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));
+
+ len = 0;
+ data_len = 0;
+
+ do {
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+ TB_CFG_PKG_XDOMAIN_REQ, res,
+ total_size, TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ goto err;
+
+ ret = tb_xdp_handle_error(&res->hdr);
+ if (ret)
+ goto err;
+
+ /*
+ * Package length includes the whole payload without the
+ * XDomain header. Validate first that the package is at
+		 * least the size of the response structure.
+ */
+ len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
+ if (len < sizeof(*res) / 4) {
+ ret = -EINVAL;
+ goto err;
+ }
+
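+		/* Number of data dwords in this response */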
+ len += sizeof(res->hdr.xd_hdr) / 4;
+ len -= sizeof(*res) / 4;
+
+ if (res->offset != req.offset) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * First time allocate block that has enough space for
+ * the whole properties block.
+ */
+ if (!data) {
+ data_len = res->data_length;
+ if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
+ ret = -E2BIG;
+ goto err;
+ }
+
+ data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ memcpy(data + req.offset, res->data, len * 4);
+ req.offset += len;
+ } while (!data_len || req.offset < data_len);
+
+ *block = data;
+ *generation = res->generation;
+
+ kfree(res);
+
+ return data_len;
+
+err:
+ kfree(data);
+ kfree(res);
+
+ return ret;
+}
+
+static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
+ u64 route, u8 sequence, const uuid_t *src_uuid,
+ const struct tb_xdp_properties *req)
+{
+ struct tb_xdp_properties_response *res;
+ size_t total_size;
+ u16 len;
+ int ret;
+
+ /*
+ * Currently we expect all requests to be directed to us. The
+	 * protocol supports forwarding, though, which we might add
+	 * support for later on.
+ */
+ if (!uuid_equal(src_uuid, &req->dst_uuid)) {
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_UNKNOWN_DOMAIN);
+ return 0;
+ }
+
+ mutex_lock(&xdomain_lock);
+
+ if (req->offset >= xdomain_property_block_len) {
+ mutex_unlock(&xdomain_lock);
+ return -EINVAL;
+ }
+
+ len = xdomain_property_block_len - req->offset;
+ len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
+ total_size = sizeof(*res) + len * 4;
+
+ res = kzalloc(total_size, GFP_KERNEL);
+ if (!res) {
+ mutex_unlock(&xdomain_lock);
+ return -ENOMEM;
+ }
+
+ tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
+ total_size);
+ res->generation = xdomain_property_block_gen;
+ res->data_length = xdomain_property_block_len;
+ res->offset = req->offset;
+ uuid_copy(&res->src_uuid, src_uuid);
+ uuid_copy(&res->dst_uuid, &req->src_uuid);
+ memcpy(res->data, &xdomain_property_block[req->offset], len * 4);
+
+ mutex_unlock(&xdomain_lock);
+
+ ret = __tb_xdomain_response(ctl, res, total_size,
+ TB_CFG_PKG_XDOMAIN_RESP);
+
+ kfree(res);
+ return ret;
+}
+
+static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
+ int retry, const uuid_t *uuid)
+{
+ struct tb_xdp_properties_changed_response res;
+ struct tb_xdp_properties_changed req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, retry % 4,
+ PROPERTIES_CHANGED_REQUEST, sizeof(req));
+ uuid_copy(&req.src_uuid, uuid);
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+ TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ return tb_xdp_handle_error(&res.hdr);
+}
+
+static int
+tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
+{
+ struct tb_xdp_properties_changed_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence,
+ PROPERTIES_CHANGED_RESPONSE, sizeof(res));
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+/**
+ * tb_register_protocol_handler() - Register protocol handler
+ * @handler: Handler to register
+ *
+ * This allows XDomain service drivers to hook into incoming XDomain
+ * messages. After this function is called the service driver needs to
+ * be able to handle calls to callback whenever a package with the
+ * registered protocol is received.
+ */
+int tb_register_protocol_handler(struct tb_protocol_handler *handler)
+{
+ if (!handler->uuid || !handler->callback)
+ return -EINVAL;
+ if (uuid_equal(handler->uuid, &tb_xdp_uuid))
+ return -EINVAL;
+
+ mutex_lock(&xdomain_lock);
+ list_add_tail(&handler->list, &protocol_handlers);
+ mutex_unlock(&xdomain_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
+
+/**
+ * tb_unregister_protocol_handler() - Unregister protocol handler
+ * @handler: Handler to unregister
+ *
+ * Removes the previously registered protocol handler.
+ */
+void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
+{
+ mutex_lock(&xdomain_lock);
+ list_del_init(&handler->list);
+ mutex_unlock(&xdomain_lock);
+}
+EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
+
+static int rebuild_property_block(void)
+{
+ u32 *block, len;
+ int ret;
+
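+	/* First pass only computes the length of the property block */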
+ ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ len = ret;
+
+ block = kcalloc(len, sizeof(u32), GFP_KERNEL);
+ if (!block)
+ return -ENOMEM;
+
+ ret = tb_property_format_dir(xdomain_property_dir, block, len);
+ if (ret) {
+ kfree(block);
+ return ret;
+ }
+
+ kfree(xdomain_property_block);
+ xdomain_property_block = block;
+ xdomain_property_block_len = len;
+ xdomain_property_block_gen++;
+
+ return 0;
+}
+
+static void finalize_property_block(void)
+{
+ const struct tb_property *nodename;
+
+ /*
+	 * On the first XDomain connection we set up the system
+	 * nodename. This is delayed here because userspace may not have
+	 * it set when the driver is first probed.
+ */
+ mutex_lock(&xdomain_lock);
+ nodename = tb_property_find(xdomain_property_dir, "deviceid",
+ TB_PROPERTY_TYPE_TEXT);
+ if (!nodename) {
+ tb_property_add_text(xdomain_property_dir, "deviceid",
+ utsname()->nodename);
+ rebuild_property_block();
+ }
+ mutex_unlock(&xdomain_lock);
+}
+
+static void tb_xdp_handle_request(struct work_struct *work)
+{
+ struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
+ const struct tb_xdp_header *pkg = xw->pkg;
+ const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
+ struct tb *tb = xw->tb;
+ struct tb_ctl *ctl = tb->ctl;
+ const uuid_t *uuid;
+ int ret = 0;
+ u32 sequence;
+ u64 route;
+
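+	/* The topmost bit is not part of the route string */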
+ route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
+ sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
+ sequence >>= TB_XDOMAIN_SN_SHIFT;
+
+ mutex_lock(&tb->lock);
+ if (tb->root_switch)
+ uuid = tb->root_switch->uuid;
+ else
+ uuid = NULL;
+ mutex_unlock(&tb->lock);
+
+ if (!uuid) {
+ tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
+ goto out;
+ }
+
+ finalize_property_block();
+
+ switch (pkg->type) {
+ case PROPERTIES_REQUEST:
+ ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
+ (const struct tb_xdp_properties *)pkg);
+ break;
+
+ case PROPERTIES_CHANGED_REQUEST: {
+ const struct tb_xdp_properties_changed *xchg =
+ (const struct tb_xdp_properties_changed *)pkg;
+ struct tb_xdomain *xd;
+
+ ret = tb_xdp_properties_changed_response(ctl, route, sequence);
+
+ /*
+ * Since the properties have been changed, let's update
+ * the xdomain related to this connection as well in
+ * case there is a change in services it offers.
+ */
+ xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
+ if (xd) {
+ queue_delayed_work(tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(50));
+ tb_xdomain_put(xd);
+ }
+
+ break;
+ }
+
+ case UUID_REQUEST_OLD:
+ case UUID_REQUEST:
+ ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
+ break;
+
+ default:
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_NOT_SUPPORTED);
+ break;
+ }
+
+ if (ret) {
+ tb_warn(tb, "failed to send XDomain response for %#x\n",
+ pkg->type);
+ }
+
+out:
+ kfree(xw->pkg);
+ kfree(xw);
+
+ tb_domain_put(tb);
+}
+
+static bool
+tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
+ size_t size)
+{
+ struct xdomain_request_work *xw;
+
+ xw = kmalloc(sizeof(*xw), GFP_KERNEL);
+ if (!xw)
+ return false;
+
+ INIT_WORK(&xw->work, tb_xdp_handle_request);
+ xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
+ if (!xw->pkg) {
+ kfree(xw);
+ return false;
+ }
+ xw->tb = tb_domain_get(tb);
+
+ schedule_work(&xw->work);
+ return true;
+}
+
+/**
+ * tb_register_service_driver() - Register XDomain service driver
+ * @drv: Driver to register
+ *
+ * Registers new service driver from @drv to the bus.
+ */
+int tb_register_service_driver(struct tb_service_driver *drv)
+{
+ drv->driver.bus = &tb_bus_type;
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(tb_register_service_driver);
+
+/**
+ * tb_unregister_service_driver() - Unregister XDomain service driver
+ * @drv: Driver to unregister
+ *
+ * Unregisters XDomain service driver from the bus.
+ */
+void tb_unregister_service_driver(struct tb_service_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
+
+static ssize_t key_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ /*
+ * It should be null terminated but anything else is pretty much
+ * allowed.
+ */
+ return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
+}
+static DEVICE_ATTR_RO(key);
+
+static int get_modalias(struct tb_service *svc, char *buf, size_t size)
+{
+ return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
+ svc->prtcid, svc->prtcvers, svc->prtcrevs);
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ /* Full buffer size except new line and null termination */
+ get_modalias(svc, buf, PAGE_SIZE - 2);
+ return sprintf(buf, "%s\n", buf);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ return sprintf(buf, "%u\n", svc->prtcid);
+}
+static DEVICE_ATTR_RO(prtcid);
+
+static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ return sprintf(buf, "%u\n", svc->prtcvers);
+}
+static DEVICE_ATTR_RO(prtcvers);
+
+static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ return sprintf(buf, "%u\n", svc->prtcrevs);
+}
+static DEVICE_ATTR_RO(prtcrevs);
+
+static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+ return sprintf(buf, "0x%08x\n", svc->prtcstns);
+}
+static DEVICE_ATTR_RO(prtcstns);
+
+static struct attribute *tb_service_attrs[] = {
+ &dev_attr_key.attr,
+ &dev_attr_modalias.attr,
+ &dev_attr_prtcid.attr,
+ &dev_attr_prtcvers.attr,
+ &dev_attr_prtcrevs.attr,
+ &dev_attr_prtcstns.attr,
+ NULL,
+};
+
+static struct attribute_group tb_service_attr_group = {
+ .attrs = tb_service_attrs,
+};
+
+static const struct attribute_group *tb_service_attr_groups[] = {
+ &tb_service_attr_group,
+ NULL,
+};
+
+static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+ char modalias[64];
+
+ get_modalias(svc, modalias, sizeof(modalias));
+ return add_uevent_var(env, "MODALIAS=%s", modalias);
+}
+
+static void tb_service_release(struct device *dev)
+{
+ struct tb_service *svc = container_of(dev, struct tb_service, dev);
+ struct tb_xdomain *xd = tb_service_parent(svc);
+
+ ida_simple_remove(&xd->service_ids, svc->id);
+ kfree(svc->key);
+ kfree(svc);
+}
+
+struct device_type tb_service_type = {
+ .name = "thunderbolt_service",
+ .groups = tb_service_attr_groups,
+ .uevent = tb_service_uevent,
+ .release = tb_service_release,
+};
+EXPORT_SYMBOL_GPL(tb_service_type);
+
+static int remove_missing_service(struct device *dev, void *data)
+{
+ struct tb_xdomain *xd = data;
+ struct tb_service *svc;
+
+ svc = tb_to_service(dev);
+ if (!svc)
+ return 0;
+
+ if (!tb_property_find(xd->properties, svc->key,
+ TB_PROPERTY_TYPE_DIRECTORY))
+ device_unregister(dev);
+
+ return 0;
+}
+
+static int find_service(struct device *dev, void *data)
+{
+ const struct tb_property *p = data;
+ struct tb_service *svc;
+
+ svc = tb_to_service(dev);
+ if (!svc)
+ return 0;
+
+ return !strcmp(svc->key, p->key);
+}
+
+static int populate_service(struct tb_service *svc,
+ struct tb_property *property)
+{
+ struct tb_property_dir *dir = property->value.dir;
+ struct tb_property *p;
+
+ /* Fill in standard properties */
+ p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
+ if (p)
+ svc->prtcid = p->value.immediate;
+ p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
+ if (p)
+ svc->prtcvers = p->value.immediate;
+ p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
+ if (p)
+ svc->prtcrevs = p->value.immediate;
+ p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
+ if (p)
+ svc->prtcstns = p->value.immediate;
+
+ svc->key = kstrdup(property->key, GFP_KERNEL);
+ if (!svc->key)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void enumerate_services(struct tb_xdomain *xd)
+{
+ struct tb_service *svc;
+ struct tb_property *p;
+ struct device *dev;
+ int id;
+
+ /*
+ * First remove all services that are not available anymore in
+ * the updated property block.
+ */
+ device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);
+
+ /* Then re-enumerate properties creating new services as we go */
+ tb_property_for_each(xd->properties, p) {
+ if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
+ continue;
+
+ /* If the service exists already we are fine */
+ dev = device_find_child(&xd->dev, p, find_service);
+ if (dev) {
+ put_device(dev);
+ continue;
+ }
+
+ svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+ if (!svc)
+ break;
+
+ if (populate_service(svc, p)) {
+ kfree(svc);
+ break;
+ }
+
+ id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+ if (id < 0) {
+ kfree(svc->key);
+ kfree(svc);
+ break;
+ }
+ svc->id = id;
+ svc->dev.bus = &tb_bus_type;
+ svc->dev.type = &tb_service_type;
+ svc->dev.parent = &xd->dev;
+ dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);
+
+ if (device_register(&svc->dev)) {
+ put_device(&svc->dev);
+ break;
+ }
+ }
+}
+
+static int populate_properties(struct tb_xdomain *xd,
+ struct tb_property_dir *dir)
+{
+ const struct tb_property *p;
+
+ /* Required properties */
+ p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
+ if (!p)
+ return -EINVAL;
+ xd->device = p->value.immediate;
+
+ p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
+ if (!p)
+ return -EINVAL;
+ xd->vendor = p->value.immediate;
+
+ kfree(xd->device_name);
+ xd->device_name = NULL;
+ kfree(xd->vendor_name);
+ xd->vendor_name = NULL;
+
+ /* Optional properties */
+ p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
+ if (p)
+ xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
+ p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
+ if (p)
+ xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);
+
+ return 0;
+}
+
+/* Called with @xd->lock held */
+static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
+{
+ if (!xd->resume)
+ return;
+
+ xd->resume = false;
+ if (xd->transmit_path) {
+ dev_dbg(&xd->dev, "re-establishing DMA path\n");
+ tb_domain_approve_xdomain_paths(xd->tb, xd);
+ }
+}
+
+static void tb_xdomain_get_uuid(struct work_struct *work)
+{
+ struct tb_xdomain *xd = container_of(work, typeof(*xd),
+ get_uuid_work.work);
+ struct tb *tb = xd->tb;
+ uuid_t uuid;
+ int ret;
+
+ ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
+ if (ret < 0) {
+ if (xd->uuid_retries-- > 0) {
+ queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
+ msecs_to_jiffies(100));
+ } else {
+ dev_dbg(&xd->dev, "failed to read remote UUID\n");
+ }
+ return;
+ }
+
+ if (uuid_equal(&uuid, xd->local_uuid)) {
+ dev_dbg(&xd->dev, "intra-domain loop detected\n");
+ return;
+ }
+
+ /*
+ * If the UUID is different, there is another domain connected
+ * so mark this one unplugged and wait for the connection
+ * manager to replace it.
+ */
+ if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
+ dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
+ xd->is_unplugged = true;
+ return;
+ }
+
+ /* First time fill in the missing UUID */
+ if (!xd->remote_uuid) {
+ xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
+ if (!xd->remote_uuid)
+ return;
+ }
+
+ /* Now we can start the normal properties exchange */
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(100));
+ queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(1000));
+}
+
+static void tb_xdomain_get_properties(struct work_struct *work)
+{
+ struct tb_xdomain *xd = container_of(work, typeof(*xd),
+ get_properties_work.work);
+ struct tb_property_dir *dir;
+ struct tb *tb = xd->tb;
+ bool update = false;
+ u32 *block = NULL;
+ u32 gen = 0;
+ int ret;
+
+ ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
+ xd->remote_uuid, xd->properties_retries,
+ &block, &gen);
+ if (ret < 0) {
+ if (xd->properties_retries-- > 0) {
+ queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(1000));
+ } else {
+ /* Give up now */
+ dev_err(&xd->dev,
+				"failed to read XDomain properties from %pUb\n",
+ xd->remote_uuid);
+ }
+ return;
+ }
+
+ xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
+
+ mutex_lock(&xd->lock);
+
+ /* Only accept newer generation properties */
+ if (xd->properties && gen <= xd->property_block_gen) {
+ /*
+ * On resume it is likely that the properties block is
+ * not changed (unless the other end added or removed
+ * services). However, we need to make sure the existing
+ * DMA paths are restored properly.
+ */
+ tb_xdomain_restore_paths(xd);
+ goto err_free_block;
+ }
+
+ dir = tb_property_parse_dir(block, ret);
+ if (!dir) {
+ dev_err(&xd->dev, "failed to parse XDomain properties\n");
+ goto err_free_block;
+ }
+
+ ret = populate_properties(xd, dir);
+ if (ret) {
+ dev_err(&xd->dev, "missing XDomain properties in response\n");
+ goto err_free_dir;
+ }
+
+ /* Release the existing one */
+ if (xd->properties) {
+ tb_property_free_dir(xd->properties);
+ update = true;
+ }
+
+ xd->properties = dir;
+ xd->property_block_gen = gen;
+
+ tb_xdomain_restore_paths(xd);
+
+ mutex_unlock(&xd->lock);
+
+ kfree(block);
+
+ /*
+ * Now the device should be ready enough so we can add it to the
+ * bus and let userspace know about it. If the device is already
+ * registered, we notify the userspace that it has changed.
+ */
+ if (!update) {
+ if (device_add(&xd->dev)) {
+ dev_err(&xd->dev, "failed to add XDomain device\n");
+ return;
+ }
+ } else {
+ kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
+ }
+
+ enumerate_services(xd);
+ return;
+
+err_free_dir:
+ tb_property_free_dir(dir);
+err_free_block:
+ kfree(block);
+ mutex_unlock(&xd->lock);
+}
+
+static void tb_xdomain_properties_changed(struct work_struct *work)
+{
+ struct tb_xdomain *xd = container_of(work, typeof(*xd),
+ properties_changed_work.work);
+ int ret;
+
+ ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
+ xd->properties_changed_retries, xd->local_uuid);
+ if (ret) {
+ if (xd->properties_changed_retries-- > 0)
+ queue_delayed_work(xd->tb->wq,
+ &xd->properties_changed_work,
+ msecs_to_jiffies(1000));
+ return;
+ }
+
+ xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+}
+
+static ssize_t device_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ return sprintf(buf, "%#x\n", xd->device);
+}
+static DEVICE_ATTR_RO(device);
+
+static ssize_t
+device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+ int ret;
+
+ if (mutex_lock_interruptible(&xd->lock))
+ return -ERESTARTSYS;
+ ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
+ mutex_unlock(&xd->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(device_name);
+
+static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ return sprintf(buf, "%#x\n", xd->vendor);
+}
+static DEVICE_ATTR_RO(vendor);
+
+static ssize_t
+vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+ int ret;
+
+ if (mutex_lock_interruptible(&xd->lock))
+ return -ERESTARTSYS;
+ ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
+ mutex_unlock(&xd->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(vendor_name);
+
+static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ return sprintf(buf, "%pUb\n", xd->remote_uuid);
+}
+static DEVICE_ATTR_RO(unique_id);
+
+static struct attribute *xdomain_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_device_name.attr,
+ &dev_attr_unique_id.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_vendor_name.attr,
+ NULL,
+};
+
+static struct attribute_group xdomain_attr_group = {
+ .attrs = xdomain_attrs,
+};
+
+static const struct attribute_group *xdomain_attr_groups[] = {
+ &xdomain_attr_group,
+ NULL,
+};
+
+static void tb_xdomain_release(struct device *dev)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ put_device(xd->dev.parent);
+
+ tb_property_free_dir(xd->properties);
+ ida_destroy(&xd->service_ids);
+
+ kfree(xd->local_uuid);
+ kfree(xd->remote_uuid);
+ kfree(xd->device_name);
+ kfree(xd->vendor_name);
+ kfree(xd);
+}
+
+static void start_handshake(struct tb_xdomain *xd)
+{
+ xd->uuid_retries = XDOMAIN_UUID_RETRIES;
+ xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
+ xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+
+ if (xd->needs_uuid) {
+ queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
+ msecs_to_jiffies(100));
+ } else {
+ /* Start exchanging properties with the other host */
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(100));
+ queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(1000));
+ }
+}
+
+static void stop_handshake(struct tb_xdomain *xd)
+{
+ xd->uuid_retries = 0;
+ xd->properties_retries = 0;
+ xd->properties_changed_retries = 0;
+
+ cancel_delayed_work_sync(&xd->get_uuid_work);
+ cancel_delayed_work_sync(&xd->get_properties_work);
+ cancel_delayed_work_sync(&xd->properties_changed_work);
+}
+
+static int __maybe_unused tb_xdomain_suspend(struct device *dev)
+{
+ stop_handshake(tb_to_xdomain(dev));
+ return 0;
+}
+
+static int __maybe_unused tb_xdomain_resume(struct device *dev)
+{
+ struct tb_xdomain *xd = tb_to_xdomain(dev);
+
+ /*
+ * Ask tb_xdomain_get_properties() to restore any existing DMA
+ * paths after the properties have been re-read.
+ */
+ xd->resume = true;
+ start_handshake(xd);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tb_xdomain_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
+};
+
+struct device_type tb_xdomain_type = {
+ .name = "thunderbolt_xdomain",
+ .release = tb_xdomain_release,
+ .pm = &tb_xdomain_pm_ops,
+};
+EXPORT_SYMBOL_GPL(tb_xdomain_type);
+
+/**
+ * tb_xdomain_alloc() - Allocate new XDomain object
+ * @tb: Domain where the XDomain belongs
+ * @parent: Parent device (the switch through which the connection to the
+ * other domain is reached).
+ * @route: Route string used to reach the other domain
+ * @local_uuid: Our local domain UUID
+ * @remote_uuid: UUID of the other domain (optional)
+ *
+ * Allocates a new XDomain structure and returns a pointer to it. The
+ * object must be released by calling tb_xdomain_put().
+ */
+struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
+ u64 route, const uuid_t *local_uuid,
+ const uuid_t *remote_uuid)
+{
+ struct tb_switch *parent_sw = tb_to_switch(parent);
+ struct tb_xdomain *xd;
+ struct tb_port *down;
+
+ /* Make sure the downstream domain is accessible */
+ down = tb_port_at(route, parent_sw);
+ tb_port_unlock(down);
+
+ xd = kzalloc(sizeof(*xd), GFP_KERNEL);
+ if (!xd)
+ return NULL;
+
+ xd->tb = tb;
+ xd->route = route;
+ ida_init(&xd->service_ids);
+ mutex_init(&xd->lock);
+ INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
+ INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
+ INIT_DELAYED_WORK(&xd->properties_changed_work,
+ tb_xdomain_properties_changed);
+
+ xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
+ if (!xd->local_uuid)
+ goto err_free;
+
+ if (remote_uuid) {
+ xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
+ GFP_KERNEL);
+ if (!xd->remote_uuid)
+ goto err_free_local_uuid;
+ } else {
+ xd->needs_uuid = true;
+ }
+
+ device_initialize(&xd->dev);
+ xd->dev.parent = get_device(parent);
+ xd->dev.bus = &tb_bus_type;
+ xd->dev.type = &tb_xdomain_type;
+ xd->dev.groups = xdomain_attr_groups;
+ dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
+
+ /*
+ * This keeps the DMA powered on as long as we have an active
+ * connection to another host.
+ */
+ pm_runtime_set_active(&xd->dev);
+ pm_runtime_get_noresume(&xd->dev);
+ pm_runtime_enable(&xd->dev);
+
+ return xd;
+
+err_free_local_uuid:
+ kfree(xd->local_uuid);
+err_free:
+ kfree(xd);
+
+ return NULL;
+}
+
+/**
+ * tb_xdomain_add() - Add XDomain to the bus
+ * @xd: XDomain to add
+ *
+ * This function starts XDomain discovery protocol handshake and
+ * eventually adds the XDomain to the bus. After calling this function
+ * the caller needs to call tb_xdomain_remove() in order to remove and
+ * release the object regardless of whether the handshake succeeded or not.
+ */
+void tb_xdomain_add(struct tb_xdomain *xd)
+{
+ /* Start exchanging properties with the other host */
+ start_handshake(xd);
+}
+
+static int unregister_service(struct device *dev, void *data)
+{
+ device_unregister(dev);
+ return 0;
+}
+
+/**
+ * tb_xdomain_remove() - Remove XDomain from the bus
+ * @xd: XDomain to remove
+ *
+ * This will stop all ongoing configuration work and remove the XDomain
+ * along with any services from the bus. When the last reference to @xd
+ * is released the object will be released as well.
+ */
+void tb_xdomain_remove(struct tb_xdomain *xd)
+{
+ stop_handshake(xd);
+
+ device_for_each_child_reverse(&xd->dev, xd, unregister_service);
+
+ /*
+ * Undo runtime PM here explicitly because it is possible that
+ * the XDomain was never added to the bus and thus device_del()
+ * is not called for it (device_del() would handle this otherwise).
+ */
+ pm_runtime_disable(&xd->dev);
+ pm_runtime_put_noidle(&xd->dev);
+ pm_runtime_set_suspended(&xd->dev);
+
+ if (!device_is_registered(&xd->dev))
+ put_device(&xd->dev);
+ else
+ device_unregister(&xd->dev);
+}
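+
+/*
+ * Illustrative sketch only (not used by the driver): roughly how a
+ * connection manager could drive the XDomain lifecycle with the helpers
+ * above. The example_cm_*() names and the way the pointer is stored in
+ * the port are hypothetical; only tb_xdomain_alloc(), tb_xdomain_add(),
+ * tb_xdomain_remove() and tb_xdomain_put() are the real entry points.
+ */
+static void __maybe_unused example_cm_host_connected(struct tb *tb,
+        struct tb_switch *sw, u64 route, const uuid_t *local_uuid,
+        const uuid_t *remote_uuid)
+{
+        struct tb_xdomain *xd;
+
+        xd = tb_xdomain_alloc(tb, &sw->dev, route, local_uuid, remote_uuid);
+        if (!xd)
+                return;
+
+        /* Starts the discovery handshake; @xd is added to the bus later */
+        tb_xdomain_add(xd);
+        tb_port_at(route, sw)->xdomain = xd;
+}
+
+static void __maybe_unused example_cm_host_disconnected(struct tb_port *port)
+{
+        struct tb_xdomain *xd = port->xdomain;
+
+        if (!xd)
+                return;
+
+        port->xdomain = NULL;
+        /* Stops the handshake and removes @xd and its services from the bus */
+        tb_xdomain_remove(xd);
+}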
+
+/**
+ * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ * @transmit_path: HopID of the transmit path the other end is using to
+ * send packets
+ * @transmit_ring: DMA ring used to receive packets from the other end
+ * @receive_path: HopID of the receive path the other end is using to
+ * receive packets
+ * @receive_ring: DMA ring used to send packets to the other end
+ *
+ * The function enables the DMA paths so that after a successful return
+ * the caller can send and receive packets over the high-speed DMA
+ * path.
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
+ u16 transmit_ring, u16 receive_path,
+ u16 receive_ring)
+{
+ int ret;
+
+ mutex_lock(&xd->lock);
+
+ if (xd->transmit_path) {
+ ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
+ goto exit_unlock;
+ }
+
+ xd->transmit_path = transmit_path;
+ xd->transmit_ring = transmit_ring;
+ xd->receive_path = receive_path;
+ xd->receive_ring = receive_ring;
+
+ ret = tb_domain_approve_xdomain_paths(xd->tb, xd);
+
+exit_unlock:
+ mutex_unlock(&xd->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
+
+/**
+ * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ *
+ * This does the opposite of tb_xdomain_enable_paths(). After a call to
+ * this function the caller is not expected to use the rings anymore.
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_xdomain_disable_paths(struct tb_xdomain *xd)
+{
+ int ret = 0;
+
+ mutex_lock(&xd->lock);
+ if (xd->transmit_path) {
+ xd->transmit_path = 0;
+ xd->transmit_ring = 0;
+ xd->receive_path = 0;
+ xd->receive_ring = 0;
+
+ ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
+ }
+ mutex_unlock(&xd->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
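+
+/*
+ * Illustrative sketch only: how a service driver could bracket its use of
+ * the NHI rings with the two helpers above. The EXAMPLE_* HopIDs and ring
+ * indices are made-up values a real driver would negotiate with the remote
+ * host through its own protocol.
+ */
+#define EXAMPLE_TRANSMIT_HOPID	8
+#define EXAMPLE_TRANSMIT_RING	1
+#define EXAMPLE_RECEIVE_HOPID	8
+#define EXAMPLE_RECEIVE_RING	1
+
+static int __maybe_unused example_service_start_dma(struct tb_xdomain *xd)
+{
+        int ret;
+
+        ret = tb_xdomain_enable_paths(xd, EXAMPLE_TRANSMIT_HOPID,
+                                      EXAMPLE_TRANSMIT_RING,
+                                      EXAMPLE_RECEIVE_HOPID,
+                                      EXAMPLE_RECEIVE_RING);
+        if (ret)
+                return ret;
+
+        /* From here on the rings can be used for high-speed DMA traffic */
+        return 0;
+}
+
+static void __maybe_unused example_service_stop_dma(struct tb_xdomain *xd)
+{
+        /* After this the rings must not be used anymore */
+        tb_xdomain_disable_paths(xd);
+}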
+
+struct tb_xdomain_lookup {
+ const uuid_t *uuid;
+ u8 link;
+ u8 depth;
+ u64 route;
+};
+
+static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
+ const struct tb_xdomain_lookup *lookup)
+{
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ struct tb_xdomain *xd;
+
+ if (port->xdomain) {
+ xd = port->xdomain;
+
+ if (lookup->uuid) {
+ if (xd->remote_uuid &&
+ uuid_equal(xd->remote_uuid, lookup->uuid))
+ return xd;
+ } else if (lookup->link &&
+ lookup->link == xd->link &&
+ lookup->depth == xd->depth) {
+ return xd;
+ } else if (lookup->route &&
+ lookup->route == xd->route) {
+ return xd;
+ }
+ } else if (tb_port_has_remote(port)) {
+ xd = switch_find_xdomain(port->remote->sw, lookup);
+ if (xd)
+ return xd;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
+ * @tb: Domain the XDomain belongs to
+ * @uuid: UUID to look for
+ *
+ * Finds XDomain by walking through the Thunderbolt topology below @tb.
+ * The returned XDomain will have its reference count increased so the
+ * caller needs to call tb_xdomain_put() when it is done with the
+ * object.
+ *
+ * This will find all XDomains including the ones that are not yet added
+ * to the bus (handshake is still in progress).
+ *
+ * The caller needs to hold @tb->lock.
+ */
+struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
+{
+ struct tb_xdomain_lookup lookup;
+ struct tb_xdomain *xd;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.uuid = uuid;
+
+ xd = switch_find_xdomain(tb->root_switch, &lookup);
+ return tb_xdomain_get(xd);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
+
+/**
+ * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
+ * @tb: Domain the XDomain belongs to
+ * @link: Root switch link number
+ * @depth: Depth in the link
+ *
+ * Finds XDomain by walking through the Thunderbolt topology below @tb.
+ * The returned XDomain will have its reference count increased so the
+ * caller needs to call tb_xdomain_put() when it is done with the
+ * object.
+ *
+ * This will find all XDomains including the ones that are not yet added
+ * to the bus (handshake is still in progress).
+ *
+ * The caller needs to hold @tb->lock.
+ */
+struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
+ u8 depth)
+{
+ struct tb_xdomain_lookup lookup;
+ struct tb_xdomain *xd;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.link = link;
+ lookup.depth = depth;
+
+ xd = switch_find_xdomain(tb->root_switch, &lookup);
+ return tb_xdomain_get(xd);
+}
+
+/**
+ * tb_xdomain_find_by_route() - Find an XDomain by route string
+ * @tb: Domain the XDomain belongs to
+ * @route: XDomain route string
+ *
+ * Finds XDomain by walking through the Thunderbolt topology below @tb.
+ * The returned XDomain will have its reference count increased so the
+ * caller needs to call tb_xdomain_put() when it is done with the
+ * object.
+ *
+ * This will find all XDomains including the ones that are not yet added
+ * to the bus (handshake is still in progress).
+ *
+ * The caller needs to hold @tb->lock.
+ */
+struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
+{
+ struct tb_xdomain_lookup lookup;
+ struct tb_xdomain *xd;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.route = route;
+
+ xd = switch_find_xdomain(tb->root_switch, &lookup);
+ return tb_xdomain_get(xd);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
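+
+/*
+ * Illustrative sketch only: looking up an XDomain by UUID with the finder
+ * above. The locking follows the documented requirement that the caller
+ * holds @tb->lock; the function name is hypothetical.
+ */
+static __maybe_unused struct tb_xdomain *
+example_lookup_xdomain(struct tb *tb, const uuid_t *uuid)
+{
+        struct tb_xdomain *xd;
+
+        mutex_lock(&tb->lock);
+        xd = tb_xdomain_find_by_uuid(tb, uuid);
+        mutex_unlock(&tb->lock);
+
+        /*
+         * The reference count of the returned XDomain is increased, so
+         * the caller releases it with tb_xdomain_put() when done.
+         */
+        return xd;
+}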
+
+bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
+ const void *buf, size_t size)
+{
+ const struct tb_protocol_handler *handler, *tmp;
+ const struct tb_xdp_header *hdr = buf;
+ unsigned int length;
+ int ret = 0;
+
+ /* We expect the packet to be at least the size of the header */
+ length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
+ if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
+ return true;
+ if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
+ return true;
+
+ /*
+ * Handle XDomain discovery protocol packets directly here. For
+ * other protocols (based on their UUID) we call registered
+ * handlers in turn.
+ */
+ if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
+ if (type == TB_CFG_PKG_XDOMAIN_REQ)
+ return tb_xdp_schedule_request(tb, hdr, size);
+ return false;
+ }
+
+ mutex_lock(&xdomain_lock);
+ list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
+ if (!uuid_equal(&hdr->uuid, handler->uuid))
+ continue;
+
+ mutex_unlock(&xdomain_lock);
+ ret = handler->callback(buf, size, handler->data);
+ mutex_lock(&xdomain_lock);
+
+ if (ret)
+ break;
+ }
+ mutex_unlock(&xdomain_lock);
+
+ return ret > 0;
+}
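+
+/*
+ * Illustrative sketch only: a protocol handler of the kind dispatched by
+ * tb_xdomain_handle_request() above. The UUID value and the handler body
+ * are made up; registration is assumed to go through
+ * tb_register_protocol_handler() provided earlier in this file.
+ */
+static const uuid_t example_protocol_uuid =
+        UUID_INIT(0x00000000, 0x0000, 0x0000,
+                  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+
+static int example_protocol_callback(const void *buf, size_t size, void *data)
+{
+        /*
+         * Inspect the packet here. Returning a positive value tells
+         * tb_xdomain_handle_request() that the packet was consumed and
+         * should not be passed to further handlers.
+         */
+        return 1;
+}
+
+static struct tb_protocol_handler example_protocol_handler __maybe_unused = {
+        .uuid = &example_protocol_uuid,
+        .callback = example_protocol_callback,
+        .data = NULL,
+};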
+
+static int update_xdomain(struct device *dev, void *data)
+{
+ struct tb_xdomain *xd;
+
+ xd = tb_to_xdomain(dev);
+ if (xd) {
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(50));
+ }
+
+ return 0;
+}
+
+static void update_all_xdomains(void)
+{
+ bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
+}
+
+static bool remove_directory(const char *key, const struct tb_property_dir *dir)
+{
+ struct tb_property *p;
+
+ p = tb_property_find(xdomain_property_dir, key,
+ TB_PROPERTY_TYPE_DIRECTORY);
+ if (p && p->value.dir == dir) {
+ tb_property_remove(p);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * tb_register_property_dir() - Register property directory to the host
+ * @key: Key (name) of the directory to add
+ * @dir: Directory to add
+ *
+ * Service drivers can use this function to add new property directory
+ * to the host available properties. The other connected hosts are
+ * notified so they can re-read properties of this host if they are
+ * interested.
+ *
+ * Return: %0 on success and negative errno on failure
+ */
+int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
+{
+ int ret;
+
+ if (WARN_ON(!xdomain_property_dir))
+ return -EAGAIN;
+
+ if (!key || strlen(key) > 8)
+ return -EINVAL;
+
+ mutex_lock(&xdomain_lock);
+ if (tb_property_find(xdomain_property_dir, key,
+ TB_PROPERTY_TYPE_DIRECTORY)) {
+ ret = -EEXIST;
+ goto err_unlock;
+ }
+
+ ret = tb_property_add_dir(xdomain_property_dir, key, dir);
+ if (ret)
+ goto err_unlock;
+
+ ret = rebuild_property_block();
+ if (ret) {
+ remove_directory(key, dir);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&xdomain_lock);
+ update_all_xdomains();
+ return 0;
+
+err_unlock:
+ mutex_unlock(&xdomain_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tb_register_property_dir);
+
+/**
+ * tb_unregister_property_dir() - Remove property directory from the host
+ * @key: Key (name) of the directory
+ * @dir: Directory to remove
+ *
+ * This will remove the existing directory from this host and notify the
+ * connected hosts about the change.
+ */
+void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
+{
+ int ret = 0;
+
+ mutex_lock(&xdomain_lock);
+ if (remove_directory(key, dir))
+ ret = rebuild_property_block();
+ mutex_unlock(&xdomain_lock);
+
+ if (!ret)
+ update_all_xdomains();
+}
+EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
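+
+/*
+ * Illustrative sketch only: how a service driver could publish its own
+ * property directory with the helpers above. The key "example" and the
+ * property names/values are made up; tb_property_create_dir(),
+ * tb_property_add_immediate(), tb_property_add_text() and
+ * tb_property_free_dir() are the property helpers used elsewhere in this
+ * file.
+ */
+static __maybe_unused struct tb_property_dir *example_publish_properties(void)
+{
+        struct tb_property_dir *dir;
+        int ret;
+
+        dir = tb_property_create_dir(NULL);
+        if (!dir)
+                return NULL;
+
+        tb_property_add_immediate(dir, "prtcid", 1);
+        tb_property_add_text(dir, "prtcname", "example/protocol");
+
+        /* The key must be non-NULL and at most 8 characters long */
+        ret = tb_register_property_dir("example", dir);
+        if (ret) {
+                tb_property_free_dir(dir);
+                return NULL;
+        }
+
+        return dir;
+}
+
+static void __maybe_unused
+example_unpublish_properties(struct tb_property_dir *dir)
+{
+        tb_unregister_property_dir("example", dir);
+        tb_property_free_dir(dir);
+}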
+
+int tb_xdomain_init(void)
+{
+ xdomain_property_dir = tb_property_create_dir(NULL);
+ if (!xdomain_property_dir)
+ return -ENOMEM;
+
+ /*
+ * Initialize the standard set of properties without any service
+ * directories. Those will be added by service drivers
+ * themselves when they are loaded.
+ *
+ * The node name is also added later, when the first connection is made.
+ */
+ tb_property_add_immediate(xdomain_property_dir, "vendorid",
+ PCI_VENDOR_ID_INTEL);
+ tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
+ tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
+ tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
+
+ return 0;
+}
+
+void tb_xdomain_exit(void)
+{
+ kfree(xdomain_property_block);
+ tb_property_free_dir(xdomain_property_dir);
+}