Diffstat (limited to 'drivers/thunderbolt/tunnel.c')
-rw-r--r--  drivers/thunderbolt/tunnel.c | 263
1 file changed, 165 insertions(+), 98 deletions(-)
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index a6810fb368..7534cd3a81 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -21,12 +21,18 @@
#define TB_PCI_PATH_DOWN 0
#define TB_PCI_PATH_UP 1
+#define TB_PCI_PRIORITY 3
+#define TB_PCI_WEIGHT 1
+
/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID 8
#define TB_USB3_PATH_DOWN 0
#define TB_USB3_PATH_UP 1
+#define TB_USB3_PRIORITY 3
+#define TB_USB3_WEIGHT 2
+
/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID 8
#define TB_DP_AUX_RX_HOPID 8
@@ -36,6 +42,12 @@
#define TB_DP_AUX_PATH_OUT 1
#define TB_DP_AUX_PATH_IN 2
+#define TB_DP_VIDEO_PRIORITY 1
+#define TB_DP_VIDEO_WEIGHT 1
+
+#define TB_DP_AUX_PRIORITY 2
+#define TB_DP_AUX_WEIGHT 1
+
/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS 6U
/*
@@ -46,6 +58,18 @@
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS 1
+#define TB_DMA_PRIORITY 5
+#define TB_DMA_WEIGHT 1
+
+/*
+ * Reserve additional bandwidth for USB 3.x and PCIe bulk traffic
+ * according to the USB4 v2 Connection Manager guide. This ends up
+ * reserving 1500 Mb/s for PCIe and 3000 Mb/s for USB 3.x, taking the
+ * path weights into account.
+ */
+#define USB4_V2_PCI_MIN_BANDWIDTH (1500 * TB_PCI_WEIGHT)
+#define USB4_V2_USB3_MIN_BANDWIDTH (1500 * TB_USB3_WEIGHT)
+
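
As a quick sanity check of the comment above, the macros expand to the advertised numbers; a minimal standalone sketch (not kernel code, with the weight values copied from the defines in this patch):

#include <stdio.h>

#define TB_PCI_WEIGHT	1
#define TB_USB3_WEIGHT	2

#define USB4_V2_PCI_MIN_BANDWIDTH	(1500 * TB_PCI_WEIGHT)	/* 1500 Mb/s */
#define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)	/* 3000 Mb/s */

int main(void)
{
	printf("PCIe reserve: %d Mb/s\n", USB4_V2_PCI_MIN_BANDWIDTH);
	printf("USB3 reserve: %d Mb/s\n", USB4_V2_USB3_MIN_BANDWIDTH);
	return 0;
}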
static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
@@ -58,27 +82,6 @@ MODULE_PARM_DESC(bw_alloc_mode,
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
-#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
- do { \
- struct tb_tunnel *__tunnel = (tunnel); \
- level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \
- tb_route(__tunnel->src_port->sw), \
- __tunnel->src_port->port, \
- tb_route(__tunnel->dst_port->sw), \
- __tunnel->dst_port->port, \
- tb_tunnel_names[__tunnel->type], \
- ## arg); \
- } while (0)
-
-#define tb_tunnel_WARN(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
-#define tb_tunnel_warn(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
-#define tb_tunnel_info(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
-#define tb_tunnel_dbg(tunnel, fmt, arg...) \
- __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
-
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
return port->total_credits - port->ctl_credits;
@@ -131,6 +134,16 @@ static unsigned int tb_available_credits(const struct tb_port *port,
return credits > 0 ? credits : 0;
}
+static void tb_init_pm_support(struct tb_path_hop *hop)
+{
+ struct tb_port *out_port = hop->out_port;
+ struct tb_port *in_port = hop->in_port;
+
+ if (tb_port_is_null(in_port) && tb_port_is_null(out_port) &&
+ usb4_switch_version(in_port->sw) >= 2)
+ hop->pm_support = true;
+}
+
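
The helper above only marks a hop when both ends are lane adapters (null ports) on a USB4 v2 router; it is wired into the DP path setup later in this patch. A minimal sketch of the calling pattern, using the kernel's tb_path_for_each_hop() iterator:

/* Sketch: flag power-management support on every hop of a path */
static void init_path_pm_support(struct tb_path *path)
{
	struct tb_path_hop *hop;

	tb_path_for_each_hop(path, hop)
		tb_init_pm_support(hop);
}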
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
enum tb_tunnel_type type)
{
@@ -156,11 +169,11 @@ static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
+ struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
int ret;
/* Only supported if both routers are at least USB4 v2 */
- if (usb4_switch_version(tunnel->src_port->sw) < 2 ||
- usb4_switch_version(tunnel->dst_port->sw) < 2)
+ if (tb_port_get_link_generation(port) < 4)
return 0;
ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
@@ -234,8 +247,8 @@ static int tb_pci_init_path(struct tb_path *path)
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 3;
- path->weight = 1;
+ path->priority = TB_PCI_PRIORITY;
+ path->weight = TB_PCI_WEIGHT;
path->drop_packages = 0;
tb_path_for_each_hop(path, hop) {
@@ -376,6 +389,51 @@ err_free:
return NULL;
}
+/**
+ * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
+ * @port: Lane 0 adapter
+ * @reserved_up: Upstream bandwidth in Mb/s to reserve
+ * @reserved_down: Downstream bandwidth in Mb/s to reserve
+ *
+ * Can be called for any connected lane 0 adapter to find out how much
+ * bandwidth needs to be left in reserve for possible PCIe bulk traffic.
+ * Returns true if there is something to be reserved and writes the
+ * amount to @reserved_down/@reserved_up. Otherwise returns false and
+ * does not touch the parameters.
+ */
+bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
+ int *reserved_down)
+{
+ if (WARN_ON_ONCE(!port->remote))
+ return false;
+
+ if (!tb_acpi_may_tunnel_pcie())
+ return false;
+
+ if (tb_port_get_link_generation(port) < 4)
+ return false;
+
+ /* Must have PCIe adapters */
+ if (tb_is_upstream_port(port)) {
+ if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP))
+ return false;
+ if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN))
+ return false;
+ } else {
+ if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN))
+ return false;
+ if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP))
+ return false;
+ }
+
+ *reserved_up = USB4_V2_PCI_MIN_BANDWIDTH;
+ *reserved_down = USB4_V2_PCI_MIN_BANDWIDTH;
+
+ tb_port_dbg(port, "reserving %u/%u Mb/s for PCIe\n", *reserved_up,
+ *reserved_down);
+ return true;
+}
+
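
A hedged sketch of the intended calling pattern (the budget variables here are hypothetical, not part of this diff; the helper leaves the out-parameters untouched when it returns false, so they are pre-initialized):

int reserved_up = 0, reserved_down = 0;

if (tb_tunnel_reserved_pci(port, &reserved_up, &reserved_down)) {
	/* Leave headroom for PCIe bulk traffic on this link */
	available_up -= reserved_up;
	available_down -= reserved_down;
}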
static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
/* Titan Ridge DP adapters need the same treatment as USB4 */
@@ -614,8 +672,9 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
in_rate = tb_dp_cap_get_rate(in_dp_cap);
in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
- tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
- in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
+ tb_tunnel_dbg(tunnel,
+ "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
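
For reference, the bandwidth figure in these messages comes from tb_dp_bandwidth(), which strips the DP channel-coding overhead; a standalone sketch of the arithmetic (the UHBR cutoff mirrors what this kernel version does, but treat it as an assumption):

#include <stdio.h>

/* rate is per-lane in Mb/s; non-UHBR rates carry 8b/10b encoding,
 * UHBR rates (>= 10000 Mb/s) use 128b/132b */
static unsigned int dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	if (rate >= 10000)
		return rate * lanes * 128 / 132;
	return rate * lanes * 8 / 10;
}

int main(void)
{
	/* HBR3 x4: 8100 * 4 * 8 / 10 = 25920 Mb/s */
	printf("%u Mb/s\n", dp_bandwidth(8100, 4));
	return 0;
}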
/*
* If the tunnel bandwidth is limited (max_bw is set) then see
@@ -624,10 +683,11 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
out_rate = tb_dp_cap_get_rate(out_dp_cap);
out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
bw = tb_dp_bandwidth(out_rate, out_lanes);
- tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
- out_rate, out_lanes, bw);
+ tb_tunnel_dbg(tunnel,
+ "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ out_rate, out_lanes, bw);
- if (in->sw->config.depth < out->sw->config.depth)
+ if (tb_port_path_direction_downstream(in, out))
max_bw = tunnel->max_down;
else
max_bw = tunnel->max_up;
@@ -639,13 +699,14 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
out_rate, out_lanes, &new_rate,
&new_lanes);
if (ret) {
- tb_port_info(out, "not enough bandwidth for DP tunnel\n");
+ tb_tunnel_info(tunnel, "not enough bandwidth\n");
return ret;
}
new_bw = tb_dp_bandwidth(new_rate, new_lanes);
- tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
- new_rate, new_lanes, new_bw);
+ tb_tunnel_dbg(tunnel,
+ "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
+ new_rate, new_lanes, new_bw);
/*
* Set new rate and number of lanes before writing it to
@@ -662,7 +723,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
*/
if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
- tb_port_dbg(out, "disabling LTTPR\n");
+ tb_tunnel_dbg(tunnel, "disabling LTTPR\n");
}
return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
@@ -712,8 +773,8 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
lanes = min(in_lanes, out_lanes);
tmp = tb_dp_bandwidth(rate, lanes);
- tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate,
- lanes, tmp);
+ tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tmp);
ret = usb4_dp_port_set_nrd(in, rate, lanes);
if (ret)
@@ -728,15 +789,15 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
rate = min(in_rate, out_rate);
tmp = tb_dp_bandwidth(rate, lanes);
- tb_port_dbg(in,
- "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
- rate, lanes, tmp);
+ tb_tunnel_dbg(tunnel,
+ "maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tmp);
for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
granularity *= 2)
;
- tb_port_dbg(in, "granularity %d Mb/s\n", granularity);
+ tb_tunnel_dbg(tunnel, "granularity %d Mb/s\n", granularity);
/*
* Returns -EINVAL if granularity above is outside of the
@@ -751,12 +812,12 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
* max_up/down fields. For discovery we just read what the
* estimation was set to.
*/
- if (in->sw->config.depth < out->sw->config.depth)
+ if (tb_port_path_direction_downstream(in, out))
estimated_bw = tunnel->max_down;
else
estimated_bw = tunnel->max_up;
- tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);
+ tb_tunnel_dbg(tunnel, "estimated bandwidth %d Mb/s\n", estimated_bw);
ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
if (ret)
@@ -767,7 +828,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
if (ret)
return ret;
- tb_port_dbg(in, "bandwidth allocation mode enabled\n");
+ tb_tunnel_dbg(tunnel, "bandwidth allocation mode enabled\n");
return 0;
}
@@ -788,7 +849,7 @@ static int tb_dp_init(struct tb_tunnel *tunnel)
if (!usb4_dp_port_bandwidth_mode_supported(in))
return 0;
- tb_port_dbg(in, "bandwidth allocation mode supported\n");
+ tb_tunnel_dbg(tunnel, "bandwidth allocation mode supported\n");
ret = usb4_dp_port_set_cm_id(in, tb->index);
if (ret)
@@ -805,7 +866,7 @@ static void tb_dp_deinit(struct tb_tunnel *tunnel)
return;
if (usb4_dp_port_bandwidth_mode_enabled(in)) {
usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
- tb_port_dbg(in, "bandwidth allocation mode disabled\n");
+ tb_tunnel_dbg(tunnel, "bandwidth allocation mode disabled\n");
}
}
@@ -921,10 +982,7 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
if (allocated_bw == max_bw)
allocated_bw = ret;
- tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n",
- allocated_bw);
-
- if (in->sw->config.depth < out->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, out)) {
*consumed_up = 0;
*consumed_down = allocated_bw;
} else {
@@ -959,7 +1017,7 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
if (allocated_bw == max_bw)
allocated_bw = ret;
- if (in->sw->config.depth < out->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, out)) {
*allocated_up = 0;
*allocated_down = allocated_bw;
} else {
@@ -987,7 +1045,7 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
if (ret < 0)
return ret;
- if (in->sw->config.depth < out->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, out)) {
tmp = min(*alloc_down, max_bw);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
@@ -1006,9 +1064,6 @@ static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
/* Now we can use BW mode registers to figure out the bandwidth */
/* TODO: need to handle discovery too */
tunnel->bw_mode = true;
-
- tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n",
- tmp);
return 0;
}
@@ -1035,8 +1090,7 @@ static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
*rate = tb_dp_cap_get_rate(val);
*lanes = tb_dp_cap_get_lanes(val);
- tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n",
- tb_dp_bandwidth(*rate, *lanes));
+ tb_tunnel_dbg(tunnel, "DPRX read done\n");
return 0;
}
usleep_range(100, 150);
@@ -1073,9 +1127,6 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
*rate = tb_dp_cap_get_rate(val);
*lanes = tb_dp_cap_get_lanes(val);
-
- tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap,
- tb_dp_bandwidth(*rate, *lanes));
return 0;
}
@@ -1092,7 +1143,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
if (ret < 0)
return ret;
- if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
*max_up = 0;
*max_down = ret;
} else {
@@ -1150,7 +1201,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
return 0;
}
- if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
+ if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
*consumed_up = 0;
*consumed_down = tb_dp_bandwidth(rate, lanes);
} else {
@@ -1172,7 +1223,7 @@ static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
hop->initial_credits = 1;
}
-static void tb_dp_init_aux_path(struct tb_path *path)
+static void tb_dp_init_aux_path(struct tb_path *path, bool pm_support)
{
struct tb_path_hop *hop;
@@ -1180,11 +1231,14 @@ static void tb_dp_init_aux_path(struct tb_path *path)
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 2;
- path->weight = 1;
+ path->priority = TB_DP_AUX_PRIORITY;
+ path->weight = TB_DP_AUX_WEIGHT;
- tb_path_for_each_hop(path, hop)
+ tb_path_for_each_hop(path, hop) {
tb_dp_init_aux_credits(hop);
+ if (pm_support)
+ tb_init_pm_support(hop);
+ }
}
static int tb_dp_init_video_credits(struct tb_path_hop *hop)
@@ -1216,7 +1270,7 @@ static int tb_dp_init_video_credits(struct tb_path_hop *hop)
return 0;
}
-static int tb_dp_init_video_path(struct tb_path *path)
+static int tb_dp_init_video_path(struct tb_path *path, bool pm_support)
{
struct tb_path_hop *hop;
@@ -1224,8 +1278,8 @@ static int tb_dp_init_video_path(struct tb_path *path)
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 1;
- path->weight = 1;
+ path->priority = TB_DP_VIDEO_PRIORITY;
+ path->weight = TB_DP_VIDEO_WEIGHT;
tb_path_for_each_hop(path, hop) {
int ret;
@@ -1233,6 +1287,8 @@ static int tb_dp_init_video_path(struct tb_path *path)
ret = tb_dp_init_video_credits(hop);
if (ret)
return ret;
+ if (pm_support)
+ tb_init_pm_support(hop);
}
return 0;
@@ -1253,8 +1309,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
- tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
- rate, lanes, tb_dp_bandwidth(rate, lanes));
+ tb_tunnel_dbg(tunnel,
+ "DP IN maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tb_dp_bandwidth(rate, lanes));
out = tunnel->dst_port;
@@ -1265,8 +1322,9 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
- tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
- rate, lanes, tb_dp_bandwidth(rate, lanes));
+ tb_tunnel_dbg(tunnel,
+ "DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tb_dp_bandwidth(rate, lanes));
if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
in->cap_adap + DP_REMOTE_CAP, 1))
@@ -1275,8 +1333,8 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
- tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
- rate, lanes, tb_dp_bandwidth(rate, lanes));
+ tb_tunnel_dbg(tunnel, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
+ rate, lanes, tb_dp_bandwidth(rate, lanes));
}
/**
@@ -1322,7 +1380,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
goto err_free;
}
tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
- if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
+ if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false))
goto err_free;
path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
@@ -1330,14 +1388,14 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
if (!path)
goto err_deactivate;
tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
- tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
+ tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false);
path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
&port, "AUX RX", alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_DP_AUX_PATH_IN] = path;
- tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
+ tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false);
/* Validate that the tunnel is complete */
if (!tb_port_is_dpout(tunnel->dst_port)) {
@@ -1392,6 +1450,7 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_tunnel *tunnel;
struct tb_path **paths;
struct tb_path *path;
+ bool pm_support;
if (WARN_ON(!in->cap_adap || !out->cap_adap))
return NULL;
@@ -1413,26 +1472,27 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
tunnel->max_down = max_down;
paths = tunnel->paths;
+ pm_support = usb4_switch_version(in->sw) >= 2;
path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
link_nr, "Video");
if (!path)
goto err_free;
- tb_dp_init_video_path(path);
+ tb_dp_init_video_path(path, pm_support);
paths[TB_DP_VIDEO_PATH_OUT] = path;
path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
if (!path)
goto err_free;
- tb_dp_init_aux_path(path);
+ tb_dp_init_aux_path(path, pm_support);
paths[TB_DP_AUX_PATH_OUT] = path;
path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
if (!path)
goto err_free;
- tb_dp_init_aux_path(path);
+ tb_dp_init_aux_path(path, pm_support);
paths[TB_DP_AUX_PATH_IN] = path;
return tunnel;
@@ -1497,8 +1557,8 @@ static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
path->ingress_fc_enable = TB_PATH_ALL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 5;
- path->weight = 1;
+ path->priority = TB_DMA_PRIORITY;
+ path->weight = TB_DMA_WEIGHT;
path->clear_fc = true;
/*
@@ -1531,8 +1591,8 @@ static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
path->ingress_fc_enable = TB_PATH_ALL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 5;
- path->weight = 1;
+ path->priority = TB_DMA_PRIORITY;
+ path->weight = TB_DMA_WEIGHT;
path->clear_fc = true;
tb_path_for_each_hop(path, hop) {
@@ -1758,14 +1818,23 @@ static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up, int *consumed_down)
{
- int pcie_enabled = tb_acpi_may_tunnel_pcie();
+ struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw);
+ int pcie_weight = tb_acpi_may_tunnel_pcie() ? TB_PCI_WEIGHT : 0;
/*
* PCIe tunneling, if enabled, affects the USB3 bandwidth so
* take it into account here.
*/
- *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
- *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
+ *consumed_up = tunnel->allocated_up *
+ (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
+ *consumed_down = tunnel->allocated_down *
+ (TB_USB3_WEIGHT + pcie_weight) / TB_USB3_WEIGHT;
+
+ if (tb_port_get_link_generation(port) >= 4) {
+ *consumed_up = max(*consumed_up, USB4_V2_USB3_MIN_BANDWIDTH);
+ *consumed_down = max(*consumed_down, USB4_V2_USB3_MIN_BANDWIDTH);
+ }
+
return 0;
}
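
Worked numbers for the scaling above: with TB_USB3_WEIGHT=2 and TB_PCI_WEIGHT=1 the factor is 3/2 when PCIe tunneling is enabled, and when tb_port_get_link_generation() reports 4 or newer the result is floored at USB4_V2_USB3_MIN_BANDWIDTH. A standalone sketch:

#include <stdio.h>

#define TB_PCI_WEIGHT			1
#define TB_USB3_WEIGHT			2
#define USB4_V2_USB3_MIN_BANDWIDTH	(1500 * TB_USB3_WEIGHT)

int main(void)
{
	int allocated = 2000;	/* Mb/s, hypothetical USB3 allocation */
	int consumed = allocated * (TB_USB3_WEIGHT + TB_PCI_WEIGHT) /
		       TB_USB3_WEIGHT;		/* 2000 * 3 / 2 = 3000 */

	if (consumed < USB4_V2_USB3_MIN_BANDWIDTH)	/* gen 4 link floor */
		consumed = USB4_V2_USB3_MIN_BANDWIDTH;
	printf("%d Mb/s\n", consumed);			/* 3000 */
	return 0;
}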
@@ -1790,17 +1859,10 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
{
int ret, max_rate, allocate_up, allocate_down;
- ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
+ ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
if (ret < 0) {
- tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
+ tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
return;
- } else if (!ret) {
- /* Use maximum link rate if the link valid is not set */
- ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
- if (ret < 0) {
- tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
- return;
- }
}
/*
@@ -1871,8 +1933,8 @@ static void tb_usb3_init_path(struct tb_path *path)
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
- path->priority = 3;
- path->weight = 3;
+ path->priority = TB_USB3_PRIORITY;
+ path->weight = TB_USB3_WEIGHT;
path->drop_packages = 0;
tb_path_for_each_hop(path, hop)
@@ -2387,3 +2449,8 @@ void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
tunnel->reclaim_available_bandwidth(tunnel, available_up,
available_down);
}
+
+const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel)
+{
+ return tb_tunnel_names[tunnel->type];
+}
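
A short usage sketch for the new helper; the tb_dbg() caller context here is hypothetical, but the exported name matches the definition above:

/* Sketch: name a tunnel in a caller's log message */
tb_dbg(tb, "activating %s tunnel\n", tb_tunnel_type_name(tunnel));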