Diffstat (limited to 'drivers/thunderbolt/tb.c')
 drivers/thunderbolt/tb.c | 813 ++++++++++++++++++++++++++++++--------------
 1 file changed, 493 insertions(+), 320 deletions(-)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 525f515e8b..3e44c78ac4 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -17,6 +17,7 @@
#include "tunnel.h"
#define TB_TIMEOUT 100 /* ms */
+#define TB_RELEASE_BW_TIMEOUT 10000 /* ms */
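The new timeout is consumed further down in this patch when a group's release work is (re)armed; for reference, the call looks like this (names taken from the diff itself):
	mod_delayed_work(system_wq, &group->release_work,
			 msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
Because mod_delayed_work() re-arms an already pending work item, every new reservation pushes the eventual release out by another 10 seconds.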
/*
* Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
@@ -75,112 +76,6 @@ struct tb_hotplug_event {
bool unplug;
};
-static void tb_init_bandwidth_groups(struct tb_cm *tcm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- group->tb = tcm_to_tb(tcm);
- group->index = i + 1;
- INIT_LIST_HEAD(&group->ports);
- }
-}
-
-static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
- struct tb_port *in)
-{
- if (!group || WARN_ON(in->group))
- return;
-
- in->group = group;
- list_add_tail(&in->group_list, &group->ports);
-
- tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
-}
-
-static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- if (list_empty(&group->ports))
- return group;
- }
-
- return NULL;
-}
-
-static struct tb_bandwidth_group *
-tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
- struct tb_port *out)
-{
- struct tb_bandwidth_group *group;
- struct tb_tunnel *tunnel;
-
- /*
- * Find all DP tunnels that go through all the same USB4 links
- * as this one. Because we always setup tunnels the same way we
- * can just check for the routers at both ends of the tunnels
- * and if they are the same we have a match.
- */
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- if (!tb_tunnel_is_dp(tunnel))
- continue;
-
- if (tunnel->src_port->sw == in->sw &&
- tunnel->dst_port->sw == out->sw) {
- group = tunnel->src_port->group;
- if (group) {
- tb_bandwidth_group_attach_port(group, in);
- return group;
- }
- }
- }
-
- /* Pick up next available group then */
- group = tb_find_free_bandwidth_group(tcm);
- if (group)
- tb_bandwidth_group_attach_port(group, in);
- else
- tb_port_warn(in, "no available bandwidth groups\n");
-
- return group;
-}
-
-static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
- struct tb_port *out)
-{
- if (usb4_dp_port_bandwidth_mode_enabled(in)) {
- int index, i;
-
- index = usb4_dp_port_group_id(in);
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- if (tcm->groups[i].index == index) {
- tb_bandwidth_group_attach_port(&tcm->groups[i], in);
- return;
- }
- }
- }
-
- tb_attach_bandwidth_group(tcm, in, out);
-}
-
-static void tb_detach_bandwidth_group(struct tb_port *in)
-{
- struct tb_bandwidth_group *group = in->group;
-
- if (group) {
- in->group = NULL;
- list_del_init(&in->group_list);
-
- tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
- }
-}
-
static void tb_handle_hotplug(struct work_struct *work);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
@@ -472,34 +367,6 @@ static void tb_switch_discover_tunnels(struct tb_switch *sw,
}
}
-static void tb_discover_tunnels(struct tb *tb)
-{
- struct tb_cm *tcm = tb_priv(tb);
- struct tb_tunnel *tunnel;
-
- tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
-
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- if (tb_tunnel_is_pci(tunnel)) {
- struct tb_switch *parent = tunnel->dst_port->sw;
-
- while (parent != tunnel->src_port->sw) {
- parent->boot = true;
- parent = tb_switch_parent(parent);
- }
- } else if (tb_tunnel_is_dp(tunnel)) {
- struct tb_port *in = tunnel->src_port;
- struct tb_port *out = tunnel->dst_port;
-
- /* Keep the domain from powering down */
- pm_runtime_get_sync(&in->sw->dev);
- pm_runtime_get_sync(&out->sw->dev);
-
- tb_discover_bandwidth_group(tcm, in, out);
- }
- }
-}
-
static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
if (tb_switch_is_usb4(port->sw))
@@ -681,6 +548,10 @@ static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
* Calculates consumed DP bandwidth at @port on the path from @src_port
* to @dst_port. Does not take the tunnel starting from @src_port and
* ending at @dst_port into account.
+ *
+ * If there is bandwidth reserved for any of the groups between
+ * @src_port and @dst_port (but not yet used) that is also taken into
+ * account in the returned consumed bandwidth.
*/
static int tb_consumed_dp_bandwidth(struct tb *tb,
struct tb_port *src_port,
@@ -689,9 +560,11 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
int *consumed_up,
int *consumed_down)
{
+ int group_reserved[MAX_GROUPS] = {};
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
- int ret;
+ bool downstream;
+ int i, ret;
*consumed_up = *consumed_down = 0;
@@ -700,6 +573,7 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
* their consumed bandwidth from the available.
*/
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ const struct tb_bandwidth_group *group;
int dp_consumed_up, dp_consumed_down;
if (tb_tunnel_is_invalid(tunnel))
@@ -712,6 +586,15 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
continue;
/*
+ * Count what is reserved for the groups crossing the
+ * same ports only once, as that reservation covers all
+ * the tunnels in the group.
+ */
+ group = tunnel->src_port->group;
+ if (group && group->reserved && !group_reserved[group->index])
+ group_reserved[group->index] = group->reserved;
+
+ /*
* Ignore the DP tunnel between src_port and dst_port
* because it is the same tunnel and we may be
* re-calculating estimated bandwidth.
@@ -729,6 +612,14 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
*consumed_down += dp_consumed_down;
}
+ downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
+ if (downstream)
+ *consumed_down += group_reserved[i];
+ else
+ *consumed_up += group_reserved[i];
+ }
+
return 0;
}
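To make the per-group accounting above concrete, here is a minimal standalone C sketch (not part of the patch; the struct, the MAX_GROUPS value, and the numbers are invented for illustration). It shows why a group's reservation is counted once even when several tunnels in the same group cross the measured port:

	#include <stdio.h>

	#define MAX_GROUPS 7 /* assumption: matches the driver's group array size */

	struct bw_group { int index; int reserved; };

	/* Count each group's reservation once, even when several tunnels share it. */
	static int total_reserved(const struct bw_group *groups[], int n)
	{
		int group_reserved[MAX_GROUPS + 1] = { 0 };
		int i, total = 0;

		for (i = 0; i < n; i++) {
			const struct bw_group *g = groups[i];

			if (g && g->reserved && !group_reserved[g->index]) {
				group_reserved[g->index] = g->reserved;
				total += g->reserved;
			}
		}
		return total;
	}

	int main(void)
	{
		struct bw_group g1 = { .index = 1, .reserved = 500 };
		/* Two DP tunnels in the same group: the 500 Mb/s counts once. */
		const struct bw_group *tunnels[] = { &g1, &g1 };

		printf("%d Mb/s\n", total_reserved(tunnels, 2)); /* prints 500 */
		return 0;
	}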
@@ -1181,8 +1072,6 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* @tb: Domain structure
* @src_port: Source adapter to start the transition
* @dst_port: Destination adapter
- * @requested_up: New lower bandwidth request upstream (Mb/s)
- * @requested_down: New lower bandwidth request downstream (Mb/s)
* @keep_asym: Keep asymmetric link if preferred
*
* Goes over each link from @src_port to @dst_port and tries to
@@ -1190,8 +1079,7 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* allows and link asymmetric preference is ignored (if @keep_asym is %false).
*/
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
- struct tb_port *dst_port, int requested_up,
- int requested_down, bool keep_asym)
+ struct tb_port *dst_port, bool keep_asym)
{
bool clx = false, clx_disabled = false, downstream;
struct tb_switch *sw;
@@ -1230,10 +1118,10 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
* guard band 10%) as the link was configured asymmetric
* already.
*/
- if (consumed_down + requested_down >= asym_threshold)
+ if (consumed_down >= asym_threshold)
continue;
} else {
- if (consumed_up + requested_up >= asym_threshold)
+ if (consumed_up >= asym_threshold)
continue;
}
@@ -1306,7 +1194,7 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
struct tb_port *host_port;
host_port = tb_port_at(tb_route(sw), tb->root_switch);
- tb_configure_sym(tb, host_port, up, 0, 0, false);
+ tb_configure_sym(tb, host_port, up, false);
}
/* Set the link configured */
@@ -1464,6 +1352,297 @@ out_rpm_put:
}
}
+static void
+tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *first_tunnel;
+ struct tb *tb = group->tb;
+ struct tb_port *in;
+ int ret;
+
+ tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
+ group->index);
+
+ first_tunnel = NULL;
+ list_for_each_entry(in, &group->ports, group_list) {
+ int estimated_bw, estimated_up, estimated_down;
+ struct tb_tunnel *tunnel;
+ struct tb_port *out;
+
+ if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ continue;
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (WARN_ON(!tunnel))
+ break;
+
+ if (!first_tunnel) {
+ /*
+ * Since USB3 bandwidth is shared by all DP
+ * tunnels under the host router USB4 port, even
+ * if they do not begin from the host router, we
+ * can release USB3 bandwidth just once and not
+ * for each tunnel separately.
+ */
+ first_tunnel = tunnel;
+ ret = tb_release_unused_usb3_bandwidth(tb,
+ first_tunnel->src_port, first_tunnel->dst_port);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to release unused bandwidth\n");
+ break;
+ }
+ }
+
+ out = tunnel->dst_port;
+ ret = tb_available_bandwidth(tb, in, out, &estimated_up,
+ &estimated_down, true);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to re-calculate estimated bandwidth\n");
+ break;
+ }
+
+ /*
+ * Estimated bandwidth includes:
+ * - already allocated bandwidth for the DP tunnel
+ * - available bandwidth along the path
+ * - bandwidth allocated for USB 3.x but not used.
+ */
+ if (tb_tunnel_direction_downstream(tunnel))
+ estimated_bw = estimated_down;
+ else
+ estimated_bw = estimated_up;
+
+ /*
+ * If there is reserved bandwidth for the group that is
+ * not yet released we report that too.
+ */
+ tb_tunnel_dbg(tunnel,
+ "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
+ estimated_bw, group->reserved,
+ estimated_bw + group->reserved);
+
+ if (usb4_dp_port_set_estimated_bandwidth(in,
+ estimated_bw + group->reserved))
+ tb_tunnel_warn(tunnel,
+ "failed to update estimated bandwidth\n");
+ }
+
+ if (first_tunnel)
+ tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
+ first_tunnel->dst_port);
+
+ tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
+}
+
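Reading the new debug line with invented numbers: if tb_available_bandwidth() reports 3000 Mb/s in the tunnel's direction and the group still holds a 1000 Mb/s reservation, the DP IN adapter is told an estimated 3000 (+ 1000 reserved) = 4000 Mb/s via usb4_dp_port_set_estimated_bandwidth().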
+static void tb_recalc_estimated_bandwidth(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (!list_empty(&group->ports))
+ tb_recalc_estimated_bandwidth_for_group(group);
+ }
+
+ tb_dbg(tb, "bandwidth re-calculation done\n");
+}
+
+static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
+{
+ if (group->reserved) {
+ tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
+ group->reserved);
+ group->reserved = 0;
+ return true;
+ }
+ return false;
+}
+
+static void __configure_group_sym(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_port *in;
+
+ if (list_empty(&group->ports))
+ return;
+
+ /*
+ * All the tunnels in the group go through the same USB4 links
+ * so we find the first one here and pass the IN and OUT
+ * adapters to tb_configure_sym() which now transitions the
+ * links back to symmetric if bandwidth requirement < asym_threshold.
+ *
+ * We do this here to avoid unnecessary transitions (for example
+ * if the graphics driver released bandwidth for another
+ * tunnel in the same group).
+ */
+ in = list_first_entry(&group->ports, struct tb_port, group_list);
+ tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
+ if (tunnel)
+ tb_configure_sym(group->tb, in, tunnel->dst_port, true);
+}
+
+static void tb_bandwidth_group_release_work(struct work_struct *work)
+{
+ struct tb_bandwidth_group *group =
+ container_of(work, typeof(*group), release_work.work);
+ struct tb *tb = group->tb;
+
+ mutex_lock(&tb->lock);
+ if (__release_group_bandwidth(group))
+ tb_recalc_estimated_bandwidth(tb);
+ __configure_group_sym(group);
+ mutex_unlock(&tb->lock);
+}
+
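Putting the pieces of this patch together, the reservation lifecycle is roughly the following (a summary sketch, not new driver code; all identifiers are from the diff):

	/* DPTX lowers its allocation in tb_alloc_dp_bandwidth() */
	group->reserved += reserved;
	mod_delayed_work(system_wq, &group->release_work,
			 msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));

	/* ~10 s later, tb_bandwidth_group_release_work() runs */
	__release_group_bandwidth(group);	/* reserved -> 0 */
	tb_recalc_estimated_bandwidth(tb);	/* re-advertise to DPTX */
	__configure_group_sym(group);		/* back to symmetric if possible */

	/* last DP IN detached: tb_detach_bandwidth_group() */
	cancel_delayed_work(&group->release_work);
	__release_group_bandwidth(group);

The worker takes tb->lock before touching the group, and tb_deinit() later cancels all the release workers synchronously.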
+static void tb_init_bandwidth_groups(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ group->tb = tcm_to_tb(tcm);
+ group->index = i + 1;
+ INIT_LIST_HEAD(&group->ports);
+ INIT_DELAYED_WORK(&group->release_work,
+ tb_bandwidth_group_release_work);
+ }
+}
+
+static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
+ struct tb_port *in)
+{
+ if (!group || WARN_ON(in->group))
+ return;
+
+ in->group = group;
+ list_add_tail(&in->group_list, &group->ports);
+
+ tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
+}
+
+static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (list_empty(&group->ports))
+ return group;
+ }
+
+ return NULL;
+}
+
+static struct tb_bandwidth_group *
+tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ struct tb_bandwidth_group *group;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Find all DP tunnels that go through all the same USB4 links
+ * as this one. Because we always set up tunnels the same way we
+ * can just check for the routers at both ends of the tunnels
+ * and if they are the same we have a match.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (!tb_tunnel_is_dp(tunnel))
+ continue;
+
+ if (tunnel->src_port->sw == in->sw &&
+ tunnel->dst_port->sw == out->sw) {
+ group = tunnel->src_port->group;
+ if (group) {
+ tb_bandwidth_group_attach_port(group, in);
+ return group;
+ }
+ }
+ }
+
+ /* Otherwise pick the next available group */
+ group = tb_find_free_bandwidth_group(tcm);
+ if (group)
+ tb_bandwidth_group_attach_port(group, in);
+ else
+ tb_port_warn(in, "no available bandwidth groups\n");
+
+ return group;
+}
+
+static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ if (usb4_dp_port_bandwidth_mode_enabled(in)) {
+ int index, i;
+
+ index = usb4_dp_port_group_id(in);
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ if (tcm->groups[i].index == index) {
+ tb_bandwidth_group_attach_port(&tcm->groups[i], in);
+ return;
+ }
+ }
+ }
+
+ tb_attach_bandwidth_group(tcm, in, out);
+}
+
+static void tb_detach_bandwidth_group(struct tb_port *in)
+{
+ struct tb_bandwidth_group *group = in->group;
+
+ if (group) {
+ in->group = NULL;
+ list_del_init(&in->group_list);
+
+ tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
+
+ /* No more tunnels so release the reserved bandwidth if any */
+ if (list_empty(&group->ports)) {
+ cancel_delayed_work(&group->release_work);
+ __release_group_bandwidth(group);
+ }
+ }
+}
+
+static void tb_discover_tunnels(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
+
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_pci(tunnel)) {
+ struct tb_switch *parent = tunnel->dst_port->sw;
+
+ while (parent != tunnel->src_port->sw) {
+ parent->boot = true;
+ parent = tb_switch_parent(parent);
+ }
+ } else if (tb_tunnel_is_dp(tunnel)) {
+ struct tb_port *in = tunnel->src_port;
+ struct tb_port *out = tunnel->dst_port;
+
+ /* Keep the domain from powering down */
+ pm_runtime_get_sync(&in->sw->dev);
+ pm_runtime_get_sync(&out->sw->dev);
+
+ tb_discover_bandwidth_group(tcm, in, out);
+ }
+ }
+}
+
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
struct tb_port *src_port, *dst_port;
@@ -1491,7 +1670,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
* If bandwidth on a link is < asym_threshold
* transition the link to symmetric.
*/
- tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
+ tb_configure_sym(tb, src_port, dst_port, true);
/* Now we can allow the domain to runtime suspend again */
pm_runtime_mark_last_busy(&dst_port->sw->dev);
pm_runtime_put_autosuspend(&dst_port->sw->dev);
@@ -1605,101 +1784,6 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
-static void
-tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
-{
- struct tb_tunnel *first_tunnel;
- struct tb *tb = group->tb;
- struct tb_port *in;
- int ret;
-
- tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
- group->index);
-
- first_tunnel = NULL;
- list_for_each_entry(in, &group->ports, group_list) {
- int estimated_bw, estimated_up, estimated_down;
- struct tb_tunnel *tunnel;
- struct tb_port *out;
-
- if (!usb4_dp_port_bandwidth_mode_enabled(in))
- continue;
-
- tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
- if (WARN_ON(!tunnel))
- break;
-
- if (!first_tunnel) {
- /*
- * Since USB3 bandwidth is shared by all DP
- * tunnels under the host router USB4 port, even
- * if they do not begin from the host router, we
- * can release USB3 bandwidth just once and not
- * for each tunnel separately.
- */
- first_tunnel = tunnel;
- ret = tb_release_unused_usb3_bandwidth(tb,
- first_tunnel->src_port, first_tunnel->dst_port);
- if (ret) {
- tb_tunnel_warn(tunnel,
- "failed to release unused bandwidth\n");
- break;
- }
- }
-
- out = tunnel->dst_port;
- ret = tb_available_bandwidth(tb, in, out, &estimated_up,
- &estimated_down, true);
- if (ret) {
- tb_tunnel_warn(tunnel,
- "failed to re-calculate estimated bandwidth\n");
- break;
- }
-
- /*
- * Estimated bandwidth includes:
- * - already allocated bandwidth for the DP tunnel
- * - available bandwidth along the path
- * - bandwidth allocated for USB 3.x but not used.
- */
- tb_tunnel_dbg(tunnel,
- "re-calculated estimated bandwidth %u/%u Mb/s\n",
- estimated_up, estimated_down);
-
- if (tb_port_path_direction_downstream(in, out))
- estimated_bw = estimated_down;
- else
- estimated_bw = estimated_up;
-
- if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
- tb_tunnel_warn(tunnel,
- "failed to update estimated bandwidth\n");
- }
-
- if (first_tunnel)
- tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
- first_tunnel->dst_port);
-
- tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
-}
-
-static void tb_recalc_estimated_bandwidth(struct tb *tb)
-{
- struct tb_cm *tcm = tb_priv(tb);
- int i;
-
- tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- if (!list_empty(&group->ports))
- tb_recalc_estimated_bandwidth_for_group(group);
- }
-
- tb_dbg(tb, "bandwidth re-calculation done\n");
-}
-
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
struct tb_port *host_port, *port;
@@ -1743,49 +1827,15 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
return NULL;
}
-static bool tb_tunnel_one_dp(struct tb *tb)
+static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
+ struct tb_port *out)
{
int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
- struct tb_port *port, *in, *out;
int consumed_up, consumed_down;
struct tb_tunnel *tunnel;
/*
- * Find pair of inactive DP IN and DP OUT adapters and then
- * establish a DP tunnel between them.
- */
- tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
-
- in = NULL;
- out = NULL;
- list_for_each_entry(port, &tcm->dp_resources, list) {
- if (!tb_port_is_dpin(port))
- continue;
-
- if (tb_port_is_enabled(port)) {
- tb_port_dbg(port, "DP IN in use\n");
- continue;
- }
-
- in = port;
- tb_port_dbg(in, "DP IN available\n");
-
- out = tb_find_dp_out(tb, port);
- if (out)
- break;
- }
-
- if (!in) {
- tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
- return false;
- }
- if (!out) {
- tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
- return false;
- }
-
- /*
* This is only applicable to links that are not bonded (so
* when Thunderbolt 1 hardware is involved somewhere in the
* topology). For these try to share the DP bandwidth between
@@ -1845,15 +1895,19 @@ static bool tb_tunnel_one_dp(struct tb *tb)
goto err_free;
}
+ /* If reading the tunnel's consumed bandwidth fails, tear it down */
+ ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
+ if (ret)
+ goto err_deactivate;
+
list_add_tail(&tunnel->list, &tcm->tunnel_list);
- tb_reclaim_usb3_bandwidth(tb, in, out);
+ tb_reclaim_usb3_bandwidth(tb, in, out);
/*
* Transition the links to asymmetric if the consumption exceeds
* the threshold.
*/
- if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
- tb_configure_asym(tb, in, out, consumed_up, consumed_down);
+ tb_configure_asym(tb, in, out, consumed_up, consumed_down);
/* Update the domain with the new bandwidth estimation */
tb_recalc_estimated_bandwidth(tb);
@@ -1865,6 +1919,8 @@ static bool tb_tunnel_one_dp(struct tb *tb)
tb_increase_tmu_accuracy(tunnel);
return true;
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
err_free:
tb_tunnel_free(tunnel);
err_reclaim_usb:
@@ -1884,13 +1940,43 @@ err_rpm_put:
static void tb_tunnel_dp(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port, *in, *out;
+
if (!tb_acpi_may_tunnel_dp()) {
tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
return;
}
- while (tb_tunnel_one_dp(tb))
- ;
+ /*
+ * Find pair of inactive DP IN and DP OUT adapters and then
+ * establish a DP tunnel between them.
+ */
+ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+
+ in = NULL;
+ out = NULL;
+ list_for_each_entry(port, &tcm->dp_resources, list) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (tb_port_is_enabled(port)) {
+ tb_port_dbg(port, "DP IN in use\n");
+ continue;
+ }
+
+ in = port;
+ tb_port_dbg(in, "DP IN available\n");
+
+ out = tb_find_dp_out(tb, port);
+ if (out)
+ tb_tunnel_one_dp(tb, in, out);
+ else
+ tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
+ }
+
+ if (!in)
+ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
}
static void tb_enter_redrive(struct tb_port *port)
@@ -2296,8 +2382,10 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
int allocated_up, allocated_down, available_up, available_down, ret;
int requested_up_corrected, requested_down_corrected, granularity;
int max_up, max_down, max_up_rounded, max_down_rounded;
+ struct tb_bandwidth_group *group;
struct tb *tb = tunnel->tb;
struct tb_port *in, *out;
+ bool downstream;
ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
if (ret)
@@ -2323,11 +2411,11 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
*/
ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
if (ret)
- return ret;
+ goto fail;
ret = usb4_dp_port_granularity(in);
if (ret < 0)
- return ret;
+ goto fail;
granularity = ret;
max_up_rounded = roundup(max_up, granularity);
@@ -2357,24 +2445,48 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
"bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
requested_up_corrected, requested_down_corrected,
max_up_rounded, max_down_rounded);
- return -ENOBUFS;
+ ret = -ENOBUFS;
+ goto fail;
}
+ downstream = tb_tunnel_direction_downstream(tunnel);
+ group = in->group;
+
if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
(*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
- /*
- * If bandwidth on a link is < asym_threshold transition
- * the link to symmetric.
- */
- tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
- /*
- * If requested bandwidth is less or equal than what is
- * currently allocated to that tunnel we simply change
- * the reservation of the tunnel. Since all the tunnels
- * going out from the same USB4 port are in the same
- * group the released bandwidth will be taken into
- * account for the other tunnels automatically below.
- */
+ if (tunnel->bw_mode) {
+ int reserved;
+ /*
+ * If the requested bandwidth is less than or equal
+ * to what is currently allocated to the tunnel, we
+ * simply change the reservation of the tunnel and
+ * keep the released bandwidth reserved for the
+ * group for the next 10s. After that it is released
+ * for others to use.
+ */
+ if (downstream)
+ reserved = allocated_down - *requested_down;
+ else
+ reserved = allocated_up - *requested_up;
+
+ if (reserved > 0) {
+ group->reserved += reserved;
+ tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
+ group->index, reserved, group->reserved);
+
+ /*
+ * If the release work was not already
+ * pending, schedule it now. If it was,
+ * postpone it for another 10s (unless it
+ * is already running, in which case the
+ * 10s have expired and we should give the
+ * reserved bandwidth back to others).
+ */
+ mod_delayed_work(system_wq, &group->release_work,
+ msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
+ }
+ }
+
return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
}
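A worked example of this branch (numbers invented, granularity rounding ignored): a tunnel in bandwidth allocation mode has allocated_down = 2000 Mb/s and the DPTX now requests only 1000 Mb/s. Then reserved = 2000 - 1000 = 1000 Mb/s is added to group->reserved and the release work is (re)armed for 10 s; until it fires, tb_consumed_dp_bandwidth() keeps reporting that 1000 Mb/s as consumed, so no other group can claim it.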
@@ -2385,7 +2497,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
*/
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
if (ret)
- return ret;
+ goto fail;
/*
* Then go over all tunnels that cross the same USB4 ports (they
@@ -2397,11 +2509,15 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
if (ret)
goto reclaim;
- tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
- available_up, available_down);
+ tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
+ available_up, available_down, group->reserved);
+
+ if ((*requested_up >= 0 &&
+ available_up + group->reserved >= requested_up_corrected) ||
+ (*requested_down >= 0 &&
+ available_down + group->reserved >= requested_down_corrected)) {
+ int released = 0;
- if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
- (*requested_down >= 0 && available_down >= requested_down_corrected)) {
/*
* If bandwidth on a link is >= asym_threshold
* transition the link to asymmetric.
@@ -2409,15 +2525,28 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
ret = tb_configure_asym(tb, in, out, *requested_up,
*requested_down);
if (ret) {
- tb_configure_sym(tb, in, out, 0, 0, true);
- return ret;
+ tb_configure_sym(tb, in, out, true);
+ goto fail;
}
ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
if (ret) {
tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
- tb_configure_sym(tb, in, out, 0, 0, true);
+ tb_configure_sym(tb, in, out, true);
+ }
+
+ if (downstream) {
+ if (*requested_down > available_down)
+ released = *requested_down - available_down;
+ } else {
+ if (*requested_up > available_up)
+ released = *requested_up - available_up;
+ }
+ if (released) {
+ group->reserved -= released;
+ tb_dbg(tb, "group %d released %d total %d Mb/s\n",
+ group->index, released, group->reserved);
}
} else {
ret = -ENOBUFS;
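Continuing the example (numbers invented): if the same DPTX then asks for 1200 Mb/s downstream while available_down is only 500 Mb/s, the check passes because 500 + 1000 >= 1200; the allocation succeeds and released = 1200 - 500 = 700 Mb/s is taken back out of group->reserved, leaving 300 Mb/s reserved. Without the reservation this request would have taken the -ENOBUFS path just above.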
@@ -2425,6 +2554,18 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
reclaim:
tb_reclaim_usb3_bandwidth(tb, in, out);
+fail:
+ if (ret && ret != -ENODEV) {
+ /*
+ * Write back the same allocated bandwidth (no change);
+ * this makes the DPTX request fail on the graphics side.
+ */
+ tb_tunnel_dbg(tunnel,
+ "failing the request by rewriting allocated %d/%d Mb/s\n",
+ allocated_up, allocated_down);
+ tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
+ }
+
return ret;
}
@@ -2432,11 +2573,11 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
int requested_bw, requested_up, requested_down, ret;
- struct tb_port *in, *out;
struct tb_tunnel *tunnel;
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
+ struct tb_port *in;
pm_runtime_get_sync(&tb->dev);
@@ -2459,32 +2600,48 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_port_dbg(in, "handling bandwidth allocation request\n");
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (!tunnel) {
+ tb_port_warn(in, "failed to find tunnel\n");
+ goto put_sw;
+ }
+
if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
- tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ if (tunnel->bw_mode) {
+ /*
+ * Reset the tunnel back to use the legacy
+ * allocation.
+ */
+ tunnel->bw_mode = false;
+ tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
+ } else {
+ tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ }
goto put_sw;
}
ret = usb4_dp_port_requested_bandwidth(in);
if (ret < 0) {
- if (ret == -ENODATA)
- tb_port_dbg(in, "no bandwidth request active\n");
- else
+ if (ret == -ENODATA) {
+ /*
+ * There is no request active so this means the
+ * BW allocation mode was enabled from graphics
+ * side. At this point we know that the graphics
+ * driver has read the DPRX capabilities so we
+ * can offer a better bandwidth estimation.
+ */
+ tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
+ tb_recalc_estimated_bandwidth(tb);
+ } else {
tb_port_warn(in, "failed to read requested bandwidth\n");
+ }
goto put_sw;
}
requested_bw = ret;
tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
- tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
- if (!tunnel) {
- tb_port_warn(in, "failed to find tunnel\n");
- goto put_sw;
- }
-
- out = tunnel->dst_port;
-
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
requested_up = -1;
requested_down = requested_bw;
} else {
@@ -2613,6 +2770,16 @@ static void tb_stop(struct tb *tb)
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
+static void tb_deinit(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ /* Cancel all the release bandwidth workers */
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
+ cancel_delayed_work_sync(&tcm->groups[i].release_work);
+}
+
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
if (tb_is_switch(dev)) {
@@ -2637,6 +2804,7 @@ static int tb_scan_finalize_switch(struct device *dev, void *data)
static int tb_start(struct tb *tb, bool reset)
{
struct tb_cm *tcm = tb_priv(tb);
+ bool discover = true;
int ret;
tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
@@ -2682,9 +2850,13 @@ static int tb_start(struct tb *tb, bool reset)
* reset the ports to handle it as new hotplug for USB4 v1
* routers (for USB4 v2 and beyond we already do host reset).
*/
- if (reset && usb4_switch_version(tb->root_switch) == 1) {
- tb_switch_reset(tb->root_switch);
- } else {
+ if (reset && tb_switch_is_usb4(tb->root_switch)) {
+ discover = false;
+ if (usb4_switch_version(tb->root_switch) == 1)
+ tb_switch_reset(tb->root_switch);
+ }
+
+ if (discover) {
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
@@ -2916,6 +3088,7 @@ static int tb_runtime_resume(struct tb *tb)
static const struct tb_cm_ops tb_cm_ops = {
.start = tb_start,
.stop = tb_stop,
+ .deinit = tb_deinit,
.suspend_noirq = tb_suspend_noirq,
.resume_noirq = tb_resume_noirq,
.freeze_noirq = tb_freeze_noirq,