Merge tag 'thunderbolt-for-v6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next

Mika writes:

thunderbolt: Changes for v6.3 merge window

This includes following Thunderbolt/USB4 changes for the v6.3 merge
window:

  - Add support for DisplayPort bandwidth allocation mode
  - Debug logging improvements
  - Minor cleanups.

All these have been in linux-next with no reported issues.

* tag 'thunderbolt-for-v6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt:
  thunderbolt: Add missing kernel-doc comment to tb_tunnel_maximum_bandwidth()
  thunderbolt: Handle bandwidth allocation mode enablement notification
  thunderbolt: Add support for DisplayPort bandwidth allocation mode
  thunderbolt: Include the additional DP IN double word in debugfs dump
  thunderbolt: Add functions to support DisplayPort bandwidth allocation mode
  thunderbolt: Increase timeout of DP OUT adapter handshake
  thunderbolt: Take CL states into account when waiting for link to come up
  thunderbolt: Improve debug logging in tb_available_bandwidth()
  thunderbolt: Log DP adapter type
  thunderbolt: Use decimal port number in control and tunnel logs too
  thunderbolt: Refactor tb_acpi_add_link()
  thunderbolt: Use correct type in tb_port_is_clx_enabled() prototype
Commit 88e054e8df by Greg Kroah-Hartman, 2023-02-08 12:49:26 +01:00
12 changed files with 1710 additions and 94 deletions

@ -36,16 +36,13 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
* We need to do this because the xHCI driver might not yet be
* bound so the USB3 SuperSpeed ports are not yet created.
*/
-dev = acpi_get_first_physical_node(adev);
-while (!dev) {
-adev = acpi_dev_parent(adev);
-if (!adev)
-break;
-dev = acpi_get_first_physical_node(adev);
-}
-
-if (!dev)
-goto out_put;
+do {
+dev = acpi_get_first_physical_node(adev);
+if (dev)
+break;
+
+adev = acpi_dev_parent(adev);
+} while (adev);

/*
* Check that the device is PCIe. This is because USB3


@ -230,7 +230,6 @@ static int check_config_address(struct tb_cfg_address addr,
static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
struct cfg_error_pkg *pkg = response->buffer;
-struct tb_ctl *ctl = response->ctl;
struct tb_cfg_result res = { 0 };
res.response_route = tb_cfg_get_route(&pkg->header);
res.response_port = 0;
@ -239,13 +238,6 @@ static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
if (res.err)
return res;

-if (pkg->zero1)
-tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
-if (pkg->zero2)
-tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
-if (pkg->zero3)
-tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);
-
res.err = 1;
res.tb_error = pkg->error;
res.response_port = pkg->port;
@ -416,6 +408,7 @@ static int tb_async_error(const struct ctl_pkg *pkg)
case TB_CFG_ERROR_LINK_ERROR:
case TB_CFG_ERROR_HEC_ERROR_DETECTED:
case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
+case TB_CFG_ERROR_DP_BW:
return true;

default:
@ -735,6 +728,47 @@ void tb_ctl_stop(struct tb_ctl *ctl)
/* public interface, commands */
/**
* tb_cfg_ack_notification() - Ack notification
* @ctl: Control channel to use
* @route: Router that originated the event
* @error: Pointer to the notification package
*
* Call this as response for non-plug notification to ack it. Returns
* %0 on success or an error code on failure.
*/
int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
const struct cfg_error_pkg *error)
{
struct cfg_ack_pkg pkg = {
.header = tb_cfg_make_header(route),
};
const char *name;
switch (error->error) {
case TB_CFG_ERROR_LINK_ERROR:
name = "link error";
break;
case TB_CFG_ERROR_HEC_ERROR_DETECTED:
name = "HEC error";
break;
case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
name = "flow control error";
break;
case TB_CFG_ERROR_DP_BW:
name = "DP_BW";
break;
default:
name = "unknown";
break;
}
tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name,
error->error, route);
return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK);
}
/**
* tb_cfg_ack_plug() - Ack hot plug/unplug event
* @ctl: Control channel to use
@ -754,7 +788,7 @@ int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
: TB_CFG_ERROR_PG_HOT_PLUG,
};
-tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
+tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n",
unplug ? "un" : "", route, port);
return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}


@ -122,6 +122,8 @@ static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
return header;
}

+int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
+const struct cfg_error_pkg *error);
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug);
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route);
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,


@ -1159,7 +1159,10 @@ static void port_cap_show(struct tb_port *port, struct seq_file *s,
if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
length = PORT_CAP_PCIE_LEN;
} else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
-length = PORT_CAP_DP_LEN;
+if (usb4_dp_port_bw_mode_supported(port))
+length = PORT_CAP_DP_LEN + 1;
+else
+length = PORT_CAP_DP_LEN;
} else if (tb_port_is_usb3_down(port) ||
tb_port_is_usb3_up(port)) {
length = PORT_CAP_USB3_LEN;


@ -513,36 +513,44 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
while (retries--) {
state = tb_port_state(port);
-if (state < 0)
-return state;
-if (state == TB_PORT_DISABLED) {
+switch (state) {
+case TB_PORT_DISABLED:
tb_port_dbg(port, "is disabled (state: 0)\n");
return 0;
-}
-if (state == TB_PORT_UNPLUGGED) {
+
+case TB_PORT_UNPLUGGED:
if (wait_if_unplugged) {
/* used during resume */
tb_port_dbg(port,
"is unplugged (state: 7), retrying...\n");
msleep(100);
-continue;
+break;
}
tb_port_dbg(port, "is unplugged (state: 7)\n");
return 0;
-}
-if (state == TB_PORT_UP) {
-tb_port_dbg(port, "is connected, link is up (state: 2)\n");
+
+case TB_PORT_UP:
+case TB_PORT_TX_CL0S:
+case TB_PORT_RX_CL0S:
+case TB_PORT_CL1:
+case TB_PORT_CL2:
+tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
return 1;
-}
-
-/*
-* After plug-in the state is TB_PORT_CONNECTING. Give it some
-* time.
-*/
-tb_port_dbg(port,
-"is connected, link is not up (state: %d), retrying...\n",
-state);
-msleep(100);
+
+default:
+if (state < 0)
+return state;
+
+/*
+* After plug-in the state is TB_PORT_CONNECTING. Give it some
+* time.
+*/
+tb_port_dbg(port,
+"is connected, link is not up (state: %d), retrying...\n",
+state);
+msleep(100);
+}
}
tb_port_warn(port,
"failed to reach state TB_PORT_UP. Ignoring port...\n");


@ -16,7 +16,8 @@
#include "tb_regs.h" #include "tb_regs.h"
#include "tunnel.h" #include "tunnel.h"
#define TB_TIMEOUT 100 /* ms */ #define TB_TIMEOUT 100 /* ms */
#define MAX_GROUPS 7 /* max Group_ID is 7 */
/** /**
* struct tb_cm - Simple Thunderbolt connection manager * struct tb_cm - Simple Thunderbolt connection manager
@ -28,12 +29,14 @@
* after cfg has been paused. * after cfg has been paused.
* @remove_work: Work used to remove any unplugged routers after * @remove_work: Work used to remove any unplugged routers after
* runtime resume * runtime resume
* @groups: Bandwidth groups used in this domain.
*/ */
struct tb_cm { struct tb_cm {
struct list_head tunnel_list; struct list_head tunnel_list;
struct list_head dp_resources; struct list_head dp_resources;
bool hotplug_active; bool hotplug_active;
struct delayed_work remove_work; struct delayed_work remove_work;
struct tb_bandwidth_group groups[MAX_GROUPS];
}; };
static inline struct tb *tcm_to_tb(struct tb_cm *tcm) static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
@ -49,6 +52,112 @@ struct tb_hotplug_event {
bool unplug; bool unplug;
}; };
static void tb_init_bandwidth_groups(struct tb_cm *tcm)
{
int i;
for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
struct tb_bandwidth_group *group = &tcm->groups[i];
group->tb = tcm_to_tb(tcm);
group->index = i + 1;
INIT_LIST_HEAD(&group->ports);
}
}
static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
struct tb_port *in)
{
if (!group || WARN_ON(in->group))
return;
in->group = group;
list_add_tail(&in->group_list, &group->ports);
tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
}
static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
{
int i;
for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
struct tb_bandwidth_group *group = &tcm->groups[i];
if (list_empty(&group->ports))
return group;
}
return NULL;
}
static struct tb_bandwidth_group *
tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
struct tb_port *out)
{
struct tb_bandwidth_group *group;
struct tb_tunnel *tunnel;
/*
* Find all DP tunnels that go through all the same USB4 links
* as this one. Because we always setup tunnels the same way we
* can just check for the routers at both ends of the tunnels
* and if they are the same we have a match.
*/
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
if (!tb_tunnel_is_dp(tunnel))
continue;
if (tunnel->src_port->sw == in->sw &&
tunnel->dst_port->sw == out->sw) {
group = tunnel->src_port->group;
if (group) {
tb_bandwidth_group_attach_port(group, in);
return group;
}
}
}
/* Pick up next available group then */
group = tb_find_free_bandwidth_group(tcm);
if (group)
tb_bandwidth_group_attach_port(group, in);
else
tb_port_warn(in, "no available bandwidth groups\n");
return group;
}
static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
struct tb_port *out)
{
if (usb4_dp_port_bw_mode_enabled(in)) {
int index, i;
index = usb4_dp_port_group_id(in);
for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
if (tcm->groups[i].index == index) {
tb_bandwidth_group_attach_port(&tcm->groups[i], in);
return;
}
}
}
tb_attach_bandwidth_group(tcm, in, out);
}
static void tb_detach_bandwidth_group(struct tb_port *in)
{
struct tb_bandwidth_group *group = in->group;
if (group) {
in->group = NULL;
list_del_init(&in->group_list);
tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
}
}
static void tb_handle_hotplug(struct work_struct *work);

static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
@ -193,9 +302,14 @@ static void tb_discover_tunnels(struct tb *tb)
parent = tb_switch_parent(parent);
}
} else if (tb_tunnel_is_dp(tunnel)) {
+struct tb_port *in = tunnel->src_port;
+struct tb_port *out = tunnel->dst_port;
+
/* Keep the domain from powering down */
-pm_runtime_get_sync(&tunnel->src_port->sw->dev);
-pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
+pm_runtime_get_sync(&in->sw->dev);
+pm_runtime_get_sync(&out->sw->dev);
+
+tb_discover_bandwidth_group(tcm, in, out);
}
}
}
@ -350,10 +464,13 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
struct tb_tunnel *tunnel;
struct tb_port *port;

-tb_port_dbg(dst_port, "calculating available bandwidth\n");
+tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
+tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
+dst_port->port);

tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
-if (tunnel) {
+if (tunnel && tunnel->src_port != src_port &&
+tunnel->dst_port != dst_port) {
ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
&usb3_consumed_down);
if (ret)
@ -387,7 +504,8 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
up_bw -= up_bw / 10;
down_bw = up_bw;

-tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
+tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
+down_bw);

/*
* Find all DP tunnels that cross the port and reduce
@ -396,12 +514,24 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
int dp_consumed_up, dp_consumed_down;

+if (tb_tunnel_is_invalid(tunnel))
+continue;
+
if (!tb_tunnel_is_dp(tunnel))
continue;

if (!tb_tunnel_port_on_path(tunnel, port))
continue;

+/*
+* Ignore the DP tunnel between src_port and
+* dst_port because it is the same tunnel and we
+* may be re-calculating estimated bandwidth.
+*/
+if (tunnel->src_port == src_port &&
+tunnel->dst_port == dst_port)
+continue;
+
ret = tb_tunnel_consumed_bandwidth(tunnel,
&dp_consumed_up,
&dp_consumed_down);
@ -762,6 +892,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
switch (tunnel->type) {
case TB_TUNNEL_DP:
+tb_detach_bandwidth_group(src_port);
/*
* In case of DP tunnel make sure the DP IN resource is
* deallocated properly.
@ -879,6 +1010,99 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
static void
tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
{
struct tb_tunnel *first_tunnel;
struct tb *tb = group->tb;
struct tb_port *in;
int ret;
tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
group->index);
first_tunnel = NULL;
list_for_each_entry(in, &group->ports, group_list) {
int estimated_bw, estimated_up, estimated_down;
struct tb_tunnel *tunnel;
struct tb_port *out;
if (!usb4_dp_port_bw_mode_enabled(in))
continue;
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
if (WARN_ON(!tunnel))
break;
if (!first_tunnel) {
/*
* Since USB3 bandwidth is shared by all DP
* tunnels under the host router USB4 port, even
* if they do not begin from the host router, we
* can release USB3 bandwidth just once and not
* for each tunnel separately.
*/
first_tunnel = tunnel;
ret = tb_release_unused_usb3_bandwidth(tb,
first_tunnel->src_port, first_tunnel->dst_port);
if (ret) {
tb_port_warn(in,
"failed to release unused bandwidth\n");
break;
}
}
out = tunnel->dst_port;
ret = tb_available_bandwidth(tb, in, out, &estimated_up,
&estimated_down);
if (ret) {
tb_port_warn(in,
"failed to re-calculate estimated bandwidth\n");
break;
}
/*
* Estimated bandwidth includes:
* - already allocated bandwidth for the DP tunnel
* - available bandwidth along the path
* - bandwidth allocated for USB 3.x but not used.
*/
tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
estimated_up, estimated_down);
if (in->sw->config.depth < out->sw->config.depth)
estimated_bw = estimated_down;
else
estimated_bw = estimated_up;
if (usb4_dp_port_set_estimated_bw(in, estimated_bw))
tb_port_warn(in, "failed to update estimated bandwidth\n");
}
if (first_tunnel)
tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
first_tunnel->dst_port);
tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
}
static void tb_recalc_estimated_bandwidth(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
int i;
tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
struct tb_bandwidth_group *group = &tcm->groups[i];
if (!list_empty(&group->ports))
tb_recalc_estimated_bandwidth_for_group(group);
}
tb_dbg(tb, "bandwidth re-calculation done\n");
}
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
struct tb_port *host_port, *port;
@ -892,7 +1116,7 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
continue;

if (tb_port_is_enabled(port)) {
-tb_port_dbg(port, "in use\n");
+tb_port_dbg(port, "DP OUT in use\n");
continue;
}
@ -941,7 +1165,7 @@ static void tb_tunnel_dp(struct tb *tb)
continue;

if (tb_port_is_enabled(port)) {
-tb_port_dbg(port, "in use\n");
+tb_port_dbg(port, "DP IN in use\n");
continue;
}
@ -993,17 +1217,19 @@
goto err_rpm_put;
}

+if (!tb_attach_bandwidth_group(tcm, in, out))
+goto err_dealloc_dp;
+
/* Make all unused USB3 bandwidth available for the new DP tunnel */
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
if (ret) {
tb_warn(tb, "failed to release unused bandwidth\n");
-goto err_dealloc_dp;
+goto err_detach_group;
}

-ret = tb_available_bandwidth(tb, in, out, &available_up,
-&available_down);
+ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
if (ret)
-goto err_reclaim;
+goto err_reclaim_usb;

tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
available_up, available_down);
@ -1012,7 +1238,7 @@ static void tb_tunnel_dp(struct tb *tb)
available_down);
if (!tunnel) {
tb_port_dbg(out, "could not allocate DP tunnel\n");
-goto err_reclaim;
+goto err_reclaim_usb;
}

if (tb_tunnel_activate(tunnel)) {
@ -1022,6 +1248,10 @@ static void tb_tunnel_dp(struct tb *tb)
list_add_tail(&tunnel->list, &tcm->tunnel_list);
tb_reclaim_usb3_bandwidth(tb, in, out);

+/* Update the domain with the new bandwidth estimation */
+tb_recalc_estimated_bandwidth(tb);
+
/*
* In case of DP tunnel exists, change host router's 1st children
* TMU mode to HiFi for CL0s to work.
@ -1032,8 +1262,10 @@ static void tb_tunnel_dp(struct tb *tb)
err_free:
tb_tunnel_free(tunnel);
-err_reclaim:
+err_reclaim_usb:
tb_reclaim_usb3_bandwidth(tb, in, out);
+err_detach_group:
+tb_detach_bandwidth_group(in);
err_dealloc_dp:
tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
@ -1066,6 +1298,7 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
* See if there is another DP OUT port that can be used for
* to create another tunnel.
*/
+tb_recalc_estimated_bandwidth(tb);
tb_tunnel_dp(tb);
}
@ -1313,6 +1546,7 @@ static void tb_handle_hotplug(struct work_struct *work)
if (port->dual_link_port)
port->dual_link_port->remote = NULL;
/* Maybe we can create another DP tunnel */
+tb_recalc_estimated_bandwidth(tb);
tb_tunnel_dp(tb);
} else if (port->xdomain) {
struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
@ -1370,6 +1604,239 @@ out:
kfree(ev);
}
static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
int *requested_down)
{
int allocated_up, allocated_down, available_up, available_down, ret;
int requested_up_corrected, requested_down_corrected, granularity;
int max_up, max_down, max_up_rounded, max_down_rounded;
struct tb *tb = tunnel->tb;
struct tb_port *in, *out;
ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
if (ret)
return ret;
in = tunnel->src_port;
out = tunnel->dst_port;
tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
allocated_up, allocated_down);
/*
* If we get rounded up request from graphics side, say HBR2 x 4
* that is 17500 instead of 17280 (this is because of the
* granularity), we allow it too. Here the graphics has already
* negotiated with the DPRX the maximum possible rates (which is
* 17280 in this case).
*
* Since the link cannot go higher than 17280 we use that in our
* calculations but the DP IN adapter Allocated BW write must be
* the same value (17500) otherwise the adapter will mark it as
* failed for graphics.
*/
ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
if (ret)
return ret;
ret = usb4_dp_port_granularity(in);
if (ret < 0)
return ret;
granularity = ret;
max_up_rounded = roundup(max_up, granularity);
max_down_rounded = roundup(max_down, granularity);
/*
* This will "fix" the request down to the maximum supported
* rate * lanes if it is at the maximum rounded up level.
*/
requested_up_corrected = *requested_up;
if (requested_up_corrected == max_up_rounded)
requested_up_corrected = max_up;
else if (requested_up_corrected < 0)
requested_up_corrected = 0;
requested_down_corrected = *requested_down;
if (requested_down_corrected == max_down_rounded)
requested_down_corrected = max_down;
else if (requested_down_corrected < 0)
requested_down_corrected = 0;
tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
requested_up_corrected, requested_down_corrected);
if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
(*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
requested_up_corrected, requested_down_corrected,
max_up_rounded, max_down_rounded);
return -ENOBUFS;
}
if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
(*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
/*
* If requested bandwidth is less or equal than what is
* currently allocated to that tunnel we simply change
* the reservation of the tunnel. Since all the tunnels
* going out from the same USB4 port are in the same
* group the released bandwidth will be taken into
* account for the other tunnels automatically below.
*/
return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
}
/*
* More bandwidth is requested. Release all the potential
* bandwidth from USB3 first.
*/
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
if (ret)
return ret;
/*
* Then go over all tunnels that cross the same USB4 ports (they
* are also in the same group but we use the same function here
* that we use with the normal bandwidth allocation).
*/
ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
if (ret)
goto reclaim;
tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
available_up, available_down);
if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
(*requested_down >= 0 && available_down >= requested_down_corrected)) {
ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
} else {
ret = -ENOBUFS;
}
reclaim:
tb_reclaim_usb3_bandwidth(tb, in, out);
return ret;
}
static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
int requested_bw, requested_up, requested_down, ret;
struct tb_port *in, *out;
struct tb_tunnel *tunnel;
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
pm_runtime_get_sync(&tb->dev);
mutex_lock(&tb->lock);
if (!tcm->hotplug_active)
goto unlock;
sw = tb_switch_find_by_route(tb, ev->route);
if (!sw) {
tb_warn(tb, "bandwidth request from non-existent router %llx\n",
ev->route);
goto unlock;
}
in = &sw->ports[ev->port];
if (!tb_port_is_dpin(in)) {
tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
goto unlock;
}
tb_port_dbg(in, "handling bandwidth allocation request\n");
if (!usb4_dp_port_bw_mode_enabled(in)) {
tb_port_warn(in, "bandwidth allocation mode not enabled\n");
goto unlock;
}
ret = usb4_dp_port_requested_bw(in);
if (ret < 0) {
if (ret == -ENODATA)
tb_port_dbg(in, "no bandwidth request active\n");
else
tb_port_warn(in, "failed to read requested bandwidth\n");
goto unlock;
}
requested_bw = ret;
tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
if (!tunnel) {
tb_port_warn(in, "failed to find tunnel\n");
goto unlock;
}
out = tunnel->dst_port;
if (in->sw->config.depth < out->sw->config.depth) {
requested_up = -1;
requested_down = requested_bw;
} else {
requested_up = requested_bw;
requested_down = -1;
}
ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
if (ret) {
if (ret == -ENOBUFS)
tb_port_warn(in, "not enough bandwidth available\n");
else
tb_port_warn(in, "failed to change bandwidth allocation\n");
} else {
tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
requested_up, requested_down);
/* Update other clients about the allocation change */
tb_recalc_estimated_bandwidth(tb);
}
unlock:
mutex_unlock(&tb->lock);
pm_runtime_mark_last_busy(&tb->dev);
pm_runtime_put_autosuspend(&tb->dev);
}
static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
{
struct tb_hotplug_event *ev;
ev = kmalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
return;
ev->tb = tb;
ev->route = route;
ev->port = port;
INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
queue_work(tb->wq, &ev->work);
}
static void tb_handle_notification(struct tb *tb, u64 route,
const struct cfg_error_pkg *error)
{
if (tb_cfg_ack_notification(tb->ctl, route, error))
tb_warn(tb, "could not ack notification on %llx\n", route);
switch (error->error) {
case TB_CFG_ERROR_DP_BW:
tb_queue_dp_bandwidth_request(tb, route, error->port);
break;
default:
/* Ack is enough */
return;
}
}
/*
* tb_schedule_hotplug_handler() - callback function for the control channel
*
@ -1379,15 +1846,19 @@ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
const void *buf, size_t size)
{
const struct cfg_event_pkg *pkg = buf;
-u64 route;
+u64 route = tb_cfg_get_route(&pkg->header);

-if (type != TB_CFG_PKG_EVENT) {
+switch (type) {
+case TB_CFG_PKG_ERROR:
+tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
+return;
+case TB_CFG_PKG_EVENT:
+break;
+default:
tb_warn(tb, "unexpected event %#x, ignoring\n", type);
return;
}

-route = tb_cfg_get_route(&pkg->header);
-
if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
pkg->port);
@ -1817,6 +2288,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
INIT_LIST_HEAD(&tcm->tunnel_list);
INIT_LIST_HEAD(&tcm->dp_resources);
INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
+tb_init_bandwidth_groups(tcm);

tb_dbg(tb, "using software connection manager\n");


@ -223,6 +223,23 @@ struct tb_switch {
enum tb_clx clx;
};
/**
* struct tb_bandwidth_group - Bandwidth management group
* @tb: Pointer to the domain the group belongs to
* @index: Index of the group (aka Group_ID). Valid values %1-%7
* @ports: DP IN adapters belonging to this group are linked here
*
* Any tunnel that requires isochronous bandwidth (that's DP for now) is
* attached to a bandwidth group. All tunnels going through the same
* USB4 links share the same group and can dynamically distribute the
* bandwidth within the group.
*/
struct tb_bandwidth_group {
struct tb *tb;
int index;
struct list_head ports;
};
/**
* struct tb_port - a thunderbolt port, part of a tb_switch
* @config: Cached port configuration read from registers
@ -247,6 +264,9 @@ struct tb_switch {
* @ctl_credits: Buffers reserved for control path
* @dma_credits: Number of credits allocated for DMA tunneling for all
* DMA paths through this port.
+* @group: Bandwidth allocation group the adapter is assigned to. Only
+* used for DP IN adapters for now.
+* @group_list: The adapter is linked to the group's list of ports through this
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
@ -272,6 +292,8 @@ struct tb_port {
unsigned int total_credits;
unsigned int ctl_credits;
unsigned int dma_credits;
+struct tb_bandwidth_group *group;
+struct list_head group_list;
};

/**
@ -1047,7 +1069,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width,
int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
-bool tb_port_is_clx_enabled(struct tb_port *port, enum tb_clx clx);
+bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx);

int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
@ -1238,6 +1260,21 @@ int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw);
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id);
bool usb4_dp_port_bw_mode_supported(struct tb_port *port);
bool usb4_dp_port_bw_mode_enabled(struct tb_port *port);
int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported);
int usb4_dp_port_group_id(struct tb_port *port);
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id);
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes);
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes);
int usb4_dp_port_granularity(struct tb_port *port);
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity);
int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw);
int usb4_dp_port_allocated_bw(struct tb_port *port);
int usb4_dp_port_allocate_bw(struct tb_port *port, int bw);
int usb4_dp_port_requested_bw(struct tb_port *port);
static inline bool tb_is_usb4_port_device(const struct device *dev)
{
return dev->type == &usb4_port_device_type;


@ -29,6 +29,7 @@ enum tb_cfg_error {
TB_CFG_ERROR_HEC_ERROR_DETECTED = 12,
TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
TB_CFG_ERROR_LOCK = 15,
+TB_CFG_ERROR_DP_BW = 32,
};

/* common header */
@ -64,14 +65,16 @@ struct cfg_write_pkg {
/* TB_CFG_PKG_ERROR */
struct cfg_error_pkg {
struct tb_cfg_header header;
-enum tb_cfg_error error:4;
-u32 zero1:4;
+enum tb_cfg_error error:8;
u32 port:6;
-u32 zero2:2; /* Both should be zero, still they are different fields. */
-u32 zero3:14;
+u32 reserved:16;
u32 pg:2;
} __packed;

+struct cfg_ack_pkg {
+struct tb_cfg_header header;
+};
+
#define TB_CFG_ERROR_PG_HOT_PLUG 0x2
#define TB_CFG_ERROR_PG_HOT_UNPLUG 0x3
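
Note why the error field grows from 4 to 8 bits here: the new TB_CFG_ERROR_DP_BW code is 32, which does not fit in the old 4-bit field. A minimal standalone sketch of the new bitfield layout (the real packet also starts with the routing header, omitted here):

    #include <stdio.h>

    /* Values taken from the hunk above. */
    enum tb_cfg_error { TB_CFG_ERROR_LOCK = 15, TB_CFG_ERROR_DP_BW = 32 };

    /* Field widths follow the new cfg_error_pkg layout. */
    struct cfg_error_fields {
        unsigned int error:8;     /* was 4 bits; 32 needs 6 bits */
        unsigned int port:6;
        unsigned int reserved:16;
        unsigned int pg:2;
    };

    int main(void)
    {
        struct cfg_error_fields e = { .error = TB_CFG_ERROR_DP_BW, .port = 3 };

        printf("error %u on port %u\n", e.error, e.port);
        return 0;
    }

The old zero2 and zero3 fields collapse into the single 16-bit reserved field, while zero1 is absorbed by the wider error field, keeping the packet the same size.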


@ -50,6 +50,10 @@ enum tb_port_state {
TB_PORT_DISABLED = 0, /* tb_cap_phy.disable == 1 */
TB_PORT_CONNECTING = 1, /* retry */
TB_PORT_UP = 2,
+TB_PORT_TX_CL0S = 3,
+TB_PORT_RX_CL0S = 4,
+TB_PORT_CL1 = 5,
+TB_PORT_CL2 = 6,
TB_PORT_UNPLUGGED = 7,
};
@ -381,15 +385,42 @@ struct tb_regs_port_header {
#define ADP_DP_CS_1_AUX_RX_HOPID_MASK GENMASK(21, 11)
#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11
#define ADP_DP_CS_2 0x02
+#define ADP_DP_CS_2_NRD_MLC_MASK GENMASK(2, 0)
#define ADP_DP_CS_2_HDP BIT(6)
#define ADP_DP_CS_2_NRD_MLR_MASK GENMASK(9, 7)
#define ADP_DP_CS_2_NRD_MLR_SHIFT 7
#define ADP_DP_CS_2_CA BIT(10)
#define ADP_DP_CS_2_GR_MASK GENMASK(12, 11)
#define ADP_DP_CS_2_GR_SHIFT 11
#define ADP_DP_CS_2_GR_0_25G 0x0
#define ADP_DP_CS_2_GR_0_5G 0x1
#define ADP_DP_CS_2_GR_1G 0x2
#define ADP_DP_CS_2_GROUP_ID_MASK GENMASK(15, 13)
#define ADP_DP_CS_2_GROUP_ID_SHIFT 13
#define ADP_DP_CS_2_CM_ID_MASK GENMASK(19, 16)
#define ADP_DP_CS_2_CM_ID_SHIFT 16
#define ADP_DP_CS_2_CMMS BIT(20)
#define ADP_DP_CS_2_ESTIMATED_BW_MASK GENMASK(31, 24)
#define ADP_DP_CS_2_ESTIMATED_BW_SHIFT 24
#define ADP_DP_CS_3 0x03
#define ADP_DP_CS_3_HDPC BIT(9)
#define DP_LOCAL_CAP 0x04
#define DP_REMOTE_CAP 0x05
/* For DP IN adapter */
#define DP_STATUS 0x06
#define DP_STATUS_ALLOCATED_BW_MASK GENMASK(31, 24)
#define DP_STATUS_ALLOCATED_BW_SHIFT 24
/* For DP OUT adapter */
#define DP_STATUS_CTRL 0x06
#define DP_STATUS_CTRL_CMHS BIT(25)
#define DP_STATUS_CTRL_UF BIT(26)
#define DP_COMMON_CAP 0x07
/* Only if DP IN supports BW allocation mode */
#define ADP_DP_CS_8 0x08
#define ADP_DP_CS_8_REQUESTED_BW_MASK GENMASK(7, 0)
#define ADP_DP_CS_8_DPME BIT(30)
#define ADP_DP_CS_8_DR BIT(31)
/*
* DP_COMMON_CAP offsets work also for DP_LOCAL_CAP and DP_REMOTE_CAP
* with exception of DPRX done.
@ -406,7 +437,12 @@ struct tb_regs_port_header {
#define DP_COMMON_CAP_2_LANES 0x1
#define DP_COMMON_CAP_4_LANES 0x2
#define DP_COMMON_CAP_LTTPR_NS BIT(27)
+#define DP_COMMON_CAP_BW_MODE BIT(28)
#define DP_COMMON_CAP_DPRX_DONE BIT(31)
/* Only present if DP IN supports BW allocation mode */
#define ADP_DP_CS_8 0x08
#define ADP_DP_CS_8_DPME BIT(30)
#define ADP_DP_CS_8_DR BIT(31)
/* PCIe adapter registers */
#define ADP_PCIE_CS_0 0x00
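
The two-bit GR field added to ADP_DP_CS_2 above encodes the allocation granularity in 0.25/0.5/1 Gb/s steps. A standalone sketch of decoding it into Mb/s; the helper name and the userspace GENMASK stand-in are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-ins for the kernel macros used by the defines above. */
    #define GENMASK(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))
    #define ADP_DP_CS_2_GR_MASK   GENMASK(12, 11)
    #define ADP_DP_CS_2_GR_SHIFT  11
    #define ADP_DP_CS_2_GR_0_25G  0x0
    #define ADP_DP_CS_2_GR_0_5G   0x1
    #define ADP_DP_CS_2_GR_1G     0x2

    /* Hypothetical helper: translate the GR field of ADP_DP_CS_2 into Mb/s. */
    static int gr_to_mbps(uint32_t adp_dp_cs_2)
    {
        switch ((adp_dp_cs_2 & ADP_DP_CS_2_GR_MASK) >> ADP_DP_CS_2_GR_SHIFT) {
        case ADP_DP_CS_2_GR_0_25G:
            return 250;
        case ADP_DP_CS_2_GR_0_5G:
            return 500;
        case ADP_DP_CS_2_GR_1G:
            return 1000;
        default:
            return -1; /* reserved encoding */
        }
    }

    int main(void)
    {
        uint32_t reg = ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT;

        printf("granularity: %d Mb/s\n", gr_to_mbps(reg)); /* 500 */
        return 0;
    }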


@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/ktime.h>

#include "tunnel.h"
#include "tb.h"
@ -44,12 +45,17 @@
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS 1U

+static bool bw_alloc_mode = true;
+module_param(bw_alloc_mode, bool, 0444);
+MODULE_PARM_DESC(bw_alloc_mode,
+"enable bandwidth allocation mode if supported (default: true)");
+
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
do { \
struct tb_tunnel *__tunnel = (tunnel); \
-level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt, \
+level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \
tb_route(__tunnel->src_port->sw), \
__tunnel->src_port->port, \
tb_route(__tunnel->dst_port->sw), \
@ -339,9 +345,10 @@ static bool tb_dp_is_usb4(const struct tb_switch *sw)
return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

-static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
+static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
+int timeout_msec)
{
-int timeout = 10;
+ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
u32 val;
int ret;
@ -368,8 +375,8 @@ static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
return ret;
if (!(val & DP_STATUS_CTRL_CMHS))
return 0;
-usleep_range(10, 100);
-} while (timeout--);
+usleep_range(100, 150);
+} while (ktime_before(ktime_get(), timeout));

return -ETIMEDOUT;
}
@ -519,7 +526,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
* Perform connection manager handshake between IN and OUT ports
* before capabilities exchange can take place.
*/
-ret = tb_dp_cm_handshake(in, out);
+ret = tb_dp_cm_handshake(in, out, 1500);
if (ret)
return ret;
@ -597,6 +604,133 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
in->cap_adap + DP_REMOTE_CAP, 1);
}
static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
{
int ret, estimated_bw, granularity, tmp;
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
u32 out_dp_cap, out_rate, out_lanes;
u32 in_dp_cap, in_rate, in_lanes;
u32 rate, lanes;
if (!bw_alloc_mode)
return 0;
ret = usb4_dp_port_set_cm_bw_mode_supported(in, true);
if (ret)
return ret;
ret = usb4_dp_port_set_group_id(in, in->group->index);
if (ret)
return ret;
/*
* Get the non-reduced rate and lanes based on the lowest
* capability of both adapters.
*/
ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
in->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
out->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
in_rate = tb_dp_cap_get_rate(in_dp_cap);
in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
out_rate = tb_dp_cap_get_rate(out_dp_cap);
out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
rate = min(in_rate, out_rate);
lanes = min(in_lanes, out_lanes);
tmp = tb_dp_bandwidth(rate, lanes);
tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate,
lanes, tmp);
ret = usb4_dp_port_set_nrd(in, rate, lanes);
if (ret)
return ret;
for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
granularity *= 2)
;
tb_port_dbg(in, "granularity %d Mb/s\n", granularity);
/*
* Returns -EINVAL if granularity above is outside of the
* accepted ranges.
*/
ret = usb4_dp_port_set_granularity(in, granularity);
if (ret)
return ret;
/*
* Bandwidth estimation is pretty much what we have in
* max_up/down fields. For discovery we just read what the
* estimation was set to.
*/
if (in->sw->config.depth < out->sw->config.depth)
estimated_bw = tunnel->max_down;
else
estimated_bw = tunnel->max_up;
tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);
ret = usb4_dp_port_set_estimated_bw(in, estimated_bw);
if (ret)
return ret;
/* Initial allocation should be 0 according the spec */
ret = usb4_dp_port_allocate_bw(in, 0);
if (ret)
return ret;
tb_port_dbg(in, "bandwidth allocation mode enabled\n");
return 0;
}
static int tb_dp_init(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
struct tb_switch *sw = in->sw;
struct tb *tb = in->sw->tb;
int ret;
ret = tb_dp_xchg_caps(tunnel);
if (ret)
return ret;
if (!tb_switch_is_usb4(sw))
return 0;
if (!usb4_dp_port_bw_mode_supported(in))
return 0;
tb_port_dbg(in, "bandwidth allocation mode supported\n");
ret = usb4_dp_port_set_cm_id(in, tb->index);
if (ret)
return ret;
return tb_dp_bw_alloc_mode_enable(tunnel);
}
static void tb_dp_deinit(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
if (!usb4_dp_port_bw_mode_supported(in))
return;
if (usb4_dp_port_bw_mode_enabled(in)) {
usb4_dp_port_set_cm_bw_mode_supported(in, false);
tb_port_dbg(in, "bandwidth allocation mode disabled\n");
}
}
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
int ret;
@ -634,49 +768,275 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
return 0;
}
/* max_bw is rounded up to next granularity */
static int tb_dp_nrd_bandwidth(struct tb_tunnel *tunnel, int *max_bw)
{
struct tb_port *in = tunnel->src_port;
int ret, rate, lanes, nrd_bw;
ret = usb4_dp_port_nrd(in, &rate, &lanes);
if (ret)
return ret;
nrd_bw = tb_dp_bandwidth(rate, lanes);
if (max_bw) {
ret = usb4_dp_port_granularity(in);
if (ret < 0)
return ret;
*max_bw = roundup(nrd_bw, ret);
}
return nrd_bw;
}
static int tb_dp_bw_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up, int *consumed_down)
{
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
int ret, allocated_bw, max_bw;
if (!usb4_dp_port_bw_mode_enabled(in))
return -EOPNOTSUPP;
if (!tunnel->bw_mode)
return -EOPNOTSUPP;
/* Read what was allocated previously if any */
ret = usb4_dp_port_allocated_bw(in);
if (ret < 0)
return ret;
allocated_bw = ret;
ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
if (ret < 0)
return ret;
if (allocated_bw == max_bw)
allocated_bw = ret;
tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n",
allocated_bw);
if (in->sw->config.depth < out->sw->config.depth) {
*consumed_up = 0;
*consumed_down = allocated_bw;
} else {
*consumed_up = allocated_bw;
*consumed_down = 0;
}
return 0;
}
static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
/*
* If we have already set the allocated bandwidth then use that.
* Otherwise we read it from the DPRX.
*/
if (usb4_dp_port_bw_mode_enabled(in) && tunnel->bw_mode) {
int ret, allocated_bw, max_bw;
ret = usb4_dp_port_allocated_bw(in);
if (ret < 0)
return ret;
allocated_bw = ret;
ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
if (ret < 0)
return ret;
if (allocated_bw == max_bw)
allocated_bw = ret;
if (in->sw->config.depth < out->sw->config.depth) {
*allocated_up = 0;
*allocated_down = allocated_bw;
} else {
*allocated_up = allocated_bw;
*allocated_down = 0;
}
return 0;
}
return tunnel->consumed_bandwidth(tunnel, allocated_up,
allocated_down);
}
static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
int max_bw, ret, tmp;
if (!usb4_dp_port_bw_mode_enabled(in))
return -EOPNOTSUPP;
ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
if (ret < 0)
return ret;
if (in->sw->config.depth < out->sw->config.depth) {
tmp = min(*alloc_down, max_bw);
ret = usb4_dp_port_allocate_bw(in, tmp);
if (ret)
return ret;
*alloc_down = tmp;
*alloc_up = 0;
} else {
tmp = min(*alloc_up, max_bw);
ret = usb4_dp_port_allocate_bw(in, tmp);
if (ret)
return ret;
*alloc_down = 0;
*alloc_up = tmp;
}
/* Now we can use BW mode registers to figure out the bandwidth */
/* TODO: need to handle discovery too */
tunnel->bw_mode = true;
return 0;
}
static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
int timeout_msec)
{
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
struct tb_port *in = tunnel->src_port;
/*
* Wait for DPRX done. Normally it should be already set for
* active tunnel.
*/
do {
u32 val;
int ret;
ret = tb_port_read(in, &val, TB_CFG_PORT,
in->cap_adap + DP_COMMON_CAP, 1);
if (ret)
return ret;
if (val & DP_COMMON_CAP_DPRX_DONE) {
*rate = tb_dp_cap_get_rate(val);
*lanes = tb_dp_cap_get_lanes(val);
tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n",
tb_dp_bandwidth(*rate, *lanes));
return 0;
}
usleep_range(100, 150);
} while (ktime_before(ktime_get(), timeout));
return -ETIMEDOUT;
}
/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
u32 *lanes)
{
struct tb_port *in = tunnel->src_port;
u32 val;
int ret;
switch (cap) {
case DP_LOCAL_CAP:
case DP_REMOTE_CAP:
break;
default:
tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
return -EINVAL;
}
/*
* Read from the copied remote cap so that we take into account
* if capabilities were reduced during exchange.
*/
ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
if (ret)
return ret;
*rate = tb_dp_cap_get_rate(val);
*lanes = tb_dp_cap_get_lanes(val);
tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap,
tb_dp_bandwidth(*rate, *lanes));
return 0;
}
static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
struct tb_port *in = tunnel->src_port;
u32 rate, lanes;
int ret;
/*
* DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX read
* parameter values, so we can use this to determine the
* maximum possible bandwidth over this link.
*/
ret = tb_dp_read_cap(tunnel, DP_LOCAL_CAP, &rate, &lanes);
if (ret)
return ret;
if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
*max_up = 0;
*max_down = tb_dp_bandwidth(rate, lanes);
} else {
*max_up = tb_dp_bandwidth(rate, lanes);
*max_down = 0;
}
return 0;
}
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
struct tb_port *in = tunnel->src_port;
const struct tb_switch *sw = in->sw;
-u32 val, rate = 0, lanes = 0;
+u32 rate = 0, lanes = 0;
int ret;

if (tb_dp_is_usb4(sw)) {
-int timeout = 20;
-
/*
-* Wait for DPRX done. Normally it should be already set
-* for active tunnel.
+* On USB4 routers check if the bandwidth allocation
+* mode is enabled first and then read the bandwidth
+* through those registers.
*/
-do {
-ret = tb_port_read(in, &val, TB_CFG_PORT,
-in->cap_adap + DP_COMMON_CAP, 1);
-if (ret)
-return ret;
-
-if (val & DP_COMMON_CAP_DPRX_DONE) {
-rate = tb_dp_cap_get_rate(val);
-lanes = tb_dp_cap_get_lanes(val);
-break;
-}
-msleep(250);
-} while (timeout--);
-
-if (!timeout)
-return -ETIMEDOUT;
+ret = tb_dp_bw_mode_consumed_bandwidth(tunnel, consumed_up,
+consumed_down);
+if (ret < 0) {
+if (ret != -EOPNOTSUPP)
+return ret;
+} else if (!ret) {
+return 0;
+}
+/*
+* Then see if the DPRX negotiation is ready and if yes
+* return that bandwidth (it may be smaller than the
+* reduced one). Otherwise return the remote (possibly
+* reduced) caps.
+*/
+ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150);
+if (ret) {
+if (ret == -ETIMEDOUT)
+ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
+&rate, &lanes);
+if (ret)
+return ret;
+}
} else if (sw->generation >= 2) {
-/*
-* Read from the copied remote cap so that we take into
-* account if capabilities were reduced during exchange.
-*/
-ret = tb_port_read(in, &val, TB_CFG_PORT,
-in->cap_adap + DP_REMOTE_CAP, 1);
+ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
if (ret)
return ret;
-
-rate = tb_dp_cap_get_rate(val);
-lanes = tb_dp_cap_get_lanes(val);
} else {
/* No bandwidth management for legacy devices */
*consumed_up = 0;
@ -798,8 +1158,12 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;

-tunnel->init = tb_dp_xchg_caps;
+tunnel->init = tb_dp_init;
+tunnel->deinit = tb_dp_deinit;
tunnel->activate = tb_dp_activate;
+tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
+tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
+tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
@ -887,8 +1251,12 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;

-tunnel->init = tb_dp_xchg_caps;
+tunnel->init = tb_dp_init;
+tunnel->deinit = tb_dp_deinit;
tunnel->activate = tb_dp_activate;
+tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
+tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
+tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
tunnel->dst_port = out;
@ -1713,6 +2081,72 @@ static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
return true;
}
/**
* tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
* @tunnel: Tunnel to check
* @max_up: Maximum upstream bandwidth in Mb/s
* @max_down: Maximum downstream bandwidth in Mb/s
*
* Returns maximum possible bandwidth this tunnel can go if not limited
* by other bandwidth clients. If the tunnel does not support this
* returns %-EOPNOTSUPP.
*/
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
if (!tb_tunnel_is_active(tunnel))
return -EINVAL;
if (tunnel->maximum_bandwidth)
return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
return -EOPNOTSUPP;
}
/**
* tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
* @tunnel: Tunnel to check
* @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
* @allocated_down: Currently allocated downstream bandwidth in Mb/s is
* stored here
*
* Returns the bandwidth allocated for the tunnel. This may be higher
* than what the tunnel actually consumes.
*/
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
if (!tb_tunnel_is_active(tunnel))
return -EINVAL;
if (tunnel->allocated_bandwidth)
return tunnel->allocated_bandwidth(tunnel, allocated_up,
allocated_down);
return -EOPNOTSUPP;
}
/**
* tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
* @tunnel: Tunnel whose bandwidth allocation to change
* @alloc_up: New upstream bandwidth in Mb/s
* @alloc_down: New downstream bandwidth in Mb/s
*
* Tries to change tunnel bandwidth allocation. If succeeds returns %0
* and updates @alloc_up and @alloc_down to that was actually allocated
* (it may not be the same as passed originally). Returns negative errno
* in case of failure.
*/
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
if (!tb_tunnel_is_active(tunnel))
return -EINVAL;
if (tunnel->alloc_bandwidth)
return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
return -EOPNOTSUPP;
}
/**
* tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
* @tunnel: Tunnel to check
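
The granularity selection loop in tb_dp_bw_alloc_mode_enable() above picks the smallest step for which the non-reduced bandwidth still fits the 8-bit register fields (value <= 255). A standalone sketch of the same arithmetic, for illustration only:

    #include <stdio.h>

    /*
     * Mirrors the loop in tb_dp_bw_alloc_mode_enable(): start at 250 Mb/s
     * and double (up to 1000 Mb/s) until bandwidth / granularity <= 255.
     */
    static int pick_granularity(int nrd_bw)
    {
        int granularity;

        for (granularity = 250; nrd_bw / granularity > 255 && granularity <= 1000;
             granularity *= 2)
            ;
        return granularity;
    }

    int main(void)
    {
        /* HBR2 x4 = 17280 Mb/s: 17280 / 250 = 69, so 250 Mb/s is enough. */
        printf("HBR2 x4 -> granularity %d Mb/s\n", pick_granularity(17280));
        /* A hypothetical 80000 Mb/s link would need a coarser step (500). */
        printf("80000 Mb/s -> granularity %d Mb/s\n", pick_granularity(80000));
        return 0;
    }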


@ -29,6 +29,9 @@ enum tb_tunnel_type {
* @init: Optional tunnel specific initialization * @init: Optional tunnel specific initialization
* @deinit: Optional tunnel specific de-initialization * @deinit: Optional tunnel specific de-initialization
* @activate: Optional tunnel specific activation/deactivation * @activate: Optional tunnel specific activation/deactivation
* @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel
* @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel
* @alloc_bandwidth: Change tunnel bandwidth allocation
* @consumed_bandwidth: Return how much bandwidth the tunnel consumes * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
* @release_unused_bandwidth: Release all unused bandwidth * @release_unused_bandwidth: Release all unused bandwidth
* @reclaim_available_bandwidth: Reclaim back available bandwidth * @reclaim_available_bandwidth: Reclaim back available bandwidth
@ -40,6 +43,8 @@ enum tb_tunnel_type {
* Only set if the bandwidth needs to be limited. * Only set if the bandwidth needs to be limited.
* @allocated_up: Allocated upstream bandwidth (only for USB3) * @allocated_up: Allocated upstream bandwidth (only for USB3)
* @allocated_down: Allocated downstream bandwidth (only for USB3) * @allocated_down: Allocated downstream bandwidth (only for USB3)
* @bw_mode: DP bandwidth allocation mode registers can be used to
* determine consumed and allocated bandwidth
*/ */
struct tb_tunnel { struct tb_tunnel {
struct tb *tb; struct tb *tb;
@ -50,6 +55,12 @@ struct tb_tunnel {
int (*init)(struct tb_tunnel *tunnel); int (*init)(struct tb_tunnel *tunnel);
void (*deinit)(struct tb_tunnel *tunnel); void (*deinit)(struct tb_tunnel *tunnel);
int (*activate)(struct tb_tunnel *tunnel, bool activate); int (*activate)(struct tb_tunnel *tunnel, bool activate);
int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up,
int *max_down);
int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down);
int (*alloc_bandwidth)(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down);
int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down);
int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
@ -62,6 +73,7 @@ struct tb_tunnel {
int max_down;
int allocated_up;
int allocated_down;
bool bw_mode;
};
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
@ -92,6 +104,12 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
const struct tb_port *port);
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down);
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down);
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down);
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down);
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);

View File

@ -2186,3 +2186,575 @@ err_request:
usb4_usb3_port_clear_cm_request(port);
return ret;
}
static bool is_usb4_dpin(const struct tb_port *port)
{
if (!tb_port_is_dpin(port))
return false;
if (!tb_switch_is_usb4(port->sw))
return false;
return true;
}
/**
* usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter
* @port: DP IN adapter
* @cm_id: CM ID to assign
*
* Sets CM ID for the @port. Returns %0 on success and negative errno
* otherwise. Specifically returns %-EOPNOTSUPP if the @port does not
* support this.
*/
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ~ADP_DP_CS_2_CM_ID_MASK;
val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
/**
* usb4_dp_port_bw_mode_supported() - Is the bandwidth allocation mode supported
* @port: DP IN adapter to check
*
* Can be called for any DP IN adapter. Returns true if the adapter
* supports USB4 bandwidth allocation mode, false otherwise.
*/
bool usb4_dp_port_bw_mode_supported(struct tb_port *port)
{
int ret;
u32 val;
if (!is_usb4_dpin(port))
return false;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return false;
return !!(val & DP_COMMON_CAP_BW_MODE);
}
/**
* usb4_dp_port_bw_mode_enabled() - Is the bandwidth allocation mode enabled
* @port: DP IN adapter to check
*
* Can be called for any DP IN adapter. Returns true if the bandwidth
* allocation mode has been enabled, false otherwise.
*/
bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
{
int ret;
u32 val;
if (!is_usb4_dpin(port))
return false;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_8, 1);
if (ret)
return false;
return !!(val & ADP_DP_CS_8_DPME);
}
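
Taken together, the two predicates above tell the connection manager whether the adapter advertises bandwidth allocation mode and whether the mode is currently active. A minimal sketch (helper name assumed, not part of this change) of how they might be consulted:

/* Sketch, illustrative only: can bandwidth allocation mode be used now? */
static bool example_bw_alloc_usable(struct tb_port *in)
{
	/* The DP IN adapter must advertise the mode in its local caps... */
	if (!usb4_dp_port_bw_mode_supported(in))
		return false;
	/* ...and the mode must actually have been enabled (DPME set) */
	return usb4_dp_port_bw_mode_enabled(in);
}
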
/**
* usb4_dp_port_set_cm_bw_mode_supported() - Set/clear CM support for bandwidth allocation mode
* @port: DP IN adapter
* @supported: Does the CM support bandwidth allocation mode
*
* Can be called for any DP IN adapter. Sets or clears the CM support bit
* of the DP IN adapter. Returns %0 on success and negative errno
* otherwise. Specifically returns %-EOPNOTSUPP if the passed in adapter
* does not support this.
*/
int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
if (supported)
val |= ADP_DP_CS_2_CMMS;
else
val &= ~ADP_DP_CS_2_CMMS;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
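
One plausible enablement sequence, sketched for illustration only (the ordering and the helper name are assumptions; usb4_dp_port_set_group_id() and usb4_dp_port_set_granularity() are introduced further below):

/* Sketch: advertise CM support for bandwidth allocation mode. */
static int example_enable_bw_alloc(struct tb_port *in, int cm_id,
				   int group_id, int granularity)
{
	int ret;

	ret = usb4_dp_port_set_cm_id(in, cm_id);
	if (ret)
		return ret;

	ret = usb4_dp_port_set_group_id(in, group_id);
	if (ret)
		return ret;

	ret = usb4_dp_port_set_granularity(in, granularity);
	if (ret)
		return ret;

	/* Finally tell the adapter that the CM supports the mode */
	return usb4_dp_port_set_cm_bw_mode_supported(in, true);
}
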
/**
* usb4_dp_port_group_id() - Return Group ID assigned for the adapter
* @port: DP IN adapter
*
* Reads bandwidth allocation Group ID from the DP IN adapter and
* returns it. If the adapter does not support setting the Group ID,
* %-EOPNOTSUPP is returned.
*/
int usb4_dp_port_group_id(struct tb_port *port)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT;
}
/**
* usb4_dp_port_set_group_id() - Set adapter Group ID
* @port: DP IN adapter
* @group_id: Group ID for the adapter
*
* Sets bandwidth allocation mode Group ID for the DP IN adapter.
* Returns %0 in case of success and negative errno otherwise.
* Specifically returns %-EOPNOTSUPP if the adapter does not support
* this.
*/
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ~ADP_DP_CS_2_GROUP_ID_MASK;
val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
/**
* usb4_dp_port_nrd() - Read non-reduced rate and lanes
* @port: DP IN adapter
* @rate: Non-reduced rate in Mb/s is placed here
* @lanes: Non-reduced lanes are placed here
*
* Reads the non-reduced rate and lanes from the DP IN adapter. Returns
* %0 on success and negative errno otherwise. Specifically returns
* %-EOPNOTSUPP if the adapter does not support this.
*/
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
{
u32 val, tmp;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT;
switch (tmp) {
case DP_COMMON_CAP_RATE_RBR:
*rate = 1620;
break;
case DP_COMMON_CAP_RATE_HBR:
*rate = 2700;
break;
case DP_COMMON_CAP_RATE_HBR2:
*rate = 5400;
break;
case DP_COMMON_CAP_RATE_HBR3:
*rate = 8100;
break;
}
tmp = val & ADP_DP_CS_2_NRD_MLC_MASK;
switch (tmp) {
case DP_COMMON_CAP_1_LANE:
*lanes = 1;
break;
case DP_COMMON_CAP_2_LANES:
*lanes = 2;
break;
case DP_COMMON_CAP_4_LANES:
*lanes = 4;
break;
}
return 0;
}
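
The non-reduced rate and lane count translate into usable bandwidth with the usual DisplayPort 8b/10b main-link overhead (RBR through HBR3 all use 8b/10b channel coding). A small worked sketch of that arithmetic, with an illustrative helper name:

/* Sketch: usable DP main-link bandwidth in Mb/s for 8b/10b link rates. */
static int example_dp_bandwidth(int rate, int lanes)
{
	return rate * lanes * 8 / 10;
}

For example, HBR2 on four lanes gives 5400 * 4 * 8 / 10 = 17280 Mb/s.
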
/**
* usb4_dp_port_set_nrd() - Set non-reduced rate and lanes
* @port: DP IN adapter
* @rate: Non-reduced rate in Mb/s
* @lanes: Non-reduced lanes
*
* Before the DP IN adapter capabilities are reduced this function can be
* used to set the non-reduced values for the adapter. Returns %0 on
* success and negative errno otherwise. If the adapter does not support
* this, %-EOPNOTSUPP is returned.
*/
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ~ADP_DP_CS_2_NRD_MLR_MASK;
switch (rate) {
case 1620:
break;
case 2700:
val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT)
& ADP_DP_CS_2_NRD_MLR_MASK;
break;
case 5400:
val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT)
& ADP_DP_CS_2_NRD_MLR_MASK;
break;
case 8100:
val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT)
& ADP_DP_CS_2_NRD_MLR_MASK;
break;
default:
return -EINVAL;
}
val &= ~ADP_DP_CS_2_NRD_MLC_MASK;
switch (lanes) {
case 1:
break;
case 2:
val |= DP_COMMON_CAP_2_LANES;
break;
case 4:
val |= DP_COMMON_CAP_4_LANES;
break;
default:
return -EINVAL;
}
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
/**
* usb4_dp_port_granularity() - Return granularity for the bandwidth values
* @port: DP IN adapter
*
* Reads the programmed granularity from @port. Returns %-EOPNOTSUPP if
* the DP IN adapter does not support bandwidth allocation mode, and
* negative errno in other error cases.
*/
int usb4_dp_port_granularity(struct tb_port *port)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ADP_DP_CS_2_GR_MASK;
val >>= ADP_DP_CS_2_GR_SHIFT;
switch (val) {
case ADP_DP_CS_2_GR_0_25G:
return 250;
case ADP_DP_CS_2_GR_0_5G:
return 500;
case ADP_DP_CS_2_GR_1G:
return 1000;
}
return -EINVAL;
}
/**
* usb4_dp_port_set_granularity() - Set granularity for the bandwidth values
* @port: DP IN adapter
* @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
*
* Sets the granularity used with the estimated, allocated and requested
* bandwidth. Returns %0 on success and negative errno otherwise. If the
* adapter does not support this, %-EOPNOTSUPP is returned.
*/
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ~ADP_DP_CS_2_GR_MASK;
switch (granularity) {
case 250:
val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT;
break;
case 500:
val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT;
break;
case 1000:
val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT;
break;
default:
return -EINVAL;
}
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
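
Granularity is what scales every estimated, allocated and requested value exchanged through these registers. A short usage sketch (helper name assumed; usb4_dp_port_set_estimated_bw() follows below and rounds the value down to a multiple of the programmed granularity):

/* Sketch: program a 250 Mb/s granularity, then publish an estimate. */
static int example_publish_estimate(struct tb_port *in, int estimated_bw)
{
	int ret;

	ret = usb4_dp_port_set_granularity(in, 250);
	if (ret)
		return ret;

	/* e.g. 17280 Mb/s is stored as 69 units of 250 Mb/s (17250 Mb/s) */
	return usb4_dp_port_set_estimated_bw(in, estimated_bw);
}
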
/**
* usb4_dp_port_set_estimated_bw() - Set estimated bandwidth
* @port: DP IN adapter
* @bw: Estimated bandwidth in Mb/s.
*
* Sets the estimated bandwidth to @bw. Set the granularity by calling
* usb4_dp_port_set_granularity() before calling this. The @bw is rounded
* down to the closest granularity multiple. Returns %0 on success
* and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
* the adapter does not support this.
*/
int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw)
{
u32 val, granularity;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = usb4_dp_port_granularity(port);
if (ret < 0)
return ret;
granularity = ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK;
val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
/**
* usb4_dp_port_allocated_bw() - Return allocated bandwidth
* @port: DP IN adapter
*
* Reads and returns allocated bandwidth for @port in Mb/s (taking into
* account the programmed granularity). Returns negative errno in case
* of error.
*/
int usb4_dp_port_allocated_bw(struct tb_port *port)
{
u32 val, granularity;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = usb4_dp_port_granularity(port);
if (ret < 0)
return ret;
granularity = ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + DP_STATUS, 1);
if (ret)
return ret;
val &= DP_STATUS_ALLOCATED_BW_MASK;
val >>= DP_STATUS_ALLOCATED_BW_SHIFT;
return val * granularity;
}
static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack)
{
u32 val;
int ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
if (ack)
val |= ADP_DP_CS_2_CA;
else
val &= ~ADP_DP_CS_2_CA;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
static inline int usb4_dp_port_set_cm_ack(struct tb_port *port)
{
return __usb4_dp_port_set_cm_ack(port, true);
}
static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
int timeout_msec)
{
ktime_t end;
u32 val;
int ret;
ret = __usb4_dp_port_set_cm_ack(port, false);
if (ret)
return ret;
end = ktime_add_ms(ktime_get(), timeout_msec);
do {
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_8, 1);
if (ret)
return ret;
if (!(val & ADP_DP_CS_8_DR))
break;
usleep_range(50, 100);
} while (ktime_before(ktime_get(), end));
if (val & ADP_DP_CS_8_DR)
return -ETIMEDOUT;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ~ADP_DP_CS_2_CA;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
/**
* usb4_dp_port_allocate_bw() - Set allocated bandwidth
* @port: DP IN adapter
* @bw: New allocated bandwidth in Mb/s
*
* Communicates the new allocated bandwidth to the DPCD (graphics
* driver). Takes the programmed granularity into account. Returns %0 on
* success and negative errno in case of error.
*/
int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
{
u32 val, granularity;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = usb4_dp_port_granularity(port);
if (ret < 0)
return ret;
granularity = ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + DP_STATUS, 1);
if (ret)
return ret;
val &= ~DP_STATUS_ALLOCATED_BW_MASK;
val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT;
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + DP_STATUS, 1);
if (ret)
return ret;
ret = usb4_dp_port_set_cm_ack(port);
if (ret)
return ret;
return usb4_dp_port_wait_and_clear_cm_ack(port, 500);
}
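
The allocation itself is a small handshake: the value is written to DP_STATUS, the CM ack is raised, and the function waits until the requester clears its request bit before dropping the ack again. A hedged usage sketch (helper name and capping policy are assumptions):

/* Sketch: grant a bandwidth request, capped at an upper bound. */
static int example_grant_bw(struct tb_port *in, int requested, int limit)
{
	/* usb4_dp_port_allocate_bw() performs the ack handshake itself */
	return usb4_dp_port_allocate_bw(in, min(requested, limit));
}
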
/**
* usb4_dp_port_requested_bw() - Read requested bandwidth
* @port: DP IN adapter
*
* Reads the DPCD (graphics driver) requested bandwidth and returns it
* in Mb/s. Takes the programmed granularity into account. In case of
* error returns negative errno. Specifically returns %-EOPNOTSUPP if
* the adapter does not support bandwidth allocation mode, and %-ENODATA
* if there is no active bandwidth request from the graphics driver.
*/
int usb4_dp_port_requested_bw(struct tb_port *port)
{
u32 val, granularity;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = usb4_dp_port_granularity(port);
if (ret < 0)
return ret;
granularity = ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_8, 1);
if (ret)
return ret;
if (!(val & ADP_DP_CS_8_DR))
return -ENODATA;
return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
}
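
To close the loop, a sketch of how a connection manager might service one bandwidth request; the helper name and the availability argument are illustrative, not taken from this change:

/* Sketch: answer a pending bandwidth request from the DPCD side. */
static int example_handle_bw_request(struct tb_port *in, int available)
{
	int req;

	req = usb4_dp_port_requested_bw(in);
	if (req == -ENODATA)
		return 0;	/* nothing pending */
	if (req < 0)
		return req;	/* e.g. -EOPNOTSUPP */

	/* Grant as much of the request as is currently available */
	return usb4_dp_port_allocate_bw(in, min(req, available));
}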