Diffstat (limited to 'drivers/thunderbolt/tunnel.c')
-rw-r--r--  drivers/thunderbolt/tunnel.c  96
1 file changed, 40 insertions(+), 56 deletions(-)
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 6fffb2c82d3d13..cb6609a56a03f9 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -706,7 +706,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
"DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
out_rate, out_lanes, bw);
- if (tb_port_path_direction_downstream(in, out))
+ if (tb_tunnel_direction_downstream(tunnel))
max_bw = tunnel->max_down;
else
max_bw = tunnel->max_up;
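The tb_tunnel_direction_downstream() helper is not part of this file's diff; presumably it is a static inline added to tunnel.h in the same series. A minimal sketch, assuming it simply forwards the tunnel's own endpoints to the old two-argument check:

    static inline bool
    tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel)
    {
            /* The tunnel runs downstream when the path from the
             * DP IN adapter (src_port) to the DP OUT adapter
             * (dst_port) heads away from the host router.
             */
            return tb_port_path_direction_downstream(tunnel->src_port,
                                                     tunnel->dst_port);
    }

This spares callers from keeping local in/out pointers purely to determine direction, which is what most of the hunks below clean up.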
@@ -831,7 +831,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
* max_up/down fields. For discovery we just read what the
* estimation was set to.
*/
- if (tb_port_path_direction_downstream(in, out))
+ if (tb_tunnel_direction_downstream(tunnel))
estimated_bw = tunnel->max_down;
else
estimated_bw = tunnel->max_up;
@@ -926,12 +926,18 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
return 0;
}
-/* max_bw is rounded up to next granularity */
+/**
+ * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
+ * @tunnel: DP tunnel to check
+ * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
+ *
+ * Returns the maximum possible bandwidth for this tunnel in Mb/s.
+ */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
- int *max_bw)
+ int *max_bw_rounded)
{
struct tb_port *in = tunnel->src_port;
- int ret, rate, lanes, nrd_bw;
+ int ret, rate, lanes, max_bw;
u32 cap;
/*
@@ -947,41 +953,26 @@ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
return ret;
rate = tb_dp_cap_get_rate_ext(cap);
- if (tb_dp_is_uhbr_rate(rate)) {
- /*
- * When UHBR is used there is no reduction in lanes so
- * we can use this directly.
- */
- lanes = tb_dp_cap_get_lanes(cap);
- } else {
- /*
- * If there is no UHBR supported then check the
- * non-reduced rate and lanes.
- */
- ret = usb4_dp_port_nrd(in, &rate, &lanes);
- if (ret)
- return ret;
- }
+ lanes = tb_dp_cap_get_lanes(cap);
- nrd_bw = tb_dp_bandwidth(rate, lanes);
+ max_bw = tb_dp_bandwidth(rate, lanes);
- if (max_bw) {
+ if (max_bw_rounded) {
ret = usb4_dp_port_granularity(in);
if (ret < 0)
return ret;
- *max_bw = roundup(nrd_bw, ret);
+ *max_bw_rounded = roundup(max_bw, ret);
}
- return nrd_bw;
+ return max_bw;
}
static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up,
int *consumed_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- int ret, allocated_bw, max_bw;
+ int ret, allocated_bw, max_bw_rounded;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
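Besides dropping the non-reduced-rate fallback, this hunk renames max_bw to max_bw_rounded to make the contract explicit: usb4_dp_port_granularity() returns the DP IN adapter's allocation granularity in Mb/s, the out-parameter is rounded up to it, and the plain return value stays exact. The callers below compare the adapter's reported allocation against the rounded value and, on a match, substitute the exact one. A worked example with illustrative numbers, using the kernel's roundup() macro:

    #include <linux/math.h>         /* roundup() */

    /* Illustrative numbers only: HBR3 (8100 Mb/s per lane) x4 under
     * 8b/10b coding gives 8100 * 4 * 8 / 10 = 25920 Mb/s usable, so
     * with a 250 Mb/s granularity roundup(25920, 250) = 26000. An
     * adapter that allocates in 250 Mb/s units can only ever report
     * 26000 back, so allocated_bw == max_bw_rounded means "the whole
     * link", and the exact 25920 is used instead.
     */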
@@ -995,13 +986,13 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
return ret;
allocated_bw = ret;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
if (ret < 0)
return ret;
- if (allocated_bw == max_bw)
+ if (allocated_bw == max_bw_rounded)
allocated_bw = ret;
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*consumed_up = 0;
*consumed_down = allocated_bw;
} else {
@@ -1015,7 +1006,6 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
/*
@@ -1023,20 +1013,21 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
* Otherwise we read it from the DPRX.
*/
if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
- int ret, allocated_bw, max_bw;
+ int ret, allocated_bw, max_bw_rounded;
ret = usb4_dp_port_allocated_bandwidth(in);
if (ret < 0)
return ret;
allocated_bw = ret;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
+ &max_bw_rounded);
if (ret < 0)
return ret;
- if (allocated_bw == max_bw)
+ if (allocated_bw == max_bw_rounded)
allocated_bw = ret;
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*allocated_up = 0;
*allocated_down = allocated_bw;
} else {
@@ -1053,26 +1044,25 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- int max_bw, ret, tmp;
+ int max_bw_rounded, ret, tmp;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
if (ret < 0)
return ret;
- if (tb_port_path_direction_downstream(in, out)) {
- tmp = min(*alloc_down, max_bw);
+ if (tb_tunnel_direction_downstream(tunnel)) {
+ tmp = min(*alloc_down, max_bw_rounded);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
*alloc_down = tmp;
*alloc_up = 0;
} else {
- tmp = min(*alloc_up, max_bw);
+ tmp = min(*alloc_up, max_bw_rounded);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
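Note the clamp is against max_bw_rounded, presumably so that a caller asking for exactly the rounded ceiling (the largest value the adapter can report as allocatable) is not spuriously trimmed. A minimal sketch with made-up numbers:

    #include <linux/minmax.h>       /* min() */

    /* With an exact maximum of 25920 Mb/s rounded up to 26000 Mb/s,
     * a request of 26000 passes through unchanged, while a request
     * of 30000 is cut to 26000 before being handed to
     * usb4_dp_port_allocate_bandwidth().
     */
    static int example_clamp(int requested, int max_bw_rounded)
    {
            return min(requested, max_bw_rounded);
    }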
@@ -1150,17 +1140,16 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
- struct tb_port *in = tunnel->src_port;
int ret;
- if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
return -EOPNOTSUPP;
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
if (ret < 0)
return ret;
- if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*max_up = 0;
*max_down = ret;
} else {
@@ -1174,8 +1163,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
- struct tb_port *in = tunnel->src_port;
- const struct tb_switch *sw = in->sw;
+ const struct tb_switch *sw = tunnel->src_port->sw;
u32 rate = 0, lanes = 0;
int ret;
@@ -1196,17 +1184,13 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
/*
* Then see if the DPRX negotiation is ready and if yes
* return that bandwidth (it may be smaller than the
- * reduced one). Otherwise return the remote (possibly
- * reduced) caps.
+ * reduced one). According to the VESA spec, the DPRX
+ * negotiation shall complete within 5 seconds after the
+ * tunnel is established. We give it 100 ms extra just in case.
*/
- ret = tb_dp_wait_dprx(tunnel, 150);
- if (ret) {
- if (ret == -ETIMEDOUT)
- ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
- &rate, &lanes);
- if (ret)
- return ret;
- }
+ ret = tb_dp_wait_dprx(tunnel, 5100);
+ if (ret)
+ return ret;
ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
if (ret)
return ret;
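tb_dp_wait_dprx() itself is outside this diff; presumably it polls a DPRX-done flag in the DP IN adapter's DP_COMMON_CAP until the deadline passes. A hedged sketch of such a loop (the DP_COMMON_CAP_DPRX_DONE bit name is an assumption, not verified against this tree):

    /* Sketch only: poll until the DPRX capabilities read completes
     * or timeout_msec (now 5100 ms, per the comment above) elapses.
     * Relies on linux/ktime.h and linux/delay.h.
     */
    static int tb_dp_wait_dprx_sketch(struct tb_tunnel *tunnel,
                                      int timeout_msec)
    {
            ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
            struct tb_port *in = tunnel->src_port;

            do {
                    u32 val;
                    int ret;

                    ret = tb_port_read(in, &val, TB_CFG_PORT,
                                       in->cap_adap + DP_COMMON_CAP, 1);
                    if (ret)
                            return ret;

                    if (val & DP_COMMON_CAP_DPRX_DONE)
                            return 0;

                    usleep_range(100, 150);
            } while (ktime_before(ktime_get(), timeout));

            return -ETIMEDOUT;
    }

With the timeout raised from 150 ms to 5100 ms, a slow sink no longer falls back to the (possibly reduced) remote caps: DPRX negotiation either finishes and its real result is used, or the whole call fails.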
@@ -1221,7 +1205,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
return 0;
}
- if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*consumed_up = 0;
*consumed_down = tb_dp_bandwidth(rate, lanes);
} else {
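For reference, tb_dp_bandwidth(rate, lanes), used throughout, converts the per-lane link rate in Mb/s plus the lane count into usable bandwidth by discounting the DP channel-coding overhead. A sketch, assuming 8b/10b for legacy rates and 128b/132b for UHBR (treat the exact form as an assumption):

    static unsigned int tb_dp_bandwidth_sketch(unsigned int rate,
                                               unsigned int lanes)
    {
            /* UHBR links use 128b/132b channel coding; legacy DP
             * rates use 8b/10b. Strip the respective overhead to
             * get usable payload bandwidth in Mb/s.
             */
            if (tb_dp_is_uhbr_rate(rate))
                    return rate * lanes * 128 / 132;
            return rate * lanes * 8 / 10;
    }

For example, HBR2 x2 gives 5400 * 2 * 8 / 10 = 8640 Mb/s, the kind of figure printed by the "maximum supported bandwidth" message in the first hunk.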