linux-kernel.vger.kernel.org archive mirror
* [PATCH] Revert "drm/dp_mst: Remove single tx msg restriction."
@ 2020-04-23 16:42 Lyude Paul
  2020-04-23 17:09 ` Sean Paul
  2020-04-23 17:53 ` Harry Wentland
From: Lyude Paul @ 2020-04-23 16:42 UTC
  To: dri-devel
  Cc: Wayne Lin, Sean Paul, Maarten Lankhorst, Maxime Ripard,
	Thomas Zimmermann, David Airlie, Daniel Vetter, Wayne Lin,
	linux-kernel

This reverts commit 6bb0942e8f46863a745489cce27efe5be2a3885e.

Unfortunately it would appear that the rumors we've heard of sideband
message interleaving not being very well supported are true. On the
Lenovo ThinkPad Thunderbolt 3 dock that I have, interleaved messages
appear to just get dropped:

  [drm:drm_dp_mst_wait_tx_reply [drm_kms_helper]] timedout msg send
  00000000571ddfd0 2 1
  [dp_mst] txmsg cur_offset=2 cur_len=2 seqno=1 state=SENT path_msg=1 dst=00
  [dp_mst] 	type=ENUM_PATH_RESOURCES contents:
  [dp_mst] 		port=2

DP descriptor for this hub:
  OUI 90-cc-24 dev-ID SYNA3  HW-rev 1.0 SW-rev 3.12 quirks 0x0008

It would also seem that this is a somewhat well-known issue in the
field. From section 5.4.2 of the DisplayPort 2.0 specification:

  There are MST Sink/Branch devices in the field that do not handle
  interleaved message transactions.

  To facilitate message transaction handling by downstream devices, an
  MST Source device shall generate message transactions in an atomic
  manner (i.e., the MST Source device shall not concurrently interleave
  multiple message transactions). Therefore, an MST Source device shall
  clear the Message_Sequence_No value in the Sideband_MSG_Header to 0.

  MST Source devices that support field policy updates by way of
  software should update the policy to forego the generation of
  interleaved message transactions.

This is a bit disappointing, as features like HDCP require that we send
a sideband request every ~2 seconds for each active stream. However,
there isn't really anything in the specification that allows us to
accurately probe for interleaved messages.

If it ends up being that we -really- need this in the future, we might
be able to whitelist hubs where interleaving is known to work, or maybe
try some sort of heuristics. But for now, let's just play it safe and
not use it.
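
For reference, a rough sketch of what an OUI-based allowlist could look
like is below. This is purely illustrative and not part of this patch:
drm_dp_read_desc() is the existing helper behind the descriptor dump
above, but the allowlist helper and its (placeholder) table are made up.

  /*
   * Hypothetical sketch only -- not part of this patch. Reads the branch
   * device descriptor (the same data as the "OUI 90-cc-24 ..." dump above)
   * and only allows interleaving for OUIs verified in the field. The
   * allowlist below is a placeholder; no such list exists today.
   */
  #include <linux/kernel.h>
  #include <linux/string.h>
  #include <drm/drm_dp_helper.h>

  static const u8 interleave_ok_ouis[][3] = {
  	{ 0x00, 0x00, 0x00 },	/* placeholder entry, not a real vendor OUI */
  };

  static bool drm_dp_mst_branch_allows_interleave(struct drm_dp_aux *aux)
  {
  	struct drm_dp_desc desc;
  	int i;

  	/* Read the branch device's DP descriptor over the AUX channel */
  	if (drm_dp_read_desc(aux, &desc, true))
  		return false;	/* read failed, stay conservative */

  	for (i = 0; i < ARRAY_SIZE(interleave_ok_ouis); i++) {
  		if (!memcmp(desc.ident.oui, interleave_ok_ouis[i],
  			    sizeof(desc.ident.oui)))
  			return true;
  	}

  	return false;
  }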

Signed-off-by: Lyude Paul <lyude@redhat.com>
Fixes: 6bb0942e8f46 ("drm/dp_mst: Remove single tx msg restriction.")
Cc: Wayne Lin <Wayne.Lin@amd.com>
Cc: Sean Paul <seanpaul@chromium.org>
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 14 ++++++++++++--
 include/drm/drm_dp_mst_helper.h       |  5 +++++
 2 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 21f10ceb3d6c..03a1496f6120 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1205,6 +1205,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
 			mstb->tx_slots[txmsg->seqno] = NULL;
 		}
+		mgr->is_waiting_for_dwn_reply = false;
+
 	}
 out:
 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1214,6 +1216,7 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 	}
 	mutex_unlock(&mgr->qlock);
 
+	drm_dp_mst_kick_tx(mgr);
 	return ret;
 }
 
@@ -2789,9 +2792,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 	ret = process_single_tx_qlock(mgr, txmsg, false);
 	if (ret == 1) {
 		/* txmsg is sent it should be in the slots now */
+		mgr->is_waiting_for_dwn_reply = true;
 		list_del(&txmsg->next);
 	} else if (ret) {
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+		mgr->is_waiting_for_dwn_reply = false;
 		list_del(&txmsg->next);
 		if (txmsg->seqno != -1)
 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2831,7 +2836,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
 	}
 
-	if (list_is_singular(&mgr->tx_msg_downq))
+	if (list_is_singular(&mgr->tx_msg_downq) &&
+	    !mgr->is_waiting_for_dwn_reply)
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
@@ -3823,6 +3829,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 	mutex_lock(&mgr->qlock);
 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
 	mstb->tx_slots[seqno] = NULL;
+	mgr->is_waiting_for_dwn_reply = false;
 	mutex_unlock(&mgr->qlock);
 
 	wake_up_all(&mgr->tx_waitq);
@@ -3830,6 +3837,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 	return 0;
 
 out_clear_reply:
+	mutex_lock(&mgr->qlock);
+	mgr->is_waiting_for_dwn_reply = false;
+	mutex_unlock(&mgr->qlock);
 	if (msg)
 		memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
 out:
@@ -4683,7 +4693,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
 	mutex_lock(&mgr->qlock);
-	if (!list_empty(&mgr->tx_msg_downq))
+	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 2d7c26592c05..96bcf33c03d3 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -592,6 +592,11 @@ struct drm_dp_mst_topology_mgr {
 	 */
 	bool payload_id_table_cleared : 1;
 
+	/**
+	 * @is_waiting_for_dwn_reply: whether we're waiting for a down reply.
+	 */
+	bool is_waiting_for_dwn_reply : 1;
+
 	/**
 	 * @mst_primary: Pointer to the primary/first branch device.
 	 */
-- 
2.25.3



* Re: [PATCH] Revert "drm/dp_mst: Remove single tx msg restriction."
  2020-04-23 16:42 [PATCH] Revert "drm/dp_mst: Remove single tx msg restriction." Lyude Paul
@ 2020-04-23 17:09 ` Sean Paul
  2020-04-23 17:53 ` Harry Wentland
From: Sean Paul @ 2020-04-23 17:09 UTC
  To: Lyude Paul
  Cc: dri-devel, Wayne Lin, Sean Paul, Maarten Lankhorst,
	Maxime Ripard, Thomas Zimmermann, David Airlie, Daniel Vetter,
	Wayne Lin, LKML

On Thu, Apr 23, 2020 at 12:42 PM Lyude Paul <lyude@redhat.com> wrote:
>
> This reverts commit 6bb0942e8f46863a745489cce27efe5be2a3885e.
>
> Unfortunately it would appear that the rumors we've heard of sideband
> message interleaving not being very well supported are true. On the
> Lenovo ThinkPad Thunderbolt 3 dock that I have, interleaved messages
> appear to just get dropped:
>
>   [drm:drm_dp_mst_wait_tx_reply [drm_kms_helper]] timedout msg send
>   00000000571ddfd0 2 1
>   [dp_mst] txmsg cur_offset=2 cur_len=2 seqno=1 state=SENT path_msg=1 dst=00
>   [dp_mst]      type=ENUM_PATH_RESOURCES contents:
>   [dp_mst]              port=2
>
> DP descriptor for this hub:
>   OUI 90-cc-24 dev-ID SYNA3  HW-rev 1.0 SW-rev 3.12 quirks 0x0008
>
> It would also seem that this is a somewhat well-known issue in the
> field. From section 5.4.2 of the DisplayPort 2.0 specification:
>
>   There are MST Sink/Branch devices in the field that do not handle
>   interleaved message transactions.
>
>   To facilitate message transaction handling by downstream devices, an
>   MST Source device shall generate message transactions in an atomic
>   manner (i.e., the MST Source device shall not concurrently interleave
>   multiple message transactions). Therefore, an MST Source device shall
>   clear the Message_Sequence_No value in the Sideband_MSG_Header to 0.
>
>   MST Source devices that support field policy updates by way of
>   software should update the policy to forego the generation of
>   interleaved message transactions.
>
> This is a bit disappointing, as features like HDCP require that we send
> a sideband request every ~2 seconds for each active stream. However,
> there isn't really anything in the specification that allows us to
> accurately probe for interleaved messages.
>
> If it ends up being that we -really- need this in the future, we might
> be able to whitelist hubs where interleaving is known to work, or maybe
> try some sort of heuristics. But for now, let's just play it safe and
> not use it.
>
> Signed-off-by: Lyude Paul <lyude@redhat.com>
> Fixes: 6bb0942e8f46 ("drm/dp_mst: Remove single tx msg restriction.")
> Cc: Wayne Lin <Wayne.Lin@amd.com>
> Cc: Sean Paul <seanpaul@chromium.org>

Bummer :-(

Thanks for digging into this!

Reviewed-by: Sean Paul <sean@poorly.run>



* Re: [PATCH] Revert "drm/dp_mst: Remove single tx msg restriction."
  2020-04-23 16:42 [PATCH] Revert "drm/dp_mst: Remove single tx msg restriction." Lyude Paul
  2020-04-23 17:09 ` Sean Paul
@ 2020-04-23 17:53 ` Harry Wentland
  2020-04-24  2:49   ` Lin, Wayne
From: Harry Wentland @ 2020-04-23 17:53 UTC
  To: Lyude Paul, dri-devel
  Cc: David Airlie, linux-kernel, Sean Paul, Thomas Zimmermann,
	Wayne Lin, Wayne Lin

On 2020-04-23 12:42 p.m., Lyude Paul wrote:
> This reverts commit 6bb0942e8f46863a745489cce27efe5be2a3885e.
> 
> Unfortunately it would appear that the rumors we've heard of sideband
> message interleaving not being very well supported are true. On the
> Lenovo ThinkPad Thunderbolt 3 dock that I have, interleaved messages
> appear to just get dropped:
> 
>   [drm:drm_dp_mst_wait_tx_reply [drm_kms_helper]] timedout msg send
>   00000000571ddfd0 2 1
>   [dp_mst] txmsg cur_offset=2 cur_len=2 seqno=1 state=SENT path_msg=1 dst=00
>   [dp_mst] 	type=ENUM_PATH_RESOURCES contents:
>   [dp_mst] 		port=2
> 
> DP descriptor for this hub:
>   OUI 90-cc-24 dev-ID SYNA3  HW-rev 1.0 SW-rev 3.12 quirks 0x0008
> 
> It would also seem that this is a somewhat well-known issue in the
> field. From section 5.4.2 of the DisplayPort 2.0 specification:
> 
>   There are MST Sink/Branch devices in the field that do not handle
>   interleaved message transactions.
> 
>   To facilitate message transaction handling by downstream devices, an
>   MST Source device shall generate message transactions in an atomic
>   manner (i.e., the MST Source device shall not concurrently interleave
>   multiple message transactions). Therefore, an MST Source device shall
>   clear the Message_Sequence_No value in the Sideband_MSG_Header to 0.
> 
>   MST Source devices that support field policy updates by way of
>   software should update the policy to forego the generation of
>   interleaved message transactions.
> 
> This is a bit disappointing, as features like HDCP require that we send
> a sideband request every ~2 seconds for each active stream. However,
> there isn't really anything in the specification that allows us to
> accurately probe for interleaved messages.
> 
> If it ends up being that we -really- need this in the future, we might
> be able to whitelist hubs where interleaving is known to work, or maybe
> try some sort of heuristics. But for now, let's just play it safe and
> not use it.
> 

Sounds like the DP spec would need an additional bit to indicate actual
support of interleaved messages by the RX.

Acked-by: Harry Wentland <harry.wentland@amd.com>

Harry
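
(Purely for illustration: if the spec ever grew such a bit, the probe
might look roughly like the sketch below. DPCD defines no such
capability today, so the register offset and bit mask are invented
placeholders; only drm_dp_dpcd_readb() is an existing helper.)

  /*
   * Hypothetical sketch -- DPCD currently defines no "interleaving
   * supported" capability, so FAKE_DP_MST_CAP_EXT and its bit are
   * invented placeholders. Only drm_dp_dpcd_readb() is real.
   */
  #include <linux/bits.h>
  #include <drm/drm_dp_helper.h>

  #define FAKE_DP_MST_CAP_EXT			0x0a3	/* made-up DPCD offset */
  #define  FAKE_DP_MST_MSG_INTERLEAVE_SUPPORTED	BIT(0)	/* made-up bit */

  static bool drm_dp_mst_rx_supports_interleave(struct drm_dp_aux *aux)
  {
  	u8 cap = 0;

  	/* drm_dp_dpcd_readb() returns 1 when exactly one byte was read */
  	if (drm_dp_dpcd_readb(aux, FAKE_DP_MST_CAP_EXT, &cap) != 1)
  		return false;	/* AUX read failed, assume no support */

  	return cap & FAKE_DP_MST_MSG_INTERLEAVE_SUPPORTED;
  }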



* RE: [PATCH] Revert "drm/dp_mst: Remove single tx msg restriction."
  2020-04-23 17:53 ` Harry Wentland
@ 2020-04-24  2:49   ` Lin, Wayne
From: Lin, Wayne @ 2020-04-24  2:49 UTC
  To: Wentland, Harry, Lyude Paul, dri-devel
  Cc: David Airlie, linux-kernel, Sean Paul, Thomas Zimmermann


> -----Original Message-----
> From: Wentland, Harry <Harry.Wentland@amd.com>
> Sent: Friday, April 24, 2020 1:53 AM
> To: Lyude Paul <lyude@redhat.com>; dri-devel@lists.freedesktop.org
> Cc: David Airlie <airlied@linux.ie>; linux-kernel@vger.kernel.org; Sean Paul
> <seanpaul@chromium.org>; Thomas Zimmermann <tzimmermann@suse.de>;
> Lin, Wayne <Wayne.Lin@amd.com>; Lin, Wayne <Wayne.Lin@amd.com>
> Subject: Re: [PATCH] Revert "drm/dp_mst: Remove single tx msg restriction."
> 
> On 2020-04-23 12:42 p.m., Lyude Paul wrote:
> > This reverts commit 6bb0942e8f46863a745489cce27efe5be2a3885e.
> >
> > Unfortunately it would appear that the rumors we've heard of sideband
> > message interleaving not being very well supported are true. On the
> > Lenovo ThinkPad Thunderbolt 3 dock that I have, interleaved messages
> > appear to just get dropped:
> >
> >   [drm:drm_dp_mst_wait_tx_reply [drm_kms_helper]] timedout msg send
> >   00000000571ddfd0 2 1
> >   [dp_mst] txmsg cur_offset=2 cur_len=2 seqno=1 state=SENT path_msg=1 dst=00
> >   [dp_mst] 	type=ENUM_PATH_RESOURCES contents:
> >   [dp_mst] 		port=2
> >
> > DP descriptor for this hub:
> >   OUI 90-cc-24 dev-ID SYNA3  HW-rev 1.0 SW-rev 3.12 quirks 0x0008
> >
> > It would also seem that this is a somewhat well-known issue in the
> > field. From section 5.4.2 of the DisplayPort 2.0 specification:
> >
> >   There are MST Sink/Branch devices in the field that do not handle
> >   interleaved message transactions.
> >
> >   To facilitate message transaction handling by downstream devices, an
> >   MST Source device shall generate message transactions in an atomic
> >   manner (i.e., the MST Source device shall not concurrently interleave
> >   multiple message transactions). Therefore, an MST Source device shall
> >   clear the Message_Sequence_No value in the Sideband_MSG_Header to 0.
> >
> >   MST Source devices that support field policy updates by way of
> >   software should update the policy to forego the generation of
> >   interleaved message transactions.
> >
Hi Paul,

Appreciate your time!
Didn't notice that in the DP 2.0 spec before :)

Acked-by: Wayne Lin <wayne.lin@amd.com>


--
Best regards,
Wayne Lin

