* [PATCH] drm/dp_mst: Have DP_Tx send one msg at a time
@ 2020-01-13  9:36 Wayne Lin
  2020-01-13 17:59 ` Lyude Paul
  2020-01-15 21:58 ` Lyude Paul
  0 siblings, 2 replies; 5+ messages in thread
From: Wayne Lin @ 2020-01-13  9:36 UTC (permalink / raw)
  To: dri-devel, amd-gfx; +Cc: jerry.zuo, Nicholas.Kazlauskas, Wayne Lin

[Why]
Noticed this while testing MST with the 4-port MST hub from
StarTech.com. Sometimes the monitors can't be lit up normally and we
get the error message 'sideband msg build failed'.

Looking into the AUX transactions, we found that the source sometimes
sends out another down request before receiving the down reply to the
previous down request. On top of that, the current code in
drm_dp_get_one_sb_msg() doesn't handle the interleaved-replies case.
Hence, the source can't build up the reply message completely and the
monitors can't be lit up.
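
A rough picture of the failure (an illustrative trace, not captured
from the actual hub):

  source -> hub: down request #1
  source -> hub: down request #2         (sent before reply #1 arrives)
  hub -> source: down reply #1, chunk 1
  hub -> source: down reply #2, chunk 1  (interleaved with reply #1)
  hub -> source: down reply #1, chunk 2
  ...

Since drm_dp_get_one_sb_msg() assumes every incoming chunk belongs to
the same in-flight reply, the interleaved chunks can't be stitched
back into complete messages.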

[How]
For better compatibility, force the source to send out only one down
request at a time. Add a flag, is_waiting_for_dwn_reply, to determine
whether the source can send out a down request immediately:

- Check the flag before calling process_single_down_tx_qlock() to send
out a msg
- Set the flag when a down request is successfully sent out
- Clear the flag when a down reply is successfully built up
- Clear the flag when errors are found while sending out a down request
- Clear the flag when errors are found while building up a down reply
- Clear the flag when a timeout occurs while waiting for a down reply
- Use drm_dp_mst_kick_tx() at the end of drm_dp_mst_wait_tx_reply() to
try to send the next down request in the queue (so that queued
messages are still attempted when errors occur)
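
Condensed, the flag handling across the hunks below looks roughly like
this (a sketch only; locking, list bookkeeping and debug output are
trimmed, names match the patch):

	/* sending - process_single_down_tx_qlock() */
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1)
		mgr->is_waiting_for_dwn_reply = true;	/* sent, wait for reply */
	else if (ret)
		mgr->is_waiting_for_dwn_reply = false;	/* send failed */

	/* dispatching - drm_dp_queue_down_tx() and drm_dp_tx_work() */
	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
		process_single_down_tx_qlock(mgr);

	/* completion - reply built, reply error, or wait timeout */
	mgr->is_waiting_for_dwn_reply = false;
	drm_dp_mst_kick_tx(mgr);	/* attempt the next queued request */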

Cc: Lyude Paul <lyude@redhat.com>
Signed-off-by: Wayne Lin <Wayne.Lin@amd.com>
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 14 ++++++++++++--
 include/drm/drm_dp_mst_helper.h       |  6 ++++++
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 4395d5cc0645..3542af15387a 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1195,6 +1195,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
 			mstb->tx_slots[txmsg->seqno] = NULL;
 		}
+		mgr->is_waiting_for_dwn_reply = false;
+
 	}
 out:
 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
@@ -1204,6 +1206,7 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 	}
 	mutex_unlock(&mgr->qlock);
 
+	drm_dp_mst_kick_tx(mgr);
 	return ret;
 }
 
@@ -2770,9 +2773,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 	ret = process_single_tx_qlock(mgr, txmsg, false);
 	if (ret == 1) {
 		/* txmsg is sent it should be in the slots now */
+		mgr->is_waiting_for_dwn_reply = true;
 		list_del(&txmsg->next);
 	} else if (ret) {
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+		mgr->is_waiting_for_dwn_reply = false;
 		list_del(&txmsg->next);
 		if (txmsg->seqno != -1)
 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
@@ -2812,7 +2817,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
 	}
 
-	if (list_is_singular(&mgr->tx_msg_downq))
+	if (list_is_singular(&mgr->tx_msg_downq) &&
+	    !mgr->is_waiting_for_dwn_reply)
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
@@ -3743,6 +3749,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 	mutex_lock(&mgr->qlock);
 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
 	mstb->tx_slots[slot] = NULL;
+	mgr->is_waiting_for_dwn_reply = false;
 	mutex_unlock(&mgr->qlock);
 
 	wake_up_all(&mgr->tx_waitq);
@@ -3752,6 +3759,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 no_msg:
 	drm_dp_mst_topology_put_mstb(mstb);
 clear_down_rep_recv:
+	mutex_lock(&mgr->qlock);
+	mgr->is_waiting_for_dwn_reply = false;
+	mutex_unlock(&mgr->qlock);
 	memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 
 	return 0;
@@ -4591,7 +4601,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
 	mutex_lock(&mgr->qlock);
-	if (!list_empty(&mgr->tx_msg_downq))
+	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 942575de86a0..d0b45468135a 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -610,6 +610,12 @@ struct drm_dp_mst_topology_mgr {
 	 * &drm_dp_sideband_msg_tx.state once they are queued
 	 */
 	struct mutex qlock;
+
+	/**
+	 * @is_waiting_for_dwn_reply: whether we're waiting for a down reply
+	 */
+	bool is_waiting_for_dwn_reply;
+
 	/**
 	 * @tx_msg_downq: List of pending down replies.
 	 */
-- 
2.17.1

* Re: [PATCH] drm/dp_mst: Have DP_Tx send one msg at a time
  2020-01-13  9:36 [PATCH] drm/dp_mst: Have DP_Tx send one msg at a time Wayne Lin
@ 2020-01-13 17:59 ` Lyude Paul
  2020-01-14  3:01   ` Lin, Wayne
  2020-01-15 21:58 ` Lyude Paul
  1 sibling, 1 reply; 5+ messages in thread
From: Lyude Paul @ 2020-01-13 17:59 UTC (permalink / raw)
  To: Wayne Lin, dri-devel, amd-gfx; +Cc: jerry.zuo, Nicholas.Kazlauskas

Hey! Haven't taken a look at this patch yet but just wanted to let you know
I'm going to try to get to most of the stuff you've got pending for me. I came
down with a really nasty cold last week so sorry if you've had any other
patches waiting until now!

On Mon, 2020-01-13 at 17:36 +0800, Wayne Lin wrote:
> [snip - quoted patch trimmed]
-- 
Cheers,
	Lyude Paul

* RE: [PATCH] drm/dp_mst: Have DP_Tx send one msg at a time
  2020-01-13 17:59 ` Lyude Paul
@ 2020-01-14  3:01   ` Lin, Wayne
  0 siblings, 0 replies; 5+ messages in thread
From: Lin, Wayne @ 2020-01-14  3:01 UTC (permalink / raw)
  To: Lyude Paul, dri-devel, amd-gfx; +Cc: Zuo, Jerry, Kazlauskas, Nicholas

Thanks for your time and hope you get well soon!

-----Original Message-----
From: Lyude Paul <lyude@redhat.com> 
Sent: Tuesday, January 14, 2020 1:59 AM
To: Lin, Wayne <Wayne.Lin@amd.com>; dri-devel@lists.freedesktop.org; amd-gfx@lists.freedesktop.org
Cc: Kazlauskas, Nicholas <Nicholas.Kazlauskas@amd.com>; Wentland, Harry <Harry.Wentland@amd.com>; Zuo, Jerry <Jerry.Zuo@amd.com>
Subject: Re: [PATCH] drm/dp_mst: Have DP_Tx send one msg at a time

[snip - quoted reply and patch trimmed]
--
Best regards,
Wayne Lin

* Re: [PATCH] drm/dp_mst: Have DP_Tx send one msg at a time
  2020-01-13  9:36 [PATCH] drm/dp_mst: Have DP_Tx send one msg at a time Wayne Lin
  2020-01-13 17:59 ` Lyude Paul
@ 2020-01-15 21:58 ` Lyude Paul
  2020-01-16  2:09   ` Lin, Wayne
  1 sibling, 1 reply; 5+ messages in thread
From: Lyude Paul @ 2020-01-15 21:58 UTC (permalink / raw)
  To: Wayne Lin, dri-devel, amd-gfx; +Cc: jerry.zuo, Nicholas.Kazlauskas

Reviewed-by: Lyude Paul <lyude@redhat.com>

I will push this to drm-misc-fixes in just a moment, thanks!

On Mon, 2020-01-13 at 17:36 +0800, Wayne Lin wrote:
> [snip - quoted patch trimmed]
-- 
Cheers,
	Lyude Paul

* RE: [PATCH] drm/dp_mst: Have DP_Tx send one msg at a time
  2020-01-15 21:58 ` Lyude Paul
@ 2020-01-16  2:09   ` Lin, Wayne
  0 siblings, 0 replies; 5+ messages in thread
From: Lin, Wayne @ 2020-01-16  2:09 UTC (permalink / raw)
  To: Lyude Paul, dri-devel, amd-gfx; +Cc: Zuo, Jerry, Kazlauskas, Nicholas

Appreciate your time.
Thanks!

> -----Original Message-----
> From: Lyude Paul <lyude@redhat.com>
> Sent: Thursday, January 16, 2020 5:58 AM
> To: Lin, Wayne <Wayne.Lin@amd.com>; dri-devel@lists.freedesktop.org;
> amd-gfx@lists.freedesktop.org
> Cc: Kazlauskas, Nicholas <Nicholas.Kazlauskas@amd.com>; Wentland, Harry
> <Harry.Wentland@amd.com>; Zuo, Jerry <Jerry.Zuo@amd.com>
> Subject: Re: [PATCH] drm/dp_mst: Have DP_Tx send one msg at a time
> 
> Reviewed-by: Lyude Paul <lyude@redhat.com>
> 
> I will push this to drm-misc-fixes in just a moment, thanks!
> 
> On Mon, 2020-01-13 at 17:36 +0800, Wayne Lin wrote:
> > [snip - quoted patch trimmed]
--
Best regards,
Wayne Lin
