* [PATCH 0/2] Fix MST with downstream branches
@ 2015-12-18 22:14 Harry Wentland
  2015-12-18 22:14 ` [PATCH 1/2] drm/dp/mst: process broadcast messages correctly Harry Wentland
  2015-12-18 22:14 ` [PATCH 2/2] drm/dp/mst: always send reply for UP request Harry Wentland
  0 siblings, 2 replies; 7+ messages in thread
From: Harry Wentland @ 2015-12-18 22:14 UTC (permalink / raw)
  To: dri-devel

Two MST fixes for issues we discovered during MST bring-up
with two daisy-chained MST displays.

Mykola Lysenko (2):
  drm/dp/mst: process broadcast messages correctly
  drm/dp/mst: always send reply for UP request

 drivers/gpu/drm/drm_dp_mst_topology.c | 125 ++++++++++++++++++++++++++--------
 include/drm/drm_dp_mst_helper.h       |   2 -
 2 files changed, 95 insertions(+), 32 deletions(-)

-- 
2.1.4

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/dri-devel


* [PATCH 1/2] drm/dp/mst: process broadcast messages correctly
  2015-12-18 22:14 [PATCH 0/2] Fix MST with downstream branches Harry Wentland
@ 2015-12-18 22:14 ` Harry Wentland
  2015-12-18 22:14 ` [PATCH 2/2] drm/dp/mst: always send reply for UP request Harry Wentland
  1 sibling, 0 replies; 7+ messages in thread
From: Harry Wentland @ 2015-12-18 22:14 UTC (permalink / raw)
  To: dri-devel; +Cc: Mykola Lysenko

From: Mykola Lysenko <Mykola.Lysenko@amd.com>

When a broadcast message is received in an UP request,
the RAD cannot be used to identify the message originator.
Instead, the message must be parsed and the originator
looked up by the GUID carried in the parsed message.

Also reply with broadcast when a broadcast message is
received (for now it always is).
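
In outline, the UP request receive path becomes (a condensed sketch of
the flow in the diff below; in the actual code the message body is
parsed in both the unicast and the broadcast case):

	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_mst_branch *mstb;

	if (!mgr->up_req_recv.initial_hdr.broadcast) {
		/* unicast: LCT/RAD in the sideband header name the sender */
		mstb = drm_dp_get_mst_branch_device(mgr,
				mgr->up_req_recv.initial_hdr.lct,
				mgr->up_req_recv.initial_hdr.rad);
	} else {
		/* broadcast: RAD is meaningless; parse the body and look
		 * the sender up by the GUID it carries */
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
		mstb = drm_dp_get_mst_branch_device_by_guid(mgr,
				msg.u.conn_stat.guid);
	}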

Signed-off-by: Mykola Lysenko <Mykola.Lysenko@amd.com>
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 95 +++++++++++++++++++++++++++++++----
 1 file changed, 84 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 809959d56d78..3baa95c1b14b 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1210,6 +1210,50 @@ out:
 	return mstb;
 }
 
+static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
+	struct drm_dp_mst_branch *mstb,
+	uint8_t *guid)
+{
+	struct drm_dp_mst_branch *found_mstb;
+	struct drm_dp_mst_port *port;
+
+	list_for_each_entry(port, &mstb->ports, next) {
+		if (!port->mstb)
+			continue;
+
+		if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
+			return port->mstb;
+
+		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
+
+		if (found_mstb)
+			return found_mstb;
+	}
+
+	return NULL;
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
+	struct drm_dp_mst_topology_mgr *mgr,
+	uint8_t *guid)
+{
+	struct drm_dp_mst_branch *mstb;
+
+	/* find the port by iterating down */
+	mutex_lock(&mgr->lock);
+
+	if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
+		mstb = mgr->mst_primary;
+	else
+		mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
+
+	if (mstb)
+		kref_get(&mstb->kref);
+
+	mutex_unlock(&mgr->lock);
+	return mstb;
+}
+
 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 					       struct drm_dp_mst_branch *mstb)
 {
@@ -1320,6 +1364,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
 				  struct drm_dp_sideband_msg_tx *txmsg)
 {
 	struct drm_dp_mst_branch *mstb = txmsg->dst;
+	u8 req_type;
 
 	/* both msg slots are full */
 	if (txmsg->seqno == -1) {
@@ -1336,7 +1381,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
 			txmsg->seqno = 1;
 		mstb->tx_slots[txmsg->seqno] = txmsg;
 	}
-	hdr->broadcast = 0;
+
+	req_type = txmsg->msg[0] & 0x7f;
+	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
+		req_type == DP_RESOURCE_STATUS_NOTIFY)
+		hdr->broadcast = 1;
+	else
+		hdr->broadcast = 0;
 	hdr->path_msg = txmsg->path_msg;
 	hdr->lct = mstb->lct;
 	hdr->lcr = mstb->lct - 1;
@@ -2145,28 +2196,50 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 
 	if (mgr->up_req_recv.have_eomt) {
 		struct drm_dp_sideband_msg_req_body msg;
-		struct drm_dp_mst_branch *mstb;
+		struct drm_dp_mst_branch *mstb = NULL;
 		bool seqno;
-		mstb = drm_dp_get_mst_branch_device(mgr,
-						    mgr->up_req_recv.initial_hdr.lct,
-						    mgr->up_req_recv.initial_hdr.rad);
-		if (!mstb) {
-			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
-			memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
-			return 0;
+
+		if (!mgr->up_req_recv.initial_hdr.broadcast) {
+			mstb = drm_dp_get_mst_branch_device(mgr,
+							    mgr->up_req_recv.initial_hdr.lct,
+							    mgr->up_req_recv.initial_hdr.rad);
+			if (!mstb) {
+				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+				return 0;
+			}
 		}
 
 		seqno = mgr->up_req_recv.initial_hdr.seqno;
 		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
 
 		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
-			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+
+			if (!mstb)
+				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
+
+			if (!mstb) {
+				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+				return 0;
+			}
+
 			drm_dp_update_port(mstb, &msg.u.conn_stat);
 			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
 			(*mgr->cbs->hotplug)(mgr);
 
 		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
-			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+			if (!mstb)
+				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
+
+			if (!mstb) {
+				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+				return 0;
+			}
+
 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
 		}
 
-- 
2.1.4


* [PATCH 2/2] drm/dp/mst: always send reply for UP request
  2015-12-18 22:14 [PATCH 0/2] Fix MST with downstream branches Harry Wentland
  2015-12-18 22:14 ` [PATCH 1/2] drm/dp/mst: process broadcast messages correctly Harry Wentland
@ 2015-12-18 22:14 ` Harry Wentland
  2015-12-19  3:57   ` Dave Airlie
  1 sibling, 1 reply; 7+ messages in thread
From: Harry Wentland @ 2015-12-18 22:14 UTC (permalink / raw)
  To: dri-devel; +Cc: Mykola Lysenko

From: Mykola Lysenko <Mykola.Lysenko@amd.com>

We should always send a reply to an UP request so that
the downstream device can clean up its resources appropriately.

The issue was that the reply to an UP request was only ever sent once.
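
With this change drm_dp_send_up_ack_reply() transmits the ACK
synchronously under qlock and frees it right away, instead of queueing
it on tx_msg_upq (a condensed sketch of the path in the diff below):

	mutex_lock(&mgr->qlock);
	/* send the ACK chunk; the helper also releases the tx slot,
	 * since no interrupt will signal completion of an UP reply */
	process_single_up_tx_qlock(mgr, txmsg);
	mutex_unlock(&mgr->qlock);

	kfree(txmsg);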

Signed-off-by: Mykola Lysenko <Mykola.Lysenko@amd.com>
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 30 +++++++++++-------------------
 include/drm/drm_dp_mst_helper.h       |  2 --
 2 files changed, 11 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 3baa95c1b14b..bda9be9a3087 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1489,26 +1489,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 }
 
 /* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
+				       struct drm_dp_sideband_msg_tx *txmsg)
 {
-	struct drm_dp_sideband_msg_tx *txmsg;
 	int ret;
 
 	/* construct a chunk from the first msg in the tx_msg queue */
-	if (list_empty(&mgr->tx_msg_upq)) {
-		mgr->tx_up_in_progress = false;
-		return;
-	}
-
-	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
 	ret = process_single_tx_qlock(mgr, txmsg, true);
-	if (ret == 1) {
-		/* up txmsgs aren't put in slots - so free after we send it */
-		list_del(&txmsg->next);
-		kfree(txmsg);
-	} else if (ret)
+
+	if (ret != 1)
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-	mgr->tx_up_in_progress = true;
+
+	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
 }
 
 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
@@ -1895,11 +1887,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
 	drm_dp_encode_up_ack_reply(txmsg, req_type);
 
 	mutex_lock(&mgr->qlock);
-	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
-	if (!mgr->tx_up_in_progress) {
-		process_single_up_tx_qlock(mgr);
-	}
+
+	process_single_up_tx_qlock(mgr, txmsg);
+
 	mutex_unlock(&mgr->qlock);
+
+	kfree(txmsg);
 	return 0;
 }
 
@@ -2809,7 +2802,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
 	mutex_init(&mgr->qlock);
 	mutex_init(&mgr->payload_lock);
 	mutex_init(&mgr->destroy_connector_lock);
-	INIT_LIST_HEAD(&mgr->tx_msg_upq);
 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
 	INIT_LIST_HEAD(&mgr->destroy_connector_list);
 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 5340099741ae..006062a27639 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -450,9 +450,7 @@ struct drm_dp_mst_topology_mgr {
 	   the mstb tx_slots and txmsg->state once they are queued */
 	struct mutex qlock;
 	struct list_head tx_msg_downq;
-	struct list_head tx_msg_upq;
 	bool tx_down_in_progress;
-	bool tx_up_in_progress;
 
 	/* payload info + lock for it */
 	struct mutex payload_lock;
-- 
2.1.4


* Re: [PATCH 2/2] drm/dp/mst: always send reply for UP request
  2015-12-18 22:14 ` [PATCH 2/2] drm/dp/mst: always send reply for UP request Harry Wentland
@ 2015-12-19  3:57   ` Dave Airlie
  2015-12-19 16:20     ` Wentland, Harry
  0 siblings, 1 reply; 7+ messages in thread
From: Dave Airlie @ 2015-12-19  3:57 UTC (permalink / raw)
  To: Harry Wentland; +Cc: Mykola Lysenko, dri-devel

On 19 December 2015 at 08:14, Harry Wentland <harry.wentland@amd.com> wrote:
> From: Mykola Lysenko <Mykola.Lysenko@amd.com>
>
> We should always send a reply to an UP request so that
> the downstream device can clean up its resources appropriately.
>
> The issue was that the reply to an UP request was only ever sent once.

What happens, though, if the up reply is too big and needs to be cut
into pieces?

The return value of process_single_tx_qlock is:
  negative value - errno
  0 - part of msg sent
  1 - all of msg sent
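
A caller that wanted to cope with the 0 case would have to keep
transmitting until the whole message had gone out, roughly like this
(a hypothetical sketch, not code from either patch):

	int ret;

	do {
		ret = process_single_tx_qlock(mgr, txmsg, true);
	} while (ret == 0);	/* 0: a chunk went out, more remain */

	if (ret < 0)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);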

Does the attached patch make any difference?

It handles the queue like the down queue.

Dave.

[-- Attachment #2: 0001-drm-dp-mst-check-up-transmission-queue-in-transmitte.patch --]
[-- Type: text/x-patch, Size: 1599 bytes --]

From 5e731acaca9d58bc9fba8d34334ace09cfff04ea Mon Sep 17 00:00:00 2001
From: Dave Airlie <airlied@redhat.com>
Date: Sat, 19 Dec 2015 13:50:56 +1000
Subject: [PATCH] drm/dp/mst: check up transmission queue in transmitter work
 function.

Check the up transmission queue from the transmitter work function,
handling it the same way as the down queue.

Signed-off-by: Dave Airlie <airlied@redhat.com>
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 64a0a37..fdc3216 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1448,6 +1448,7 @@ static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 		mgr->tx_up_in_progress = false;
 		return;
 	}
+	mgr->tx_up_in_progress = true;
 
 	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
 	ret = process_single_tx_qlock(mgr, txmsg, true);
@@ -1457,7 +1458,8 @@ static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 		kfree(txmsg);
 	} else if (ret)
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-	mgr->tx_up_in_progress = true;
+	if (list_empty(&mgr->tx_msg_upq))
+		mgr->tx_up_in_progress = false;
 }
 
 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
@@ -2680,6 +2682,8 @@ static void drm_dp_tx_work(struct work_struct *work)
 	mutex_lock(&mgr->qlock);
 	if (mgr->tx_down_in_progress)
 		process_single_down_tx_qlock(mgr);
+	if (mgr->tx_up_in_progress)
+		process_single_up_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
 
-- 
2.5.0



* Re: [PATCH 2/2] drm/dp/mst: always send reply for UP request
  2015-12-19  3:57   ` Dave Airlie
@ 2015-12-19 16:20     ` Wentland, Harry
  2015-12-21  8:43       ` Lysenko, Mykola
  0 siblings, 1 reply; 7+ messages in thread
From: Wentland, Harry @ 2015-12-19 16:20 UTC (permalink / raw)
  To: airlied, Lysenko, Mykola; +Cc: dri-devel


Thanks, Dave.

Do you have an example of when the up reply would be too big to fit into one sideband message? We don't expect that to happen.

Mykola, can you see if Dave's patch is a good alternative to your patch?

Harry



On Fri, Dec 18, 2015 at 7:57 PM -0800, "Dave Airlie" <airlied@gmail.com<mailto:airlied@gmail.com>> wrote:

On 19 December 2015 at 08:14, Harry Wentland <harry.wentland@amd.com> wrote:
> From: Mykola Lysenko <Mykola.Lysenko@amd.com>
>
> We should always send a reply to an UP request so that
> the downstream device can clean up its resources appropriately.
>
> The issue was that the reply to an UP request was only ever sent once.

What happens, though, if the up reply is too big and needs to be cut into pieces?

The return value of process_single_tx_qlock is:
  negative value - errno
  0 - part of msg sent
  1 - all of msg sent

Does the attached patch make any difference?

It handles the queue like the down queue.

Dave.


* RE: [PATCH 2/2] drm/dp/mst: always send reply for UP request
  2015-12-19 16:20     ` Wentland, Harry
@ 2015-12-21  8:43       ` Lysenko, Mykola
  2015-12-29 21:07         ` Dave Airlie
  0 siblings, 1 reply; 7+ messages in thread
From: Lysenko, Mykola @ 2015-12-21  8:43 UTC (permalink / raw)
  To: airlied; +Cc: dri-devel


Hi Dave,

there are a few comments from my side:

1. As UP replies also go into the tx slots (even though, as the code
notes, that should not happen), we need to clean them up somewhere.
DOWN requests are cleaned up on the DOWN reply, once the full reply is
received; however, we will not receive an interrupt when an UP reply
has been sent (see the sketch after this list);

2. I am not sure DRM currently processes DOWN requests or UP replies
longer than one SB, though it does process such DOWN replies. The
first SB is sent right away when the message is submitted; the
remainder is intended to be sent from drm_dp_tx_work, which is
scheduled from drm_dp_mst_hpd_irq, which in turn should be called from
the DP short-pulse interrupt. However, no short pulse will come for
the remainder of a DOWN request, nor for part of an UP reply. As I
understand it, we have no issue right now only because no DOWN request
or UP reply is longer than one SB;

3. I am not sure a queue is needed for UP replies at all. As soon as a
full UP request is received, we should send the full reply right away,
handling AUX_DEFER just in case;

4. If the points above are right, sending MST messages bigger than one
SB should be addressed in a separate patch, not in this one.
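
Point 1 is what the second patch addresses, by releasing the slot as
soon as the transmit attempt finishes; condensed from that patch (mgr
and txmsg as in process_single_up_tx_qlock()):

	ret = process_single_tx_qlock(mgr, txmsg, true);
	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	/* no interrupt will come for a sent UP reply, so free the
	 * tx slot here rather than waiting for a completion */
	txmsg->dst->tx_slots[txmsg->seqno] = NULL;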

Please let me know if the points above make sense, or whether I misunderstood the code somewhere.

Mykola

From: Wentland, Harry
Sent: Sunday, December 20, 2015 12:21 AM
To: airlied@gmail.com; Lysenko, Mykola <Mykola.Lysenko@amd.com>
Cc: dri-devel@lists.freedesktop.org
Subject: Re: [PATCH 2/2] drm/dp/mst: always send reply for UP request


Thanks, Dave.

Do you have an example of when the up reply would be too big to fit into one sideband message? We don't expect that to happen.

Mykola, can you see if Dave's patch is a good alternative to your patch?

Harry


On Fri, Dec 18, 2015 at 7:57 PM -0800, "Dave Airlie" <airlied@gmail.com<mailto:airlied@gmail.com>> wrote:
On 19 December 2015 at 08:14, Harry Wentland <harry.wentland@amd.com<mailto:harry.wentland@amd.com>> wrote:
> From: Mykola Lysenko <Mykola.Lysenko@amd.com<mailto:Mykola.Lysenko@amd.com>>
>
> We should always send a reply to an UP request so that
> the downstream device can clean up its resources appropriately.
>
> The issue was that the reply to an UP request was only ever sent once.

What happens, though, if the up reply is too big and needs to be cut into pieces?

The return value of process_single_tx_qlock is:
  negative value - errno
  0 - part of msg sent
  1 - all of msg sent

Does the attached patch make any difference?

It handles the queue like the down queue.

Dave.


* Re: [PATCH 2/2] drm/dp/mst: always send reply for UP request
  2015-12-21  8:43       ` Lysenko, Mykola
@ 2015-12-29 21:07         ` Dave Airlie
  0 siblings, 0 replies; 7+ messages in thread
From: Dave Airlie @ 2015-12-29 21:07 UTC (permalink / raw)
  To: Lysenko, Mykola; +Cc: dri-devel

>
> 1. As UP replies also go into the tx slots (even though, as the code
> notes, that should not happen), we need to clean them up somewhere.
> DOWN requests are cleaned up on the DOWN reply, once the full reply
> is received; however, we will not receive an interrupt when an UP
> reply has been sent;
>
> 2. I am not sure DRM currently processes DOWN requests or UP replies
> longer than one SB, though it does process such DOWN replies. The
> first SB is sent right away when the message is submitted; the
> remainder is intended to be sent from drm_dp_tx_work, which is
> scheduled from drm_dp_mst_hpd_irq, which in turn should be called
> from the DP short-pulse interrupt. However, no short pulse will come
> for the remainder of a DOWN request, nor for part of an UP reply. As
> I understand it, we have no issue right now only because no DOWN
> request or UP reply is longer than one SB;
>
> 3. I am not sure a queue is needed for UP replies at all. As soon as
> a full UP request is received, we should send the full reply right
> away, handling AUX_DEFER just in case;
>
> 4. If the points above are right, sending MST messages bigger than
> one SB should be addressed in a separate patch, not in this one.
>

Okay, this makes sense to me.

Ack for all four patches to go via Alex for this merge window.

Alex, can you add Cc: stable to these and send at least these ones
and the two latest ones.

Thanks,
Dave.

end of thread

Thread overview: 7 messages
2015-12-18 22:14 [PATCH 0/2] Fix MST with downstream branches Harry Wentland
2015-12-18 22:14 ` [PATCH 1/2] drm/dp/mst: process broadcast messages correctly Harry Wentland
2015-12-18 22:14 ` [PATCH 2/2] drm/dp/mst: always send reply for UP request Harry Wentland
2015-12-19  3:57   ` Dave Airlie
2015-12-19 16:20     ` Wentland, Harry
2015-12-21  8:43       ` Lysenko, Mykola
2015-12-29 21:07         ` Dave Airlie
