* [PATCH v2 1/5] drm/msm: destroy msm threads after config cleanup
@ 2018-11-06 22:36 Jeykumar Sankaran
From: Jeykumar Sankaran @ 2018-11-06 22:36 UTC (permalink / raw)
  To: dri-devel, freedreno, linux-arm-msm
  Cc: hoegsberg, Jeykumar Sankaran, jcrouse, seanpaul, robdclark

To avoid any further work being queued to the msm kthreads, destroy
the threads only after the CRTC objects are released in config
cleanup.
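
A minimal sketch of the resulting cleanup loop (mirroring the hunk
below; illustrative only, not the complete function):

	int i;

	for (i = 0; i < priv->num_crtcs; i++) {
		if (!priv->disp_thread[i].thread)
			continue;
		/*
		 * kthread_destroy_worker() flushes any outstanding work
		 * and then stops the worker kthread, replacing the racy
		 * two-step kthread_flush_worker() + kthread_stop() pair.
		 */
		kthread_destroy_worker(&priv->disp_thread[i].worker);
		priv->disp_thread[i].thread = NULL;
	}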

changes in v2:
	- fix race condition before kthread flush and stop (Sean Paul)
	- use kthread_destroy_worker for cleaning up kthread (Sean Paul)

Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
---
 drivers/gpu/drm/msm/msm_drv.c | 36 +++++++++++++++++-------------------
 1 file changed, 17 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9c9f7ff..e913059 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -278,6 +278,21 @@ static int msm_drm_uninit(struct device *dev)
 	 * work before drm_irq_uninstall() to avoid work re-enabling an
 	 * irq after uninstall has disabled it.
 	 */
+	msm_gem_shrinker_cleanup(ddev);
+
+	drm_kms_helper_poll_fini(ddev);
+
+	drm_dev_unregister(ddev);
+
+	msm_perf_debugfs_cleanup(priv);
+	msm_rd_debugfs_cleanup(priv);
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+	if (fbdev && priv->fbdev)
+		msm_fbdev_free(ddev);
+#endif
+	drm_mode_config_cleanup(ddev);
+
 	kthread_flush_work(&vbl_ctrl->work);
 	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
 		list_del(&vbl_ev->node);
@@ -287,33 +302,16 @@ static int msm_drm_uninit(struct device *dev)
 	/* clean up display commit/event worker threads */
 	for (i = 0; i < priv->num_crtcs; i++) {
 		if (priv->disp_thread[i].thread) {
-			kthread_flush_worker(&priv->disp_thread[i].worker);
-			kthread_stop(priv->disp_thread[i].thread);
+			kthread_destroy_worker(&priv->disp_thread[i].worker);
 			priv->disp_thread[i].thread = NULL;
 		}
 
 		if (priv->event_thread[i].thread) {
-			kthread_flush_worker(&priv->event_thread[i].worker);
-			kthread_stop(priv->event_thread[i].thread);
+			kthread_destroy_worker(&priv->event_thread[i].worker);
 			priv->event_thread[i].thread = NULL;
 		}
 	}
 
-	msm_gem_shrinker_cleanup(ddev);
-
-	drm_kms_helper_poll_fini(ddev);
-
-	drm_dev_unregister(ddev);
-
-	msm_perf_debugfs_cleanup(priv);
-	msm_rd_debugfs_cleanup(priv);
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-	if (fbdev && priv->fbdev)
-		msm_fbdev_free(ddev);
-#endif
-	drm_mode_config_cleanup(ddev);
-
 	pm_runtime_get_sync(dev);
 	drm_irq_uninstall(ddev);
 	pm_runtime_put_sync(dev);
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project



* [PATCH v2 2/5] drm/msm/dpu: use system wq for vblank events
@ 2018-11-06 22:36   ` Jeykumar Sankaran
From: Jeykumar Sankaran @ 2018-11-06 22:36 UTC (permalink / raw)
  To: dri-devel, freedreno, linux-arm-msm
  Cc: hoegsberg, Jeykumar Sankaran, jcrouse, seanpaul, robdclark

DPU was using one thread per display to dispatch async commits and
vblank requests. Since msm was already cleaned up to use a common
thread for all display commits, the display threads now serve only
vblank requests. A single worker is sufficient for that job with no
performance hit, so queue the requests on the system workqueue
instead. A later patch in this series removes the display threads
altogether.
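
The conversion is the standard kthread_work to work_struct swap; a
minimal sketch of the three touch points (matching the hunks below):

	/* the handler now takes a work_struct */
	static void vblank_ctrl_worker(struct work_struct *work)
	{
		struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
						struct msm_vblank_ctrl, work);
		/* ... drain vbl_ctrl->event_list as before ... */
	}

	/* init time */
	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);

	/* queue time: runs on the shared system workqueue instead of a
	 * per-CRTC kthread worker */
	schedule_work(&vbl_ctrl->work);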

changes in v2:
	- switch to system wq before removing disp threads (Sean Paul)

Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
---
 drivers/gpu/drm/msm/msm_drv.c | 9 ++++-----
 drivers/gpu/drm/msm/msm_drv.h | 2 +-
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index e913059..7d3ca99 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -209,7 +209,7 @@ struct vblank_event {
 	bool enable;
 };
 
-static void vblank_ctrl_worker(struct kthread_work *work)
+static void vblank_ctrl_worker(struct work_struct *work)
 {
 	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
 						struct msm_vblank_ctrl, work);
@@ -257,8 +257,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
 	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
-	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
-			&vbl_ctrl->work);
+	schedule_work(&vbl_ctrl->work);
 
 	return 0;
 }
@@ -293,7 +292,7 @@ static int msm_drm_uninit(struct device *dev)
 #endif
 	drm_mode_config_cleanup(ddev);
 
-	kthread_flush_work(&vbl_ctrl->work);
+	flush_work(&vbl_ctrl->work);
 	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
 		list_del(&vbl_ev->node);
 		kfree(vbl_ev);
@@ -476,7 +475,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
 	INIT_LIST_HEAD(&priv->inactive_list);
 	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
 	spin_lock_init(&priv->vblank_ctrl.lock);
 
 	drm_mode_config_init(ddev);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9d11f32..126345c4 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -78,7 +78,7 @@ enum msm_mdp_plane_property {
 };
 
 struct msm_vblank_ctrl {
-	struct kthread_work work;
+	struct work_struct work;
 	struct list_head event_list;
 	spinlock_t lock;
 };
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project



* [PATCH v2 3/5] drm/msm/dpu: use system wq for idle power collapse
@ 2018-11-06 22:36 ` Jeykumar Sankaran
From: Jeykumar Sankaran @ 2018-11-06 22:36 UTC (permalink / raw)
  To: dri-devel, freedreno, linux-arm-msm; +Cc: hoegsberg, seanpaul

msm now uses the system workqueue to dispatch commit and vblank
events. Switch the idle power collapse feature to the system
workqueue as well for its delayed work handlers, so that msm can get
rid of the redundant display threads.
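
A minimal sketch of the delayed-work conversion (names match the
hunks below; illustrative only):

	/* was: struct kthread_delayed_work delayed_off_work; */
	struct delayed_work delayed_off_work;

	/* init time */
	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, dpu_encoder_off_work);

	/* arm the idle timer on the system workqueue */
	schedule_delayed_work(&dpu_enc->delayed_off_work,
			      msecs_to_jiffies(dpu_enc->idle_timeout));

	/* cancel it synchronously on kickoff/pre-stop */
	if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
		DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
				sw_event);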

changes in v2:
	- patch introduced in v2

Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 26 +++++++-------------------
 1 file changed, 7 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 82c55ef..9b3d1f2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -201,7 +201,7 @@ struct dpu_encoder_virt {
 	bool idle_pc_supported;
 	struct mutex rc_lock;
 	enum dpu_enc_rc_states rc_state;
-	struct kthread_delayed_work delayed_off_work;
+	struct delayed_work delayed_off_work;
 	struct kthread_work vsync_event_work;
 	struct msm_display_topology topology;
 	bool mode_set_complete;
@@ -740,7 +740,6 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
 {
 	struct dpu_encoder_virt *dpu_enc;
 	struct msm_drm_private *priv;
-	struct msm_drm_thread *disp_thread;
 	bool is_vid_mode = false;
 
 	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
@@ -753,12 +752,6 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
 	is_vid_mode = dpu_enc->disp_info.capabilities &
 						MSM_DISPLAY_CAP_VID_MODE;
 
-	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
-		DPU_ERROR("invalid crtc index\n");
-		return -EINVAL;
-	}
-	disp_thread = &priv->disp_thread[drm_enc->crtc->index];
-
 	/*
 	 * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
 	 * events and return early for other events (ie wb display).
@@ -775,8 +768,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
 	switch (sw_event) {
 	case DPU_ENC_RC_EVENT_KICKOFF:
 		/* cancel delayed off work, if any */
-		if (kthread_cancel_delayed_work_sync(
-				&dpu_enc->delayed_off_work))
+		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
 			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
 					sw_event);
 
@@ -835,10 +827,8 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
 			return 0;
 		}
 
-		kthread_queue_delayed_work(
-			&disp_thread->worker,
-			&dpu_enc->delayed_off_work,
-			msecs_to_jiffies(dpu_enc->idle_timeout));
+		schedule_delayed_work(&dpu_enc->delayed_off_work,
+				      msecs_to_jiffies(dpu_enc->idle_timeout));
 
 		trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
 				 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
@@ -847,8 +837,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
 
 	case DPU_ENC_RC_EVENT_PRE_STOP:
 		/* cancel delayed off work, if any */
-		if (kthread_cancel_delayed_work_sync(
-				&dpu_enc->delayed_off_work))
+		if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
 			DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
 					sw_event);
 
@@ -1351,7 +1340,7 @@ static void dpu_encoder_frame_done_callback(
 	}
 }
 
-static void dpu_encoder_off_work(struct kthread_work *work)
+static void dpu_encoder_off_work(struct work_struct *work)
 {
 	struct dpu_encoder_virt *dpu_enc = container_of(work,
 			struct dpu_encoder_virt, delayed_off_work.work);
@@ -2191,8 +2180,7 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
 
 
 	mutex_init(&dpu_enc->rc_lock);
-	kthread_init_delayed_work(&dpu_enc->delayed_off_work,
-			dpu_encoder_off_work);
+	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, dpu_encoder_off_work);
 	dpu_enc->idle_timeout = IDLE_TIMEOUT;
 
 	kthread_init_work(&dpu_enc->vsync_event_work,
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project



* [PATCH v2 4/5] drm/msm: clean up display thread
@ 2018-11-06 22:36   ` Jeykumar Sankaran
From: Jeykumar Sankaran @ 2018-11-06 22:36 UTC (permalink / raw)
  To: dri-devel, freedreno, linux-arm-msm
  Cc: hoegsberg, Jeykumar Sankaran, jcrouse, seanpaul, robdclark

Since there are no clients using these threads anymore,
clean them up.

changes in v2:
	- switch all the dependent clients to use system wq
	  before removing the disp_threads (Sean Paul)

Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
---
 drivers/gpu/drm/msm/msm_drv.c | 35 +----------------------------------
 drivers/gpu/drm/msm/msm_drv.h |  1 -
 2 files changed, 1 insertion(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 7d3ca99..6d6c73b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -298,13 +298,8 @@ static int msm_drm_uninit(struct device *dev)
 		kfree(vbl_ev);
 	}
 
-	/* clean up display commit/event worker threads */
+	/* clean up event worker threads */
 	for (i = 0; i < priv->num_crtcs; i++) {
-		if (priv->disp_thread[i].thread) {
-			kthread_destroy_worker(&priv->disp_thread[i].worker);
-			priv->disp_thread[i].thread = NULL;
-		}
-
 		if (priv->event_thread[i].thread) {
 			kthread_destroy_worker(&priv->event_thread[i].worker);
 			priv->event_thread[i].thread = NULL;
@@ -541,27 +536,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	 */
 	param.sched_priority = 16;
 	for (i = 0; i < priv->num_crtcs; i++) {
-
-		/* initialize display thread */
-		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
-		kthread_init_worker(&priv->disp_thread[i].worker);
-		priv->disp_thread[i].dev = ddev;
-		priv->disp_thread[i].thread =
-			kthread_run(kthread_worker_fn,
-				&priv->disp_thread[i].worker,
-				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
-		if (IS_ERR(priv->disp_thread[i].thread)) {
-			DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
-			priv->disp_thread[i].thread = NULL;
-			goto err_msm_uninit;
-		}
-
-		ret = sched_setscheduler(priv->disp_thread[i].thread,
-					 SCHED_FIFO, &param);
-		if (ret)
-			dev_warn(dev, "disp_thread set priority failed: %d\n",
-				 ret);
-
 		/* initialize event thread */
 		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
 		kthread_init_worker(&priv->event_thread[i].worker);
@@ -576,13 +550,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 			goto err_msm_uninit;
 		}
 
-		/**
-		 * event thread should also run at same priority as disp_thread
-		 * because it is handling frame_done events. A lower priority
-		 * event thread and higher priority disp_thread can causes
-		 * frame_pending counters beyond 2. This can lead to commit
-		 * failure at crtc commit level.
-		 */
 		ret = sched_setscheduler(priv->event_thread[i].thread,
 					 SCHED_FIFO, &param);
 		if (ret)
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 126345c4..05d33a7 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -197,7 +197,6 @@ struct msm_drm_private {
 	unsigned int num_crtcs;
 	struct drm_crtc *crtcs[MAX_CRTCS];
 
-	struct msm_drm_thread disp_thread[MAX_CRTCS];
 	struct msm_drm_thread event_thread[MAX_CRTCS];
 
 	unsigned int num_encoders;
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project



* [PATCH v2 5/5] drm/msm: subclass work object for vblank events
@ 2018-11-06 22:36   ` Jeykumar Sankaran
From: Jeykumar Sankaran @ 2018-11-06 22:36 UTC (permalink / raw)
  To: dri-devel, freedreno, linux-arm-msm
  Cc: hoegsberg, Jeykumar Sankaran, jcrouse, seanpaul, robdclark

msm maintains a separate structure for the vblank work definitions
and a list to track the events submitted to the workqueue. We can
avoid this redundant list and its locking if we subclass the work
object to encapsulate the vblank event parameters.
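
The subclassing uses the usual container_of() pattern; a minimal
sketch (matching the hunks below):

	struct msm_vblank_work {
		struct work_struct work;	/* embedded base object */
		int crtc_id;
		bool enable;
		struct msm_drm_private *priv;
	};

	static void vblank_ctrl_worker(struct work_struct *work)
	{
		struct msm_vblank_work *vbl_work = container_of(work,
						struct msm_vblank_work, work);

		/* the event parameters travel inside the work item, so
		 * no shared list or spinlock is needed */
		/* ... enable/disable vblank for vbl_work->crtc_id ... */

		kfree(vbl_work);	/* one-shot work, allocated per request */
	}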

changes in v2:
	- subclass optimization on system wq (Sean Paul)

Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
---
 drivers/gpu/drm/msm/msm_drv.c | 67 +++++++++++++------------------------------
 drivers/gpu/drm/msm/msm_drv.h |  7 -----
 2 files changed, 20 insertions(+), 54 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 6d6c73b..8da5be2 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
 	return val;
 }
 
-struct vblank_event {
-	struct list_head node;
+struct msm_vblank_work {
+	struct work_struct work;
 	int crtc_id;
 	bool enable;
+	struct msm_drm_private *priv;
 };
 
 static void vblank_ctrl_worker(struct work_struct *work)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
-						struct msm_vblank_ctrl, work);
-	struct msm_drm_private *priv = container_of(vbl_ctrl,
-					struct msm_drm_private, vblank_ctrl);
+	struct msm_vblank_work *vbl_work = container_of(work,
+						struct msm_vblank_work, work);
+	struct msm_drm_private *priv = vbl_work->priv;
 	struct msm_kms *kms = priv->kms;
-	struct vblank_event *vbl_ev, *tmp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
-
-		if (vbl_ev->enable)
-			kms->funcs->enable_vblank(kms,
-						priv->crtcs[vbl_ev->crtc_id]);
-		else
-			kms->funcs->disable_vblank(kms,
-						priv->crtcs[vbl_ev->crtc_id]);
 
-		kfree(vbl_ev);
-
-		spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	}
+	if (vbl_work->enable)
+		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
+	else
+		kms->funcs->disable_vblank(kms,	priv->crtcs[vbl_work->crtc_id]);
 
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	kfree(vbl_work);
 }
 
 static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 					int crtc_id, bool enable)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev;
-	unsigned long flags;
+	struct msm_vblank_work *vbl_work;
 
-	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
-	if (!vbl_ev)
+	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
+	if (!vbl_work)
 		return -ENOMEM;
 
-	vbl_ev->crtc_id = crtc_id;
-	vbl_ev->enable = enable;
+	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
 
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	vbl_work->crtc_id = crtc_id;
+	vbl_work->enable = enable;
+	vbl_work->priv = priv;
 
-	schedule_work(&vbl_ctrl->work);
+	schedule_work(&vbl_work->work);
 
 	return 0;
 }
@@ -269,14 +252,13 @@ static int msm_drm_uninit(struct device *dev)
 	struct msm_drm_private *priv = ddev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_mdss *mdss = priv->mdss;
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev, *tmp;
 	int i;
 
 	/* We must cancel and cleanup any pending vblank enable/disable
 	 * work before drm_irq_uninstall() to avoid work re-enabling an
 	 * irq after uninstall has disabled it.
 	 */
+
 	msm_gem_shrinker_cleanup(ddev);
 
 	drm_kms_helper_poll_fini(ddev);
@@ -292,12 +274,6 @@ static int msm_drm_uninit(struct device *dev)
 #endif
 	drm_mode_config_cleanup(ddev);
 
-	flush_work(&vbl_ctrl->work);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		kfree(vbl_ev);
-	}
-
 	/* clean up event worker threads */
 	for (i = 0; i < priv->num_crtcs; i++) {
 		if (priv->event_thread[i].thread) {
@@ -469,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	priv->wq = alloc_ordered_workqueue("msm", 0);
 
 	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
-	spin_lock_init(&priv->vblank_ctrl.lock);
 
 	drm_mode_config_init(ddev);
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 05d33a7..d4cbde2 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
 	PLANE_PROP_MAX_NUM
 };
 
-struct msm_vblank_ctrl {
-	struct work_struct work;
-	struct list_head event_list;
-	spinlock_t lock;
-};
-
 #define MSM_GPU_MAX_RINGS 4
 #define MAX_H_TILES_PER_DISPLAY 2
 
@@ -225,7 +219,6 @@ struct msm_drm_private {
 	struct notifier_block vmap_notifier;
 	struct shrinker shrinker;
 
-	struct msm_vblank_ctrl vblank_ctrl;
 	struct drm_atomic_state *pm_state;
 };
 
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project



* Re: [PATCH v2 5/5] drm/msm: subclass work object for vblank events
@ 2018-11-06 23:15       ` Jordan Crouse
From: Jordan Crouse @ 2018-11-06 23:15 UTC (permalink / raw)
  To: Jeykumar Sankaran
  Cc: linux-arm-msm, dri-devel, robdclark, seanpaul, hoegsberg, freedreno

On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran wrote:
> msm maintains a separate structure to define vblank
> work definitions and a list to track events submitted
> to the workqueue. We can avoid this redundant list
> and its protection mechanism, if we subclass the
> work object to encapsulate vblank event parameters.
> 
> changes in v2:
> 	- subclass optimization on system wq (Sean Paul)
> 
> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
> ---
>  drivers/gpu/drm/msm/msm_drv.c | 67 +++++++++++++------------------------------
>  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>  2 files changed, 20 insertions(+), 54 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index 6d6c73b..8da5be2 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
>  	return val;
>  }
>  
> -struct vblank_event {
> -	struct list_head node;
> +struct msm_vblank_work {
> +	struct work_struct work;
>  	int crtc_id;
>  	bool enable;
> +	struct msm_drm_private *priv;
>  };
>  
>  static void vblank_ctrl_worker(struct work_struct *work)
>  {
> -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
> -						struct msm_vblank_ctrl, work);
> -	struct msm_drm_private *priv = container_of(vbl_ctrl,
> -					struct msm_drm_private, vblank_ctrl);
> +	struct msm_vblank_work *vbl_work = container_of(work,
> +						struct msm_vblank_work, work);
> +	struct msm_drm_private *priv = vbl_work->priv;
>  	struct msm_kms *kms = priv->kms;
> -	struct vblank_event *vbl_ev, *tmp;
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
> -		list_del(&vbl_ev->node);
> -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> -
> -		if (vbl_ev->enable)
> -			kms->funcs->enable_vblank(kms,
> -						priv->crtcs[vbl_ev->crtc_id]);
> -		else
> -			kms->funcs->disable_vblank(kms,
> -						priv->crtcs[vbl_ev->crtc_id]);
>  
> -		kfree(vbl_ev);
> -
> -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	}
> +	if (vbl_work->enable)
> +		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
> +	else
> +		kms->funcs->disable_vblank(kms,	priv->crtcs[vbl_work->crtc_id]);
>  
> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> +	kfree(vbl_work);
>  }
>  
>  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>  					int crtc_id, bool enable)
>  {
> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> -	struct vblank_event *vbl_ev;
> -	unsigned long flags;
> +	struct msm_vblank_work *vbl_work;
>  
> -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
> -	if (!vbl_ev)
> +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
> +	if (!vbl_work)
>  		return -ENOMEM;
>  
> -	vbl_ev->crtc_id = crtc_id;
> -	vbl_ev->enable = enable;
> +	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
>  
> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> +	vbl_work->crtc_id = crtc_id;
> +	vbl_work->enable = enable;
> +	vbl_work->priv = priv;
>  
> -	schedule_work(&vbl_ctrl->work);
> +	schedule_work(&vbl_work->work);
>  
>  	return 0;
>  }
> @@ -269,14 +252,13 @@ static int msm_drm_uninit(struct device *dev)
>  	struct msm_drm_private *priv = ddev->dev_private;
>  	struct msm_kms *kms = priv->kms;
>  	struct msm_mdss *mdss = priv->mdss;
> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> -	struct vblank_event *vbl_ev, *tmp;
>  	int i;
>  
>  	/* We must cancel and cleanup any pending vblank enable/disable
>  	 * work before drm_irq_uninstall() to avoid work re-enabling an
>  	 * irq after uninstall has disabled it.
>  	 */
> +

Unneeded newline here, but does the comment above still make any sense? It
doesn't have anything to do with the shrinker, for sure.

>  	msm_gem_shrinker_cleanup(ddev);
>  
>  	drm_kms_helper_poll_fini(ddev);
> @@ -292,12 +274,6 @@ static int msm_drm_uninit(struct device *dev)
>  #endif
>  	drm_mode_config_cleanup(ddev);
>  
> -	flush_work(&vbl_ctrl->work);
> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
> -		list_del(&vbl_ev->node);
> -		kfree(vbl_ev);
> -	}
> -
>  	/* clean up event worker threads */
>  	for (i = 0; i < priv->num_crtcs; i++) {
>  		if (priv->event_thread[i].thread) {
> @@ -469,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
>  	priv->wq = alloc_ordered_workqueue("msm", 0);
>  
>  	INIT_LIST_HEAD(&priv->inactive_list);
> -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
> -	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
> -	spin_lock_init(&priv->vblank_ctrl.lock);
>  
>  	drm_mode_config_init(ddev);
>  
> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> index 05d33a7..d4cbde2 100644
> --- a/drivers/gpu/drm/msm/msm_drv.h
> +++ b/drivers/gpu/drm/msm/msm_drv.h
> @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>  	PLANE_PROP_MAX_NUM
>  };
>  
> -struct msm_vblank_ctrl {
> -	struct work_struct work;
> -	struct list_head event_list;
> -	spinlock_t lock;
> -};
> -
>  #define MSM_GPU_MAX_RINGS 4
>  #define MAX_H_TILES_PER_DISPLAY 2
>  
> @@ -225,7 +219,6 @@ struct msm_drm_private {
>  	struct notifier_block vmap_notifier;
>  	struct shrinker shrinker;
>  
> -	struct msm_vblank_ctrl vblank_ctrl;
>  	struct drm_atomic_state *pm_state;
>  };

-- 
The Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
a Linux Foundation Collaborative Project


* Re: [PATCH v2 1/5] drm/msm: destroy msm threads after config cleanup
@ 2018-11-07 15:42   ` Sean Paul
From: Sean Paul @ 2018-11-07 15:42 UTC (permalink / raw)
  To: Jeykumar Sankaran
  Cc: linux-arm-msm, robdclark, dri-devel, jcrouse, seanpaul, hoegsberg, freedreno

On Tue, Nov 06, 2018 at 02:36:26PM -0800, Jeykumar Sankaran wrote:
> To avoid any possible work queues to msm threads, clean up
> the threads after the CRTC objects are released in
> config cleanup.
> 
> changes in v2:
> 	- fix race condition before kthread flush and stop (Sean Paul)
> 	- use kthread_destroy_worker for cleaning up kthread (Sean Paul)
> 
> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
> ---
>  drivers/gpu/drm/msm/msm_drv.c | 36 +++++++++++++++++-------------------
>  1 file changed, 17 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index 9c9f7ff..e913059 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -278,6 +278,21 @@ static int msm_drm_uninit(struct device *dev)
>  	 * work before drm_irq_uninstall() to avoid work re-enabling an
>  	 * irq after uninstall has disabled it.
>  	 */
> +	msm_gem_shrinker_cleanup(ddev);
> +
> +	drm_kms_helper_poll_fini(ddev);
> +
> +	drm_dev_unregister(ddev);
> +
> +	msm_perf_debugfs_cleanup(priv);
> +	msm_rd_debugfs_cleanup(priv);
> +
> +#ifdef CONFIG_DRM_FBDEV_EMULATION
> +	if (fbdev && priv->fbdev)
> +		msm_fbdev_free(ddev);
> +#endif
> +	drm_mode_config_cleanup(ddev);
> +
>  	kthread_flush_work(&vbl_ctrl->work);

What happens in the vbl_ctrl thread when it runs with drm resources cleaned up?

Sean

>  	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
>  		list_del(&vbl_ev->node);
> @@ -287,33 +302,16 @@ static int msm_drm_uninit(struct device *dev)
>  	/* clean up display commit/event worker threads */
>  	for (i = 0; i < priv->num_crtcs; i++) {
>  		if (priv->disp_thread[i].thread) {
> -			kthread_flush_worker(&priv->disp_thread[i].worker);
> -			kthread_stop(priv->disp_thread[i].thread);
> +			kthread_destroy_worker(&priv->disp_thread[i].worker);
>  			priv->disp_thread[i].thread = NULL;
>  		}
>  
>  		if (priv->event_thread[i].thread) {
> -			kthread_flush_worker(&priv->event_thread[i].worker);
> -			kthread_stop(priv->event_thread[i].thread);
> +			kthread_destroy_worker(&priv->event_thread[i].worker);
>  			priv->event_thread[i].thread = NULL;
>  		}
>  	}
>  
> -	msm_gem_shrinker_cleanup(ddev);
> -
> -	drm_kms_helper_poll_fini(ddev);
> -
> -	drm_dev_unregister(ddev);
> -
> -	msm_perf_debugfs_cleanup(priv);
> -	msm_rd_debugfs_cleanup(priv);
> -
> -#ifdef CONFIG_DRM_FBDEV_EMULATION
> -	if (fbdev && priv->fbdev)
> -		msm_fbdev_free(ddev);
> -#endif
> -	drm_mode_config_cleanup(ddev);
> -
>  	pm_runtime_get_sync(dev);
>  	drm_irq_uninstall(ddev);
>  	pm_runtime_put_sync(dev);
> -- 
> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
> a Linux Foundation Collaborative Project
> 

-- 
Sean Paul, Software Engineer, Google / Chromium OS


* Re: [PATCH v2 5/5] drm/msm: subclass work object for vblank events
@ 2018-11-07 15:55       ` Sean Paul
From: Sean Paul @ 2018-11-07 15:55 UTC (permalink / raw)
  To: Jeykumar Sankaran
  Cc: linux-arm-msm, robdclark, dri-devel, jcrouse, seanpaul, hoegsberg, freedreno

On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran wrote:
> msm maintains a separate structure to define vblank
> work definitions and a list to track events submitted
> to the workqueue. We can avoid this redundant list
> and its protection mechanism, if we subclass the
> work object to encapsulate vblank event parameters.
> 
> changes in v2:
> 	- subclass optimization on system wq (Sean Paul)

I wouldn't do it like this, tbh. One problem is that you've lost your flush() on
unbind, so there's no way to know if you have workers in the wild waiting to
enable/disable vblank.

Another issue is that AFAICT, we don't need a queue of enables/disables, but
rather just the last requested state (ie: should we be on or off). So things
don't need to be this complicated (and we're possibly thrashing vblank on/off
for no reason).
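
A rough sketch of that "last requested state" idea (illustrative only,
the names here are hypothetical, not driver code): keep one persistent
work item plus a bitmap of desired per-CRTC states, so back-to-back
requests collapse into the latest state and unbind can just
flush_work() the one item:

	struct msm_vblank_ctrl {
		struct work_struct work;
		DECLARE_BITMAP(requested, MAX_CRTCS);	/* desired state */
	};

	static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
					  int crtc_id, bool enable)
	{
		struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;

		if (enable)
			set_bit(crtc_id, vbl_ctrl->requested);
		else
			clear_bit(crtc_id, vbl_ctrl->requested);

		/* scheduling an already-pending work is a no-op, so the
		 * worker always applies the most recent request */
		schedule_work(&vbl_ctrl->work);
		return 0;
	}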

I'm still of the mind that you should just make this synchronous and be done
with the threads (especially since we're still uncovering/introducing races!).

Sean

> 
> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
> ---
>  drivers/gpu/drm/msm/msm_drv.c | 67 +++++++++++++------------------------------
>  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>  2 files changed, 20 insertions(+), 54 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index 6d6c73b..8da5be2 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
>  	return val;
>  }
>  
> -struct vblank_event {
> -	struct list_head node;
> +struct msm_vblank_work {
> +	struct work_struct work;
>  	int crtc_id;
>  	bool enable;
> +	struct msm_drm_private *priv;
>  };
>  
>  static void vblank_ctrl_worker(struct work_struct *work)
>  {
> -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
> -						struct msm_vblank_ctrl, work);
> -	struct msm_drm_private *priv = container_of(vbl_ctrl,
> -					struct msm_drm_private, vblank_ctrl);
> +	struct msm_vblank_work *vbl_work = container_of(work,
> +						struct msm_vblank_work, work);
> +	struct msm_drm_private *priv = vbl_work->priv;
>  	struct msm_kms *kms = priv->kms;
> -	struct vblank_event *vbl_ev, *tmp;
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
> -		list_del(&vbl_ev->node);
> -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> -
> -		if (vbl_ev->enable)
> -			kms->funcs->enable_vblank(kms,
> -						priv->crtcs[vbl_ev->crtc_id]);
> -		else
> -			kms->funcs->disable_vblank(kms,
> -						priv->crtcs[vbl_ev->crtc_id]);
>  
> -		kfree(vbl_ev);
> -
> -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	}
> +	if (vbl_work->enable)
> +		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
> +	else
> +		kms->funcs->disable_vblank(kms,	priv->crtcs[vbl_work->crtc_id]);
>  
> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> +	kfree(vbl_work);
>  }
>  
>  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>  					int crtc_id, bool enable)
>  {
> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> -	struct vblank_event *vbl_ev;
> -	unsigned long flags;
> +	struct msm_vblank_work *vbl_work;
>  
> -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
> -	if (!vbl_ev)
> +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
> +	if (!vbl_work)
>  		return -ENOMEM;
>  
> -	vbl_ev->crtc_id = crtc_id;
> -	vbl_ev->enable = enable;
> +	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
>  
> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> +	vbl_work->crtc_id = crtc_id;
> +	vbl_work->enable = enable;
> +	vbl_work->priv = priv;
>  
> -	schedule_work(&vbl_ctrl->work);
> +	schedule_work(&vbl_work->work);
>  
>  	return 0;
>  }
> @@ -269,14 +252,13 @@ static int msm_drm_uninit(struct device *dev)
>  	struct msm_drm_private *priv = ddev->dev_private;
>  	struct msm_kms *kms = priv->kms;
>  	struct msm_mdss *mdss = priv->mdss;
> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> -	struct vblank_event *vbl_ev, *tmp;
>  	int i;
>  
>  	/* We must cancel and cleanup any pending vblank enable/disable
>  	 * work before drm_irq_uninstall() to avoid work re-enabling an
>  	 * irq after uninstall has disabled it.
>  	 */
> +
>  	msm_gem_shrinker_cleanup(ddev);
>  
>  	drm_kms_helper_poll_fini(ddev);
> @@ -292,12 +274,6 @@ static int msm_drm_uninit(struct device *dev)
>  #endif
>  	drm_mode_config_cleanup(ddev);
>  
> -	flush_work(&vbl_ctrl->work);
> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
> -		list_del(&vbl_ev->node);
> -		kfree(vbl_ev);
> -	}
> -
>  	/* clean up event worker threads */
>  	for (i = 0; i < priv->num_crtcs; i++) {
>  		if (priv->event_thread[i].thread) {
> @@ -469,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
>  	priv->wq = alloc_ordered_workqueue("msm", 0);
>  
>  	INIT_LIST_HEAD(&priv->inactive_list);
> -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
> -	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
> -	spin_lock_init(&priv->vblank_ctrl.lock);
>  
>  	drm_mode_config_init(ddev);
>  
> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> index 05d33a7..d4cbde2 100644
> --- a/drivers/gpu/drm/msm/msm_drv.h
> +++ b/drivers/gpu/drm/msm/msm_drv.h
> @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>  	PLANE_PROP_MAX_NUM
>  };
>  
> -struct msm_vblank_ctrl {
> -	struct work_struct work;
> -	struct list_head event_list;
> -	spinlock_t lock;
> -};
> -
>  #define MSM_GPU_MAX_RINGS 4
>  #define MAX_H_TILES_PER_DISPLAY 2
>  
> @@ -225,7 +219,6 @@ struct msm_drm_private {
>  	struct notifier_block vmap_notifier;
>  	struct shrinker shrinker;
>  
> -	struct msm_vblank_ctrl vblank_ctrl;
>  	struct drm_atomic_state *pm_state;
>  };
>  
> -- 
> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
> a Linux Foundation Collaborative Project
> 

-- 
Sean Paul, Software Engineer, Google / Chromium OS


* Re: [PATCH v2 5/5] drm/msm: subclass work object for vblank events
@ 2018-11-20 22:04         ` Jeykumar Sankaran
From: Jeykumar Sankaran @ 2018-11-20 22:04 UTC (permalink / raw)
  To: Sean Paul
  Cc: linux-arm-msm, robdclark, dri-devel, jcrouse, seanpaul, hoegsberg, freedreno

On 2018-11-07 07:55, Sean Paul wrote:
> On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran wrote:
>> msm maintains a separate structure to define vblank
>> work definitions and a list to track events submitted
>> to the workqueue. We can avoid this redundant list
>> and its protection mechanism, if we subclass the
>> work object to encapsulate vblank event parameters.
>> 
>> changes in v2:
>> 	- subclass optimization on system wq (Sean Paul)
> 
> I wouldn't do it like this, tbh. One problem is that you've lost your
> flush() on
> unbind, so there's no way to know if you have workers in the wild 
> waiting
> to
> enable/disable vblank.
> 
> Another issues is that AFAICT, we don't need a queue of 
> enables/disables,
> but
> rather just the last requested state (ie: should we be on or off). So
> things
> don't need to be this complicated (and we're possibly thrashing vblank
> on/off
> for no reason).
> 
> I'm still of the mind that you should just make this synchronous and be
> done
> with the threads (especially since we're still uncovering/introducing
> races!).
> 
While scoping out the effort to make vblank events synchronous, I found
that the spinlock locking orders of the vblank request path and the
vblank callback path are opposite.

In DPU, drm_vblank_enable acquires vblank_time_lock and then registers
the crtc with the encoder, which requires taking the encoder_spinlock.
But the vblank callback acquires the encoder_spinlock before accessing
the registered crtc and calling into drm_handle_vblank, which in turn
tries to acquire the vblank_time_lock. Acquiring both vblank_time_lock
and encoder_spinlock in the same thread can therefore deadlock.
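
Schematically (my reading of the two paths, not literal code):

	/*
	 * vblank enable path:              vblank irq path:
	 *
	 *   lock(vblank_time_lock)           lock(enc_spinlock)
	 *     ...                              drm_handle_vblank()
	 *     lock(enc_spinlock)                 lock(vblank_time_lock)
	 *
	 * i.e. A -> B on one side and B -> A on the other, the classic
	 * AB-BA inversion if both run concurrently.
	 */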

In MDP5, I see the same pattern between the vblank_time_lock and the
list_lock that is used to track the irq handlers.

I believe that explains why msm_drv is queuing the vblank
enable/disable work to the WQ after acquiring the vblank_time_lock.

Thanks,
Jeykumar S.

> Sean
> 
>> 
>> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
>> ---
>>  drivers/gpu/drm/msm/msm_drv.c | 67
> +++++++++++++------------------------------
>>  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>>  2 files changed, 20 insertions(+), 54 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/msm/msm_drv.c
> b/drivers/gpu/drm/msm/msm_drv.c
>> index 6d6c73b..8da5be2 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.c
>> +++ b/drivers/gpu/drm/msm/msm_drv.c
>> @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
>>  	return val;
>>  }
>> 
>> -struct vblank_event {
>> -	struct list_head node;
>> +struct msm_vblank_work {
>> +	struct work_struct work;
>>  	int crtc_id;
>>  	bool enable;
>> +	struct msm_drm_private *priv;
>>  };
>> 
>>  static void vblank_ctrl_worker(struct work_struct *work)
>>  {
>> -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
>> -						struct msm_vblank_ctrl,
> work);
>> -	struct msm_drm_private *priv = container_of(vbl_ctrl,
>> -					struct msm_drm_private,
> vblank_ctrl);
>> +	struct msm_vblank_work *vbl_work = container_of(work,
>> +						struct msm_vblank_work,
> work);
>> +	struct msm_drm_private *priv = vbl_work->priv;
>>  	struct msm_kms *kms = priv->kms;
>> -	struct vblank_event *vbl_ev, *tmp;
>> -	unsigned long flags;
>> -
>> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node)
> {
>> -		list_del(&vbl_ev->node);
>> -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> -
>> -		if (vbl_ev->enable)
>> -			kms->funcs->enable_vblank(kms,
>> -
> priv->crtcs[vbl_ev->crtc_id]);
>> -		else
>> -			kms->funcs->disable_vblank(kms,
>> -
> priv->crtcs[vbl_ev->crtc_id]);
>> 
>> -		kfree(vbl_ev);
>> -
>> -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> -	}
>> +	if (vbl_work->enable)
>> +		kms->funcs->enable_vblank(kms,
> priv->crtcs[vbl_work->crtc_id]);
>> +	else
>> +		kms->funcs->disable_vblank(kms,
> priv->crtcs[vbl_work->crtc_id]);
>> 
>> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> +	kfree(vbl_work);
>>  }
>> 
>>  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>>  					int crtc_id, bool enable)
>>  {
>> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> -	struct vblank_event *vbl_ev;
>> -	unsigned long flags;
>> +	struct msm_vblank_work *vbl_work;
>> 
>> -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>> -	if (!vbl_ev)
>> +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
>> +	if (!vbl_work)
>>  		return -ENOMEM;
>> 
>> -	vbl_ev->crtc_id = crtc_id;
>> -	vbl_ev->enable = enable;
>> +	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
>> 
>> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> +	vbl_work->crtc_id = crtc_id;
>> +	vbl_work->enable = enable;
>> +	vbl_work->priv = priv;
>> 
>> -	schedule_work(&vbl_ctrl->work);
>> +	schedule_work(&vbl_work->work);
>> 
>>  	return 0;
>>  }
>> @@ -269,14 +252,13 @@ static int msm_drm_uninit(struct device *dev)
>>  	struct msm_drm_private *priv = ddev->dev_private;
>>  	struct msm_kms *kms = priv->kms;
>>  	struct msm_mdss *mdss = priv->mdss;
>> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> -	struct vblank_event *vbl_ev, *tmp;
>>  	int i;
>> 
>>  	/* We must cancel and cleanup any pending vblank enable/disable
>>  	 * work before drm_irq_uninstall() to avoid work re-enabling an
>>  	 * irq after uninstall has disabled it.
>>  	 */
>> +
>>  	msm_gem_shrinker_cleanup(ddev);
>> 
>>  	drm_kms_helper_poll_fini(ddev);
>> @@ -292,12 +274,6 @@ static int msm_drm_uninit(struct device *dev)
>>  #endif
>>  	drm_mode_config_cleanup(ddev);
>> 
>> -	flush_work(&vbl_ctrl->work);
>> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node)
> {
>> -		list_del(&vbl_ev->node);
>> -		kfree(vbl_ev);
>> -	}
>> -
>>  	/* clean up event worker threads */
>>  	for (i = 0; i < priv->num_crtcs; i++) {
>>  		if (priv->event_thread[i].thread) {
>> @@ -469,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct
> drm_driver *drv)
>>  	priv->wq = alloc_ordered_workqueue("msm", 0);
>> 
>>  	INIT_LIST_HEAD(&priv->inactive_list);
>> -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
>> -	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
>> -	spin_lock_init(&priv->vblank_ctrl.lock);
>> 
>>  	drm_mode_config_init(ddev);
>> 
>> diff --git a/drivers/gpu/drm/msm/msm_drv.h
> b/drivers/gpu/drm/msm/msm_drv.h
>> index 05d33a7..d4cbde2 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.h
>> +++ b/drivers/gpu/drm/msm/msm_drv.h
>> @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>>  	PLANE_PROP_MAX_NUM
>>  };
>> 
>> -struct msm_vblank_ctrl {
>> -	struct work_struct work;
>> -	struct list_head event_list;
>> -	spinlock_t lock;
>> -};
>> -
>>  #define MSM_GPU_MAX_RINGS 4
>>  #define MAX_H_TILES_PER_DISPLAY 2
>> 
>> @@ -225,7 +219,6 @@ struct msm_drm_private {
>>  	struct notifier_block vmap_notifier;
>>  	struct shrinker shrinker;
>> 
>> -	struct msm_vblank_ctrl vblank_ctrl;
>>  	struct drm_atomic_state *pm_state;
>>  };
>> 
>> --
>> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
> Forum,
>> a Linux Foundation Collaborative Project
>> 

-- 
Jeykumar S


* Re: [PATCH v2 5/5] drm/msm: subclass work object for vblank events
@ 2018-11-29 22:15             ` Sean Paul
From: Sean Paul @ 2018-11-29 22:15 UTC (permalink / raw)
  To: Jeykumar Sankaran
  Cc: Sean Paul, robdclark, dri-devel, jcrouse, hoegsberg, linux-arm-msm, seanpaul, freedreno

On Tue, Nov 20, 2018 at 02:04:14PM -0800, Jeykumar Sankaran wrote:
> On 2018-11-07 07:55, Sean Paul wrote:
> > On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran wrote:
> > > msm maintains a separate structure to define vblank
> > > work definitions and a list to track events submitted
> > > to the workqueue. We can avoid this redundant list
> > > and its protection mechanism, if we subclass the
> > > work object to encapsulate vblank event parameters.
> > > 
> > > changes in v2:
> > > 	- subclass optimization on system wq (Sean Paul)
> > 
> > I wouldn't do it like this, tbh. One problem is that you've lost your
> > flush() on
> > unbind, so there's no way to know if you have workers in the wild
> > waiting
> > to
> > enable/disable vblank.
> > 
> > Another issues is that AFAICT, we don't need a queue of
> > enables/disables,
> > but
> > rather just the last requested state (ie: should we be on or off). So
> > things
> > don't need to be this complicated (and we're possibly thrashing vblank
> > on/off
> > for no reason).
> > 
> > I'm still of the mind that you should just make this synchronous and be
> > done
> > with the threads (especially since we're still uncovering/introducing
> > races!).
> > 
> While scoping out the effort to make vblank events synchronous, I found
> that the spinlock locking order of vblank request sequence and vblank
> callback
> sequences are the opposite.
> 
> In DPU, drm_vblank_enable acquires vblank_time_lock before registering
> the crtc to encoder which happens after acquiring encoder_spinlock. But
> the vblank_callback acquires encoder_spinlock before accessing the
> registered
> crtc and calling into drm_vblank_handler which tries to acquire
> vblank_time_lock.
> Acquiring both vblank_time_lock and encoder_spinlock in the same thread
> is leading to deadlock.

Hmm, I'm not sure I follow. Are you seeing issues where irq overlaps with
enable/disable? I hacked in sync vblank enable/disable quickly to see if I could
reproduce what you're seeing, but things seemed well behaved.

I do see that there is a chance to call drm_handle_vblank() while holding
enc_spinlock, but couldn't find any obvious lock recursion there.

Maybe a callstack or lockdep splat would help?

Sean


Here's my hack to bypass the display thread:

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9c9f7ff6960b38..5a3cac5825319e 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -242,24 +242,19 @@ static void vblank_ctrl_worker(struct kthread_work *work)
 static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 					int crtc_id, bool enable)
 {
+	struct msm_kms *kms = priv->kms;
 	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev;
 	unsigned long flags;
 
-	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
-	if (!vbl_ev)
-		return -ENOMEM;
+	spin_lock_irqsave(&vbl_ctrl->lock, flags);
 
-	vbl_ev->crtc_id = crtc_id;
-	vbl_ev->enable = enable;
+	if (enable)
+		kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
+	else
+		kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
 
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
 	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
-	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
-			&vbl_ctrl->work);
-
 	return 0;
 }



> 
> In MDP5, I see the same pattern between vblank_time_lock and list_lock which
> is used to track the irq handlers.
> 
> I believe that explains why msm_drv is queuing the vblank enable/disable
> works to WQ after acquiring vblank_time_lock.
> 
> Thanks,
> Jeykumar S.
> 
> > Sean
> > 
> > > 
> > > Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
> > > ---
> > >  drivers/gpu/drm/msm/msm_drv.c | 67
> > +++++++++++++------------------------------
> > >  drivers/gpu/drm/msm/msm_drv.h |  7 -----
> > >  2 files changed, 20 insertions(+), 54 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/msm/msm_drv.c
> > b/drivers/gpu/drm/msm/msm_drv.c
> > > index 6d6c73b..8da5be2 100644
> > > --- a/drivers/gpu/drm/msm/msm_drv.c
> > > +++ b/drivers/gpu/drm/msm/msm_drv.c
> > > @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
> > >  	return val;
> > >  }
> > > 
> > > -struct vblank_event {
> > > -	struct list_head node;
> > > +struct msm_vblank_work {
> > > +	struct work_struct work;
> > >  	int crtc_id;
> > >  	bool enable;
> > > +	struct msm_drm_private *priv;
> > >  };
> > > 
> > >  static void vblank_ctrl_worker(struct work_struct *work)
> > >  {
> > > -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
> > > -						struct msm_vblank_ctrl, work);
> > > -	struct msm_drm_private *priv = container_of(vbl_ctrl,
> > > -					struct msm_drm_private, vblank_ctrl);
> > > +	struct msm_vblank_work *vbl_work = container_of(work,
> > > +						struct msm_vblank_work, work);
> > > +	struct msm_drm_private *priv = vbl_work->priv;
> > >  	struct msm_kms *kms = priv->kms;
> > > -	struct vblank_event *vbl_ev, *tmp;
> > > -	unsigned long flags;
> > > -
> > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > > -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
> > > -		list_del(&vbl_ev->node);
> > > -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> > > -
> > > -		if (vbl_ev->enable)
> > > -			kms->funcs->enable_vblank(kms,
> > > -						priv->crtcs[vbl_ev->crtc_id]);
> > > -		else
> > > -			kms->funcs->disable_vblank(kms,
> > > -						priv->crtcs[vbl_ev->crtc_id]);
> > > 
> > > -		kfree(vbl_ev);
> > > -
> > > -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > > -	}
> > > +	if (vbl_work->enable)
> > > +		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
> > > +	else
> > > +		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
> > > 
> > > -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> > > +	kfree(vbl_work);
> > >  }
> > > 
> > >  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
> > >  					int crtc_id, bool enable)
> > >  {
> > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> > > -	struct vblank_event *vbl_ev;
> > > -	unsigned long flags;
> > > +	struct msm_vblank_work *vbl_work;
> > > 
> > > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
> > > -	if (!vbl_ev)
> > > +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
> > > +	if (!vbl_work)
> > >  		return -ENOMEM;
> > > 
> > > -	vbl_ev->crtc_id = crtc_id;
> > > -	vbl_ev->enable = enable;
> > > +	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
> > > 
> > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
> > > -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> > > +	vbl_work->crtc_id = crtc_id;
> > > +	vbl_work->enable = enable;
> > > +	vbl_work->priv = priv;
> > > 
> > > -	schedule_work(&vbl_ctrl->work);
> > > +	schedule_work(&vbl_work->work);
> > > 
> > >  	return 0;
> > >  }
> > > @@ -269,14 +252,13 @@ static int msm_drm_uninit(struct device *dev)
> > >  	struct msm_drm_private *priv = ddev->dev_private;
> > >  	struct msm_kms *kms = priv->kms;
> > >  	struct msm_mdss *mdss = priv->mdss;
> > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> > > -	struct vblank_event *vbl_ev, *tmp;
> > >  	int i;
> > > 
> > >  	/* We must cancel and cleanup any pending vblank enable/disable
> > >  	 * work before drm_irq_uninstall() to avoid work re-enabling an
> > >  	 * irq after uninstall has disabled it.
> > >  	 */
> > > +
> > >  	msm_gem_shrinker_cleanup(ddev);
> > > 
> > >  	drm_kms_helper_poll_fini(ddev);
> > > @@ -292,12 +274,6 @@ static int msm_drm_uninit(struct device *dev)
> > >  #endif
> > >  	drm_mode_config_cleanup(ddev);
> > > 
> > > -	flush_work(&vbl_ctrl->work);
> > > -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
> > > -		list_del(&vbl_ev->node);
> > > -		kfree(vbl_ev);
> > > -	}
> > > -
> > >  	/* clean up event worker threads */
> > >  	for (i = 0; i < priv->num_crtcs; i++) {
> > >  		if (priv->event_thread[i].thread) {
> > > @@ -469,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
> > >  	priv->wq = alloc_ordered_workqueue("msm", 0);
> > > 
> > >  	INIT_LIST_HEAD(&priv->inactive_list);
> > > -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
> > > -	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
> > > -	spin_lock_init(&priv->vblank_ctrl.lock);
> > > 
> > >  	drm_mode_config_init(ddev);
> > > 
> > > diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> > > index 05d33a7..d4cbde2 100644
> > > --- a/drivers/gpu/drm/msm/msm_drv.h
> > > +++ b/drivers/gpu/drm/msm/msm_drv.h
> > > @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
> > >  	PLANE_PROP_MAX_NUM
> > >  };
> > > 
> > > -struct msm_vblank_ctrl {
> > > -	struct work_struct work;
> > > -	struct list_head event_list;
> > > -	spinlock_t lock;
> > > -};
> > > -
> > >  #define MSM_GPU_MAX_RINGS 4
> > >  #define MAX_H_TILES_PER_DISPLAY 2
> > > 
> > > @@ -225,7 +219,6 @@ struct msm_drm_private {
> > >  	struct notifier_block vmap_notifier;
> > >  	struct shrinker shrinker;
> > > 
> > > -	struct msm_vblank_ctrl vblank_ctrl;
> > >  	struct drm_atomic_state *pm_state;
> > >  };
> > > 
> > > --
> > > The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
> > > a Linux Foundation Collaborative Project
> > > 
> > > _______________________________________________
> > > Freedreno mailing list
> > > Freedreno@lists.freedesktop.org
> > > https://lists.freedesktop.org/mailman/listinfo/freedreno
> 
> -- 
> Jeykumar S

-- 
Sean Paul, Software Engineer, Google / Chromium OS
_______________________________________________
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno

^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [PATCH v2 5/5] drm/msm: subclass work object for vblank events
  2018-11-29 22:15             ` Sean Paul
@ 2018-11-30 19:45               ` Jeykumar Sankaran
       [not found]                 ` <126d5b3a93c1827aaf10cd64486d4967-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
  0 siblings, 1 reply; 19+ messages in thread
From: Jeykumar Sankaran @ 2018-11-30 19:45 UTC (permalink / raw)
  To: Sean Paul
  Cc: linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	jcrouse-sgV2jX0FEOL9JmXXK+q4OQ, seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2018-11-29 14:15, Sean Paul wrote:
> On Tue, Nov 20, 2018 at 02:04:14PM -0800, Jeykumar Sankaran wrote:
>> On 2018-11-07 07:55, Sean Paul wrote:
>> > On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran wrote:
>> > > msm maintains a separate structure to define vblank
>> > > work definitions and a list to track events submitted
>> > > to the workqueue. We can avoid this redundant list
>> > > and its protection mechanism, if we subclass the
>> > > work object to encapsulate vblank event parameters.
>> > >
>> > > changes in v2:
>> > > 	- subclass optimization on system wq (Sean Paul)
>> >
>> > I wouldn't do it like this, tbh. One problem is that you've lost your
>> > flush() on
>> > unbind, so there's no way to know if you have workers in the wild
>> > waiting
>> > to
>> > enable/disable vblank.
>> >
>> > Another issues is that AFAICT, we don't need a queue of
>> > enables/disables,
>> > but
>> > rather just the last requested state (ie: should we be on or off). So
>> > things
>> > don't need to be this complicated (and we're possibly thrashing vblank
>> > on/off
>> > for no reason).
>> >
>> > I'm still of the mind that you should just make this synchronous and
> be
>> > done
>> > with the threads (especially since we're still uncovering/introducing
>> > races!).
>> >
>> While scoping out the effort to make vblank events synchronous, I 
>> found
>> that the spinlock locking order of vblank request sequence and vblank
>> callback
>> sequences are the opposite.
>> 
>> In DPU, drm_vblank_enable acquires vblank_time_lock before registering
>> the crtc to encoder which happens after acquiring encoder_spinlock. 
>> But
>> the vblank_callback acquires encoder_spinlock before accessing the
>> registered
>> crtc and calling into drm_vblank_handler which tries to acquire
>> vblank_time_lock.
>> Acquiring both vblank_time_lock and encoder_spinlock in the same 
>> thread
>> is leading to deadlock.
> 
> Hmm, I'm not sure I follow. Are you seeing issues where irq overlaps 
> with
> enable/disable? I hacked in sync vblank enable/disable quickly to see 
> if I
> could
> reproduce what you're seeing, but things seemed well behaved.
> 

The race is between the drm_vblank_get/put and vblank_handler contexts.

When made synchronous:

While calling drm_vblank_get, the call stack looks like this:
drm_vblank_get -> drm_vblank_enable (acquires vblank_time_lock) ->
__enable_vblank -> dpu_crtc_vblank -> dpu_encoder_toggle_vblank_for_crtc
(tries to acquire enc_spinlock)

In the vblank handler, the call stack is:
dpu_encoder_phys_vid_vblank_irq -> dpu_encoder_vblank_callback (acquires
enc_spinlock) -> dpu_crtc_vblank_callback -> drm_handle_vblank (tries to
acquire vblank_time_lock)

So the two paths take vblank_time_lock and enc_spinlock in opposite
orders, which is a classic AB-BA deadlock.
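
To make the inversion concrete, here is a standalone userspace analogy
(a minimal sketch: pthread mutexes stand in for the two spinlocks, and
none of this is DPU code, just the lock ordering from the call stacks):

/* abba.c - build with: cc -pthread -o abba abba.c
 * With unlucky timing the two threads block each other forever,
 * which is exactly the inversion described above.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t vblank_time_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t enc_spinlock = PTHREAD_MUTEX_INITIALIZER;

/* drm_vblank_get() path: vblank_time_lock, then enc_spinlock */
static void *enable_path(void *arg)
{
	pthread_mutex_lock(&vblank_time_lock);	/* drm_vblank_enable() */
	usleep(1000);				/* widen the race window */
	pthread_mutex_lock(&enc_spinlock);	/* dpu_encoder_toggle_vblank_for_crtc() */
	pthread_mutex_unlock(&enc_spinlock);
	pthread_mutex_unlock(&vblank_time_lock);
	return NULL;
}

/* vblank irq path: enc_spinlock, then vblank_time_lock */
static void *irq_path(void *arg)
{
	pthread_mutex_lock(&enc_spinlock);	/* dpu_encoder_vblank_callback() */
	usleep(1000);
	pthread_mutex_lock(&vblank_time_lock);	/* drm_handle_vblank() */
	pthread_mutex_unlock(&vblank_time_lock);
	pthread_mutex_unlock(&enc_spinlock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, enable_path, NULL);
	pthread_create(&b, NULL, irq_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("no deadlock on this run\n");
	return 0;
}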


> I do see that there is a chance to call drm_handle_vblank() while 
> holding
> enc_spinlock, but couldn't find any obvious lock recursion there.
> 
> Maybe a callstack or lockdep splat would help?
> 
> Sean
> 
> 
> Here's my hack to bypass the display thread:
> 
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index 9c9f7ff6960b38..5a3cac5825319e 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -242,24 +242,19 @@ static void vblank_ctrl_worker(struct kthread_work *work)
>  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>  					int crtc_id, bool enable)
>  {
> +	struct msm_kms *kms = priv->kms;
>  	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> -	struct vblank_event *vbl_ev;
>  	unsigned long flags;
> 
> -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
> -	if (!vbl_ev)
> -		return -ENOMEM;
> +	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> 
> -	vbl_ev->crtc_id = crtc_id;
> -	vbl_ev->enable = enable;
> +	if (enable)
> +		kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
> +	else
> +		kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
> 
> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>  	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> 
> -	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
> -			&vbl_ctrl->work);
> -
>  	return 0;
>  }
> 
Even with your patch above, I see the frame getting stuck, but it
recovers after a while. The patch I tried was assigning
crtc->funcs->enable_vblank/disable_vblank so that __enable_vblank can
call into the crtc directly. But the call stack above is still valid
for your patch.

Thanks,
Jeykumar S.
> 
> 
>> 
>> In MDP5, I see the same pattern between vblank_time_lock and list_lock
> which
>> is used to track the irq handlers.
>> 
>> I believe that explains why msm_drv is queuing the vblank 
>> enable/disable
>> works to WQ after acquiring vblank_time_lock.
>> 
>> Thanks,
>> Jeykumar S.
>> 
>> > Sean
>> >
>> > > [snip: full patch 5/5 diff, quoted verbatim earlier in the thread]
>> 
>> --
>> Jeykumar S

-- 
Jeykumar S
_______________________________________________
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v2 5/5] drm/msm: subclass work object for vblank events
       [not found]                 ` <126d5b3a93c1827aaf10cd64486d4967-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
@ 2018-11-30 20:07                   ` Sean Paul
  2018-12-01  0:21                     ` Jeykumar Sankaran
  0 siblings, 1 reply; 19+ messages in thread
From: Sean Paul @ 2018-11-30 20:07 UTC (permalink / raw)
  To: Jeykumar Sankaran
  Cc: Sean Paul, robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	jcrouse-sgV2jX0FEOL9JmXXK+q4OQ, hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On Fri, Nov 30, 2018 at 11:45:55AM -0800, Jeykumar Sankaran wrote:
> On 2018-11-29 14:15, Sean Paul wrote:
> > On Tue, Nov 20, 2018 at 02:04:14PM -0800, Jeykumar Sankaran wrote:
> > > On 2018-11-07 07:55, Sean Paul wrote:
> > > > On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran wrote:
> > > > > msm maintains a separate structure to define vblank
> > > > > work definitions and a list to track events submitted
> > > > > to the workqueue. We can avoid this redundant list
> > > > > and its protection mechanism, if we subclass the
> > > > > work object to encapsulate vblank event parameters.
> > > > >
> > > > > changes in v2:
> > > > > 	- subclass optimization on system wq (Sean Paul)
> > > >
> > > > I wouldn't do it like this, tbh. One problem is that you've lost your
> > > > flush() on
> > > > unbind, so there's no way to know if you have workers in the wild
> > > > waiting
> > > > to
> > > > enable/disable vblank.
> > > >
> > > > Another issues is that AFAICT, we don't need a queue of
> > > > enables/disables,
> > > > but
> > > > rather just the last requested state (ie: should we be on or off). So
> > > > things
> > > > don't need to be this complicated (and we're possibly thrashing vblank
> > > > on/off
> > > > for no reason).
> > > >
> > > > I'm still of the mind that you should just make this synchronous and
> > be
> > > > done
> > > > with the threads (especially since we're still uncovering/introducing
> > > > races!).
> > > >
> > > While scoping out the effort to make vblank events synchronous, I
> > > found
> > > that the spinlock locking order of vblank request sequence and vblank
> > > callback
> > > sequences are the opposite.
> > > 
> > > In DPU, drm_vblank_enable acquires vblank_time_lock before registering
> > > the crtc to encoder which happens after acquiring encoder_spinlock.
> > > But
> > > the vblank_callback acquires encoder_spinlock before accessing the
> > > registered
> > > crtc and calling into drm_vblank_handler which tries to acquire
> > > vblank_time_lock.
> > > Acquiring both vblank_time_lock and encoder_spinlock in the same
> > > thread
> > > is leading to deadlock.
> > 
> > Hmm, I'm not sure I follow. Are you seeing issues where irq overlaps
> > with
> > enable/disable? I hacked in sync vblank enable/disable quickly to see if
> > I
> > could
> > reproduce what you're seeing, but things seemed well behaved.
> > 
> 
> The race is between drm_vblank_get/put and vblank_handler contexts.
> 
> When made synchronous:
> 
> while calling drm_vblank_get, the callstack looks like below:
> drm_vblank_get -> drm_vblank_enable (acquires vblank_time_lock) ->
> __enable_vblank -> dpu_crtc_vblank -> dpu_encoder_toggle_vblank_for_crtc
> (tries to acquire enc_spinlock)
> 
> In vblank handler, the call stack will be:
> dpu_encoder_phys_vid_vblank_irq -> dpu_encoder_vblank_callback (acquires
> enc_spinlock) -> dpu_crtc_vblank_callback -> drm_handle_vblank (tries to
> acquire vblank_time_lock)

Hmm, I'm not sure how this can happen. We acquire and release the enc_spinlock
before enabling the irq; yes, we will hold on to the vbl_time_lock, but we
shouldn't be trying to reacquire an encoder's spinlock after we've enabled it.
I don't see how that can deadlock, since we should never be running enable and
the handler concurrently.
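
In other words, the enable-side ordering I'd expect is roughly this
(a sketch, not the actual DPU code; enable_vblank_irq() and phys_enc
are stand-in names):

	spin_lock_irqsave(&vblank_time_lock, flags);	/* drm_vblank_enable() */

	spin_lock(&enc_spinlock);
	phys_enc->crtc = crtc;		/* register the crtc with the encoder */
	spin_unlock(&enc_spinlock);	/* enc_spinlock dropped here... */

	enable_vblank_irq(phys_enc);	/* ...before the irq can ever fire */

	spin_unlock_irqrestore(&vblank_time_lock, flags);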

The only thing I can think of is that vblank interrupts are still firing after
vblank has been disabled. In that case, it seems like we should properly flush
them.
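
(By "flush" I mean something like this in the disable path -- a sketch,
where disable_vblank_irq() and phys_enc->irq are stand-in names and
synchronize_irq() is the stock kernel helper:)

	disable_vblank_irq(phys_enc);	/* no new vblank irqs after this */
	synchronize_irq(phys_enc->irq);	/* wait out a handler already running */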

Sean


> 
> 
> > I do see that there is a chance to call drm_handle_vblank() while
> > holding
> > enc_spinlock, but couldn't find any obvious lock recursion there.
> > 
> > Maybe a callstack or lockdep splat would help?
> > 
> > Sean
> > 
> > 
> > Here's my hack to bypass the display thread:
> > 
> > diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> > index 9c9f7ff6960b38..5a3cac5825319e 100644
> > --- a/drivers/gpu/drm/msm/msm_drv.c
> > +++ b/drivers/gpu/drm/msm/msm_drv.c
> > @@ -242,24 +242,19 @@ static void vblank_ctrl_worker(struct kthread_work *work)
> >  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
> >  					int crtc_id, bool enable)
> >  {
> > +	struct msm_kms *kms = priv->kms;
> >  	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> > -	struct vblank_event *vbl_ev;
> >  	unsigned long flags;
> > 
> > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
> > -	if (!vbl_ev)
> > -		return -ENOMEM;
> > +	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > 
> > -	vbl_ev->crtc_id = crtc_id;
> > -	vbl_ev->enable = enable;
> > +	if (enable)
> > +		kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
> > +	else
> > +		kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
> > 
> > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
> >  	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> > 
> > -	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
> > -			&vbl_ctrl->work);
> > -
> >  	return 0;
> >  }
> > 
> Even with your patch above, I see frame is getting stuck but it recovers in
> a while.
> The patch I tried was assigning crtc->funcs->enable_vblank/disable_vblank so
> that
> __enable_vblank can call crtc directly. But the above callstack is still
> valid for your patch.
> 
> Thanks,
> Jeykumar S.
> > 
> > 
> > > 
> > > In MDP5, I see the same pattern between vblank_time_lock and list_lock
> > which
> > > is used to track the irq handlers.
> > > 
> > > I believe that explains why msm_drv is queuing the vblank
> > > enable/disable
> > > works to WQ after acquiring vblank_time_lock.
> > > 
> > > Thanks,
> > > Jeykumar S.
> > > 
> > > > Sean
> > > >
> > > > > [snip: full patch 5/5 diff, quoted verbatim earlier in the thread]
> > > 
> > > --
> > > Jeykumar S
> 
> -- 
> Jeykumar S

-- 
Sean Paul, Software Engineer, Google / Chromium OS
_______________________________________________
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v2 5/5] drm/msm: subclass work object for vblank events
  2018-11-30 20:07                   ` Sean Paul
@ 2018-12-01  0:21                     ` Jeykumar Sankaran
       [not found]                       ` <e50d359b8cdd5fd0ccc975a791f65847-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
  0 siblings, 1 reply; 19+ messages in thread
From: Jeykumar Sankaran @ 2018-12-01  0:21 UTC (permalink / raw)
  To: Sean Paul
  Cc: linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	jcrouse-sgV2jX0FEOL9JmXXK+q4OQ, seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2018-11-30 12:07, Sean Paul wrote:
> On Fri, Nov 30, 2018 at 11:45:55AM -0800, Jeykumar Sankaran wrote:
>> On 2018-11-29 14:15, Sean Paul wrote:
>> > On Tue, Nov 20, 2018 at 02:04:14PM -0800, Jeykumar Sankaran wrote:
>> > > On 2018-11-07 07:55, Sean Paul wrote:
>> > > > On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran wrote:
>> > > > > msm maintains a separate structure to define vblank
>> > > > > work definitions and a list to track events submitted
>> > > > > to the workqueue. We can avoid this redundant list
>> > > > > and its protection mechanism, if we subclass the
>> > > > > work object to encapsulate vblank event parameters.
>> > > > >
>> > > > > changes in v2:
>> > > > > 	- subclass optimization on system wq (Sean Paul)
>> > > >
>> > > > I wouldn't do it like this, tbh. One problem is that you've lost
> your
>> > > > flush() on
>> > > > unbind, so there's no way to know if you have workers in the wild
>> > > > waiting
>> > > > to
>> > > > enable/disable vblank.
>> > > >
>> > > > Another issues is that AFAICT, we don't need a queue of
>> > > > enables/disables,
>> > > > but
>> > > > rather just the last requested state (ie: should we be on or off).
> So
>> > > > things
>> > > > don't need to be this complicated (and we're possibly thrashing
> vblank
>> > > > on/off
>> > > > for no reason).
>> > > >
>> > > > I'm still of the mind that you should just make this synchronous
> and
>> > be
>> > > > done
>> > > > with the threads (especially since we're still
> uncovering/introducing
>> > > > races!).
>> > > >
>> > > While scoping out the effort to make vblank events synchronous, I
>> > > found
>> > > that the spinlock locking order of vblank request sequence and
> vblank
>> > > callback
>> > > sequences are the opposite.
>> > >
>> > > In DPU, drm_vblank_enable acquires vblank_time_lock before
> registering
>> > > the crtc to encoder which happens after acquiring encoder_spinlock.
>> > > But
>> > > the vblank_callback acquires encoder_spinlock before accessing the
>> > > registered
>> > > crtc and calling into drm_vblank_handler which tries to acquire
>> > > vblank_time_lock.
>> > > Acquiring both vblank_time_lock and encoder_spinlock in the same
>> > > thread
>> > > is leading to deadlock.
>> >
>> > Hmm, I'm not sure I follow. Are you seeing issues where irq overlaps
>> > with
>> > enable/disable? I hacked in sync vblank enable/disable quickly to see
> if
>> > I
>> > could
>> > reproduce what you're seeing, but things seemed well behaved.
>> >
>> 
>> The race is between drm_vblank_get/put and vblank_handler contexts.
>> 
>> When made synchronous:
>> 
>> while calling drm_vblank_get, the callstack looks like below:
>> drm_vblank_get -> drm_vblank_enable (acquires vblank_time_lock) ->
>> __enable_vblank -> dpu_crtc_vblank -> 
>> dpu_encoder_toggle_vblank_for_crtc
>> (tries to acquire enc_spinlock)
>> 
>> In vblank handler, the call stack will be:
>> dpu_encoder_phys_vid_vblank_irq -> dpu_encoder_vblank_callback 
>> (acquires
>> enc_spinlock) -> dpu_crtc_vblank_callback -> drm_handle_vblank (tries 
>> to
>> acquire vblank_time_lock)
> 
> Hmm, I'm not sure how this can happen. We acquire and release the
> enc_spinlock
> before enabling the irq, yes we will hold on to the vbl_time_lock, but 
> we
> shouldn't be trying to reacquire an encoder's spinlock after we've 
> enabled
> it.
In the synchronous approach, dpu_encoder_toggle_vblank_for_crtc() (which
acquires the enc_spinlock) will be called while we are holding the
vbl_time_lock.

> I don't know how that can deadlock, since we should never be running
> enable and
> the handler concurrently.
> 
I agree that the vblank irq handler should not be running before the
enable sequence. But don't you expect the handler to be running while we
call the vblank disable sequence? The disable path will try to acquire
the locks in the opposite order to the irq handler, and the same
deadlock is bound to happen.

With your patch, you should be able to reproduce this deadlock if you
inject a delay, e.g. by adding a pr_err() log in
vblank_ctrl_queue_work().
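
Something like this on top of your hack, purely to widen the race window
(the 10ms is arbitrary, and mdelay() assumes linux/delay.h is pulled in):

--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 	spin_lock_irqsave(&vbl_ctrl->lock, flags);
 
+	/* hold the lock longer so the irq path piles up behind us */
+	pr_err("%s: crtc_id=%d enable=%d\n", __func__, crtc_id, enable);
+	mdelay(10);
+
 	if (enable)
 		kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);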

Thanks,
Jeykumar S.

> The only thing I can think of is that the vblank interrupts are firing
> after
> vblank has been disabled? In that case, it seems like we should 
> properly
> flush
> them.
> 
> Sean
> 
> 
>> 
>> 
>> > I do see that there is a chance to call drm_handle_vblank() while
>> > holding
>> > enc_spinlock, but couldn't find any obvious lock recursion there.
>> >
>> > Maybe a callstack or lockdep splat would help?
>> >
>> > Sean
>> >
>> >
>> > Here's my hack to bypass the display thread:
>> >
>> > diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
>> > index 9c9f7ff6960b38..5a3cac5825319e 100644
>> > --- a/drivers/gpu/drm/msm/msm_drv.c
>> > +++ b/drivers/gpu/drm/msm/msm_drv.c
>> > @@ -242,24 +242,19 @@ static void vblank_ctrl_worker(struct kthread_work *work)
>> >  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>> >  					int crtc_id, bool enable)
>> >  {
>> > +	struct msm_kms *kms = priv->kms;
>> >  	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> > -	struct vblank_event *vbl_ev;
>> >  	unsigned long flags;
>> >
>> > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>> > -	if (!vbl_ev)
>> > -		return -ENOMEM;
>> > +	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> >
>> > -	vbl_ev->crtc_id = crtc_id;
>> > -	vbl_ev->enable = enable;
>> > +	if (enable)
>> > +		kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
>> > +	else
>> > +		kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
>> >
>> > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>> >  	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> >
>> > -	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
>> > -			&vbl_ctrl->work);
>> > -
>> >  	return 0;
>> >  }
>> >
>> Even with your patch above, I see frame is getting stuck but it 
>> recovers
> in
>> a while.
>> The patch I tried was assigning
> crtc->funcs->enable_vblank/disable_vblank so
>> that
>> __enable_vblank can call crtc directly. But the above callstack is 
>> still
>> valid for your patch.
>> 
>> Thanks,
>> Jeykumar S.
>> >
>> >
>> > >
>> > > In MDP5, I see the same pattern between vblank_time_lock and
> list_lock
>> > which
>> > > is used to track the irq handlers.
>> > >
>> > > I believe that explains why msm_drv is queuing the vblank
>> > > enable/disable
>> > > works to WQ after acquiring vblank_time_lock.
>> > >
>> > > Thanks,
>> > > Jeykumar S.
>> > >
>> > > > Sean
>> > > >
>> > > > > [snip: full patch 5/5 diff, quoted verbatim earlier in the thread]
>> > >
>> > > --
>> > > Jeykumar S
>> 
>> --
>> Jeykumar S

-- 
Jeykumar S
_______________________________________________
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v2 5/5] drm/msm: subclass work object for vblank events
       [not found]                       ` <e50d359b8cdd5fd0ccc975a791f65847-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
@ 2018-12-03 14:21                         ` Sean Paul
  2018-12-03 20:27                           ` Jeykumar Sankaran
  0 siblings, 1 reply; 19+ messages in thread
From: Sean Paul @ 2018-12-03 14:21 UTC (permalink / raw)
  To: Jeykumar Sankaran
  Cc: Sean Paul, robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	jcrouse-sgV2jX0FEOL9JmXXK+q4OQ, hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On Fri, Nov 30, 2018 at 04:21:15PM -0800, Jeykumar Sankaran wrote:
> On 2018-11-30 12:07, Sean Paul wrote:
> > On Fri, Nov 30, 2018 at 11:45:55AM -0800, Jeykumar Sankaran wrote:
> > > On 2018-11-29 14:15, Sean Paul wrote:
> > > > On Tue, Nov 20, 2018 at 02:04:14PM -0800, Jeykumar Sankaran wrote:
> > > > > On 2018-11-07 07:55, Sean Paul wrote:
> > > > > > On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran wrote:
> > > > > > > msm maintains a separate structure to define vblank
> > > > > > > work definitions and a list to track events submitted
> > > > > > > to the workqueue. We can avoid this redundant list
> > > > > > > and its protection mechanism, if we subclass the
> > > > > > > work object to encapsulate vblank event parameters.
> > > > > > >
> > > > > > > changes in v2:
> > > > > > > 	- subclass optimization on system wq (Sean Paul)
> > > > > >
> > > > > > I wouldn't do it like this, tbh. One problem is that you've lost
> > your
> > > > > > flush() on
> > > > > > unbind, so there's no way to know if you have workers in the wild
> > > > > > waiting
> > > > > > to
> > > > > > enable/disable vblank.
> > > > > >
> > > > > > Another issues is that AFAICT, we don't need a queue of
> > > > > > enables/disables,
> > > > > > but
> > > > > > rather just the last requested state (ie: should we be on or off).
> > So
> > > > > > things
> > > > > > don't need to be this complicated (and we're possibly thrashing
> > vblank
> > > > > > on/off
> > > > > > for no reason).
> > > > > >
> > > > > > I'm still of the mind that you should just make this synchronous
> > and
> > > > be
> > > > > > done
> > > > > > with the threads (especially since we're still
> > uncovering/introducing
> > > > > > races!).
> > > > > >
> > > > > While scoping out the effort to make vblank events synchronous, I
> > > > > found
> > > > > that the spinlock locking order of vblank request sequence and
> > vblank
> > > > > callback
> > > > > sequences are the opposite.
> > > > >
> > > > > In DPU, drm_vblank_enable acquires vblank_time_lock before
> > registering
> > > > > the crtc to encoder which happens after acquiring encoder_spinlock.
> > > > > But
> > > > > the vblank_callback acquires encoder_spinlock before accessing the
> > > > > registered
> > > > > crtc and calling into drm_vblank_handler which tries to acquire
> > > > > vblank_time_lock.
> > > > > Acquiring both vblank_time_lock and encoder_spinlock in the same
> > > > > thread
> > > > > is leading to deadlock.
> > > >
> > > > Hmm, I'm not sure I follow. Are you seeing issues where irq overlaps
> > > > with
> > > > enable/disable? I hacked in sync vblank enable/disable quickly to see
> > if
> > > > I
> > > > could
> > > > reproduce what you're seeing, but things seemed well behaved.
> > > >
> > > 
> > > The race is between drm_vblank_get/put and vblank_handler contexts.
> > > 
> > > When made synchronous:
> > > 
> > > while calling drm_vblank_get, the callstack looks like below:
> > > drm_vblank_get -> drm_vblank_enable (acquires vblank_time_lock) ->
> > > __enable_vblank -> dpu_crtc_vblank ->
> > > dpu_encoder_toggle_vblank_for_crtc
> > > (tries to acquire enc_spinlock)
> > > 
> > > In vblank handler, the call stack will be:
> > > dpu_encoder_phys_vid_vblank_irq -> dpu_encoder_vblank_callback
> > > (acquires
> > > enc_spinlock) -> dpu_crtc_vblank_callback -> drm_handle_vblank
> > > (tries to
> > > acquire vblank_time_lock)
> > 
> > Hmm, I'm not sure how this can happen. We acquire and release the
> > enc_spinlock
> > before enabling the irq, yes we will hold on to the vbl_time_lock, but
> > we
> > shouldn't be trying to reacquire an encoder's spinlock after we've
> > enabled
> > it.
> In the synchronous approach dpu_encoder_toggle_vblank_for_crtc(which
> acquires the enc_spinlock) will be called while we
> are holding the vbl_time_lock.
> 
> > I don't know how that can deadlock, since we should never be running
> > enable and
> > the handler concurrently.
> > 
> I agree that vblank_irq handler should not be running before the enable
> sequence. But
> don't you expect the handler to be running while calling the vblank_disable
> sequence?

This is an entirely different problem, though, and one that is easier to
fix. I think we could probably grab the enc_spinlock in disable and clear
the crtc pointer.
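
Roughly like this (an untested sketch -- the struct, field, and helper
names are stand-ins, not the real DPU ones):

static void vid_vblank_disable(struct dpu_encoder_phys *phys_enc)
{
	unsigned long lock_flags;

	spin_lock_irqsave(&phys_enc->enc_spinlock, lock_flags);
	phys_enc->crtc = NULL;		/* the handler now has nothing to call */
	spin_unlock_irqrestore(&phys_enc->enc_spinlock, lock_flags);
}

static void vid_vblank_irq(struct dpu_encoder_phys *phys_enc)
{
	struct drm_crtc *crtc;
	unsigned long lock_flags;

	spin_lock_irqsave(&phys_enc->enc_spinlock, lock_flags);
	crtc = phys_enc->crtc;
	spin_unlock_irqrestore(&phys_enc->enc_spinlock, lock_flags);

	if (!crtc)	/* disabled, or disable raced with this irq */
		return;

	/* enc_spinlock is not held here, so taking vblank_time_lock inside
	 * drm_handle_vblank() cannot invert against the enable path */
	drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
}

Dropping enc_spinlock before calling into drm_handle_vblank() would also
remove the enc_spinlock -> vblank_time_lock half of the inversion.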

What I'm getting at is that there's no fundamental reason why we need to have
async vblank enable/disable.

Sean

> vbl disable will try to acquire the locks in the opposite order to that of
> irq_handler and the
> same issue is bound to happen.
> 
> With your patch, you should be able to simulate this deadlock if you can
> inject a delay
> by adding a pr_err log in vblank_ctrl_queue_work
> 
> Thanks,
> Jeykumar S.
> 
> > The only thing I can think of is that the vblank interrupts are firing
> > after
> > vblank has been disabled? In that case, it seems like we should properly
> > flush
> > them.
> > 
> > Sean
> > 
> > 
> > > 
> > > 
> > > > I do see that there is a chance to call drm_handle_vblank() while
> > > > holding
> > > > enc_spinlock, but couldn't find any obvious lock recursion there.
> > > >
> > > > Maybe a callstack or lockdep splat would help?
> > > >
> > > > Sean
> > > >
> > > >
> > > > Here's my hack to bypass the display thread:
> > > >
> > > > diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> > > > index 9c9f7ff6960b38..5a3cac5825319e 100644
> > > > --- a/drivers/gpu/drm/msm/msm_drv.c
> > > > +++ b/drivers/gpu/drm/msm/msm_drv.c
> > > > @@ -242,24 +242,19 @@ static void vblank_ctrl_worker(struct kthread_work *work)
> > > >  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
> > > >  					int crtc_id, bool enable)
> > > >  {
> > > > +	struct msm_kms *kms = priv->kms;
> > > >  	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> > > > -	struct vblank_event *vbl_ev;
> > > >  	unsigned long flags;
> > > >
> > > > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
> > > > -	if (!vbl_ev)
> > > > -		return -ENOMEM;
> > > > +	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > > >
> > > > -	vbl_ev->crtc_id = crtc_id;
> > > > -	vbl_ev->enable = enable;
> > > > +	if (enable)
> > > > +		kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
> > > > +	else
> > > > +		kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
> > > >
> > > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > > > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
> > > >  	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> > > >
> > > > -	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
> > > > -			&vbl_ctrl->work);
> > > > -
> > > >  	return 0;
> > > >  }
> > > >
> > > Even with your patch above, I see frame is getting stuck but it
> > > recovers
> > in
> > > a while.
> > > The patch I tried was assigning
> > crtc->funcs->enable_vblank/disable_vblank so
> > > that
> > > __enable_vblank can call crtc directly. But the above callstack is
> > > still
> > > valid for your patch.
> > > 
> > > Thanks,
> > > Jeykumar S.
> > > >
> > > >
> > > > >
> > > > > In MDP5, I see the same pattern between vblank_time_lock and
> > list_lock
> > > > which
> > > > > is used to track the irq handlers.
> > > > >
> > > > > I believe that explains why msm_drv is queuing the vblank
> > > > > enable/disable
> > > > > works to WQ after acquiring vblank_time_lock.
> > > > >
> > > > > Thanks,
> > > > > Jeykumar S.
> > > > >
> > > > > > Sean
> > > > > >
> > > > > > > [snip: full patch 5/5 diff, quoted verbatim earlier in the thread]
> > > > >
> > > > > --
> > > > > Jeykumar S
> > > 
> > > --
> > > Jeykumar S
> 
> -- 
> Jeykumar S

-- 
Sean Paul, Software Engineer, Google / Chromium OS
_______________________________________________
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v2 5/5] drm/msm: subclass work object for vblank events
  2018-12-03 14:21                         ` Sean Paul
@ 2018-12-03 20:27                           ` Jeykumar Sankaran
  2018-12-07 17:22                             ` [Freedreno] " Sean Paul
  0 siblings, 1 reply; 19+ messages in thread
From: Jeykumar Sankaran @ 2018-12-03 20:27 UTC (permalink / raw)
  To: Sean Paul
  Cc: linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	jcrouse-sgV2jX0FEOL9JmXXK+q4OQ, seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2018-12-03 06:21, Sean Paul wrote:
> On Fri, Nov 30, 2018 at 04:21:15PM -0800, Jeykumar Sankaran wrote:
>> On 2018-11-30 12:07, Sean Paul wrote:
>> > On Fri, Nov 30, 2018 at 11:45:55AM -0800, Jeykumar Sankaran wrote:
>> > > On 2018-11-29 14:15, Sean Paul wrote:
>> > > > On Tue, Nov 20, 2018 at 02:04:14PM -0800, Jeykumar Sankaran wrote:
>> > > > > On 2018-11-07 07:55, Sean Paul wrote:
>> > > > > > On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran
> wrote:
>> > > > > > > msm maintains a separate structure to define vblank
>> > > > > > > work definitions and a list to track events submitted
>> > > > > > > to the workqueue. We can avoid this redundant list
>> > > > > > > and its protection mechanism, if we subclass the
>> > > > > > > work object to encapsulate vblank event parameters.
>> > > > > > >
>> > > > > > > changes in v2:
>> > > > > > > 	- subclass optimization on system wq (Sean Paul)
>> > > > > >
>> > > > > > I wouldn't do it like this, tbh. One problem is that you've
> lost
>> > your
>> > > > > > flush() on
>> > > > > > unbind, so there's no way to know if you have workers in the
> wild
>> > > > > > waiting
>> > > > > > to
>> > > > > > enable/disable vblank.
>> > > > > >
>> > > > > > Another issues is that AFAICT, we don't need a queue of
>> > > > > > enables/disables,
>> > > > > > but
>> > > > > > rather just the last requested state (ie: should we be on or
> off).
>> > So
>> > > > > > things
>> > > > > > don't need to be this complicated (and we're possibly
> thrashing
>> > vblank
>> > > > > > on/off
>> > > > > > for no reason).
>> > > > > >
>> > > > > > I'm still of the mind that you should just make this
> synchronous
>> > and
>> > > > be
>> > > > > > done
>> > > > > > with the threads (especially since we're still
>> > uncovering/introducing
>> > > > > > races!).
>> > > > > >
>> > > > > While scoping out the effort to make vblank events synchronous,
> I
>> > > > > found
>> > > > > that the spinlock locking order of vblank request sequence and
>> > vblank
>> > > > > callback
>> > > > > sequences are the opposite.
>> > > > >
>> > > > > In DPU, drm_vblank_enable acquires vblank_time_lock before
>> > registering
>> > > > > the crtc to encoder which happens after acquiring
> encoder_spinlock.
>> > > > > But
>> > > > > the vblank_callback acquires encoder_spinlock before accessing
> the
>> > > > > registered
>> > > > > crtc and calling into drm_vblank_handler which tries to acquire
>> > > > > vblank_time_lock.
>> > > > > Acquiring both vblank_time_lock and encoder_spinlock in the same
>> > > > > thread
>> > > > > is leading to deadlock.
>> > > >
>> > > > Hmm, I'm not sure I follow. Are you seeing issues where irq
> overlaps
>> > > > with
>> > > > enable/disable? I hacked in sync vblank enable/disable quickly to
> see
>> > if
>> > > > I
>> > > > could
>> > > > reproduce what you're seeing, but things seemed well behaved.
>> > > >
>> > >
>> > > The race is between drm_vblank_get/put and vblank_handler contexts.
>> > >
>> > > When made synchronous:
>> > >
>> > > while calling drm_vblank_get, the callstack looks like below:
>> > > drm_vblank_get -> drm_vblank_enable (acquires vblank_time_lock) ->
>> > > __enable_vblank -> dpu_crtc_vblank ->
>> > > dpu_encoder_toggle_vblank_for_crtc
>> > > (tries to acquire enc_spinlock)
>> > >
>> > > In vblank handler, the call stack will be:
>> > > dpu_encoder_phys_vid_vblank_irq -> dpu_encoder_vblank_callback
>> > > (acquires
>> > > enc_spinlock) -> dpu_crtc_vblank_callback -> drm_handle_vblank
>> > > (tries to
>> > > acquire vblank_time_lock)
>> >
>> > Hmm, I'm not sure how this can happen. We acquire and release the
>> > enc_spinlock
>> > before enabling the irq, yes we will hold on to the vbl_time_lock, but
>> > we
>> > shouldn't be trying to reacquire an encoder's spinlock after we've
>> > enabled
>> > it.
>> In the synchronous approach dpu_encoder_toggle_vblank_for_crtc(which
>> acquires the enc_spinlock) will be called while we
>> are holding the vbl_time_lock.
>> 
>> > I don't know how that can deadlock, since we should never be running
>> > enable and
>> > the handler concurrently.
>> >
>> I agree that vblank_irq handler should not be running before the 
>> enable
>> sequence. But
>> don't you expect the handler to be running while calling the
> vblank_disable
>> sequence?
> 
> This is an entirely different problem though. It's also one that is 
> easier
> to
> fix. I think we could probably grab the enc_spinlock in disable and 
> clear
> the
> crtc pointer.
> 
We do hold the enc_spinlock in dpu_encoder_assign_crtc ("drm/msm: dpu:
Remove vblank_callback from encoder"), where we clear the crtc pointer.

> What I'm getting at is that there's no fundamental reason why we need 
> to
> have
> async vblank enable/disable.
> 
> Sean
> 
There is really no *need* to have them async. But I believe the reason
why they are implemented this way is to avoid a deadlock between the
two paths below.

Restating the above findings:
vblank handlers and vblank enable/disable can run concurrently. The
first tries to acquire vbl_time_lock while holding enc_spinlock; the
other tries to acquire enc_spinlock while holding vbl_time_lock.
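
To make the inversion concrete, a minimal sketch of the two paths (lock
and function names abbreviated from the call stacks above; this is not
the actual driver code):

	/* Path A: vblank enable/disable made synchronous */
	spin_lock_irqsave(&vbl_time_lock, flags);  /* drm_vblank_enable */
	spin_lock(&enc_spinlock);  /* dpu_encoder_toggle_vblank_for_crtc */

	/* Path B: vblank irq handler */
	spin_lock_irqsave(&enc_spinlock, flags);  /* dpu_encoder_vblank_callback */
	spin_lock(&vbl_time_lock);  /* drm_handle_vblank */

If A and B run concurrently, each ends up spinning on the lock the
other holds - the classic ABBA pattern.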

Thanks,
Jeykumar S.


>> vbl disable will try to acquire the locks in the opposite order to 
>> that
> of
>> irq_handler and the
>> same issue is bound to happen.
>> 
>> With your patch, you should be able to simulate this deadlock if you 
>> can
>> inject a delay
>> by adding a pr_err log in vblank_ctrl_queue_work
>> 
>> Thanks,
>> Jeykumar S.
>> 
>> > The only thing I can think of is that the vblank interrupts are firing
>> > after
>> > vblank has been disabled? In that case, it seems like we should
> properly
>> > flush
>> > them.
>> >
>> > Sean
>> >
>> >
>> > >
>> > >
>> > > > I do see that there is a chance to call drm_handle_vblank() while
>> > > > holding
>> > > > enc_spinlock, but couldn't find any obvious lock recursion there.
>> > > >
>> > > > Maybe a callstack or lockdep splat would help?
>> > > >
>> > > > Sean
>> > > >
>> > > >
>> > > > Here's my hack to bypass the display thread:
>> > > >
>> > > > diff --git a/drivers/gpu/drm/msm/msm_drv.c
>> > > > b/drivers/gpu/drm/msm/msm_drv.c
>> > > > index 9c9f7ff6960b38..5a3cac5825319e 100644
>> > > > --- a/drivers/gpu/drm/msm/msm_drv.c
>> > > > +++ b/drivers/gpu/drm/msm/msm_drv.c
>> > > > @@ -242,24 +242,19 @@ static void vblank_ctrl_worker(struct
>> > kthread_work
>> > > > *work)
>> > > >  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>> > > >  					int crtc_id, bool enable)
>> > > >  {
>> > > > +	struct msm_kms *kms = priv->kms;
>> > > >  	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> > > > -	struct vblank_event *vbl_ev;
>> > > >  	unsigned long flags;
>> > > >
>> > > > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>> > > > -	if (!vbl_ev)
>> > > > -		return -ENOMEM;
>> > > > +	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > > >
>> > > > -	vbl_ev->crtc_id = crtc_id;
>> > > > -	vbl_ev->enable = enable;
>> > > > +	if (enable)
>> > > > +		kms->funcs->enable_vblank(kms,
> priv->crtcs[crtc_id]);
>> > > > +	else
>> > > > +		kms->funcs->disable_vblank(kms,
> priv->crtcs[crtc_id]);
>> > > >
>> > > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > > > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>> > > >  	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> > > >
>> > > > -	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
>> > > > -			&vbl_ctrl->work);
>> > > > -
>> > > >  	return 0;
>> > > >  }
>> > > >
>> > > Even with your patch above, I see frame is getting stuck but it
>> > > recovers
>> > in
>> > > a while.
>> > > The patch I tried was assigning
>> > crtc->funcs->enable_vblank/disable_vblank so
>> > > that
>> > > __enable_vblank can call crtc directly. But the above callstack is
>> > > still
>> > > valid for your patch.
>> > >
>> > > Thanks,
>> > > Jeykumar S.
>> > > >
>> > > >
>> > > > >
>> > > > > In MDP5, I see the same pattern between vblank_time_lock and
>> > list_lock
>> > > > which
>> > > > > is used to track the irq handlers.
>> > > > >
>> > > > > I believe that explains why msm_drv is queuing the vblank
>> > > > > enable/disable
>> > > > > works to WQ after acquiring vblank_time_lock.
>> > > > >
>> > > > > Thanks,
>> > > > > Jeykumar S.
>> > > > >
>> > > > > > Sean
>> > > > > >
>> > > > > > >
>> > > > > > > Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
>> > > > > > > ---
>> > > > > > >  drivers/gpu/drm/msm/msm_drv.c | 67
>> > > > > > +++++++++++++------------------------------
>> > > > > > >  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>> > > > > > >  2 files changed, 20 insertions(+), 54 deletions(-)
>> > > > > > >
>> > > > > > > diff --git a/drivers/gpu/drm/msm/msm_drv.c
>> > > > > > b/drivers/gpu/drm/msm/msm_drv.c
>> > > > > > > index 6d6c73b..8da5be2 100644
>> > > > > > > --- a/drivers/gpu/drm/msm/msm_drv.c
>> > > > > > > +++ b/drivers/gpu/drm/msm/msm_drv.c
>> > > > > > > @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem
> *addr)
>> > > > > > >  	return val;
>> > > > > > >  }
>> > > > > > >
>> > > > > > > -struct vblank_event {
>> > > > > > > -	struct list_head node;
>> > > > > > > +struct msm_vblank_work {
>> > > > > > > +	struct work_struct work;
>> > > > > > >  	int crtc_id;
>> > > > > > >  	bool enable;
>> > > > > > > +	struct msm_drm_private *priv;
>> > > > > > >  };
>> > > > > > >
>> > > > > > >  static void vblank_ctrl_worker(struct work_struct *work)
>> > > > > > >  {
>> > > > > > > -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
>> > > > > > > -						struct
>> > msm_vblank_ctrl,
>> > > > > > work);
>> > > > > > > -	struct msm_drm_private *priv = container_of(vbl_ctrl,
>> > > > > > > -					struct msm_drm_private,
>> > > > > > vblank_ctrl);
>> > > > > > > +	struct msm_vblank_work *vbl_work = container_of(work,
>> > > > > > > +						struct
>> > msm_vblank_work,
>> > > > > > work);
>> > > > > > > +	struct msm_drm_private *priv = vbl_work->priv;
>> > > > > > >  	struct msm_kms *kms = priv->kms;
>> > > > > > > -	struct vblank_event *vbl_ev, *tmp;
>> > > > > > > -	unsigned long flags;
>> > > > > > > -
>> > > > > > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > > > > > > -	list_for_each_entry_safe(vbl_ev, tmp,
>> > &vbl_ctrl->event_list, node)
>> > > > > > {
>> > > > > > > -		list_del(&vbl_ev->node);
>> > > > > > > -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> > > > > > > -
>> > > > > > > -		if (vbl_ev->enable)
>> > > > > > > -			kms->funcs->enable_vblank(kms,
>> > > > > > > -
>> > > > > > priv->crtcs[vbl_ev->crtc_id]);
>> > > > > > > -		else
>> > > > > > > -			kms->funcs->disable_vblank(kms,
>> > > > > > > -
>> > > > > > priv->crtcs[vbl_ev->crtc_id]);
>> > > > > > >
>> > > > > > > -		kfree(vbl_ev);
>> > > > > > > -
>> > > > > > > -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > > > > > > -	}
>> > > > > > > +	if (vbl_work->enable)
>> > > > > > > +		kms->funcs->enable_vblank(kms,
>> > > > > > priv->crtcs[vbl_work->crtc_id]);
>> > > > > > > +	else
>> > > > > > > +		kms->funcs->disable_vblank(kms,
>> > > > > > priv->crtcs[vbl_work->crtc_id]);
>> > > > > > >
>> > > > > > > -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> > > > > > > +	kfree(vbl_work);
>> > > > > > >  }
>> > > > > > >
>> > > > > > >  static int vblank_ctrl_queue_work(struct msm_drm_private
> *priv,
>> > > > > > >  					int crtc_id, bool enable)
>> > > > > > >  {
>> > > > > > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> > > > > > > -	struct vblank_event *vbl_ev;
>> > > > > > > -	unsigned long flags;
>> > > > > > > +	struct msm_vblank_work *vbl_work;
>> > > > > > >
>> > > > > > > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>> > > > > > > -	if (!vbl_ev)
>> > > > > > > +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
>> > > > > > > +	if (!vbl_work)
>> > > > > > >  		return -ENOMEM;
>> > > > > > >
>> > > > > > > -	vbl_ev->crtc_id = crtc_id;
>> > > > > > > -	vbl_ev->enable = enable;
>> > > > > > > +	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
>> > > > > > >
>> > > > > > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > > > > > > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>> > > > > > > -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> > > > > > > +	vbl_work->crtc_id = crtc_id;
>> > > > > > > +	vbl_work->enable = enable;
>> > > > > > > +	vbl_work->priv = priv;
>> > > > > > >
>> > > > > > > -	schedule_work(&vbl_ctrl->work);
>> > > > > > > +	schedule_work(&vbl_work->work);
>> > > > > > >
>> > > > > > >  	return 0;
>> > > > > > >  }
>> > > > > > > @@ -269,14 +252,13 @@ static int msm_drm_uninit(struct
> device
>> > *dev)
>> > > > > > >  	struct msm_drm_private *priv = ddev->dev_private;
>> > > > > > >  	struct msm_kms *kms = priv->kms;
>> > > > > > >  	struct msm_mdss *mdss = priv->mdss;
>> > > > > > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> > > > > > > -	struct vblank_event *vbl_ev, *tmp;
>> > > > > > >  	int i;
>> > > > > > >
>> > > > > > >  	/* We must cancel and cleanup any pending vblank
>> > enable/disable
>> > > > > > >  	 * work before drm_irq_uninstall() to avoid work
>> > re-enabling an
>> > > > > > >  	 * irq after uninstall has disabled it.
>> > > > > > >  	 */
>> > > > > > > +
>> > > > > > >  	msm_gem_shrinker_cleanup(ddev);
>> > > > > > >
>> > > > > > >  	drm_kms_helper_poll_fini(ddev);
>> > > > > > > @@ -292,12 +274,6 @@ static int msm_drm_uninit(struct device
>> > *dev)
>> > > > > > >  #endif
>> > > > > > >  	drm_mode_config_cleanup(ddev);
>> > > > > > >
>> > > > > > > -	flush_work(&vbl_ctrl->work);
>> > > > > > > -	list_for_each_entry_safe(vbl_ev, tmp,
>> > &vbl_ctrl->event_list, node)
>> > > > > > {
>> > > > > > > -		list_del(&vbl_ev->node);
>> > > > > > > -		kfree(vbl_ev);
>> > > > > > > -	}
>> > > > > > > -
>> > > > > > >  	/* clean up event worker threads */
>> > > > > > >  	for (i = 0; i < priv->num_crtcs; i++) {
>> > > > > > >  		if (priv->event_thread[i].thread) {
>> > > > > > > @@ -469,9 +445,6 @@ static int msm_drm_init(struct device
> *dev,
>> > > > struct
>> > > > > > drm_driver *drv)
>> > > > > > >  	priv->wq = alloc_ordered_workqueue("msm", 0);
>> > > > > > >
>> > > > > > >  	INIT_LIST_HEAD(&priv->inactive_list);
>> > > > > > > -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
>> > > > > > > -	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
>> > > > > > > -	spin_lock_init(&priv->vblank_ctrl.lock);
>> > > > > > >
>> > > > > > >  	drm_mode_config_init(ddev);
>> > > > > > >
>> > > > > > > diff --git a/drivers/gpu/drm/msm/msm_drv.h
>> > > > > > b/drivers/gpu/drm/msm/msm_drv.h
>> > > > > > > index 05d33a7..d4cbde2 100644
>> > > > > > > --- a/drivers/gpu/drm/msm/msm_drv.h
>> > > > > > > +++ b/drivers/gpu/drm/msm/msm_drv.h
>> > > > > > > @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>> > > > > > >  	PLANE_PROP_MAX_NUM
>> > > > > > >  };
>> > > > > > >
>> > > > > > > -struct msm_vblank_ctrl {
>> > > > > > > -	struct work_struct work;
>> > > > > > > -	struct list_head event_list;
>> > > > > > > -	spinlock_t lock;
>> > > > > > > -};
>> > > > > > > -
>> > > > > > >  #define MSM_GPU_MAX_RINGS 4
>> > > > > > >  #define MAX_H_TILES_PER_DISPLAY 2
>> > > > > > >
>> > > > > > > @@ -225,7 +219,6 @@ struct msm_drm_private {
>> > > > > > >  	struct notifier_block vmap_notifier;
>> > > > > > >  	struct shrinker shrinker;
>> > > > > > >
>> > > > > > > -	struct msm_vblank_ctrl vblank_ctrl;
>> > > > > > >  	struct drm_atomic_state *pm_state;
>> > > > > > >  };
>> > > > > > >
>> > > > > > > --
>> > > > > > > The Qualcomm Innovation Center, Inc. is a member of the Code
>> > Aurora
>> > > > > > Forum,
>> > > > > > > a Linux Foundation Collaborative Project
>> > > > > > >
>> > > > > > > _______________________________________________
>> > > > > > > Freedreno mailing list
>> > > > > > > Freedreno@lists.freedesktop.org
>> > > > > > > https://lists.freedesktop.org/mailman/listinfo/freedreno
>> > > > >
>> > > > > --
>> > > > > Jeykumar S
>> > >
>> > > --
>> > > Jeykumar S
>> 
>> --
>> Jeykumar S

-- 
Jeykumar S
_______________________________________________
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v2 5/5] drm/msm: subclass work object for vblank events
  2018-11-07 15:55       ` Sean Paul
  2018-11-20 22:04         ` Jeykumar Sankaran
@ 2018-12-06 18:56         ` Jeykumar Sankaran
  2018-12-07  2:32           ` [Freedreno] " Jeykumar Sankaran
  1 sibling, 1 reply; 19+ messages in thread
From: Jeykumar Sankaran @ 2018-12-06 18:56 UTC (permalink / raw)
  To: Sean Paul
  Cc: linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	jcrouse-sgV2jX0FEOL9JmXXK+q4OQ, seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2018-11-07 07:55, Sean Paul wrote:
> On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran wrote:
>> msm maintains a separate structure to define vblank
>> work definitions and a list to track events submitted
>> to the workqueue. We can avoid this redundant list
>> and its protection mechanism, if we subclass the
>> work object to encapsulate vblank event parameters.
>> 
>> changes in v2:
>> 	- subclass optimization on system wq (Sean Paul)
> 
> I wouldn't do it like this, tbh. One problem is that you've lost your
> flush() on
> unbind, so there's no way to know if you have workers in the wild 
> waiting
> to
> enable/disable vblank.
> 
I believe I didn't respond to this question of yours. This patch does
remove the flush on the work, but a flush_workqueue() before destroying
the WQ will make sure all the queued work items run to completion.

To honor the comment, I will fix the patch to move the flush/destroy
of the wq before the irq_uninstall.
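
i.e. something along these lines in msm_drm_uninit() (a sketch,
assuming the vblank works are queued on priv->wq):

	/* drain any pending vblank work, then destroy the driver wq,
	 * all before the irq is torn down
	 */
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	pm_runtime_get_sync(dev);
	drm_irq_uninstall(ddev);
	pm_runtime_put_sync(dev);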

Thanks,
Jeykumar S.

> Another issues is that AFAICT, we don't need a queue of 
> enables/disables,
> but
> rather just the last requested state (ie: should we be on or off). So
> things
> don't need to be this complicated (and we're possibly thrashing vblank
> on/off
> for no reason).
> 
> I'm still of the mind that you should just make this synchronous and be
> done
> with the threads (especially since we're still uncovering/introducing
> races!).
> 
> Sean
> 
>> 
>> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
>> ---
>>  drivers/gpu/drm/msm/msm_drv.c | 67
> +++++++++++++------------------------------
>>  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>>  2 files changed, 20 insertions(+), 54 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/msm/msm_drv.c
> b/drivers/gpu/drm/msm/msm_drv.c
>> index 6d6c73b..8da5be2 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.c
>> +++ b/drivers/gpu/drm/msm/msm_drv.c
>> @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
>>  	return val;
>>  }
>> 
>> -struct vblank_event {
>> -	struct list_head node;
>> +struct msm_vblank_work {
>> +	struct work_struct work;
>>  	int crtc_id;
>>  	bool enable;
>> +	struct msm_drm_private *priv;
>>  };
>> 
>>  static void vblank_ctrl_worker(struct work_struct *work)
>>  {
>> -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
>> -						struct msm_vblank_ctrl,
> work);
>> -	struct msm_drm_private *priv = container_of(vbl_ctrl,
>> -					struct msm_drm_private,
> vblank_ctrl);
>> +	struct msm_vblank_work *vbl_work = container_of(work,
>> +						struct msm_vblank_work,
> work);
>> +	struct msm_drm_private *priv = vbl_work->priv;
>>  	struct msm_kms *kms = priv->kms;
>> -	struct vblank_event *vbl_ev, *tmp;
>> -	unsigned long flags;
>> -
>> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node)
> {
>> -		list_del(&vbl_ev->node);
>> -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> -
>> -		if (vbl_ev->enable)
>> -			kms->funcs->enable_vblank(kms,
>> -
> priv->crtcs[vbl_ev->crtc_id]);
>> -		else
>> -			kms->funcs->disable_vblank(kms,
>> -
> priv->crtcs[vbl_ev->crtc_id]);
>> 
>> -		kfree(vbl_ev);
>> -
>> -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> -	}
>> +	if (vbl_work->enable)
>> +		kms->funcs->enable_vblank(kms,
> priv->crtcs[vbl_work->crtc_id]);
>> +	else
>> +		kms->funcs->disable_vblank(kms,
> priv->crtcs[vbl_work->crtc_id]);
>> 
>> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> +	kfree(vbl_work);
>>  }
>> 
>>  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>>  					int crtc_id, bool enable)
>>  {
>> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> -	struct vblank_event *vbl_ev;
>> -	unsigned long flags;
>> +	struct msm_vblank_work *vbl_work;
>> 
>> -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>> -	if (!vbl_ev)
>> +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
>> +	if (!vbl_work)
>>  		return -ENOMEM;
>> 
>> -	vbl_ev->crtc_id = crtc_id;
>> -	vbl_ev->enable = enable;
>> +	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
>> 
>> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> +	vbl_work->crtc_id = crtc_id;
>> +	vbl_work->enable = enable;
>> +	vbl_work->priv = priv;
>> 
>> -	schedule_work(&vbl_ctrl->work);
>> +	schedule_work(&vbl_work->work);
>> 
>>  	return 0;
>>  }
>> @@ -269,14 +252,13 @@ static int msm_drm_uninit(struct device *dev)
>>  	struct msm_drm_private *priv = ddev->dev_private;
>>  	struct msm_kms *kms = priv->kms;
>>  	struct msm_mdss *mdss = priv->mdss;
>> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> -	struct vblank_event *vbl_ev, *tmp;
>>  	int i;
>> 
>>  	/* We must cancel and cleanup any pending vblank enable/disable
>>  	 * work before drm_irq_uninstall() to avoid work re-enabling an
>>  	 * irq after uninstall has disabled it.
>>  	 */
>> +
>>  	msm_gem_shrinker_cleanup(ddev);
>> 
>>  	drm_kms_helper_poll_fini(ddev);
>> @@ -292,12 +274,6 @@ static int msm_drm_uninit(struct device *dev)
>>  #endif
>>  	drm_mode_config_cleanup(ddev);
>> 
>> -	flush_work(&vbl_ctrl->work);
>> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node)
> {
>> -		list_del(&vbl_ev->node);
>> -		kfree(vbl_ev);
>> -	}
>> -
>>  	/* clean up event worker threads */
>>  	for (i = 0; i < priv->num_crtcs; i++) {
>>  		if (priv->event_thread[i].thread) {
>> @@ -469,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct
> drm_driver *drv)
>>  	priv->wq = alloc_ordered_workqueue("msm", 0);
>> 
>>  	INIT_LIST_HEAD(&priv->inactive_list);
>> -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
>> -	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
>> -	spin_lock_init(&priv->vblank_ctrl.lock);
>> 
>>  	drm_mode_config_init(ddev);
>> 
>> diff --git a/drivers/gpu/drm/msm/msm_drv.h
> b/drivers/gpu/drm/msm/msm_drv.h
>> index 05d33a7..d4cbde2 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.h
>> +++ b/drivers/gpu/drm/msm/msm_drv.h
>> @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>>  	PLANE_PROP_MAX_NUM
>>  };
>> 
>> -struct msm_vblank_ctrl {
>> -	struct work_struct work;
>> -	struct list_head event_list;
>> -	spinlock_t lock;
>> -};
>> -
>>  #define MSM_GPU_MAX_RINGS 4
>>  #define MAX_H_TILES_PER_DISPLAY 2
>> 
>> @@ -225,7 +219,6 @@ struct msm_drm_private {
>>  	struct notifier_block vmap_notifier;
>>  	struct shrinker shrinker;
>> 
>> -	struct msm_vblank_ctrl vblank_ctrl;
>>  	struct drm_atomic_state *pm_state;
>>  };
>> 
>> --
>> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
> Forum,
>> a Linux Foundation Collaborative Project
>> 
>> _______________________________________________
>> Freedreno mailing list
>> Freedreno@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/freedreno

-- 
Jeykumar S
_______________________________________________
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [Freedreno] [PATCH v2 5/5] drm/msm: subclass work object for vblank events
  2018-12-06 18:56         ` Jeykumar Sankaran
@ 2018-12-07  2:32           ` Jeykumar Sankaran
  0 siblings, 0 replies; 19+ messages in thread
From: Jeykumar Sankaran @ 2018-12-07  2:32 UTC (permalink / raw)
  To: Sean Paul; +Cc: linux-arm-msm, dri-devel, seanpaul, hoegsberg, freedreno

On 2018-12-06 10:56, Jeykumar Sankaran wrote:
> On 2018-11-07 07:55, Sean Paul wrote:
>> On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran wrote:
>>> msm maintains a separate structure to define vblank
>>> work definitions and a list to track events submitted
>>> to the workqueue. We can avoid this redundant list
>>> and its protection mechanism, if we subclass the
>>> work object to encapsulate vblank event parameters.
>>> 
>>> changes in v2:
>>> 	- subclass optimization on system wq (Sean Paul)
>> 
>> I wouldn't do it like this, tbh. One problem is that you've lost your
>> flush() on
>> unbind, so there's no way to know if you have workers in the wild 
>> waiting
>> to
>> enable/disable vblank.
>> 
> I believe I didn't respond to this question of yours. This patch does
> remove the flush on the work, but a flush_workqueue() before destroying
> the WQ will make sure all the queued work items run to completion.
> 
> To honor the comment, I will fix the patch to move the flush/destroy
> of the wq before the irq_uninstall.
> 
The above is possible if we use priv->wq for the vblank requests.
Posted v4 with the above fixes.
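
For reference, the change in vblank_ctrl_queue_work() amounts to the
following (a sketch, not the exact v4 hunk):

	-	schedule_work(&vbl_work->work);
	+	queue_work(priv->wq, &vbl_work->work);

so that flushing and destroying priv->wq on unbind also drains any
pending vblank work before drm_irq_uninstall().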

Thanks,
Jeykumar S.
> 
>> Another issues is that AFAICT, we don't need a queue of 
>> enables/disables,
>> but
>> rather just the last requested state (ie: should we be on or off). So
>> things
>> don't need to be this complicated (and we're possibly thrashing vblank
>> on/off
>> for no reason).
>> 
>> I'm still of the mind that you should just make this synchronous and 
>> be
>> done
>> with the threads (especially since we're still uncovering/introducing
>> races!).
>> 
>> Sean
>> 
>>> 
>>> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
>>> ---
>>>  drivers/gpu/drm/msm/msm_drv.c | 67
>> +++++++++++++------------------------------
>>>  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>>>  2 files changed, 20 insertions(+), 54 deletions(-)
>>> 
>>> diff --git a/drivers/gpu/drm/msm/msm_drv.c
>> b/drivers/gpu/drm/msm/msm_drv.c
>>> index 6d6c73b..8da5be2 100644
>>> --- a/drivers/gpu/drm/msm/msm_drv.c
>>> +++ b/drivers/gpu/drm/msm/msm_drv.c
>>> @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
>>>  	return val;
>>>  }
>>> 
>>> -struct vblank_event {
>>> -	struct list_head node;
>>> +struct msm_vblank_work {
>>> +	struct work_struct work;
>>>  	int crtc_id;
>>>  	bool enable;
>>> +	struct msm_drm_private *priv;
>>>  };
>>> 
>>>  static void vblank_ctrl_worker(struct work_struct *work)
>>>  {
>>> -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
>>> -						struct msm_vblank_ctrl,
>> work);
>>> -	struct msm_drm_private *priv = container_of(vbl_ctrl,
>>> -					struct msm_drm_private,
>> vblank_ctrl);
>>> +	struct msm_vblank_work *vbl_work = container_of(work,
>>> +						struct msm_vblank_work,
>> work);
>>> +	struct msm_drm_private *priv = vbl_work->priv;
>>>  	struct msm_kms *kms = priv->kms;
>>> -	struct vblank_event *vbl_ev, *tmp;
>>> -	unsigned long flags;
>>> -
>>> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>>> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node)
>> {
>>> -		list_del(&vbl_ev->node);
>>> -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>>> -
>>> -		if (vbl_ev->enable)
>>> -			kms->funcs->enable_vblank(kms,
>>> -
>> priv->crtcs[vbl_ev->crtc_id]);
>>> -		else
>>> -			kms->funcs->disable_vblank(kms,
>>> -
>> priv->crtcs[vbl_ev->crtc_id]);
>>> 
>>> -		kfree(vbl_ev);
>>> -
>>> -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
>>> -	}
>>> +	if (vbl_work->enable)
>>> +		kms->funcs->enable_vblank(kms,
>> priv->crtcs[vbl_work->crtc_id]);
>>> +	else
>>> +		kms->funcs->disable_vblank(kms,
>> priv->crtcs[vbl_work->crtc_id]);
>>> 
>>> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>>> +	kfree(vbl_work);
>>>  }
>>> 
>>>  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>>>  					int crtc_id, bool enable)
>>>  {
>>> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>>> -	struct vblank_event *vbl_ev;
>>> -	unsigned long flags;
>>> +	struct msm_vblank_work *vbl_work;
>>> 
>>> -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>>> -	if (!vbl_ev)
>>> +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
>>> +	if (!vbl_work)
>>>  		return -ENOMEM;
>>> 
>>> -	vbl_ev->crtc_id = crtc_id;
>>> -	vbl_ev->enable = enable;
>>> +	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
>>> 
>>> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>>> -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>>> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>>> +	vbl_work->crtc_id = crtc_id;
>>> +	vbl_work->enable = enable;
>>> +	vbl_work->priv = priv;
>>> 
>>> -	schedule_work(&vbl_ctrl->work);
>>> +	schedule_work(&vbl_work->work);
>>> 
>>>  	return 0;
>>>  }
>>> @@ -269,14 +252,13 @@ static int msm_drm_uninit(struct device *dev)
>>>  	struct msm_drm_private *priv = ddev->dev_private;
>>>  	struct msm_kms *kms = priv->kms;
>>>  	struct msm_mdss *mdss = priv->mdss;
>>> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>>> -	struct vblank_event *vbl_ev, *tmp;
>>>  	int i;
>>> 
>>>  	/* We must cancel and cleanup any pending vblank enable/disable
>>>  	 * work before drm_irq_uninstall() to avoid work re-enabling an
>>>  	 * irq after uninstall has disabled it.
>>>  	 */
>>> +
>>>  	msm_gem_shrinker_cleanup(ddev);
>>> 
>>>  	drm_kms_helper_poll_fini(ddev);
>>> @@ -292,12 +274,6 @@ static int msm_drm_uninit(struct device *dev)
>>>  #endif
>>>  	drm_mode_config_cleanup(ddev);
>>> 
>>> -	flush_work(&vbl_ctrl->work);
>>> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node)
>> {
>>> -		list_del(&vbl_ev->node);
>>> -		kfree(vbl_ev);
>>> -	}
>>> -
>>>  	/* clean up event worker threads */
>>>  	for (i = 0; i < priv->num_crtcs; i++) {
>>>  		if (priv->event_thread[i].thread) {
>>> @@ -469,9 +445,6 @@ static int msm_drm_init(struct device *dev, 
>>> struct
>> drm_driver *drv)
>>>  	priv->wq = alloc_ordered_workqueue("msm", 0);
>>> 
>>>  	INIT_LIST_HEAD(&priv->inactive_list);
>>> -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
>>> -	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
>>> -	spin_lock_init(&priv->vblank_ctrl.lock);
>>> 
>>>  	drm_mode_config_init(ddev);
>>> 
>>> diff --git a/drivers/gpu/drm/msm/msm_drv.h
>> b/drivers/gpu/drm/msm/msm_drv.h
>>> index 05d33a7..d4cbde2 100644
>>> --- a/drivers/gpu/drm/msm/msm_drv.h
>>> +++ b/drivers/gpu/drm/msm/msm_drv.h
>>> @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>>>  	PLANE_PROP_MAX_NUM
>>>  };
>>> 
>>> -struct msm_vblank_ctrl {
>>> -	struct work_struct work;
>>> -	struct list_head event_list;
>>> -	spinlock_t lock;
>>> -};
>>> -
>>>  #define MSM_GPU_MAX_RINGS 4
>>>  #define MAX_H_TILES_PER_DISPLAY 2
>>> 
>>> @@ -225,7 +219,6 @@ struct msm_drm_private {
>>>  	struct notifier_block vmap_notifier;
>>>  	struct shrinker shrinker;
>>> 
>>> -	struct msm_vblank_ctrl vblank_ctrl;
>>>  	struct drm_atomic_state *pm_state;
>>>  };
>>> 
>>> --
>>> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
>> Forum,
>>> a Linux Foundation Collaborative Project
>>> 
>>> _______________________________________________
>>> Freedreno mailing list
>>> Freedreno@lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/freedreno

-- 
Jeykumar S
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [Freedreno] [PATCH v2 5/5] drm/msm: subclass work object for vblank events
  2018-12-03 20:27                           ` Jeykumar Sankaran
@ 2018-12-07 17:22                             ` Sean Paul
  2018-12-07 23:23                               ` Jeykumar Sankaran
  0 siblings, 1 reply; 19+ messages in thread
From: Sean Paul @ 2018-12-07 17:22 UTC (permalink / raw)
  To: Jeykumar Sankaran
  Cc: Sean Paul, dri-devel, hoegsberg, linux-arm-msm, seanpaul, freedreno

On Mon, Dec 03, 2018 at 12:27:42PM -0800, Jeykumar Sankaran wrote:
> On 2018-12-03 06:21, Sean Paul wrote:
> > On Fri, Nov 30, 2018 at 04:21:15PM -0800, Jeykumar Sankaran wrote:
> > > On 2018-11-30 12:07, Sean Paul wrote:
> > > > On Fri, Nov 30, 2018 at 11:45:55AM -0800, Jeykumar Sankaran wrote:
> > > > > On 2018-11-29 14:15, Sean Paul wrote:
> > > > > > On Tue, Nov 20, 2018 at 02:04:14PM -0800, Jeykumar Sankaran wrote:
> > > > > > > On 2018-11-07 07:55, Sean Paul wrote:
> > > > > > > > On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar Sankaran
> > wrote:
> > > > > > > > > msm maintains a separate structure to define vblank
> > > > > > > > > work definitions and a list to track events submitted
> > > > > > > > > to the workqueue. We can avoid this redundant list
> > > > > > > > > and its protection mechanism, if we subclass the
> > > > > > > > > work object to encapsulate vblank event parameters.
> > > > > > > > >
> > > > > > > > > changes in v2:
> > > > > > > > > 	- subclass optimization on system wq (Sean Paul)
> > > > > > > >
> > > > > > > > I wouldn't do it like this, tbh. One problem is that you've
> > lost
> > > > your
> > > > > > > > flush() on
> > > > > > > > unbind, so there's no way to know if you have workers in the
> > wild
> > > > > > > > waiting
> > > > > > > > to
> > > > > > > > enable/disable vblank.
> > > > > > > >
> > > > > > > > Another issues is that AFAICT, we don't need a queue of
> > > > > > > > enables/disables,
> > > > > > > > but
> > > > > > > > rather just the last requested state (ie: should we be on or
> > off).
> > > > So
> > > > > > > > things
> > > > > > > > don't need to be this complicated (and we're possibly
> > thrashing
> > > > vblank
> > > > > > > > on/off
> > > > > > > > for no reason).
> > > > > > > >
> > > > > > > > I'm still of the mind that you should just make this
> > synchronous
> > > > and
> > > > > > be
> > > > > > > > done
> > > > > > > > with the threads (especially since we're still
> > > > uncovering/introducing
> > > > > > > > races!).
> > > > > > > >
> > > > > > > While scoping out the effort to make vblank events synchronous,
> > I
> > > > > > > found
> > > > > > > that the spinlock locking order of vblank request sequence and
> > > > vblank
> > > > > > > callback
> > > > > > > sequences are the opposite.
> > > > > > >
> > > > > > > In DPU, drm_vblank_enable acquires vblank_time_lock before
> > > > registering
> > > > > > > the crtc to encoder which happens after acquiring
> > encoder_spinlock.
> > > > > > > But
> > > > > > > the vblank_callback acquires encoder_spinlock before accessing
> > the
> > > > > > > registered
> > > > > > > crtc and calling into drm_vblank_handler which tries to acquire
> > > > > > > vblank_time_lock.
> > > > > > > Acquiring both vblank_time_lock and encoder_spinlock in the same
> > > > > > > thread
> > > > > > > is leading to deadlock.
> > > > > >
> > > > > > Hmm, I'm not sure I follow. Are you seeing issues where irq
> > overlaps
> > > > > > with
> > > > > > enable/disable? I hacked in sync vblank enable/disable quickly to
> > see
> > > > if
> > > > > > I
> > > > > > could
> > > > > > reproduce what you're seeing, but things seemed well behaved.
> > > > > >
> > > > >
> > > > > The race is between drm_vblank_get/put and vblank_handler contexts.
> > > > >
> > > > > When made synchronous:
> > > > >
> > > > > while calling drm_vblank_get, the callstack looks like below:
> > > > > drm_vblank_get -> drm_vblank_enable (acquires vblank_time_lock) ->
> > > > > __enable_vblank -> dpu_crtc_vblank ->
> > > > > dpu_encoder_toggle_vblank_for_crtc
> > > > > (tries to acquire enc_spinlock)
> > > > >
> > > > > In vblank handler, the call stack will be:
> > > > > dpu_encoder_phys_vid_vblank_irq -> dpu_encoder_vblank_callback
> > > > > (acquires
> > > > > enc_spinlock) -> dpu_crtc_vblank_callback -> drm_handle_vblank
> > > > > (tries to
> > > > > acquire vblank_time_lock)
> > > >
> > > > Hmm, I'm not sure how this can happen. We acquire and release the
> > > > enc_spinlock
> > > > before enabling the irq, yes we will hold on to the vbl_time_lock, but
> > > > we
> > > > shouldn't be trying to reacquire an encoder's spinlock after we've
> > > > enabled
> > > > it.
> > > In the synchronous approach dpu_encoder_toggle_vblank_for_crtc(which
> > > acquires the enc_spinlock) will be called while we
> > > are holding the vbl_time_lock.
> > > 
> > > > I don't know how that can deadlock, since we should never be running
> > > > enable and
> > > > the handler concurrently.
> > > >
> > > I agree that vblank_irq handler should not be running before the
> > > enable
> > > sequence. But
> > > don't you expect the handler to be running while calling the
> > vblank_disable
> > > sequence?
> > 
> > This is an entirely different problem though. It's also one that is
> > easier
> > to
> > fix. I think we could probably grab the enc_spinlock in disable and
> > clear
> > the
> > crtc pointer.
> > 
> we do hold enc_spinlock in dpu_encoder_assign_crtc (drm/msm: dpu: Remove
> vblank_callback from encoder)
> where we clear the crtc pointer.
> 
> > What I'm getting at is that there's no fundamental reason why we need to
> > have
> > async vblank enable/disable.
> > 
> > Sean
> > 
> There is really no *need* to have them async. But I believe the reason why
> they
> are implemented this way is to avoid deadlock between the below two paths.
> 
> Restating the above findings:
> vblank_handlers and vblank enable/disable can run concurrently. 

I think this is where we disagree. The handler will only be called when
enc->crtc is set.

In the case of disable, we clear the pointer _after_ vblank is
disabled, so enc_spinlock should be uncontested.

On enable, the pointer is set _before_ vblank is enabled, so again the
enc_spinlock is uncontested.
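
Roughly this ordering (a sketch of the intent, not the exact dpu code;
the irq on/off helpers below are placeholders):

	/* enable: publish the crtc before the irq can fire */
	spin_lock_irqsave(&enc_spinlock, flags);
	dpu_enc->crtc = crtc;
	spin_unlock_irqrestore(&enc_spinlock, flags);
	vblank_irq_on();   /* placeholder */

	/* disable: stop the irq before unpublishing the crtc */
	vblank_irq_off();  /* placeholder */
	spin_lock_irqsave(&enc_spinlock, flags);
	dpu_enc->crtc = NULL;
	spin_unlock_irqrestore(&enc_spinlock, flags);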

I tracked down the deadlock you found and submitted a patch for it in [1]. The
issue is that vblank was being incorrectly enabled whenever the encoder is on.
With the fix, you can add however long delay you want to vblank_enable and you
won't produce a deadlock.

Sean

[1]- https://lists.freedesktop.org/archives/dri-devel/2018-December/199670.html


> The first
> trying to acquire
> vbl_time_lock holding enc_spinlock. Other trying to acquire enc_spinlock
> holding
> vbl_time_lock.
> 
> Thanks,
> Jeykumar S.
> 
> 
> > > vbl disable will try to acquire the locks in the opposite order to
> > > that
> > of
> > > irq_handler and the
> > > same issue is bound to happen.
> > > 
> > > With your patch, you should be able to simulate this deadlock if you
> > > can
> > > inject a delay
> > > by adding a pr_err log in vblank_ctrl_queue_work
> > > 
> > > Thanks,
> > > Jeykumar S.
> > > 
> > > > The only thing I can think of is that the vblank interrupts are firing
> > > > after
> > > > vblank has been disabled? In that case, it seems like we should
> > properly
> > > > flush
> > > > them.
> > > >
> > > > Sean
> > > >
> > > >
> > > > >
> > > > >
> > > > > > I do see that there is a chance to call drm_handle_vblank() while
> > > > > > holding
> > > > > > enc_spinlock, but couldn't find any obvious lock recursion there.
> > > > > >
> > > > > > Maybe a callstack or lockdep splat would help?
> > > > > >
> > > > > > Sean
> > > > > >
> > > > > >
> > > > > > Here's my hack to bypass the display thread:
> > > > > >
> > > > > > diff --git a/drivers/gpu/drm/msm/msm_drv.c
> > > > > > b/drivers/gpu/drm/msm/msm_drv.c
> > > > > > index 9c9f7ff6960b38..5a3cac5825319e 100644
> > > > > > --- a/drivers/gpu/drm/msm/msm_drv.c
> > > > > > +++ b/drivers/gpu/drm/msm/msm_drv.c
> > > > > > @@ -242,24 +242,19 @@ static void vblank_ctrl_worker(struct
> > > > kthread_work
> > > > > > *work)
> > > > > >  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
> > > > > >  					int crtc_id, bool enable)
> > > > > >  {
> > > > > > +	struct msm_kms *kms = priv->kms;
> > > > > >  	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> > > > > > -	struct vblank_event *vbl_ev;
> > > > > >  	unsigned long flags;
> > > > > >
> > > > > > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
> > > > > > -	if (!vbl_ev)
> > > > > > -		return -ENOMEM;
> > > > > > +	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > > > > >
> > > > > > -	vbl_ev->crtc_id = crtc_id;
> > > > > > -	vbl_ev->enable = enable;
> > > > > > +	if (enable)
> > > > > > +		kms->funcs->enable_vblank(kms,
> > priv->crtcs[crtc_id]);
> > > > > > +	else
> > > > > > +		kms->funcs->disable_vblank(kms,
> > priv->crtcs[crtc_id]);
> > > > > >
> > > > > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > > > > > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
> > > > > >  	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> > > > > >
> > > > > > -	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
> > > > > > -			&vbl_ctrl->work);
> > > > > > -
> > > > > >  	return 0;
> > > > > >  }
> > > > > >
> > > > > Even with your patch above, I see frame is getting stuck but it
> > > > > recovers
> > > > in
> > > > > a while.
> > > > > The patch I tried was assigning
> > > > crtc->funcs->enable_vblank/disable_vblank so
> > > > > that
> > > > > __enable_vblank can call crtc directly. But the above callstack is
> > > > > still
> > > > > valid for your patch.
> > > > >
> > > > > Thanks,
> > > > > Jeykumar S.
> > > > > >
> > > > > >
> > > > > > >
> > > > > > > In MDP5, I see the same pattern between vblank_time_lock and
> > > > list_lock
> > > > > > which
> > > > > > > is used to track the irq handlers.
> > > > > > >
> > > > > > > I believe that explains why msm_drv is queuing the vblank
> > > > > > > enable/disable
> > > > > > > works to WQ after acquiring vblank_time_lock.
> > > > > > >
> > > > > > > Thanks,
> > > > > > > Jeykumar S.
> > > > > > >
> > > > > > > > Sean
> > > > > > > >
> > > > > > > > >
> > > > > > > > > Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
> > > > > > > > > ---
> > > > > > > > >  drivers/gpu/drm/msm/msm_drv.c | 67
> > > > > > > > +++++++++++++------------------------------
> > > > > > > > >  drivers/gpu/drm/msm/msm_drv.h |  7 -----
> > > > > > > > >  2 files changed, 20 insertions(+), 54 deletions(-)
> > > > > > > > >
> > > > > > > > > diff --git a/drivers/gpu/drm/msm/msm_drv.c
> > > > > > > > b/drivers/gpu/drm/msm/msm_drv.c
> > > > > > > > > index 6d6c73b..8da5be2 100644
> > > > > > > > > --- a/drivers/gpu/drm/msm/msm_drv.c
> > > > > > > > > +++ b/drivers/gpu/drm/msm/msm_drv.c
> > > > > > > > > @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem
> > *addr)
> > > > > > > > >  	return val;
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > > -struct vblank_event {
> > > > > > > > > -	struct list_head node;
> > > > > > > > > +struct msm_vblank_work {
> > > > > > > > > +	struct work_struct work;
> > > > > > > > >  	int crtc_id;
> > > > > > > > >  	bool enable;
> > > > > > > > > +	struct msm_drm_private *priv;
> > > > > > > > >  };
> > > > > > > > >
> > > > > > > > >  static void vblank_ctrl_worker(struct work_struct *work)
> > > > > > > > >  {
> > > > > > > > > -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
> > > > > > > > > -						struct
> > > > msm_vblank_ctrl,
> > > > > > > > work);
> > > > > > > > > -	struct msm_drm_private *priv = container_of(vbl_ctrl,
> > > > > > > > > -					struct msm_drm_private,
> > > > > > > > vblank_ctrl);
> > > > > > > > > +	struct msm_vblank_work *vbl_work = container_of(work,
> > > > > > > > > +						struct
> > > > msm_vblank_work,
> > > > > > > > work);
> > > > > > > > > +	struct msm_drm_private *priv = vbl_work->priv;
> > > > > > > > >  	struct msm_kms *kms = priv->kms;
> > > > > > > > > -	struct vblank_event *vbl_ev, *tmp;
> > > > > > > > > -	unsigned long flags;
> > > > > > > > > -
> > > > > > > > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > > > > > > > > -	list_for_each_entry_safe(vbl_ev, tmp,
> > > > &vbl_ctrl->event_list, node)
> > > > > > > > {
> > > > > > > > > -		list_del(&vbl_ev->node);
> > > > > > > > > -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> > > > > > > > > -
> > > > > > > > > -		if (vbl_ev->enable)
> > > > > > > > > -			kms->funcs->enable_vblank(kms,
> > > > > > > > > -
> > > > > > > > priv->crtcs[vbl_ev->crtc_id]);
> > > > > > > > > -		else
> > > > > > > > > -			kms->funcs->disable_vblank(kms,
> > > > > > > > > -
> > > > > > > > priv->crtcs[vbl_ev->crtc_id]);
> > > > > > > > >
> > > > > > > > > -		kfree(vbl_ev);
> > > > > > > > > -
> > > > > > > > > -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > > > > > > > > -	}
> > > > > > > > > +	if (vbl_work->enable)
> > > > > > > > > +		kms->funcs->enable_vblank(kms,
> > > > > > > > priv->crtcs[vbl_work->crtc_id]);
> > > > > > > > > +	else
> > > > > > > > > +		kms->funcs->disable_vblank(kms,
> > > > > > > > priv->crtcs[vbl_work->crtc_id]);
> > > > > > > > >
> > > > > > > > > -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> > > > > > > > > +	kfree(vbl_work);
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > >  static int vblank_ctrl_queue_work(struct msm_drm_private
> > *priv,
> > > > > > > > >  					int crtc_id, bool enable)
> > > > > > > > >  {
> > > > > > > > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> > > > > > > > > -	struct vblank_event *vbl_ev;
> > > > > > > > > -	unsigned long flags;
> > > > > > > > > +	struct msm_vblank_work *vbl_work;
> > > > > > > > >
> > > > > > > > > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
> > > > > > > > > -	if (!vbl_ev)
> > > > > > > > > +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
> > > > > > > > > +	if (!vbl_work)
> > > > > > > > >  		return -ENOMEM;
> > > > > > > > >
> > > > > > > > > -	vbl_ev->crtc_id = crtc_id;
> > > > > > > > > -	vbl_ev->enable = enable;
> > > > > > > > > +	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
> > > > > > > > >
> > > > > > > > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > > > > > > > > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
> > > > > > > > > -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> > > > > > > > > +	vbl_work->crtc_id = crtc_id;
> > > > > > > > > +	vbl_work->enable = enable;
> > > > > > > > > +	vbl_work->priv = priv;
> > > > > > > > >
> > > > > > > > > -	schedule_work(&vbl_ctrl->work);
> > > > > > > > > +	schedule_work(&vbl_work->work);
> > > > > > > > >
> > > > > > > > >  	return 0;
> > > > > > > > >  }
> > > > > > > > > @@ -269,14 +252,13 @@ static int msm_drm_uninit(struct
> > device
> > > > *dev)
> > > > > > > > >  	struct msm_drm_private *priv = ddev->dev_private;
> > > > > > > > >  	struct msm_kms *kms = priv->kms;
> > > > > > > > >  	struct msm_mdss *mdss = priv->mdss;
> > > > > > > > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> > > > > > > > > -	struct vblank_event *vbl_ev, *tmp;
> > > > > > > > >  	int i;
> > > > > > > > >
> > > > > > > > >  	/* We must cancel and cleanup any pending vblank
> > > > enable/disable
> > > > > > > > >  	 * work before drm_irq_uninstall() to avoid work
> > > > re-enabling an
> > > > > > > > >  	 * irq after uninstall has disabled it.
> > > > > > > > >  	 */
> > > > > > > > > +
> > > > > > > > >  	msm_gem_shrinker_cleanup(ddev);
> > > > > > > > >
> > > > > > > > >  	drm_kms_helper_poll_fini(ddev);
> > > > > > > > > @@ -292,12 +274,6 @@ static int msm_drm_uninit(struct device
> > > > *dev)
> > > > > > > > >  #endif
> > > > > > > > >  	drm_mode_config_cleanup(ddev);
> > > > > > > > >
> > > > > > > > > -	flush_work(&vbl_ctrl->work);
> > > > > > > > > -	list_for_each_entry_safe(vbl_ev, tmp,
> > > > &vbl_ctrl->event_list, node)
> > > > > > > > {
> > > > > > > > > -		list_del(&vbl_ev->node);
> > > > > > > > > -		kfree(vbl_ev);
> > > > > > > > > -	}
> > > > > > > > > -
> > > > > > > > >  	/* clean up event worker threads */
> > > > > > > > >  	for (i = 0; i < priv->num_crtcs; i++) {
> > > > > > > > >  		if (priv->event_thread[i].thread) {
> > > > > > > > > @@ -469,9 +445,6 @@ static int msm_drm_init(struct device
> > *dev,
> > > > > > struct
> > > > > > > > drm_driver *drv)
> > > > > > > > >  	priv->wq = alloc_ordered_workqueue("msm", 0);
> > > > > > > > >
> > > > > > > > >  	INIT_LIST_HEAD(&priv->inactive_list);
> > > > > > > > > -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
> > > > > > > > > -	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
> > > > > > > > > -	spin_lock_init(&priv->vblank_ctrl.lock);
> > > > > > > > >
> > > > > > > > >  	drm_mode_config_init(ddev);
> > > > > > > > >
> > > > > > > > > diff --git a/drivers/gpu/drm/msm/msm_drv.h
> > > > > > > > b/drivers/gpu/drm/msm/msm_drv.h
> > > > > > > > > index 05d33a7..d4cbde2 100644
> > > > > > > > > --- a/drivers/gpu/drm/msm/msm_drv.h
> > > > > > > > > +++ b/drivers/gpu/drm/msm/msm_drv.h
> > > > > > > > > @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
> > > > > > > > >  	PLANE_PROP_MAX_NUM
> > > > > > > > >  };
> > > > > > > > >
> > > > > > > > > -struct msm_vblank_ctrl {
> > > > > > > > > -	struct work_struct work;
> > > > > > > > > -	struct list_head event_list;
> > > > > > > > > -	spinlock_t lock;
> > > > > > > > > -};
> > > > > > > > > -
> > > > > > > > >  #define MSM_GPU_MAX_RINGS 4
> > > > > > > > >  #define MAX_H_TILES_PER_DISPLAY 2
> > > > > > > > >
> > > > > > > > > @@ -225,7 +219,6 @@ struct msm_drm_private {
> > > > > > > > >  	struct notifier_block vmap_notifier;
> > > > > > > > >  	struct shrinker shrinker;
> > > > > > > > >
> > > > > > > > > -	struct msm_vblank_ctrl vblank_ctrl;
> > > > > > > > >  	struct drm_atomic_state *pm_state;
> > > > > > > > >  };
> > > > > > > > >
> > > > > > > > > --
> > > > > > > > > The Qualcomm Innovation Center, Inc. is a member of the Code
> > > > Aurora
> > > > > > > > Forum,
> > > > > > > > > a Linux Foundation Collaborative Project
> > > > > > > > >
> > > > > > > > > _______________________________________________
> > > > > > > > > Freedreno mailing list
> > > > > > > > > Freedreno@lists.freedesktop.org
> > > > > > > > > https://lists.freedesktop.org/mailman/listinfo/freedreno
> > > > > > >
> > > > > > > --
> > > > > > > Jeykumar S
> > > > >
> > > > > --
> > > > > Jeykumar S
> > > 
> > > --
> > > Jeykumar S
> 
> -- 
> Jeykumar S

-- 
Sean Paul, Software Engineer, Google / Chromium OS
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH v2 5/5] drm/msm: subclass work object for vblank events
  2018-12-07 17:22                             ` [Freedreno] " Sean Paul
@ 2018-12-07 23:23                               ` Jeykumar Sankaran
  0 siblings, 0 replies; 19+ messages in thread
From: Jeykumar Sankaran @ 2018-12-07 23:23 UTC (permalink / raw)
  To: Sean Paul
  Cc: linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	jcrouse-sgV2jX0FEOL9JmXXK+q4OQ, seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2018-12-07 09:22, Sean Paul wrote:
> On Mon, Dec 03, 2018 at 12:27:42PM -0800, Jeykumar Sankaran wrote:
>> On 2018-12-03 06:21, Sean Paul wrote:
>> > On Fri, Nov 30, 2018 at 04:21:15PM -0800, Jeykumar Sankaran wrote:
>> > > On 2018-11-30 12:07, Sean Paul wrote:
>> > > > On Fri, Nov 30, 2018 at 11:45:55AM -0800, Jeykumar Sankaran wrote:
>> > > > > On 2018-11-29 14:15, Sean Paul wrote:
>> > > > > > On Tue, Nov 20, 2018 at 02:04:14PM -0800, Jeykumar Sankaran
> wrote:
>> > > > > > > On 2018-11-07 07:55, Sean Paul wrote:
>> > > > > > > > On Tue, Nov 06, 2018 at 02:36:30PM -0800, Jeykumar
> Sankaran
>> > wrote:
>> > > > > > > > > msm maintains a separate structure to define vblank
>> > > > > > > > > work definitions and a list to track events submitted
>> > > > > > > > > to the workqueue. We can avoid this redundant list
>> > > > > > > > > and its protection mechanism, if we subclass the
>> > > > > > > > > work object to encapsulate vblank event parameters.
>> > > > > > > > >
>> > > > > > > > > changes in v2:
>> > > > > > > > > 	- subclass optimization on system wq (Sean Paul)
>> > > > > > > >
>> > > > > > > > I wouldn't do it like this, tbh. One problem is that
> you've
>> > lost
>> > > > your
>> > > > > > > > flush() on
>> > > > > > > > unbind, so there's no way to know if you have workers in
> the
>> > wild
>> > > > > > > > waiting
>> > > > > > > > to
>> > > > > > > > enable/disable vblank.
>> > > > > > > >
>> > > > > > > > Another issues is that AFAICT, we don't need a queue of
>> > > > > > > > enables/disables,
>> > > > > > > > but
>> > > > > > > > rather just the last requested state (ie: should we be on
> or
>> > off).
>> > > > So
>> > > > > > > > things
>> > > > > > > > don't need to be this complicated (and we're possibly
>> > thrashing
>> > > > vblank
>> > > > > > > > on/off
>> > > > > > > > for no reason).
>> > > > > > > >
>> > > > > > > > I'm still of the mind that you should just make this
>> > synchronous
>> > > > and
>> > > > > > be
>> > > > > > > > done
>> > > > > > > > with the threads (especially since we're still
>> > > > uncovering/introducing
>> > > > > > > > races!).
>> > > > > > > >
>> > > > > > > While scoping out the effort to make vblank events
> synchronous,
>> > I
>> > > > > > > found
>> > > > > > > that the spinlock locking order of vblank request sequence
> and
>> > > > vblank
>> > > > > > > callback
>> > > > > > > sequences are the opposite.
>> > > > > > >
>> > > > > > > In DPU, drm_vblank_enable acquires vblank_time_lock before
>> > > > registering
>> > > > > > > the crtc to encoder which happens after acquiring
>> > encoder_spinlock.
>> > > > > > > But
>> > > > > > > the vblank_callback acquires encoder_spinlock before
> accessing
>> > the
>> > > > > > > registered
>> > > > > > > crtc and calling into drm_vblank_handler which tries to
> acquire
>> > > > > > > vblank_time_lock.
>> > > > > > > Acquiring both vblank_time_lock and encoder_spinlock in the
> same
>> > > > > > > thread
>> > > > > > > is leading to deadlock.
>> > > > > >
>> > > > > > Hmm, I'm not sure I follow. Are you seeing issues where irq
>> > overlaps
>> > > > > > with
>> > > > > > enable/disable? I hacked in sync vblank enable/disable quickly
> to
>> > see
>> > > > if
>> > > > > > I
>> > > > > > could
>> > > > > > reproduce what you're seeing, but things seemed well behaved.
>> > > > > >
>> > > > >
>> > > > > The race is between drm_vblank_get/put and vblank_handler
> contexts.
>> > > > >
>> > > > > When made synchronous:
>> > > > >
>> > > > > while calling drm_vblank_get, the callstack looks like below:
>> > > > > drm_vblank_get -> drm_vblank_enable (acquires vblank_time_lock)
> ->
>> > > > > __enable_vblank -> dpu_crtc_vblank ->
>> > > > > dpu_encoder_toggle_vblank_for_crtc
>> > > > > (tries to acquire enc_spinlock)
>> > > > >
>> > > > > In vblank handler, the call stack will be:
>> > > > > dpu_encoder_phys_vid_vblank_irq -> dpu_encoder_vblank_callback
>> > > > > (acquires
>> > > > > enc_spinlock) -> dpu_crtc_vblank_callback -> drm_handle_vblank
>> > > > > (tries to
>> > > > > acquire vblank_time_lock)
>> > > >
>> > > > Hmm, I'm not sure how this can happen. We acquire and release the
>> > > > enc_spinlock
>> > > > before enabling the irq, yes we will hold on to the vbl_time_lock,
> but
>> > > > we
>> > > > shouldn't be trying to reacquire an encoder's spinlock after we've
>> > > > enabled
>> > > > it.
>> > > In the synchronous approach dpu_encoder_toggle_vblank_for_crtc(which
>> > > acquires the enc_spinlock) will be called while we
>> > > are holding the vbl_time_lock.
>> > >
>> > > > I don't know how that can deadlock, since we should never be
> running
>> > > > enable and
>> > > > the handler concurrently.
>> > > >
>> > > I agree that vblank_irq handler should not be running before the
>> > > enable
>> > > sequence. But
>> > > don't you expect the handler to be running while calling the
>> > vblank_disable
>> > > sequence?
>> >
>> > This is an entirely different problem though. It's also one that is
>> > easier
>> > to
>> > fix. I think we could probably grab the enc_spinlock in disable and
>> > clear
>> > the
>> > crtc pointer.
>> >
>> we do hold enc_spinlock in dpu_encoder_assign_crtc (drm/msm: dpu: 
>> Remove
>> vblank_callback from encoder)
>> where we clear the crtc pointer.
>> 
>> > What I'm getting at is that there's no fundamental reason why we need
>> > to have async vblank enable/disable.
>> >
>> > Sean
>> >
>> There is really no *need* to have them async. But I believe the reason
>> they are implemented this way is to avoid a deadlock between the two
>> paths below.
>> 
>> Restating the above findings:
>> vblank handlers and vblank enable/disable can run concurrently.
> 
> I think this is where we disagree. The handler will only be called when
> enc->crtc is set.
> 
> In the case of disable, we clear the pointer _after_ vblank is
> disabled, so enc_spinlock should be uncontested.
> 
> On enable, the pointer is set _before_ vblank is enabled, so again the
> enc_spinlock is uncontested.
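> 
> In other words, the intended ordering is roughly this (a sketch with
> hypothetical helper names, not the actual code):
> 
> 	/* enable: publish the pointer before the irq can observe it */
> 	spin_lock_irqsave(&dpu_enc->enc_spinlock, flags);
> 	dpu_enc->crtc = crtc;
> 	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, flags);
> 	enable_vblank_irq();	/* hypothetical helper */
> 
> 	/* disable: the mirror image */
> 	disable_vblank_irq();	/* hypothetical helper */
> 	spin_lock_irqsave(&dpu_enc->enc_spinlock, flags);
> 	dpu_enc->crtc = NULL;
> 	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, flags);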
> 
> I tracked down the deadlock you found and submitted a patch for it in
> [1]. The issue is that vblank was being incorrectly enabled whenever the
> encoder is on. With the fix, you can add however long a delay you want
> to vblank_enable and you won't produce a deadlock.
> 
> Sean
> 
> [1] https://lists.freedesktop.org/archives/dri-devel/2018-December/199670.html
> 
> 
I am aware of the bug in DPU that enables the vsync irq before
vblank_enable is called; that is why I tried to explain the concurrency
during vblank_disable. Your patch certainly fixes the enable sequence,
but the deadlock is still reproducible during vblank_disable.
Below are the logs I could collect during the deadlock:

[   13.448254] [drm:dpu_encoder_vblank_callback:1242] vbl: dpu_encoder_vblank_callback - acquire enc_spinlock
[   13.458187] vbl: drm_handle_vblank - acquire vblank_time_lock
[   13.472993] [drm:dpu_encoder_phys_vid_vblank_irq:319] vbl: dpu_encoder_phys_vid_vblank_irq
[   13.481586] [drm:dpu_encoder_vblank_callback:1242] vbl: dpu_encoder_vblank_callback - acquire enc_spinlock
[   13.491518] vbl: drm_handle_vblank - acquire vblank_time_lock

--> vblank_disable 5 sec timer expires
           [Task 1] [   13.506175] vbl: vblank_disable_fn - timer expires
--> vblank irq fires
           [Task 2] [   13.506326] [drm:dpu_encoder_phys_vid_vblank_irq:319] vbl: dpu_encoder_phys_vid_vblank_irq
--> Task 1 acquires vblank_time_lock
           [Task 1] [   13.511157] vbl: drm_vblank_disable_and_save - acquire vblank_time_lock
--> Task 2 acquires enc_spinlock
           [Task 2] [   13.519748] [drm:dpu_encoder_vblank_callback:1242] vbl: dpu_encoder_vblank_callback - acquire enc_spinlock
--> Task 2 waits for vblank_time_lock
           [Task 2] [   13.519757] vbl: drm_handle_vblank - acquire vblank_time_lock
           [Task 1] [   13.526682] vbl: __disable_vblank
           [Task 1] [   13.546082] vbl: vblank_ctrl_queue_work
--> Task 1 waits for enc_spinlock, completing the deadlock
           [Task 1] [   13.550068] [drm:dpu_encoder_toggle_vblank_for_crtc:1287] vbl: dpu_encoder_toggle_vblank_for_crtc - acquire enc_spinlock

Task 1 is the vblank_disable timer context.
Task 2 is the vblank irq handler context.
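
To make the inversion concrete, here is a minimal userspace sketch of the
same AB-BA pattern (illustrative only: pthread mutexes stand in for the
kernel spinlocks, and the lock/function names are borrowed from the traces
above):

	/* ab_ba_demo.c - build with: cc ab_ba_demo.c -lpthread */
	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t vblank_time_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t enc_spinlock = PTHREAD_MUTEX_INITIALIZER;

	static void *vblank_disable(void *arg)	/* models Task 1 */
	{
		(void)arg;
		pthread_mutex_lock(&vblank_time_lock);	/* drm_vblank_disable_and_save */
		usleep(1000);				/* widen the race window */
		pthread_mutex_lock(&enc_spinlock);	/* toggle_vblank_for_crtc */
		puts("disable path: got both locks");
		pthread_mutex_unlock(&enc_spinlock);
		pthread_mutex_unlock(&vblank_time_lock);
		return NULL;
	}

	static void *vblank_irq(void *arg)	/* models Task 2 */
	{
		(void)arg;
		pthread_mutex_lock(&enc_spinlock);	/* encoder vblank callback */
		usleep(1000);
		pthread_mutex_lock(&vblank_time_lock);	/* drm_handle_vblank */
		puts("irq path: got both locks");
		pthread_mutex_unlock(&vblank_time_lock);
		pthread_mutex_unlock(&enc_spinlock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t1, t2;

		pthread_create(&t1, NULL, vblank_disable, NULL);
		pthread_create(&t2, NULL, vblank_irq, NULL);
		pthread_join(t1, NULL);	/* never returns: each task holds the
					 * lock the other one is waiting for */
		pthread_join(t2, NULL);
		return 0;
	}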

The patch I used for the additional logging is below:

diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 98e0911..3874241 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -330,6 +330,8 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
 		}
 	}
 
+	pr_err("vbl: %s \n", __func__);
+
 	dev->driver->disable_vblank(dev, pipe);
 }
 
@@ -350,6 +352,7 @@ void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
 	 * so no updates of timestamps or count can happen after we've
 	 * disabled. Needed to prevent races in case of delayed irq's.
 	 */
+	pr_err("vbl: %s - acquire vblank_time_lock \n", __func__);
 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
 	/*
@@ -382,6 +385,8 @@ static void vblank_disable_fn(struct timer_list *t)
 	unsigned int pipe = vblank->pipe;
 	unsigned long irqflags;
 
+	pr_err("vbl: %s - timer expires\n", __func__);
+
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
 		DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
@@ -1685,7 +1690,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
 	struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
 	unsigned long irqflags;
 	bool disable_irq;
-
+
 	if (WARN_ON_ONCE(!dev->num_crtcs))
 		return false;
 
@@ -1694,6 +1699,9 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
 
 	spin_lock_irqsave(&dev->event_lock, irqflags);
 
+	pr_err("vbl: %s - acquire vblank_time_lock\n", __func__);
+
+
 	/* Need timestamp lock to prevent concurrent execution with
 	 * vblank enable/disable, as this would cause inconsistent
 	 * or corrupted timestamps and vblank counts.
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 36158b7..c2e5f05 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -1239,7 +1239,9 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
 	DPU_ATRACE_BEGIN("encoder_vblank_callback");
 	dpu_enc = to_dpu_encoder_virt(drm_enc);
 
+	pr_err("vbl: %s - acquire enc_spinlock\n", __func__);
 	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+
 	if (dpu_enc->crtc)
 		dpu_crtc_vblank_callback(dpu_enc->crtc);
 	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
@@ -1282,6 +1284,8 @@ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
 
 	trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
 
+	pr_err("vbl: %s - acquire enc_spinlock\n", __func__);
+
 	spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
 	if (dpu_enc->crtc != crtc) {
 		spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index e3125a1..a74f1a4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -316,6 +316,8 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
 
 	DPU_ATRACE_BEGIN("vblank_irq");
 
+	pr_err("vbl: %s \n", __func__);
+
 	if (phys_enc->parent_ops->handle_vblank_virt)
 		phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
 				phys_enc);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index bd527b7..99b74f9 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -249,6 +249,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 	unsigned long flags;
 	struct msm_kms *kms = priv->kms;
 
+	pr_err("vbl: %s\n", __func__);
 
 	spin_lock_irqsave(&vbl_ctrl->lock, flags);
 	if (enable)
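
For what it's worth, lockdep should report this inversion as a circular
locking dependency without the hang having to actually occur, given a
debug config along these lines (kernel config, not driver code):

	CONFIG_PROVE_LOCKING=y
	CONFIG_DEBUG_SPINLOCK=y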

Thanks,
Jeykumar S.

>> The first tries to acquire the vbl_time_lock while holding the
>> enc_spinlock; the other tries to acquire the enc_spinlock while
>> holding the vbl_time_lock.
>> 
>> Thanks,
>> Jeykumar S.
>> 
>> 
>> > > vblank disable will try to acquire the locks in the opposite order
>> > > to the irq handler, and the same issue is bound to happen.
>> > >
>> > > With your patch, you should be able to simulate this deadlock if you
>> > > inject a delay, for example by adding a pr_err log, in
>> > > vblank_ctrl_queue_work; see the sketch below.
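>> > >
>> > > Something like this (a hypothetical test hack; assumes linux/delay.h
>> > > is available in msm_drv.c) should widen the window enough:
>> > >
>> > > 	/* in vblank_ctrl_queue_work(), reached with vblank_time_lock
>> > > 	 * held in the synchronous experiment; busy-wait so the vblank
>> > > 	 * irq handler can take enc_spinlock first and expose the
>> > > 	 * inversion
>> > > 	 */
>> > > 	mdelay(10);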
>> > >
>> > > Thanks,
>> > > Jeykumar S.
>> > >
>> > > > The only thing I can think of is that the vblank interrupts are
>> > > > firing after vblank has been disabled? In that case, it seems like
>> > > > we should properly flush them.
>> > > >
>> > > > Sean
>> > > >
>> > > >
>> > > > >
>> > > > >
>> > > > > > I do see that there is a chance to call drm_handle_vblank()
>> > > > > > while holding the enc_spinlock, but couldn't find any obvious
>> > > > > > lock recursion there.
>> > > > > >
>> > > > > > Maybe a callstack or lockdep splat would help?
>> > > > > >
>> > > > > > Sean
>> > > > > >
>> > > > > >
>> > > > > > Here's my hack to bypass the display thread:
>> > > > > >
>> > > > > > diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
>> > > > > > index 9c9f7ff6960b38..5a3cac5825319e 100644
>> > > > > > --- a/drivers/gpu/drm/msm/msm_drv.c
>> > > > > > +++ b/drivers/gpu/drm/msm/msm_drv.c
>> > > > > > @@ -242,24 +242,19 @@ static void vblank_ctrl_worker(struct kthread_work *work)
>> > > > > >  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>> > > > > >  					int crtc_id, bool enable)
>> > > > > >  {
>> > > > > > +	struct msm_kms *kms = priv->kms;
>> > > > > >  	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> > > > > > -	struct vblank_event *vbl_ev;
>> > > > > >  	unsigned long flags;
>> > > > > > 
>> > > > > > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>> > > > > > -	if (!vbl_ev)
>> > > > > > -		return -ENOMEM;
>> > > > > > +	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > > > > > 
>> > > > > > -	vbl_ev->crtc_id = crtc_id;
>> > > > > > -	vbl_ev->enable = enable;
>> > > > > > +	if (enable)
>> > > > > > +		kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
>> > > > > > +	else
>> > > > > > +		kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
>> > > > > > 
>> > > > > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > > > > > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>> > > > > >  	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> > > > > > 
>> > > > > > -	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
>> > > > > > -			&vbl_ctrl->work);
>> > > > > > -
>> > > > > >  	return 0;
>> > > > > >  }
>> > > > > >
>> > > > > Even with your patch above, I see frames getting stuck, but it
>> > > > > recovers after a while. The patch I tried was assigning
>> > > > > crtc->funcs->enable_vblank/disable_vblank so that __enable_vblank
>> > > > > can call into the crtc directly. But the above callstack is still
>> > > > > valid for your patch.
>> > > > >
>> > > > > Thanks,
>> > > > > Jeykumar S.
>> > > > > >
>> > > > > >
>> > > > > > >
>> > > > > > > In MDP5, I see the same pattern between vblank_time_lock and
>> > > > > > > list_lock, which is used to track the irq handlers.
>> > > > > > >
>> > > > > > > I believe that explains why msm_drv queues the vblank
>> > > > > > > enable/disable work to a workqueue instead of doing it while
>> > > > > > > holding vblank_time_lock.
>> > > > > > >
>> > > > > > > Thanks,
>> > > > > > > Jeykumar S.
>> > > > > > >
>> > > > > > > > Sean
>> > > > > > > >
>> > > > > > > > >
>> > > > > > > > > Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
>> > > > > > > > > ---
>> > > > > > > > >  drivers/gpu/drm/msm/msm_drv.c | 67 +++++++++++++------------------------------
>> > > > > > > > >  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>> > > > > > > > >  2 files changed, 20 insertions(+), 54 deletions(-)
>> > > > > > > > >
>> > > > > > > > > diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
>> > > > > > > > > index 6d6c73b..8da5be2 100644
>> > > > > > > > > --- a/drivers/gpu/drm/msm/msm_drv.c
>> > > > > > > > > +++ b/drivers/gpu/drm/msm/msm_drv.c
>> > > > > > > > > @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
>> > > > > > > > >  	return val;
>> > > > > > > > >  }
>> > > > > > > > >
>> > > > > > > > > -struct vblank_event {
>> > > > > > > > > -	struct list_head node;
>> > > > > > > > > +struct msm_vblank_work {
>> > > > > > > > > +	struct work_struct work;
>> > > > > > > > >  	int crtc_id;
>> > > > > > > > >  	bool enable;
>> > > > > > > > > +	struct msm_drm_private *priv;
>> > > > > > > > >  };
>> > > > > > > > >
>> > > > > > > > >  static void vblank_ctrl_worker(struct work_struct *work)
>> > > > > > > > >  {
>> > > > > > > > > -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
>> > > > > > > > > -						struct msm_vblank_ctrl, work);
>> > > > > > > > > -	struct msm_drm_private *priv = container_of(vbl_ctrl,
>> > > > > > > > > -					struct msm_drm_private, vblank_ctrl);
>> > > > > > > > > +	struct msm_vblank_work *vbl_work = container_of(work,
>> > > > > > > > > +						struct msm_vblank_work, work);
>> > > > > > > > > +	struct msm_drm_private *priv = vbl_work->priv;
>> > > > > > > > >  	struct msm_kms *kms = priv->kms;
>> > > > > > > > > -	struct vblank_event *vbl_ev, *tmp;
>> > > > > > > > > -	unsigned long flags;
>> > > > > > > > > -
>> > > > > > > > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > > > > > > > > -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
>> > > > > > > > > -		list_del(&vbl_ev->node);
>> > > > > > > > > -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> > > > > > > > > -
>> > > > > > > > > -		if (vbl_ev->enable)
>> > > > > > > > > -			kms->funcs->enable_vblank(kms,
>> > > > > > > > > -					priv->crtcs[vbl_ev->crtc_id]);
>> > > > > > > > > -		else
>> > > > > > > > > -			kms->funcs->disable_vblank(kms,
>> > > > > > > > > -					priv->crtcs[vbl_ev->crtc_id]);
>> > > > > > > > >
>> > > > > > > > > -		kfree(vbl_ev);
>> > > > > > > > > -
>> > > > > > > > > -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > > > > > > > > -	}
>> > > > > > > > > +	if (vbl_work->enable)
>> > > > > > > > > +		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
>> > > > > > > > > +	else
>> > > > > > > > > +		kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
>> > > > > > > > >
>> > > > > > > > > -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> > > > > > > > > +	kfree(vbl_work);
>> > > > > > > > >  }
>> > > > > > > > >
>> > > > > > > > >  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>> > > > > > > > >  					int crtc_id, bool enable)
>> > > > > > > > >  {
>> > > > > > > > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> > > > > > > > > -	struct vblank_event *vbl_ev;
>> > > > > > > > > -	unsigned long flags;
>> > > > > > > > > +	struct msm_vblank_work *vbl_work;
>> > > > > > > > >
>> > > > > > > > > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>> > > > > > > > > -	if (!vbl_ev)
>> > > > > > > > > +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
>> > > > > > > > > +	if (!vbl_work)
>> > > > > > > > >  		return -ENOMEM;
>> > > > > > > > >
>> > > > > > > > > -	vbl_ev->crtc_id = crtc_id;
>> > > > > > > > > -	vbl_ev->enable = enable;
>> > > > > > > > > +	INIT_WORK(&vbl_work->work, vblank_ctrl_worker);
>> > > > > > > > >
>> > > > > > > > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > > > > > > > > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>> > > > > > > > > -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> > > > > > > > > +	vbl_work->crtc_id = crtc_id;
>> > > > > > > > > +	vbl_work->enable = enable;
>> > > > > > > > > +	vbl_work->priv = priv;
>> > > > > > > > >
>> > > > > > > > > -	schedule_work(&vbl_ctrl->work);
>> > > > > > > > > +	schedule_work(&vbl_work->work);
>> > > > > > > > >
>> > > > > > > > >  	return 0;
>> > > > > > > > >  }
>> > > > > > > > > @@ -269,14 +252,13 @@ static int msm_drm_uninit(struct device *dev)
>> > > > > > > > >  	struct msm_drm_private *priv = ddev->dev_private;
>> > > > > > > > >  	struct msm_kms *kms = priv->kms;
>> > > > > > > > >  	struct msm_mdss *mdss = priv->mdss;
>> > > > > > > > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> > > > > > > > > -	struct vblank_event *vbl_ev, *tmp;
>> > > > > > > > >  	int i;
>> > > > > > > > >
>> > > > > > > > >  	/* We must cancel and cleanup any pending vblank enable/disable
>> > > > > > > > >  	 * work before drm_irq_uninstall() to avoid work re-enabling an
>> > > > > > > > >  	 * irq after uninstall has disabled it.
>> > > > > > > > >  	 */
>> > > > > > > > > +
>> > > > > > > > >  	msm_gem_shrinker_cleanup(ddev);
>> > > > > > > > >
>> > > > > > > > >  	drm_kms_helper_poll_fini(ddev);
>> > > > > > > > > @@ -292,12 +274,6 @@ static int msm_drm_uninit(struct device *dev)
>> > > > > > > > >  #endif
>> > > > > > > > >  	drm_mode_config_cleanup(ddev);
>> > > > > > > > >
>> > > > > > > > > -	flush_work(&vbl_ctrl->work);
>> > > > > > > > > -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
>> > > > > > > > > -		list_del(&vbl_ev->node);
>> > > > > > > > > -		kfree(vbl_ev);
>> > > > > > > > > -	}
>> > > > > > > > > -
>> > > > > > > > >  	/* clean up event worker threads */
>> > > > > > > > >  	for (i = 0; i < priv->num_crtcs; i++) {
>> > > > > > > > >  		if (priv->event_thread[i].thread) {
>> > > > > > > > > @@ -469,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
>> > > > > > > > >  	priv->wq = alloc_ordered_workqueue("msm", 0);
>> > > > > > > > >
>> > > > > > > > >  	INIT_LIST_HEAD(&priv->inactive_list);
>> > > > > > > > > -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
>> > > > > > > > > -	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
>> > > > > > > > > -	spin_lock_init(&priv->vblank_ctrl.lock);
>> > > > > > > > >
>> > > > > > > > >  	drm_mode_config_init(ddev);
>> > > > > > > > >
>> > > > > > > > > diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
>> > > > > > > > > index 05d33a7..d4cbde2 100644
>> > > > > > > > > --- a/drivers/gpu/drm/msm/msm_drv.h
>> > > > > > > > > +++ b/drivers/gpu/drm/msm/msm_drv.h
>> > > > > > > > > @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>> > > > > > > > >  	PLANE_PROP_MAX_NUM
>> > > > > > > > >  };
>> > > > > > > > >
>> > > > > > > > > -struct msm_vblank_ctrl {
>> > > > > > > > > -	struct work_struct work;
>> > > > > > > > > -	struct list_head event_list;
>> > > > > > > > > -	spinlock_t lock;
>> > > > > > > > > -};
>> > > > > > > > > -
>> > > > > > > > >  #define MSM_GPU_MAX_RINGS 4
>> > > > > > > > >  #define MAX_H_TILES_PER_DISPLAY 2
>> > > > > > > > >
>> > > > > > > > > @@ -225,7 +219,6 @@ struct msm_drm_private {
>> > > > > > > > >  	struct notifier_block vmap_notifier;
>> > > > > > > > >  	struct shrinker shrinker;
>> > > > > > > > >
>> > > > > > > > > -	struct msm_vblank_ctrl vblank_ctrl;
>> > > > > > > > >  	struct drm_atomic_state *pm_state;
>> > > > > > > > >  };
>> > > > > > > > >
>> > > > > > > > > --
>> > > > > > > > > The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
>> > > > > > > > > a Linux Foundation Collaborative Project
>> > > > > > > > >
>> > > > > > > > > _______________________________________________
>> > > > > > > > > Freedreno mailing list
>> > > > > > > > > Freedreno@lists.freedesktop.org
>> > > > > > > > > https://lists.freedesktop.org/mailman/listinfo/freedreno
>> > > > > > >
>> > > > > > > --
>> > > > > > > Jeykumar S
>> > > > >
>> > > > > --
>> > > > > Jeykumar S
>> > >
>> > > --
>> > > Jeykumar S
>> 
>> --
>> Jeykumar S

-- 
Jeykumar S
_______________________________________________
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno

^ permalink raw reply related	[flat|nested] 19+ messages in thread

end of thread, other threads:[~2018-12-07 23:23 UTC | newest]

Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-11-06 22:36 [PATCH v2 1/5] drm/msm: destroy msm threads after config cleanup Jeykumar Sankaran
2018-11-06 22:36 ` [PATCH v2 3/5] drm/msm/dpu: use system wq for idle power collapse Jeykumar Sankaran
     [not found] ` <1541543790-748-1-git-send-email-jsanka-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
2018-11-06 22:36   ` [PATCH v2 2/5] drm/msm/dpu: use system wq for vblank events Jeykumar Sankaran
2018-11-06 22:36   ` [PATCH v2 4/5] drm/msm: clean up display thread Jeykumar Sankaran
2018-11-06 22:36   ` [PATCH v2 5/5] drm/msm: subclass work object for vblank events Jeykumar Sankaran
     [not found]     ` <1541543790-748-5-git-send-email-jsanka-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
2018-11-06 23:15       ` Jordan Crouse
2018-11-07 15:55       ` Sean Paul
2018-11-20 22:04         ` Jeykumar Sankaran
     [not found]           ` <86c75419b86da1a3f538638ef0004203-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
2018-11-29 22:15             ` Sean Paul
2018-11-30 19:45               ` Jeykumar Sankaran
     [not found]                 ` <126d5b3a93c1827aaf10cd64486d4967-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
2018-11-30 20:07                   ` Sean Paul
2018-12-01  0:21                     ` Jeykumar Sankaran
     [not found]                       ` <e50d359b8cdd5fd0ccc975a791f65847-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
2018-12-03 14:21                         ` Sean Paul
2018-12-03 20:27                           ` Jeykumar Sankaran
2018-12-07 17:22                             ` [Freedreno] " Sean Paul
2018-12-07 23:23                               ` Jeykumar Sankaran
2018-12-06 18:56         ` Jeykumar Sankaran
2018-12-07  2:32           ` [Freedreno] " Jeykumar Sankaran
2018-11-07 15:42   ` [PATCH v2 1/5] drm/msm: destroy msm threads after config cleanup Sean Paul

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.