* [PATCH 1/2] drm/msm: use common display thread for dispatching vblank events
@ 2018-11-01  0:19 Jeykumar Sankaran
       [not found] ` <1541031545-20520-1-git-send-email-jsanka-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
  0 siblings, 1 reply; 11+ messages in thread
From: Jeykumar Sankaran @ 2018-11-01  0:19 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	linux-arm-msm-u79uwXL29TY76Z2rM5mHXA
  Cc: hoegsberg-hpIqsD4AKlfQT0dZR+AlfA, Jeykumar Sankaran,
	seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w

DPU was using one thread per display to dispatch async commits and
vblank requests. Since msm has already been cleaned up to use a common
thread for all display commits, the display threads are now only used
to service vblank requests. A single thread is sufficient for this job
without any performance impact.

Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c |  6 +---
 drivers/gpu/drm/msm/msm_drv.c               | 50 ++++++++++++-----------------
 drivers/gpu/drm/msm/msm_drv.h               |  2 +-
 3 files changed, 23 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 82c55ef..aff20f5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -753,11 +753,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
 	is_vid_mode = dpu_enc->disp_info.capabilities &
 						MSM_DISPLAY_CAP_VID_MODE;
 
-	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
-		DPU_ERROR("invalid crtc index\n");
-		return -EINVAL;
-	}
-	disp_thread = &priv->disp_thread[drm_enc->crtc->index];
+	disp_thread = &priv->disp_thread;
 
 	/*
 	 * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9c9f7ff..1f384b3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -257,8 +257,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
 	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
-	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
-			&vbl_ctrl->work);
+	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
 
 	return 0;
 }
@@ -284,14 +283,12 @@ static int msm_drm_uninit(struct device *dev)
 		kfree(vbl_ev);
 	}
 
+	kthread_flush_worker(&priv->disp_thread.worker);
+	kthread_stop(priv->disp_thread.thread);
+	priv->disp_thread.thread = NULL;
+
 	/* clean up display commit/event worker threads */
 	for (i = 0; i < priv->num_crtcs; i++) {
-		if (priv->disp_thread[i].thread) {
-			kthread_flush_worker(&priv->disp_thread[i].worker);
-			kthread_stop(priv->disp_thread[i].thread);
-			priv->disp_thread[i].thread = NULL;
-		}
-
 		if (priv->event_thread[i].thread) {
 			kthread_flush_worker(&priv->event_thread[i].worker);
 			kthread_stop(priv->event_thread[i].thread);
@@ -537,6 +534,22 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	ddev->mode_config.funcs = &mode_config_funcs;
 	ddev->mode_config.helper_private = &mode_config_helper_funcs;
 
+	/* initialize display thread */
+	kthread_init_worker(&priv->disp_thread.worker);
+	priv->disp_thread.dev = ddev;
+	priv->disp_thread.thread = kthread_run(kthread_worker_fn,
+					       &priv->disp_thread.worker,
+					       "disp_thread");
+	if (IS_ERR(priv->disp_thread.thread)) {
+		DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
+		priv->disp_thread.thread = NULL;
+		goto err_msm_uninit;
+	}
+
+	ret = sched_setscheduler(priv->disp_thread.thread, SCHED_FIFO, &param);
+	if (ret)
+		pr_warn("display thread priority update failed: %d\n", ret);
+
 	/**
 	 * this priority was found during empiric testing to have appropriate
 	 * realtime scheduling to process display updates and interact with
@@ -544,27 +557,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	 */
 	param.sched_priority = 16;
 	for (i = 0; i < priv->num_crtcs; i++) {
-
-		/* initialize display thread */
-		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
-		kthread_init_worker(&priv->disp_thread[i].worker);
-		priv->disp_thread[i].dev = ddev;
-		priv->disp_thread[i].thread =
-			kthread_run(kthread_worker_fn,
-				&priv->disp_thread[i].worker,
-				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
-		if (IS_ERR(priv->disp_thread[i].thread)) {
-			DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
-			priv->disp_thread[i].thread = NULL;
-			goto err_msm_uninit;
-		}
-
-		ret = sched_setscheduler(priv->disp_thread[i].thread,
-					 SCHED_FIFO, &param);
-		if (ret)
-			dev_warn(dev, "disp_thread set priority failed: %d\n",
-				 ret);
-
 		/* initialize event thread */
 		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
 		kthread_init_worker(&priv->event_thread[i].worker);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9d11f32..e81b1fa 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -197,7 +197,7 @@ struct msm_drm_private {
 	unsigned int num_crtcs;
 	struct drm_crtc *crtcs[MAX_CRTCS];
 
-	struct msm_drm_thread disp_thread[MAX_CRTCS];
+	struct msm_drm_thread disp_thread;
 	struct msm_drm_thread event_thread[MAX_CRTCS];
 
 	unsigned int num_encoders;
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project


* [PATCH 2/2] drm/msm: subclass work object for vblank events
       [not found] ` <1541031545-20520-1-git-send-email-jsanka-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
@ 2018-11-01  0:19   ` Jeykumar Sankaran
       [not found]     ` <1541031545-20520-2-git-send-email-jsanka-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
  2018-11-01 19:09   ` [PATCH 1/2] drm/msm: use common display thread for dispatching " Sean Paul
  2018-11-01 19:44   ` Jordan Crouse
  2 siblings, 1 reply; 11+ messages in thread
From: Jeykumar Sankaran @ 2018-11-01  0:19 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	linux-arm-msm-u79uwXL29TY76Z2rM5mHXA
  Cc: hoegsberg-hpIqsD4AKlfQT0dZR+AlfA, Jeykumar Sankaran,
	seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w

msm maintains a separate structure for vblank work definitions and a
list to track events submitted to the display worker thread. We can
avoid this redundant list and its protection mechanism by subclassing
the work object to encapsulate the vblank event parameters.

Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
---
 drivers/gpu/drm/msm/msm_drv.c | 70 ++++++++++++-------------------------------
 drivers/gpu/drm/msm/msm_drv.h |  7 -----
 2 files changed, 19 insertions(+), 58 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 1f384b3..67a96ee 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
 	return val;
 }
 
-struct vblank_event {
-	struct list_head node;
+struct msm_vblank_work {
+	struct kthread_work work;
 	int crtc_id;
 	bool enable;
+	struct msm_drm_private *priv;
 };
 
 static void vblank_ctrl_worker(struct kthread_work *work)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
-						struct msm_vblank_ctrl, work);
-	struct msm_drm_private *priv = container_of(vbl_ctrl,
-					struct msm_drm_private, vblank_ctrl);
+	struct msm_vblank_work *vbl_work = container_of(work,
+						struct msm_vblank_work, work);
+	struct msm_drm_private *priv = vbl_work->priv;
 	struct msm_kms *kms = priv->kms;
-	struct vblank_event *vbl_ev, *tmp;
-	unsigned long flags;
-
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
-
-		if (vbl_ev->enable)
-			kms->funcs->enable_vblank(kms,
-						priv->crtcs[vbl_ev->crtc_id]);
-		else
-			kms->funcs->disable_vblank(kms,
-						priv->crtcs[vbl_ev->crtc_id]);
 
-		kfree(vbl_ev);
-
-		spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	}
+	if (vbl_work->enable)
+		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
+	else
+		kms->funcs->disable_vblank(kms,	priv->crtcs[vbl_work->crtc_id]);
 
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	kfree(vbl_work);
 }
 
 static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
 					int crtc_id, bool enable)
 {
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev;
-	unsigned long flags;
+	struct msm_vblank_work *vbl_work;
 
-	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
-	if (!vbl_ev)
+	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
+	if (!vbl_work)
 		return -ENOMEM;
 
-	vbl_ev->crtc_id = crtc_id;
-	vbl_ev->enable = enable;
+	kthread_init_work(&vbl_work->work, vblank_ctrl_worker);
 
-	spin_lock_irqsave(&vbl_ctrl->lock, flags);
-	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+	vbl_work->crtc_id = crtc_id;
+	vbl_work->enable = enable;
+	vbl_work->priv = priv;
 
-	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
+	kthread_queue_work(&priv->disp_thread.worker, &vbl_work->work);
 
 	return 0;
 }
@@ -269,20 +252,8 @@ static int msm_drm_uninit(struct device *dev)
 	struct msm_drm_private *priv = ddev->dev_private;
 	struct msm_kms *kms = priv->kms;
 	struct msm_mdss *mdss = priv->mdss;
-	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
-	struct vblank_event *vbl_ev, *tmp;
 	int i;
 
-	/* We must cancel and cleanup any pending vblank enable/disable
-	 * work before drm_irq_uninstall() to avoid work re-enabling an
-	 * irq after uninstall has disabled it.
-	 */
-	kthread_flush_work(&vbl_ctrl->work);
-	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
-		list_del(&vbl_ev->node);
-		kfree(vbl_ev);
-	}
-
 	kthread_flush_worker(&priv->disp_thread.worker);
 	kthread_stop(priv->disp_thread.thread);
 	priv->disp_thread.thread = NULL;
@@ -474,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	priv->wq = alloc_ordered_workqueue("msm", 0);
 
 	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
-	spin_lock_init(&priv->vblank_ctrl.lock);
 
 	drm_mode_config_init(ddev);
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e81b1fa..b91e306 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
 	PLANE_PROP_MAX_NUM
 };
 
-struct msm_vblank_ctrl {
-	struct kthread_work work;
-	struct list_head event_list;
-	spinlock_t lock;
-};
-
 #define MSM_GPU_MAX_RINGS 4
 #define MAX_H_TILES_PER_DISPLAY 2
 
@@ -226,7 +220,6 @@ struct msm_drm_private {
 	struct notifier_block vmap_notifier;
 	struct shrinker shrinker;
 
-	struct msm_vblank_ctrl vblank_ctrl;
 	struct drm_atomic_state *pm_state;
 };
 
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project


* Re: [PATCH 1/2] drm/msm: use common display thread for dispatching vblank events
       [not found] ` <1541031545-20520-1-git-send-email-jsanka-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
  2018-11-01  0:19   ` [PATCH 2/2] drm/msm: subclass work object for " Jeykumar Sankaran
@ 2018-11-01 19:09   ` Sean Paul
  2018-11-02 23:16     ` [Freedreno] " Jeykumar Sankaran
  2018-11-08 22:23     ` Jeykumar Sankaran
  2018-11-01 19:44   ` Jordan Crouse
  2 siblings, 2 replies; 11+ messages in thread
From: Sean Paul @ 2018-11-01 19:09 UTC (permalink / raw)
  To: Jeykumar Sankaran
  Cc: linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On Wed, Oct 31, 2018 at 05:19:04PM -0700, Jeykumar Sankaran wrote:
> DPU was using one thread per display to dispatch async
> commits and vblank requests. Since clean up already happened
> in msm to use the common thread for all the display commits,
> display threads are only used to cater vblank requests. Single
> thread is sufficient to do the job without any performance hits.
> 
> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
> ---
>  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c |  6 +---
>  drivers/gpu/drm/msm/msm_drv.c               | 50 ++++++++++++-----------------
>  drivers/gpu/drm/msm/msm_drv.h               |  2 +-
>  3 files changed, 23 insertions(+), 35 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> index 82c55ef..aff20f5 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> @@ -753,11 +753,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
>  	is_vid_mode = dpu_enc->disp_info.capabilities &
>  						MSM_DISPLAY_CAP_VID_MODE;
>  
> -	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
> -		DPU_ERROR("invalid crtc index\n");
> -		return -EINVAL;
> -	}
> -	disp_thread = &priv->disp_thread[drm_enc->crtc->index];
> +	disp_thread = &priv->disp_thread;
>  
>  	/*
>  	 * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index 9c9f7ff..1f384b3 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -257,8 +257,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>  	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>  	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>  
> -	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
> -			&vbl_ctrl->work);
> +	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
>  
>  	return 0;
>  }
> @@ -284,14 +283,12 @@ static int msm_drm_uninit(struct device *dev)
>  		kfree(vbl_ev);
>  	}
>  
> +	kthread_flush_worker(&priv->disp_thread.worker);
> +	kthread_stop(priv->disp_thread.thread);

I realize this is moving existing code, but is there a race here? You can't have
work enqueued in between the flush and stop?

You might also want to use kthread_destroy_worker to do this work (in a
follow-up patch including the event threads too).
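
A rough sketch of what that could look like, assuming the worker were
allocated with kthread_create_worker() rather than embedded in
msm_drm_private (kthread_destroy_worker() frees the worker it is given,
so the field would need to become a pointer, e.g. a hypothetical
priv->disp_worker):

	/* init: create the worker and its backing kthread in one call */
	priv->disp_worker = kthread_create_worker(0, "disp_thread");
	if (IS_ERR(priv->disp_worker))
		goto err_msm_uninit;

	/* uninit: flushes pending work, stops the task, frees the worker */
	kthread_destroy_worker(priv->disp_worker);
	priv->disp_worker = NULL;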

> +	priv->disp_thread.thread = NULL;
> +
>  	/* clean up display commit/event worker threads */

This comment needs updating now

>  	for (i = 0; i < priv->num_crtcs; i++) {
> -		if (priv->disp_thread[i].thread) {
> -			kthread_flush_worker(&priv->disp_thread[i].worker);
> -			kthread_stop(priv->disp_thread[i].thread);
> -			priv->disp_thread[i].thread = NULL;
> -		}
> -
>  		if (priv->event_thread[i].thread) {
>  			kthread_flush_worker(&priv->event_thread[i].worker);
>  			kthread_stop(priv->event_thread[i].thread);
> @@ -537,6 +534,22 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
>  	ddev->mode_config.funcs = &mode_config_funcs;
>  	ddev->mode_config.helper_private = &mode_config_helper_funcs;
>  
> +	/* initialize display thread */
> +	kthread_init_worker(&priv->disp_thread.worker);
> +	priv->disp_thread.dev = ddev;
> +	priv->disp_thread.thread = kthread_run(kthread_worker_fn,
> +					       &priv->disp_thread.worker,
> +					       "disp_thread");
> +	if (IS_ERR(priv->disp_thread.thread)) {
> +		DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
> +		priv->disp_thread.thread = NULL;
> +		goto err_msm_uninit;
> +	}
> +
> +	ret = sched_setscheduler(priv->disp_thread.thread, SCHED_FIFO, &param);
> +	if (ret)
> +		pr_warn("display thread priority update failed: %d\n", ret);
> +
>  	/**
>  	 * this priority was found during empiric testing to have appropriate
>  	 * realtime scheduling to process display updates and interact with
> @@ -544,27 +557,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
>  	 */
>  	param.sched_priority = 16;
>  	for (i = 0; i < priv->num_crtcs; i++) {
> -
> -		/* initialize display thread */
> -		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
> -		kthread_init_worker(&priv->disp_thread[i].worker);
> -		priv->disp_thread[i].dev = ddev;
> -		priv->disp_thread[i].thread =
> -			kthread_run(kthread_worker_fn,
> -				&priv->disp_thread[i].worker,
> -				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
> -		if (IS_ERR(priv->disp_thread[i].thread)) {
> -			DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
> -			priv->disp_thread[i].thread = NULL;
> -			goto err_msm_uninit;
> -		}
> -
> -		ret = sched_setscheduler(priv->disp_thread[i].thread,
> -					 SCHED_FIFO, &param);
> -		if (ret)
> -			dev_warn(dev, "disp_thread set priority failed: %d\n",
> -				 ret);
> -
>  		/* initialize event thread */
>  		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
>  		kthread_init_worker(&priv->event_thread[i].worker);
> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> index 9d11f32..e81b1fa 100644
> --- a/drivers/gpu/drm/msm/msm_drv.h
> +++ b/drivers/gpu/drm/msm/msm_drv.h
> @@ -197,7 +197,7 @@ struct msm_drm_private {
>  	unsigned int num_crtcs;
>  	struct drm_crtc *crtcs[MAX_CRTCS];
>  
> -	struct msm_drm_thread disp_thread[MAX_CRTCS];
> +	struct msm_drm_thread disp_thread;
>  	struct msm_drm_thread event_thread[MAX_CRTCS];
>  
>  	unsigned int num_encoders;
> -- 
> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
> a Linux Foundation Collaborative Project
> 
> _______________________________________________
> Freedreno mailing list
> Freedreno@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/freedreno

-- 
Sean Paul, Software Engineer, Google / Chromium OS

* Re: [PATCH 2/2] drm/msm: subclass work object for vblank events
       [not found]     ` <1541031545-20520-2-git-send-email-jsanka-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
@ 2018-11-01 19:18       ` Sean Paul
  2018-11-02 23:38         ` Jeykumar Sankaran
  0 siblings, 1 reply; 11+ messages in thread
From: Sean Paul @ 2018-11-01 19:18 UTC (permalink / raw)
  To: Jeykumar Sankaran
  Cc: linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On Wed, Oct 31, 2018 at 05:19:05PM -0700, Jeykumar Sankaran wrote:
> msm maintains a separate structure to define vblank
> work definitions and a list to track events submitted
> to the display worker thread. We can avoid these
> redundant list and its protection mechanism, if we
> subclass the work object to encapsulate vblank
> event parameters.
> 
> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
> ---
>  drivers/gpu/drm/msm/msm_drv.c | 70 ++++++++++++-------------------------------
>  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>  2 files changed, 19 insertions(+), 58 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index 1f384b3..67a96ee 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
>  	return val;
>  }
>  
> -struct vblank_event {
> -	struct list_head node;
> +struct msm_vblank_work {
> +	struct kthread_work work;
>  	int crtc_id;
>  	bool enable;
> +	struct msm_drm_private *priv;
>  };
>  
>  static void vblank_ctrl_worker(struct kthread_work *work)
>  {
> -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
> -						struct msm_vblank_ctrl, work);
> -	struct msm_drm_private *priv = container_of(vbl_ctrl,
> -					struct msm_drm_private, vblank_ctrl);
> +	struct msm_vblank_work *vbl_work = container_of(work,
> +						struct msm_vblank_work, work);
> +	struct msm_drm_private *priv = vbl_work->priv;
>  	struct msm_kms *kms = priv->kms;
> -	struct vblank_event *vbl_ev, *tmp;
> -	unsigned long flags;
> -
> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
> -		list_del(&vbl_ev->node);
> -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> -
> -		if (vbl_ev->enable)
> -			kms->funcs->enable_vblank(kms,
> -						priv->crtcs[vbl_ev->crtc_id]);
> -		else
> -			kms->funcs->disable_vblank(kms,
> -						priv->crtcs[vbl_ev->crtc_id]);
>  
> -		kfree(vbl_ev);
> -
> -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	}
> +	if (vbl_work->enable)
> +		kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]);
> +	else
> +		kms->funcs->disable_vblank(kms,	priv->crtcs[vbl_work->crtc_id]);
>  
> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> +	kfree(vbl_work);
>  }
>  
>  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>  					int crtc_id, bool enable)
>  {
> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> -	struct vblank_event *vbl_ev;
> -	unsigned long flags;
> +	struct msm_vblank_work *vbl_work;
>  
> -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
> -	if (!vbl_ev)
> +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
> +	if (!vbl_work)
>  		return -ENOMEM;
>  
> -	vbl_ev->crtc_id = crtc_id;
> -	vbl_ev->enable = enable;
> +	kthread_init_work(&vbl_work->work, vblank_ctrl_worker);
>  
> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> +	vbl_work->crtc_id = crtc_id;
> +	vbl_work->enable = enable;
> +	vbl_work->priv = priv;
>  
> -	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
> +	kthread_queue_work(&priv->disp_thread.worker, &vbl_work->work);

So I think this can get even more simplified. In the short term, you can just
use the systemwq to do the enable and disable.

In the long term, the enable_vblank/disable_vblank functions should be
optimized so they don't sleep. I took a quick look at them; perhaps this is
all because of the crtc_lock mutex? That lock seems a bit suspicious to me,
especially being dropped around the pm_runtime calls in
_dpu_crtc_vblank_enable_no_lock(). I think we could probably rely on the modeset
locks for some of these functions, and perhaps convert it to a spinlock if we
can't get rid of it entirely.
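
For the short-term option, a rough sketch of how the system workqueue
version could look (illustrative names, untested):

	struct msm_vblank_work {
		struct work_struct work;
		int crtc_id;
		bool enable;
		struct msm_drm_private *priv;
	};

	static void msm_vblank_work_fn(struct work_struct *w)
	{
		struct msm_vblank_work *vbl_work =
			container_of(w, struct msm_vblank_work, work);
		struct msm_drm_private *priv = vbl_work->priv;
		struct msm_kms *kms = priv->kms;

		if (vbl_work->enable)
			kms->funcs->enable_vblank(kms,
					priv->crtcs[vbl_work->crtc_id]);
		else
			kms->funcs->disable_vblank(kms,
					priv->crtcs[vbl_work->crtc_id]);

		kfree(vbl_work);
	}

	/* in vblank_ctrl_queue_work(), instead of kthread_queue_work(): */
	INIT_WORK(&vbl_work->work, msm_vblank_work_fn);
	schedule_work(&vbl_work->work);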

Sean

>  
>  	return 0;
>  }
> @@ -269,20 +252,8 @@ static int msm_drm_uninit(struct device *dev)
>  	struct msm_drm_private *priv = ddev->dev_private;
>  	struct msm_kms *kms = priv->kms;
>  	struct msm_mdss *mdss = priv->mdss;
> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> -	struct vblank_event *vbl_ev, *tmp;
>  	int i;
>  
> -	/* We must cancel and cleanup any pending vblank enable/disable
> -	 * work before drm_irq_uninstall() to avoid work re-enabling an
> -	 * irq after uninstall has disabled it.
> -	 */
> -	kthread_flush_work(&vbl_ctrl->work);
> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
> -		list_del(&vbl_ev->node);
> -		kfree(vbl_ev);
> -	}
> -
>  	kthread_flush_worker(&priv->disp_thread.worker);
>  	kthread_stop(priv->disp_thread.thread);
>  	priv->disp_thread.thread = NULL;
> @@ -474,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
>  	priv->wq = alloc_ordered_workqueue("msm", 0);
>  
>  	INIT_LIST_HEAD(&priv->inactive_list);
> -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
> -	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
> -	spin_lock_init(&priv->vblank_ctrl.lock);
>  
>  	drm_mode_config_init(ddev);
>  
> diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
> index e81b1fa..b91e306 100644
> --- a/drivers/gpu/drm/msm/msm_drv.h
> +++ b/drivers/gpu/drm/msm/msm_drv.h
> @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>  	PLANE_PROP_MAX_NUM
>  };
>  
> -struct msm_vblank_ctrl {
> -	struct kthread_work work;
> -	struct list_head event_list;
> -	spinlock_t lock;
> -};
> -
>  #define MSM_GPU_MAX_RINGS 4
>  #define MAX_H_TILES_PER_DISPLAY 2
>  
> @@ -226,7 +220,6 @@ struct msm_drm_private {
>  	struct notifier_block vmap_notifier;
>  	struct shrinker shrinker;
>  
> -	struct msm_vblank_ctrl vblank_ctrl;
>  	struct drm_atomic_state *pm_state;
>  };
>  
> -- 
> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
> a Linux Foundation Collaborative Project
> 
> _______________________________________________
> Freedreno mailing list
> Freedreno@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/freedreno

-- 
Sean Paul, Software Engineer, Google / Chromium OS

* Re: [PATCH 1/2] drm/msm: use common display thread for dispatching vblank events
       [not found] ` <1541031545-20520-1-git-send-email-jsanka-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
  2018-11-01  0:19   ` [PATCH 2/2] drm/msm: subclass work object for " Jeykumar Sankaran
  2018-11-01 19:09   ` [PATCH 1/2] drm/msm: use common display thread for dispatching " Sean Paul
@ 2018-11-01 19:44   ` Jordan Crouse
  2 siblings, 0 replies; 11+ messages in thread
From: Jordan Crouse @ 2018-11-01 19:44 UTC (permalink / raw)
  To: Jeykumar Sankaran
  Cc: linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On Wed, Oct 31, 2018 at 05:19:04PM -0700, Jeykumar Sankaran wrote:
> DPU was using one thread per display to dispatch async
> commits and vblank requests. Since clean up already happened
> in msm to use the common thread for all the display commits,
> display threads are only used to cater vblank requests. Single
> thread is sufficient to do the job without any performance hits.
> 
> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
> ---
>  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c |  6 +---
>  drivers/gpu/drm/msm/msm_drv.c               | 50 ++++++++++++-----------------
>  drivers/gpu/drm/msm/msm_drv.h               |  2 +-
>  3 files changed, 23 insertions(+), 35 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> index 82c55ef..aff20f5 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> @@ -753,11 +753,7 @@ static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
>  	is_vid_mode = dpu_enc->disp_info.capabilities &
>  						MSM_DISPLAY_CAP_VID_MODE;
>  
> -	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
> -		DPU_ERROR("invalid crtc index\n");
> -		return -EINVAL;
> -	}
> -	disp_thread = &priv->disp_thread[drm_enc->crtc->index];
> +	disp_thread = &priv->disp_thread;
>  
>  	/*
>  	 * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> index 9c9f7ff..1f384b3 100644
> --- a/drivers/gpu/drm/msm/msm_drv.c
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -257,8 +257,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>  	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>  	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>  
> -	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
> -			&vbl_ctrl->work);
> +	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
>  
>  	return 0;
>  }
> @@ -284,14 +283,12 @@ static int msm_drm_uninit(struct device *dev)
>  		kfree(vbl_ev);
>  	}
>  
> +	kthread_flush_worker(&priv->disp_thread.worker);
> +	kthread_stop(priv->disp_thread.thread);
> +	priv->disp_thread.thread = NULL;
> +

kthread doesn't check for NULL or error - you'll want an
if (!IS_ERR(priv->disp_thread.worker)) check for this block.
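
Something along these lines, assuming msm_drm_init() leaves the error
value in place and the check is done on the thread pointer (which is
what kthread_run() actually returns):

	if (!IS_ERR_OR_NULL(priv->disp_thread.thread)) {
		kthread_flush_worker(&priv->disp_thread.worker);
		kthread_stop(priv->disp_thread.thread);
		priv->disp_thread.thread = NULL;
	}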

>  	/* clean up display commit/event worker threads */
>  	for (i = 0; i < priv->num_crtcs; i++) {
> -		if (priv->disp_thread[i].thread) {
> -			kthread_flush_worker(&priv->disp_thread[i].worker);
> -			kthread_stop(priv->disp_thread[i].thread);
> -			priv->disp_thread[i].thread = NULL;
> -		}
> -
>  		if (priv->event_thread[i].thread) {
>  			kthread_flush_worker(&priv->event_thread[i].worker);
>  			kthread_stop(priv->event_thread[i].thread);
> @@ -537,6 +534,22 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
>  	ddev->mode_config.funcs = &mode_config_funcs;
>  	ddev->mode_config.helper_private = &mode_config_helper_funcs;
>  
> +	/* initialize display thread */
> +	kthread_init_worker(&priv->disp_thread.worker);
> +	priv->disp_thread.dev = ddev;
> +	priv->disp_thread.thread = kthread_run(kthread_worker_fn,
> +					       &priv->disp_thread.worker,
> +					       "disp_thread");

This name should be more descriptive for the driver - disp_thread is a bit
generic.

> +	if (IS_ERR(priv->disp_thread.thread)) {
> +		DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
> +		priv->disp_thread.thread = NULL;

You don't need this if you check for !IS_ERR in msm_drm_uninit.

> +		goto err_msm_uninit;
> +	}
> +
> +	ret = sched_setscheduler(priv->disp_thread.thread, SCHED_FIFO, &param);
> +	if (ret)
> +		pr_warn("display thread priority update failed: %d\n", ret);
> +

<snip>

Jordan

-- 
The Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
a Linux Foundation Collaborative Project

* Re: [Freedreno] [PATCH 1/2] drm/msm: use common display thread for dispatching vblank events
  2018-11-01 19:09   ` [PATCH 1/2] drm/msm: use common display thread for dispatching " Sean Paul
@ 2018-11-02 23:16     ` Jeykumar Sankaran
  2018-11-08 22:23     ` Jeykumar Sankaran
  1 sibling, 0 replies; 11+ messages in thread
From: Jeykumar Sankaran @ 2018-11-02 23:16 UTC (permalink / raw)
  To: Sean Paul; +Cc: linux-arm-msm, dri-devel, seanpaul, hoegsberg, freedreno

On 2018-11-01 12:09, Sean Paul wrote:
> On Wed, Oct 31, 2018 at 05:19:04PM -0700, Jeykumar Sankaran wrote:
>> DPU was using one thread per display to dispatch async
>> commits and vblank requests. Since clean up already happened
>> in msm to use the common thread for all the display commits,
>> display threads are only used to cater vblank requests. Single
>> thread is sufficient to do the job without any performance hits.
>> 
>> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
>> ---
>>  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c |  6 +---
>>  drivers/gpu/drm/msm/msm_drv.c               | 50
> ++++++++++++-----------------
>>  drivers/gpu/drm/msm/msm_drv.h               |  2 +-
>>  3 files changed, 23 insertions(+), 35 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
>> index 82c55ef..aff20f5 100644
>> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
>> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
>> @@ -753,11 +753,7 @@ static int dpu_encoder_resource_control(struct
> drm_encoder *drm_enc,
>>  	is_vid_mode = dpu_enc->disp_info.capabilities &
>>  						MSM_DISPLAY_CAP_VID_MODE;
>> 
>> -	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
>> -		DPU_ERROR("invalid crtc index\n");
>> -		return -EINVAL;
>> -	}
>> -	disp_thread = &priv->disp_thread[drm_enc->crtc->index];
>> +	disp_thread = &priv->disp_thread;
>> 
>>  	/*
>>  	 * when idle_pc is not supported, process only KICKOFF, STOP and
> MODESET
>> diff --git a/drivers/gpu/drm/msm/msm_drv.c
> b/drivers/gpu/drm/msm/msm_drv.c
>> index 9c9f7ff..1f384b3 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.c
>> +++ b/drivers/gpu/drm/msm/msm_drv.c
>> @@ -257,8 +257,7 @@ static int vblank_ctrl_queue_work(struct
> msm_drm_private *priv,
>>  	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>>  	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> 
>> -	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
>> -			&vbl_ctrl->work);
>> +	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
>> 
>>  	return 0;
>>  }
>> @@ -284,14 +283,12 @@ static int msm_drm_uninit(struct device *dev)
>>  		kfree(vbl_ev);
>>  	}
>> 
>> +	kthread_flush_worker(&priv->disp_thread.worker);
>> +	kthread_stop(priv->disp_thread.thread);
> 
> I realize this is moving existing code, but is there a race here? You
> can't have
> work enqueued in between the flush and stop?
Yes. I see only priv->kms is checked before queuing the work.

I can move the thread cleanup after drm_mode_config_cleanup(), which
releases the CRTC objects. That way no one can make any further vblank
requests.
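
Roughly, in msm_drm_uninit() (ordering sketch only, untested):

	drm_mode_config_cleanup(ddev);	/* CRTCs gone, no new vblank work */

	kthread_flush_worker(&priv->disp_thread.worker);
	kthread_stop(priv->disp_thread.thread);
	priv->disp_thread.thread = NULL;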

Thanks and Regards,
Jeykumar S.
> 
> You might also want to use kthread_destroy_worker to do this work (in a
> follow-up patch including the event threads too).
> 
>> +	priv->disp_thread.thread = NULL;
>> +
>>  	/* clean up display commit/event worker threads */
> 
> This comment needs updating now
> 
>>  	for (i = 0; i < priv->num_crtcs; i++) {
>> -		if (priv->disp_thread[i].thread) {
>> -
> kthread_flush_worker(&priv->disp_thread[i].worker);
>> -			kthread_stop(priv->disp_thread[i].thread);
>> -			priv->disp_thread[i].thread = NULL;
>> -		}
>> -
>>  		if (priv->event_thread[i].thread) {
>> 
> kthread_flush_worker(&priv->event_thread[i].worker);
>>  			kthread_stop(priv->event_thread[i].thread);
>> @@ -537,6 +534,22 @@ static int msm_drm_init(struct device *dev, 
>> struct
> drm_driver *drv)
>>  	ddev->mode_config.funcs = &mode_config_funcs;
>>  	ddev->mode_config.helper_private = &mode_config_helper_funcs;
>> 
>> +	/* initialize display thread */
>> +	kthread_init_worker(&priv->disp_thread.worker);
>> +	priv->disp_thread.dev = ddev;
>> +	priv->disp_thread.thread = kthread_run(kthread_worker_fn,
>> +					       &priv->disp_thread.worker,
>> +					       "disp_thread");
>> +	if (IS_ERR(priv->disp_thread.thread)) {
>> +		DRM_DEV_ERROR(dev, "failed to create crtc_commit
> kthread\n");
>> +		priv->disp_thread.thread = NULL;
>> +		goto err_msm_uninit;
>> +	}
>> +
>> +	ret = sched_setscheduler(priv->disp_thread.thread, SCHED_FIFO,
> &param);
>> +	if (ret)
>> +		pr_warn("display thread priority update failed: %d\n",
> ret);
>> +
>>  	/**
>>  	 * this priority was found during empiric testing to have
> appropriate
>>  	 * realtime scheduling to process display updates and interact
> with
>> @@ -544,27 +557,6 @@ static int msm_drm_init(struct device *dev, 
>> struct
> drm_driver *drv)
>>  	 */
>>  	param.sched_priority = 16;
>>  	for (i = 0; i < priv->num_crtcs; i++) {
>> -
>> -		/* initialize display thread */
>> -		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
>> -		kthread_init_worker(&priv->disp_thread[i].worker);
>> -		priv->disp_thread[i].dev = ddev;
>> -		priv->disp_thread[i].thread =
>> -			kthread_run(kthread_worker_fn,
>> -				&priv->disp_thread[i].worker,
>> -				"crtc_commit:%d",
> priv->disp_thread[i].crtc_id);
>> -		if (IS_ERR(priv->disp_thread[i].thread)) {
>> -			DRM_DEV_ERROR(dev, "failed to create crtc_commit
> kthread\n");
>> -			priv->disp_thread[i].thread = NULL;
>> -			goto err_msm_uninit;
>> -		}
>> -
>> -		ret = sched_setscheduler(priv->disp_thread[i].thread,
>> -					 SCHED_FIFO, &param);
>> -		if (ret)
>> -			dev_warn(dev, "disp_thread set priority failed:
> %d\n",
>> -				 ret);
>> -
>>  		/* initialize event thread */
>>  		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
>>  		kthread_init_worker(&priv->event_thread[i].worker);
>> diff --git a/drivers/gpu/drm/msm/msm_drv.h
> b/drivers/gpu/drm/msm/msm_drv.h
>> index 9d11f32..e81b1fa 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.h
>> +++ b/drivers/gpu/drm/msm/msm_drv.h
>> @@ -197,7 +197,7 @@ struct msm_drm_private {
>>  	unsigned int num_crtcs;
>>  	struct drm_crtc *crtcs[MAX_CRTCS];
>> 
>> -	struct msm_drm_thread disp_thread[MAX_CRTCS];
>> +	struct msm_drm_thread disp_thread;
>>  	struct msm_drm_thread event_thread[MAX_CRTCS];
>> 
>>  	unsigned int num_encoders;
>> --
>> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
> Forum,
>> a Linux Foundation Collaborative Project
>> 
>> _______________________________________________
>> Freedreno mailing list
>> Freedreno@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/freedreno

-- 
Jeykumar S

* Re: [PATCH 2/2] drm/msm: subclass work object for vblank events
  2018-11-01 19:18       ` Sean Paul
@ 2018-11-02 23:38         ` Jeykumar Sankaran
  2018-11-05 17:24           ` [Freedreno] " Sean Paul
  0 siblings, 1 reply; 11+ messages in thread
From: Jeykumar Sankaran @ 2018-11-02 23:38 UTC (permalink / raw)
  To: Sean Paul
  Cc: linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2018-11-01 12:18, Sean Paul wrote:
> On Wed, Oct 31, 2018 at 05:19:05PM -0700, Jeykumar Sankaran wrote:
>> msm maintains a separate structure to define vblank
>> work definitions and a list to track events submitted
>> to the display worker thread. We can avoid these
>> redundant list and its protection mechanism, if we
>> subclass the work object to encapsulate vblank
>> event parameters.
>> 
>> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
>> ---
>>  drivers/gpu/drm/msm/msm_drv.c | 70
> ++++++++++++-------------------------------
>>  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>>  2 files changed, 19 insertions(+), 58 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/msm/msm_drv.c
> b/drivers/gpu/drm/msm/msm_drv.c
>> index 1f384b3..67a96ee 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.c
>> +++ b/drivers/gpu/drm/msm/msm_drv.c
>> @@ -203,61 +203,44 @@ u32 msm_readl(const void __iomem *addr)
>>  	return val;
>>  }
>> 
>> -struct vblank_event {
>> -	struct list_head node;
>> +struct msm_vblank_work {
>> +	struct kthread_work work;
>>  	int crtc_id;
>>  	bool enable;
>> +	struct msm_drm_private *priv;
>>  };
>> 
>>  static void vblank_ctrl_worker(struct kthread_work *work)
>>  {
>> -	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
>> -						struct msm_vblank_ctrl,
> work);
>> -	struct msm_drm_private *priv = container_of(vbl_ctrl,
>> -					struct msm_drm_private,
> vblank_ctrl);
>> +	struct msm_vblank_work *vbl_work = container_of(work,
>> +						struct msm_vblank_work,
> work);
>> +	struct msm_drm_private *priv = vbl_work->priv;
>>  	struct msm_kms *kms = priv->kms;
>> -	struct vblank_event *vbl_ev, *tmp;
>> -	unsigned long flags;
>> -
>> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node)
> {
>> -		list_del(&vbl_ev->node);
>> -		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> -
>> -		if (vbl_ev->enable)
>> -			kms->funcs->enable_vblank(kms,
>> -
> priv->crtcs[vbl_ev->crtc_id]);
>> -		else
>> -			kms->funcs->disable_vblank(kms,
>> -
> priv->crtcs[vbl_ev->crtc_id]);
>> 
>> -		kfree(vbl_ev);
>> -
>> -		spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> -	}
>> +	if (vbl_work->enable)
>> +		kms->funcs->enable_vblank(kms,
> priv->crtcs[vbl_work->crtc_id]);
>> +	else
>> +		kms->funcs->disable_vblank(kms,
> priv->crtcs[vbl_work->crtc_id]);
>> 
>> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> +	kfree(vbl_work);
>>  }
>> 
>>  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>>  					int crtc_id, bool enable)
>>  {
>> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> -	struct vblank_event *vbl_ev;
>> -	unsigned long flags;
>> +	struct msm_vblank_work *vbl_work;
>> 
>> -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>> -	if (!vbl_ev)
>> +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
>> +	if (!vbl_work)
>>  		return -ENOMEM;
>> 
>> -	vbl_ev->crtc_id = crtc_id;
>> -	vbl_ev->enable = enable;
>> +	kthread_init_work(&vbl_work->work, vblank_ctrl_worker);
>> 
>> -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>> -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> +	vbl_work->crtc_id = crtc_id;
>> +	vbl_work->enable = enable;
>> +	vbl_work->priv = priv;
>> 
>> -	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
>> +	kthread_queue_work(&priv->disp_thread.worker, &vbl_work->work);
> 
> So I think this can get even more simplified. In the short term, you 
> can
> just
> use the systemwq to do the enable and disable.

you mean priv->wq?

> 
> In the long term, the enable_vblank/disable_vblank functions should be
> optimized so they don't sleep. I took a quick look at them perhaps this 
> is
> all because of the crtc_lock mutex? That lock seems a bit suspicious to
> me,
> especially being dropped around the pm_runtime calls in
> _dpu_crtc_vblank_enable_no_lock(). I think we could probably rely on 
> the
> modeset
> locks for some of these functions, and perhaps convert it to a spinlock 
> if
> we
> can't get rid of it entirely.

crtc_lock has a history of usage in the downstream driver. It was
introduced to protect vblank variables when vblank requests were handled
in the user thread (not the display thread). When event threads were
introduced to receive encoder events, the lock was further expanded to
protect a few more variables. It was also needed to synchronize CRTC
accesses between debugfs dump calls and the display thread.

I would like to deal with this cleanup a bit later, once we lose these
extra threads.

Thanks and Regards,
Jeykumar S.

> 
> Sean
> 
>> 
>>  	return 0;
>>  }
>> @@ -269,20 +252,8 @@ static int msm_drm_uninit(struct device *dev)
>>  	struct msm_drm_private *priv = ddev->dev_private;
>>  	struct msm_kms *kms = priv->kms;
>>  	struct msm_mdss *mdss = priv->mdss;
>> -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> -	struct vblank_event *vbl_ev, *tmp;
>>  	int i;
>> 
>> -	/* We must cancel and cleanup any pending vblank enable/disable
>> -	 * work before drm_irq_uninstall() to avoid work re-enabling an
>> -	 * irq after uninstall has disabled it.
>> -	 */
>> -	kthread_flush_work(&vbl_ctrl->work);
>> -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node)
> {
>> -		list_del(&vbl_ev->node);
>> -		kfree(vbl_ev);
>> -	}
>> -
>>  	kthread_flush_worker(&priv->disp_thread.worker);
>>  	kthread_stop(priv->disp_thread.thread);
>>  	priv->disp_thread.thread = NULL;
>> @@ -474,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct
> drm_driver *drv)
>>  	priv->wq = alloc_ordered_workqueue("msm", 0);
>> 
>>  	INIT_LIST_HEAD(&priv->inactive_list);
>> -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
>> -	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
>> -	spin_lock_init(&priv->vblank_ctrl.lock);
>> 
>>  	drm_mode_config_init(ddev);
>> 
>> diff --git a/drivers/gpu/drm/msm/msm_drv.h
> b/drivers/gpu/drm/msm/msm_drv.h
>> index e81b1fa..b91e306 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.h
>> +++ b/drivers/gpu/drm/msm/msm_drv.h
>> @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>>  	PLANE_PROP_MAX_NUM
>>  };
>> 
>> -struct msm_vblank_ctrl {
>> -	struct kthread_work work;
>> -	struct list_head event_list;
>> -	spinlock_t lock;
>> -};
>> -
>>  #define MSM_GPU_MAX_RINGS 4
>>  #define MAX_H_TILES_PER_DISPLAY 2
>> 
>> @@ -226,7 +220,6 @@ struct msm_drm_private {
>>  	struct notifier_block vmap_notifier;
>>  	struct shrinker shrinker;
>> 
>> -	struct msm_vblank_ctrl vblank_ctrl;
>>  	struct drm_atomic_state *pm_state;
>>  };
>> 
>> --
>> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
> Forum,
>> a Linux Foundation Collaborative Project
>> 
>> _______________________________________________
>> Freedreno mailing list
>> Freedreno@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/freedreno

-- 
Jeykumar S

* Re: [Freedreno] [PATCH 2/2] drm/msm: subclass work object for vblank events
  2018-11-02 23:38         ` Jeykumar Sankaran
@ 2018-11-05 17:24           ` Sean Paul
  2018-11-05 21:23             ` Jeykumar Sankaran
  0 siblings, 1 reply; 11+ messages in thread
From: Sean Paul @ 2018-11-05 17:24 UTC (permalink / raw)
  To: Jeykumar Sankaran
  Cc: Sean Paul, dri-devel, seanpaul, linux-arm-msm, hoegsberg, freedreno

On Fri, Nov 02, 2018 at 04:38:48PM -0700, Jeykumar Sankaran wrote:
> On 2018-11-01 12:18, Sean Paul wrote:
> > On Wed, Oct 31, 2018 at 05:19:05PM -0700, Jeykumar Sankaran wrote:
> > > msm maintains a separate structure to define vblank
> > > work definitions and a list to track events submitted
> > > to the display worker thread. We can avoid these
> > > redundant list and its protection mechanism, if we
> > > subclass the work object to encapsulate vblank
> > > event parameters.
> > > 
> > > Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
> > > ---
> > >  drivers/gpu/drm/msm/msm_drv.c | 70
> > ++++++++++++-------------------------------
> > >  drivers/gpu/drm/msm/msm_drv.h |  7 -----
> > >  2 files changed, 19 insertions(+), 58 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/msm/msm_drv.c
> > b/drivers/gpu/drm/msm/msm_drv.c
> > > index 1f384b3..67a96ee 100644
> > > --- a/drivers/gpu/drm/msm/msm_drv.c
> > > +++ b/drivers/gpu/drm/msm/msm_drv.c

/snip

> > >  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
> > >  					int crtc_id, bool enable)
> > >  {
> > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> > > -	struct vblank_event *vbl_ev;
> > > -	unsigned long flags;
> > > +	struct msm_vblank_work *vbl_work;
> > > 
> > > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
> > > -	if (!vbl_ev)
> > > +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
> > > +	if (!vbl_work)
> > >  		return -ENOMEM;
> > > 
> > > -	vbl_ev->crtc_id = crtc_id;
> > > -	vbl_ev->enable = enable;
> > > +	kthread_init_work(&vbl_work->work, vblank_ctrl_worker);
> > > 
> > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
> > > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
> > > -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> > > +	vbl_work->crtc_id = crtc_id;
> > > +	vbl_work->enable = enable;
> > > +	vbl_work->priv = priv;
> > > 
> > > -	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
> > > +	kthread_queue_work(&priv->disp_thread.worker, &vbl_work->work);
> > 
> > So I think this can get even more simplified. In the short term, you can
> > just
> > use the systemwq to do the enable and disable.
> 
> you mean priv->wq?
> 

I meant the system workqueue; we probably don't need our own for this.


> > 
> > In the long term, the enable_vblank/disable_vblank functions should be
> > optimized so they don't sleep. I took a quick look at them perhaps this
> > is
> > all because of the crtc_lock mutex? That lock seems a bit suspicious to
> > me,
> > especially being dropped around the pm_runtime calls in
> > _dpu_crtc_vblank_enable_no_lock(). I think we could probably rely on the
> > modeset
> > locks for some of these functions, and perhaps convert it to a spinlock
> > if
> > we
> > can't get rid of it entirely.
> 
> crtc_lock has a history of usage in the downstream driver. It was introduced
> to protect
> vblank variables when vblank requests were handled in the user thread
> (not the display thread). When event threads were introduced to receive
> encoder events, the lock was further expanded to protect few more vars. It
> was
> also needed to synchronize CRTC accesses between debugfs dump calls
> and display thread.

The debugfs case can be solved pretty easily by using the modeset locks. I
haven't looked closely at the event threads; could we convert crtc_lock to a
spinlock and then make vblank enable/disable synchronous?
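
For the debugfs side, the dump path could take the CRTC's modeset lock
instead of crtc_lock, roughly (illustrative only, not the actual DPU
debugfs code):

	static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
	{
		struct drm_crtc *crtc = s->private;

		drm_modeset_lock(&crtc->mutex, NULL);
		/* ... dump the state that crtc_lock currently protects ... */
		drm_modeset_unlock(&crtc->mutex);

		return 0;
	}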

Sean

> 
> Would like to deal with this cleanup bit later once we lose these extra
> threads.
> 
> Thanks and Regards,
> Jeykumar S.
> 
> > 
> > Sean
> > 
> > > 
> > >  	return 0;
> > >  }
> > > @@ -269,20 +252,8 @@ static int msm_drm_uninit(struct device *dev)
> > >  	struct msm_drm_private *priv = ddev->dev_private;
> > >  	struct msm_kms *kms = priv->kms;
> > >  	struct msm_mdss *mdss = priv->mdss;
> > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
> > > -	struct vblank_event *vbl_ev, *tmp;
> > >  	int i;
> > > 
> > > -	/* We must cancel and cleanup any pending vblank enable/disable
> > > -	 * work before drm_irq_uninstall() to avoid work re-enabling an
> > > -	 * irq after uninstall has disabled it.
> > > -	 */
> > > -	kthread_flush_work(&vbl_ctrl->work);
> > > -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node)
> > {
> > > -		list_del(&vbl_ev->node);
> > > -		kfree(vbl_ev);
> > > -	}
> > > -
> > >  	kthread_flush_worker(&priv->disp_thread.worker);
> > >  	kthread_stop(priv->disp_thread.thread);
> > >  	priv->disp_thread.thread = NULL;
> > > @@ -474,9 +445,6 @@ static int msm_drm_init(struct device *dev, struct
> > drm_driver *drv)
> > >  	priv->wq = alloc_ordered_workqueue("msm", 0);
> > > 
> > >  	INIT_LIST_HEAD(&priv->inactive_list);
> > > -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
> > > -	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
> > > -	spin_lock_init(&priv->vblank_ctrl.lock);
> > > 
> > >  	drm_mode_config_init(ddev);
> > > 
> > > diff --git a/drivers/gpu/drm/msm/msm_drv.h
> > b/drivers/gpu/drm/msm/msm_drv.h
> > > index e81b1fa..b91e306 100644
> > > --- a/drivers/gpu/drm/msm/msm_drv.h
> > > +++ b/drivers/gpu/drm/msm/msm_drv.h
> > > @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
> > >  	PLANE_PROP_MAX_NUM
> > >  };
> > > 
> > > -struct msm_vblank_ctrl {
> > > -	struct kthread_work work;
> > > -	struct list_head event_list;
> > > -	spinlock_t lock;
> > > -};
> > > -
> > >  #define MSM_GPU_MAX_RINGS 4
> > >  #define MAX_H_TILES_PER_DISPLAY 2
> > > 
> > > @@ -226,7 +220,6 @@ struct msm_drm_private {
> > >  	struct notifier_block vmap_notifier;
> > >  	struct shrinker shrinker;
> > > 
> > > -	struct msm_vblank_ctrl vblank_ctrl;
> > >  	struct drm_atomic_state *pm_state;
> > >  };
> > > 
> > > --
> > > The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
> > Forum,
> > > a Linux Foundation Collaborative Project
> > > 
> > > _______________________________________________
> > > Freedreno mailing list
> > > Freedreno@lists.freedesktop.org
> > > https://lists.freedesktop.org/mailman/listinfo/freedreno
> 
> -- 
> Jeykumar S

-- 
Sean Paul, Software Engineer, Google / Chromium OS

* Re: [PATCH 2/2] drm/msm: subclass work object for vblank events
  2018-11-05 17:24           ` [Freedreno] " Sean Paul
@ 2018-11-05 21:23             ` Jeykumar Sankaran
  0 siblings, 0 replies; 11+ messages in thread
From: Jeykumar Sankaran @ 2018-11-05 21:23 UTC (permalink / raw)
  To: Sean Paul
  Cc: linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2018-11-05 09:24, Sean Paul wrote:
> On Fri, Nov 02, 2018 at 04:38:48PM -0700, Jeykumar Sankaran wrote:
>> On 2018-11-01 12:18, Sean Paul wrote:
>> > On Wed, Oct 31, 2018 at 05:19:05PM -0700, Jeykumar Sankaran wrote:
>> > > msm maintains a separate structure to define vblank
>> > > work definitions and a list to track events submitted
>> > > to the display worker thread. We can avoid these
>> > > redundant list and its protection mechanism, if we
>> > > subclass the work object to encapsulate vblank
>> > > event parameters.
>> > >
>> > > Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
>> > > ---
>> > >  drivers/gpu/drm/msm/msm_drv.c | 70
>> > ++++++++++++-------------------------------
>> > >  drivers/gpu/drm/msm/msm_drv.h |  7 -----
>> > >  2 files changed, 19 insertions(+), 58 deletions(-)
>> > >
>> > > diff --git a/drivers/gpu/drm/msm/msm_drv.c
>> > b/drivers/gpu/drm/msm/msm_drv.c
>> > > index 1f384b3..67a96ee 100644
>> > > --- a/drivers/gpu/drm/msm/msm_drv.c
>> > > +++ b/drivers/gpu/drm/msm/msm_drv.c
> 
> /snip
> 
>> > >  static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
>> > >  					int crtc_id, bool enable)
>> > >  {
>> > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> > > -	struct vblank_event *vbl_ev;
>> > > -	unsigned long flags;
>> > > +	struct msm_vblank_work *vbl_work;
>> > >
>> > > -	vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC);
>> > > -	if (!vbl_ev)
>> > > +	vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC);
>> > > +	if (!vbl_work)
>> > >  		return -ENOMEM;
>> > >
>> > > -	vbl_ev->crtc_id = crtc_id;
>> > > -	vbl_ev->enable = enable;
>> > > +	kthread_init_work(&vbl_work->work, vblank_ctrl_worker);
>> > >
>> > > -	spin_lock_irqsave(&vbl_ctrl->lock, flags);
>> > > -	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>> > > -	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> > > +	vbl_work->crtc_id = crtc_id;
>> > > +	vbl_work->enable = enable;
>> > > +	vbl_work->priv = priv;
>> > >
>> > > -	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
>> > > +	kthread_queue_work(&priv->disp_thread.worker, &vbl_work->work);
>> >
>> > So I think this can get even more simplified. In the short term, you
> can
>> > just
>> > use the systemwq to do the enable and disable.
>> 
>> you mean priv->wq?
>> 
> 
> I meant the system workqueue, we probably don't need our own for this.
> 
> 
>> >
>> > In the long term, the enable_vblank/disable_vblank functions should be
>> > optimized so they don't sleep. I took a quick look at them perhaps
> this
>> > is
>> > all because of the crtc_lock mutex? That lock seems a bit suspicious
> to
>> > me,
>> > especially being dropped around the pm_runtime calls in
>> > _dpu_crtc_vblank_enable_no_lock(). I think we could probably rely on
> the
>> > modeset
>> > locks for some of these functions, and perhaps convert it to a
> spinlock
>> > if
>> > we
>> > can't get rid of it entirely.
>> 
>> crtc_lock has a history of usage in the downstream driver. It was
> introduced
>> to protect
>> vblank variables when vblank requests were handled in the user thread
>> (not the display thread). When event threads were introduced to 
>> receive
>> encoder events, the lock was further expanded to protect few more 
>> vars.
> It
>> was
>> also needed to synchronize CRTC accesses between debugfs dump calls
>> and display thread.
> 
> The debugfs case can be solved pretty easily by using the modeset 
> locks. I
> haven't looked closely at the event threads, could we convert crtc_lock 
> to
> a
> spinlock and then make vblank enable/disable synchronous?
I did a little digging into why vblank enable/disable was made
asynchronous in the first place. It looks like Rob was also using
priv->wq to queue vblank requests before the display threads were
introduced by the DPU driver.

The only reason I can think of is to support smart panels, where we wait
for the CTL_START interrupt instead of PING_PONG_DONE, which is needed
for fence releases. I need to confirm the MDP5 behaviour with Rob before
switching to sync.

For now, I will submit a patch to use the system wq.
> 
> Sean
> 
>> 
>> Would like to deal with this cleanup bit later once we lose these 
>> extra
>> threads.
>> 
>> Thanks and Regards,
>> Jeykumar S.
>> 
>> >
>> > Sean
>> >
>> > >
>> > >  	return 0;
>> > >  }
>> > > @@ -269,20 +252,8 @@ static int msm_drm_uninit(struct device *dev)
>> > >  	struct msm_drm_private *priv = ddev->dev_private;
>> > >  	struct msm_kms *kms = priv->kms;
>> > >  	struct msm_mdss *mdss = priv->mdss;
>> > > -	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
>> > > -	struct vblank_event *vbl_ev, *tmp;
>> > >  	int i;
>> > >
>> > > -	/* We must cancel and cleanup any pending vblank enable/disable
>> > > -	 * work before drm_irq_uninstall() to avoid work re-enabling an
>> > > -	 * irq after uninstall has disabled it.
>> > > -	 */
>> > > -	kthread_flush_work(&vbl_ctrl->work);
>> > > -	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node)
>> > {
>> > > -		list_del(&vbl_ev->node);
>> > > -		kfree(vbl_ev);
>> > > -	}
>> > > -
>> > >  	kthread_flush_worker(&priv->disp_thread.worker);
>> > >  	kthread_stop(priv->disp_thread.thread);
>> > >  	priv->disp_thread.thread = NULL;
>> > > @@ -474,9 +445,6 @@ static int msm_drm_init(struct device *dev,
> struct
>> > drm_driver *drv)
>> > >  	priv->wq = alloc_ordered_workqueue("msm", 0);
>> > >
>> > >  	INIT_LIST_HEAD(&priv->inactive_list);
>> > > -	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
>> > > -	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
>> > > -	spin_lock_init(&priv->vblank_ctrl.lock);
>> > >
>> > >  	drm_mode_config_init(ddev);
>> > >
>> > > diff --git a/drivers/gpu/drm/msm/msm_drv.h
>> > b/drivers/gpu/drm/msm/msm_drv.h
>> > > index e81b1fa..b91e306 100644
>> > > --- a/drivers/gpu/drm/msm/msm_drv.h
>> > > +++ b/drivers/gpu/drm/msm/msm_drv.h
>> > > @@ -77,12 +77,6 @@ enum msm_mdp_plane_property {
>> > >  	PLANE_PROP_MAX_NUM
>> > >  };
>> > >
>> > > -struct msm_vblank_ctrl {
>> > > -	struct kthread_work work;
>> > > -	struct list_head event_list;
>> > > -	spinlock_t lock;
>> > > -};
>> > > -
>> > >  #define MSM_GPU_MAX_RINGS 4
>> > >  #define MAX_H_TILES_PER_DISPLAY 2
>> > >
>> > > @@ -226,7 +220,6 @@ struct msm_drm_private {
>> > >  	struct notifier_block vmap_notifier;
>> > >  	struct shrinker shrinker;
>> > >
>> > > -	struct msm_vblank_ctrl vblank_ctrl;
>> > >  	struct drm_atomic_state *pm_state;
>> > >  };
>> > >
>> > > --
>> > > The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
>> > Forum,
>> > > a Linux Foundation Collaborative Project
>> > >
>> > > _______________________________________________
>> > > Freedreno mailing list
>> > > Freedreno@lists.freedesktop.org
>> > > https://lists.freedesktop.org/mailman/listinfo/freedreno
>> 
>> --
>> Jeykumar S

-- 
Jeykumar S
_______________________________________________
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/2] drm/msm: use common display thread for dispatching vblank events
  2018-11-01 19:09   ` [PATCH 1/2] drm/msm: use common display thread for dispatching " Sean Paul
  2018-11-02 23:16     ` [Freedreno] " Jeykumar Sankaran
@ 2018-11-08 22:23     ` Jeykumar Sankaran
  2018-11-14 20:56       ` [Freedreno] " Sean Paul
  1 sibling, 1 reply; 11+ messages in thread
From: Jeykumar Sankaran @ 2018-11-08 22:23 UTC (permalink / raw)
  To: Sean Paul
  Cc: linux-arm-msm-u79uwXL29TY76Z2rM5mHXA,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	robdclark-Re5JQEeQqe8AvxtiuMwx3w,
	seanpaul-F7+t8E8rja9g9hUCZPvPmw,
	hoegsberg-hpIqsD4AKlfQT0dZR+AlfA,
	freedreno-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

On 2018-11-01 12:09, Sean Paul wrote:
> On Wed, Oct 31, 2018 at 05:19:04PM -0700, Jeykumar Sankaran wrote:
>> DPU was using one thread per display to dispatch async
>> commits and vblank requests. Since clean up already happened
>> in msm to use the common thread for all the display commits,
>> display threads are only used to cater vblank requests. Single
>> thread is sufficient to do the job without any performance hits.
>> 
>> Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
>> ---
>>  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c |  6 +---
>>  drivers/gpu/drm/msm/msm_drv.c               | 50
> ++++++++++++-----------------
>>  drivers/gpu/drm/msm/msm_drv.h               |  2 +-
>>  3 files changed, 23 insertions(+), 35 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
>> index 82c55ef..aff20f5 100644
>> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
>> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
>> @@ -753,11 +753,7 @@ static int dpu_encoder_resource_control(struct
> drm_encoder *drm_enc,
>>  	is_vid_mode = dpu_enc->disp_info.capabilities &
>>  						MSM_DISPLAY_CAP_VID_MODE;
>> 
>> -	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
>> -		DPU_ERROR("invalid crtc index\n");
>> -		return -EINVAL;
>> -	}
>> -	disp_thread = &priv->disp_thread[drm_enc->crtc->index];
>> +	disp_thread = &priv->disp_thread;
>> 
>>  	/*
>>  	 * when idle_pc is not supported, process only KICKOFF, STOP and
> MODESET
>> diff --git a/drivers/gpu/drm/msm/msm_drv.c
> b/drivers/gpu/drm/msm/msm_drv.c
>> index 9c9f7ff..1f384b3 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.c
>> +++ b/drivers/gpu/drm/msm/msm_drv.c
>> @@ -257,8 +257,7 @@ static int vblank_ctrl_queue_work(struct
> msm_drm_private *priv,
>>  	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
>>  	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
>> 
>> -	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
>> -			&vbl_ctrl->work);
>> +	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
>> 
>>  	return 0;
>>  }
>> @@ -284,14 +283,12 @@ static int msm_drm_uninit(struct device *dev)
>>  		kfree(vbl_ev);
>>  	}
>> 
>> +	kthread_flush_worker(&priv->disp_thread.worker);
>> +	kthread_stop(priv->disp_thread.thread);
> 
> I realize this is moving existing code, but is there a race here? You
> can't have
> work enqueued in between the flush and stop?
I looked further into this comment. Ideally, we call into msm_unbind
only when the device is released, and we release the device only on the
last close of the drm device. At that point userspace no longer has a
device handle to make ioctl calls that could queue work to this worker.
Since we make sure to flush out any work already on the queue, we can
safely call kthread_stop here.

Thanks and Regards,
Jeykumar S.

> 
> You might also want to use kthread_destroy_worker to do this work (in a
> follow-up patch including the event threads too).
> 
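Noted. One caveat with kthread_destroy_worker(): it also frees the
worker, so it pairs with kthread_create_worker() and would mean turning
the embedded disp_thread.worker into a pointer. A rough sketch of what
I have in mind for the follow-up (helper names are mine, just to
illustrate):

/* sketch only: assumes priv->disp_thread.worker becomes a
 * struct kthread_worker * allocated by kthread_create_worker().
 */
#include <linux/kthread.h>

static int msm_disp_worker_init(struct msm_drm_private *priv)
{
	struct kthread_worker *worker;

	worker = kthread_create_worker(0, "disp_thread");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	priv->disp_thread.worker = worker;
	return 0;
}

static void msm_disp_worker_fini(struct msm_drm_private *priv)
{
	/* flushes pending work, stops the kthread and frees the worker */
	kthread_destroy_worker(priv->disp_thread.worker);
	priv->disp_thread.worker = NULL;
}

The SCHED_FIFO tweak would then be applied to worker->task, and
disp_thread.thread could likely go away entirely.
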
>> +	priv->disp_thread.thread = NULL;
>> +
>>  	/* clean up display commit/event worker threads */
> 
> This comment needs updating now
> 
>>  	for (i = 0; i < priv->num_crtcs; i++) {
>> -		if (priv->disp_thread[i].thread) {
>> -
> kthread_flush_worker(&priv->disp_thread[i].worker);
>> -			kthread_stop(priv->disp_thread[i].thread);
>> -			priv->disp_thread[i].thread = NULL;
>> -		}
>> -
>>  		if (priv->event_thread[i].thread) {
>> 
> kthread_flush_worker(&priv->event_thread[i].worker);
>>  			kthread_stop(priv->event_thread[i].thread);
>> @@ -537,6 +534,22 @@ static int msm_drm_init(struct device *dev, 
>> struct
> drm_driver *drv)
>>  	ddev->mode_config.funcs = &mode_config_funcs;
>>  	ddev->mode_config.helper_private = &mode_config_helper_funcs;
>> 
>> +	/* initialize display thread */
>> +	kthread_init_worker(&priv->disp_thread.worker);
>> +	priv->disp_thread.dev = ddev;
>> +	priv->disp_thread.thread = kthread_run(kthread_worker_fn,
>> +					       &priv->disp_thread.worker,
>> +					       "disp_thread");
>> +	if (IS_ERR(priv->disp_thread.thread)) {
>> +		DRM_DEV_ERROR(dev, "failed to create crtc_commit
> kthread\n");
>> +		priv->disp_thread.thread = NULL;
>> +		goto err_msm_uninit;
>> +	}
>> +
>> +	ret = sched_setscheduler(priv->disp_thread.thread, SCHED_FIFO,
> &param);
>> +	if (ret)
>> +		pr_warn("display thread priority update failed: %d\n",
> ret);
>> +
>>  	/**
>>  	 * this priority was found during empiric testing to have
> appropriate
>>  	 * realtime scheduling to process display updates and interact
> with
>> @@ -544,27 +557,6 @@ static int msm_drm_init(struct device *dev, 
>> struct
> drm_driver *drv)
>>  	 */
>>  	param.sched_priority = 16;
>>  	for (i = 0; i < priv->num_crtcs; i++) {
>> -
>> -		/* initialize display thread */
>> -		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
>> -		kthread_init_worker(&priv->disp_thread[i].worker);
>> -		priv->disp_thread[i].dev = ddev;
>> -		priv->disp_thread[i].thread =
>> -			kthread_run(kthread_worker_fn,
>> -				&priv->disp_thread[i].worker,
>> -				"crtc_commit:%d",
> priv->disp_thread[i].crtc_id);
>> -		if (IS_ERR(priv->disp_thread[i].thread)) {
>> -			DRM_DEV_ERROR(dev, "failed to create crtc_commit
> kthread\n");
>> -			priv->disp_thread[i].thread = NULL;
>> -			goto err_msm_uninit;
>> -		}
>> -
>> -		ret = sched_setscheduler(priv->disp_thread[i].thread,
>> -					 SCHED_FIFO, &param);
>> -		if (ret)
>> -			dev_warn(dev, "disp_thread set priority failed:
> %d\n",
>> -				 ret);
>> -
>>  		/* initialize event thread */
>>  		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
>>  		kthread_init_worker(&priv->event_thread[i].worker);
>> diff --git a/drivers/gpu/drm/msm/msm_drv.h
> b/drivers/gpu/drm/msm/msm_drv.h
>> index 9d11f32..e81b1fa 100644
>> --- a/drivers/gpu/drm/msm/msm_drv.h
>> +++ b/drivers/gpu/drm/msm/msm_drv.h
>> @@ -197,7 +197,7 @@ struct msm_drm_private {
>>  	unsigned int num_crtcs;
>>  	struct drm_crtc *crtcs[MAX_CRTCS];
>> 
>> -	struct msm_drm_thread disp_thread[MAX_CRTCS];
>> +	struct msm_drm_thread disp_thread;
>>  	struct msm_drm_thread event_thread[MAX_CRTCS];
>> 
>>  	unsigned int num_encoders;
>> --
>> The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
> Forum,
>> a Linux Foundation Collaborative Project
>> 
>> _______________________________________________
>> Freedreno mailing list
>> Freedreno@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/freedreno

-- 
Jeykumar S
_______________________________________________
Freedreno mailing list
Freedreno@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/freedreno

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Freedreno] [PATCH 1/2] drm/msm: use common display thread for dispatching vblank events
  2018-11-08 22:23     ` Jeykumar Sankaran
@ 2018-11-14 20:56       ` Sean Paul
  0 siblings, 0 replies; 11+ messages in thread
From: Sean Paul @ 2018-11-14 20:56 UTC (permalink / raw)
  To: Jeykumar Sankaran
  Cc: Sean Paul, dri-devel, seanpaul, linux-arm-msm, hoegsberg, freedreno

On Thu, Nov 08, 2018 at 02:23:33PM -0800, Jeykumar Sankaran wrote:
> On 2018-11-01 12:09, Sean Paul wrote:
> > On Wed, Oct 31, 2018 at 05:19:04PM -0700, Jeykumar Sankaran wrote:
> > > DPU was using one thread per display to dispatch async
> > > commits and vblank requests. Since clean up already happened
> > > in msm to use the common thread for all the display commits,
> > > display threads are only used to cater vblank requests. Single
> > > thread is sufficient to do the job without any performance hits.
> > > 
> > > Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
> > > ---
> > >  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c |  6 +---
> > >  drivers/gpu/drm/msm/msm_drv.c               | 50
> > ++++++++++++-----------------
> > >  drivers/gpu/drm/msm/msm_drv.h               |  2 +-
> > >  3 files changed, 23 insertions(+), 35 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> > b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> > > index 82c55ef..aff20f5 100644
> > > --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> > > +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> > > @@ -753,11 +753,7 @@ static int dpu_encoder_resource_control(struct
> > drm_encoder *drm_enc,
> > >  	is_vid_mode = dpu_enc->disp_info.capabilities &
> > >  						MSM_DISPLAY_CAP_VID_MODE;
> > > 
> > > -	if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
> > > -		DPU_ERROR("invalid crtc index\n");
> > > -		return -EINVAL;
> > > -	}
> > > -	disp_thread = &priv->disp_thread[drm_enc->crtc->index];
> > > +	disp_thread = &priv->disp_thread;
> > > 
> > >  	/*
> > >  	 * when idle_pc is not supported, process only KICKOFF, STOP and
> > MODESET
> > > diff --git a/drivers/gpu/drm/msm/msm_drv.c
> > b/drivers/gpu/drm/msm/msm_drv.c
> > > index 9c9f7ff..1f384b3 100644
> > > --- a/drivers/gpu/drm/msm/msm_drv.c
> > > +++ b/drivers/gpu/drm/msm/msm_drv.c
> > > @@ -257,8 +257,7 @@ static int vblank_ctrl_queue_work(struct
> > msm_drm_private *priv,
> > >  	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
> > >  	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
> > > 
> > > -	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
> > > -			&vbl_ctrl->work);
> > > +	kthread_queue_work(&priv->disp_thread.worker, &vbl_ctrl->work);
> > > 
> > >  	return 0;
> > >  }
> > > @@ -284,14 +283,12 @@ static int msm_drm_uninit(struct device *dev)
> > >  		kfree(vbl_ev);
> > >  	}
> > > 
> > > +	kthread_flush_worker(&priv->disp_thread.worker);
> > > +	kthread_stop(priv->disp_thread.thread);
> > 
> > I realize this is moving existing code, but is there a race here? You
> > can't have
> > work enqueued in between the flush and stop?
> I looked further into this comment. Ideally, we call into msm_unbind only
> when
> the device is released and we release the device only on the last close of
> the drm device. So the userspace doesn't have any device handle to make
> ioctl calls, which could queue jobs to this queue. Since we are making sure
> to flush out the last job already on the queue, we can safely call the
> kthread_stop here.

Cool, thanks for digging into this!

Sean

> 
> Thanks and Regards,
> Jeykumar S.
> 
> > 
> > You might also want to use kthread_destroy_worker to do this work (in a
> > follow-up patch including the event threads too).
> > 
> > > +	priv->disp_thread.thread = NULL;
> > > +
> > >  	/* clean up display commit/event worker threads */
> > 
> > This comment needs updating now
> > 
> > >  	for (i = 0; i < priv->num_crtcs; i++) {
> > > -		if (priv->disp_thread[i].thread) {
> > > -
> > kthread_flush_worker(&priv->disp_thread[i].worker);
> > > -			kthread_stop(priv->disp_thread[i].thread);
> > > -			priv->disp_thread[i].thread = NULL;
> > > -		}
> > > -
> > >  		if (priv->event_thread[i].thread) {
> > > 
> > kthread_flush_worker(&priv->event_thread[i].worker);
> > >  			kthread_stop(priv->event_thread[i].thread);
> > > @@ -537,6 +534,22 @@ static int msm_drm_init(struct device *dev,
> > > struct
> > drm_driver *drv)
> > >  	ddev->mode_config.funcs = &mode_config_funcs;
> > >  	ddev->mode_config.helper_private = &mode_config_helper_funcs;
> > > 
> > > +	/* initialize display thread */
> > > +	kthread_init_worker(&priv->disp_thread.worker);
> > > +	priv->disp_thread.dev = ddev;
> > > +	priv->disp_thread.thread = kthread_run(kthread_worker_fn,
> > > +					       &priv->disp_thread.worker,
> > > +					       "disp_thread");
> > > +	if (IS_ERR(priv->disp_thread.thread)) {
> > > +		DRM_DEV_ERROR(dev, "failed to create crtc_commit
> > kthread\n");
> > > +		priv->disp_thread.thread = NULL;
> > > +		goto err_msm_uninit;
> > > +	}
> > > +
> > > +	ret = sched_setscheduler(priv->disp_thread.thread, SCHED_FIFO,
> > &param);
> > > +	if (ret)
> > > +		pr_warn("display thread priority update failed: %d\n",
> > ret);
> > > +
> > >  	/**
> > >  	 * this priority was found during empiric testing to have
> > appropriate
> > >  	 * realtime scheduling to process display updates and interact
> > with
> > > @@ -544,27 +557,6 @@ static int msm_drm_init(struct device *dev,
> > > struct
> > drm_driver *drv)
> > >  	 */
> > >  	param.sched_priority = 16;
> > >  	for (i = 0; i < priv->num_crtcs; i++) {
> > > -
> > > -		/* initialize display thread */
> > > -		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
> > > -		kthread_init_worker(&priv->disp_thread[i].worker);
> > > -		priv->disp_thread[i].dev = ddev;
> > > -		priv->disp_thread[i].thread =
> > > -			kthread_run(kthread_worker_fn,
> > > -				&priv->disp_thread[i].worker,
> > > -				"crtc_commit:%d",
> > priv->disp_thread[i].crtc_id);
> > > -		if (IS_ERR(priv->disp_thread[i].thread)) {
> > > -			DRM_DEV_ERROR(dev, "failed to create crtc_commit
> > kthread\n");
> > > -			priv->disp_thread[i].thread = NULL;
> > > -			goto err_msm_uninit;
> > > -		}
> > > -
> > > -		ret = sched_setscheduler(priv->disp_thread[i].thread,
> > > -					 SCHED_FIFO, &param);
> > > -		if (ret)
> > > -			dev_warn(dev, "disp_thread set priority failed:
> > %d\n",
> > > -				 ret);
> > > -
> > >  		/* initialize event thread */
> > >  		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
> > >  		kthread_init_worker(&priv->event_thread[i].worker);
> > > diff --git a/drivers/gpu/drm/msm/msm_drv.h
> > b/drivers/gpu/drm/msm/msm_drv.h
> > > index 9d11f32..e81b1fa 100644
> > > --- a/drivers/gpu/drm/msm/msm_drv.h
> > > +++ b/drivers/gpu/drm/msm/msm_drv.h
> > > @@ -197,7 +197,7 @@ struct msm_drm_private {
> > >  	unsigned int num_crtcs;
> > >  	struct drm_crtc *crtcs[MAX_CRTCS];
> > > 
> > > -	struct msm_drm_thread disp_thread[MAX_CRTCS];
> > > +	struct msm_drm_thread disp_thread;
> > >  	struct msm_drm_thread event_thread[MAX_CRTCS];
> > > 
> > >  	unsigned int num_encoders;
> > > --
> > > The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
> > Forum,
> > > a Linux Foundation Collaborative Project
> > > 
> > > _______________________________________________
> > > Freedreno mailing list
> > > Freedreno@lists.freedesktop.org
> > > https://lists.freedesktop.org/mailman/listinfo/freedreno
> 
> -- 
> Jeykumar S

-- 
Sean Paul, Software Engineer, Google / Chromium OS
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2018-11-14 20:56 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-11-01  0:19 [PATCH 1/2] drm/msm: use common display thread for dispatching vblank events Jeykumar Sankaran
     [not found] ` <1541031545-20520-1-git-send-email-jsanka-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
2018-11-01  0:19   ` [PATCH 2/2] drm/msm: subclass work object for " Jeykumar Sankaran
     [not found]     ` <1541031545-20520-2-git-send-email-jsanka-sgV2jX0FEOL9JmXXK+q4OQ@public.gmane.org>
2018-11-01 19:18       ` Sean Paul
2018-11-02 23:38         ` Jeykumar Sankaran
2018-11-05 17:24           ` [Freedreno] " Sean Paul
2018-11-05 21:23             ` Jeykumar Sankaran
2018-11-01 19:09   ` [PATCH 1/2] drm/msm: use common display thread for dispatching " Sean Paul
2018-11-02 23:16     ` [Freedreno] " Jeykumar Sankaran
2018-11-08 22:23     ` Jeykumar Sankaran
2018-11-14 20:56       ` [Freedreno] " Sean Paul
2018-11-01 19:44   ` Jordan Crouse
