* [PATCH 0/2] drm/msm/mdp5: Refactor CTL code
@ 2015-06-26 20:03 Hai Li
  2015-06-26 20:03 ` [PATCH 1/2] drm/msm/mdp5: Allocate CTL for each display interface Hai Li
  2015-06-26 20:03   ` Hai Li
  0 siblings, 2 replies; 4+ messages in thread
From: Hai Li @ 2015-06-26 20:03 UTC (permalink / raw)
  To: dri-devel; +Cc: linux-arm-msm, linux-kernel, robdclark, Hai Li

Instead of allocating a CTL for each CRTC, we now associate a CTL
with each display interface, which reflects the real HW requirement.
This also helps in making use of the HW single FLUSH feature to keep
dual DSI pipes in sync.

Hai Li (2):
  drm/msm/mdp5: Allocate CTL for each display interface
  drm/msm/mdp5: Allocate CTL0/1 for dual DSI single FLUSH

 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c |  12 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c        |  26 +---
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c         | 179 ++++++++++++++++++------
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h         |  13 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c     |  18 ++-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c         |  38 +++--
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h         |   8 +-
 7 files changed, 200 insertions(+), 94 deletions(-)

-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation


* [PATCH 1/2] drm/msm/mdp5: Allocate CTL for each display interface
  2015-06-26 20:03 [PATCH 0/2] drm/msm/mdp5: Refactor CTL code Hai Li
@ 2015-06-26 20:03 ` Hai Li
  2015-06-26 20:03   ` Hai Li
  1 sibling, 0 replies; 4+ messages in thread
From: Hai Li @ 2015-06-26 20:03 UTC (permalink / raw)
  To: dri-devel; +Cc: linux-arm-msm, linux-kernel, robdclark, Hai Li

In MDP5, a CTL contains the configuration of the whole pipeline whose
output goes down to a display interface. In various cases, one
interface may require 2 CRTCs, but only one CTL. Some interfaces
also require the use of certain CTLs.

Instead of allocating a CTL for each active CRTC, this change
associates a CTL with each interface.
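
To make the ownership change concrete, here is a minimal standalone
sketch of the new model (plain userspace C, not the driver code; the
pool size, struct layout and interface numbers are simplified for
illustration):

/*
 * Standalone model: CTLs live in a fixed pool and are bound to a
 * display interface once, instead of being requested per-CRTC.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_CTL 4

struct ctl {
	int id;
	bool busy;
	int intf_num;		/* owning interface, -1 when free */
};

static struct ctl pool[MAX_CTL] = {
	{ 0, false, -1 }, { 1, false, -1 },
	{ 2, false, -1 }, { 3, false, -1 },
};

/* first-free allocation, keyed by interface rather than by CRTC */
static struct ctl *ctlm_request(int intf_num)
{
	int c;

	for (c = 0; c < MAX_CTL; c++) {
		if (!pool[c].busy) {
			pool[c].busy = true;
			pool[c].intf_num = intf_num;
			return &pool[c];
		}
	}
	return NULL;		/* no more CTLs available */
}

int main(void)
{
	struct ctl *hdmi = ctlm_request(3);	/* e.g. INTF3 = HDMI */
	struct ctl *dsi = ctlm_request(1);	/* e.g. INTF1 = DSI */

	if (hdmi && dsi)
		printf("CTL%d -> INTF%d, CTL%d -> INTF%d\n",
		       hdmi->id, hdmi->intf_num, dsi->id, dsi->intf_num);
	return 0;
}

In the real patch this allocation happens once per interface in
modeset_init_intf(), and the CTL is passed down through
construct_encoder() to the encoder.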

Signed-off-by: Hai Li <hali@codeaurora.org>
---
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 12 ++++--
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c        | 26 +++----------
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c         | 49 +++++++++----------------
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h         |  9 ++---
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c     | 12 ++++--
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c         | 36 ++++++++++++++----
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h         |  8 ++--
 7 files changed, 76 insertions(+), 76 deletions(-)

diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index ee31b16..8e6c9b5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -21,6 +21,8 @@ struct mdp5_cmd_encoder {
 	struct mdp5_interface intf;
 	bool enabled;
 	uint32_t bsc;
+
+	struct mdp5_ctl *ctl;
 };
 #define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base)
 
@@ -210,13 +212,14 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
 			mode->vsync_end, mode->vtotal,
 			mode->type, mode->flags);
 	pingpong_tearcheck_setup(encoder, mode);
-	mdp5_crtc_set_intf(encoder->crtc, &mdp5_cmd_enc->intf);
+	mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf,
+				mdp5_cmd_enc->ctl);
 }
 
 static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
 	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
 
 	if (WARN_ON(!mdp5_cmd_enc->enabled))
@@ -235,7 +238,7 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
 static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
 	struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
 
 	if (WARN_ON(mdp5_cmd_enc->enabled))
@@ -300,7 +303,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
 
 /* initialize command mode encoder */
 struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
-				struct mdp5_interface *intf)
+			struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	struct drm_encoder *encoder = NULL;
 	struct mdp5_cmd_encoder *mdp5_cmd_enc;
@@ -320,6 +323,7 @@ struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
 
 	memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf));
 	encoder = &mdp5_cmd_enc->base;
+	mdp5_cmd_enc->ctl = ctl;
 
 	drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs,
 			DRM_MODE_ENCODER_DSI);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index dea3d2e..ca3d40b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -160,8 +160,7 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
 
 	if (mdp5_crtc->ctl && !crtc->state->enable) {
 		/* set STAGE_UNUSED for all layers */
-		mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
-		mdp5_ctl_release(mdp5_crtc->ctl);
+		mdp5_ctl_blend(mdp5_crtc->ctl, 0x00000000);
 		mdp5_crtc->ctl = NULL;
 	}
 }
@@ -248,7 +247,7 @@ static void blend_setup(struct drm_crtc *crtc)
 	}
 
 	DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
-	mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
+	mdp5_ctl_blend(mdp5_crtc->ctl, blend_cfg);
 
 out:
 	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
@@ -336,7 +335,6 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_plane *plane;
 	struct drm_device *dev = crtc->dev;
 	struct plane_state pstates[STAGE3 + 1];
@@ -344,13 +342,6 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 
 	DBG("%s: check", mdp5_crtc->name);
 
-	/* request a free CTL, if none is already allocated for this CRTC */
-	if (state->enable && !mdp5_crtc->ctl) {
-		mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
-		if (WARN_ON(!mdp5_crtc->ctl))
-			return -EINVAL;
-	}
-
 	/* verify that there are not too many planes attached to crtc
 	 * and that we don't have conflicting mixer stages:
 	 */
@@ -691,8 +682,8 @@ void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
 	complete_flip(crtc, file);
 }
 
-/* set interface for routing crtc->encoder: */
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
@@ -715,7 +706,8 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
 
 	mdp_irq_update(&mdp5_kms->base);
 
-	mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
+	mdp5_crtc->ctl = ctl;
+	mdp5_ctl_set_pipeline(ctl, intf, lm);
 }
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc)
@@ -724,12 +716,6 @@ int mdp5_crtc_get_lm(struct drm_crtc *crtc)
 	return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
 }
 
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
-{
-	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
-	return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
-}
-
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index f2530f2..c968ec5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -60,8 +60,6 @@ struct mdp5_ctl {
 	u32 pending_ctl_trigger;
 
 	bool cursor_on;
-
-	struct drm_crtc *crtc;
 };
 
 struct mdp5_ctl_manager {
@@ -168,11 +166,21 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 }
 
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
+		struct mdp5_interface *intf, int lm)
 {
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
 
+	if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
+		dev_err(mdp5_kms->dev->dev,
+			"CTL %d is allocated by INTF %d, but used by INTF %d\n",
+			ctl->id, ctl->pipeline.intf.num, intf->num);
+		return -EINVAL;
+	}
+
+	ctl->lm = lm;
+
 	memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
 
 	ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
@@ -296,7 +304,7 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
 	return 0;
 }
 
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 blend_cfg)
 {
 	unsigned long flags;
 
@@ -306,10 +314,10 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
 		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
 
 	spin_lock_irqsave(&ctl->hw_lock, flags);
-	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 
-	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(lm);
+	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
 
 	return 0;
 }
@@ -434,38 +442,18 @@ u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
 	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
 }
 
-void mdp5_ctl_release(struct mdp5_ctl *ctl)
-{
-	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
-	unsigned long flags;
-
-	if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
-		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)",
-				ctl->id, ctl->busy);
-		return;
-	}
-
-	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
-	ctl->busy = false;
-	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
-
-	DBG("CTL %d released", ctl->id);
-}
-
 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
 {
 	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
 }
 
 /*
- * mdp5_ctl_request() - CTL dynamic allocation
- *
- * Note: Current implementation considers that we can only have one CRTC per CTL
+ * mdp5_ctl_request() - CTL allocation
  *
  * @return first free CTL
  */
 struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
-		struct drm_crtc *crtc)
+		int intf_num)
 {
 	struct mdp5_ctl *ctl = NULL;
 	unsigned long flags;
@@ -483,9 +471,8 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
 	}
 
 	ctl = &ctl_mgr->ctls[c];
-
-	ctl->lm = mdp5_crtc_get_lm(crtc);
-	ctl->crtc = crtc;
+	ctl->pipeline.intf.num = intf_num;
+	ctl->lm = -1;
 	ctl->busy = true;
 	ctl->pending_ctl_trigger = 0;
 	DBG("CTL %d allocated", ctl->id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 4678228..ac1530e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -32,11 +32,12 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
  * mdp5_ctl_request(ctlm, ...) returns a ctl (CTL resource) handler,
  * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
  */
-struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
+struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
 
 struct mdp5_interface;
-int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, struct mdp5_interface *intf);
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
+				int lm);
 int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
 
 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
@@ -74,7 +75,7 @@ static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
  * CTL registers need to be flushed after calling this function
  * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
  */
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 blend_cfg);
 
 /**
  * mdp_ctl_flush_mask...() - Register FLUSH masks
@@ -91,8 +92,6 @@ u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
 u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
 
-void mdp5_ctl_release(struct mdp5_ctl *ctl);
-
 
 
 #endif /* __MDP5_CTL_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index de97c08..3fa1913 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -27,6 +27,8 @@ struct mdp5_encoder {
 	spinlock_t intf_lock;	/* protect REG_MDP5_INTF_* registers */
 	bool enabled;
 	uint32_t bsc;
+
+	struct mdp5_ctl *ctl;
 };
 #define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
 
@@ -222,14 +224,15 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
 
 	spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
 
-	mdp5_crtc_set_intf(encoder->crtc, &mdp5_encoder->intf);
+	mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf,
+				mdp5_encoder->ctl);
 }
 
 static void mdp5_encoder_disable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
 	int lm = mdp5_crtc_get_lm(encoder->crtc);
 	struct mdp5_interface *intf = &mdp5_encoder->intf;
 	int intfn = mdp5_encoder->intf.num;
@@ -264,7 +267,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
 	struct mdp5_kms *mdp5_kms = get_kms(encoder);
-	struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
+	struct mdp5_ctl *ctl = mdp5_encoder->ctl;
 	struct mdp5_interface *intf = &mdp5_encoder->intf;
 	int intfn = mdp5_encoder->intf.num;
 	unsigned long flags;
@@ -329,7 +332,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 
 /* initialize encoder */
 struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
-				struct mdp5_interface *intf)
+			struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
 	struct drm_encoder *encoder = NULL;
 	struct mdp5_encoder *mdp5_encoder;
@@ -345,6 +348,7 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
 
 	memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
 	encoder = &mdp5_encoder->base;
+	mdp5_encoder->ctl = ctl;
 
 	spin_lock_init(&mdp5_encoder->intf_lock);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 206f758..3a5cc80 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -183,7 +183,7 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
 
 static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
 		enum mdp5_intf_type intf_type, int intf_num,
-		enum mdp5_intf_mode intf_mode)
+		enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl)
 {
 	struct drm_device *dev = mdp5_kms->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -196,9 +196,9 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
 
 	if ((intf_type == INTF_DSI) &&
 		(intf_mode == MDP5_INTF_DSI_MODE_COMMAND))
-		encoder = mdp5_cmd_encoder_init(dev, &intf);
+		encoder = mdp5_cmd_encoder_init(dev, &intf, ctl);
 	else
-		encoder = mdp5_encoder_init(dev, &intf);
+		encoder = mdp5_encoder_init(dev, &intf, ctl);
 
 	if (IS_ERR(encoder)) {
 		dev_err(dev->dev, "failed to construct encoder\n");
@@ -236,6 +236,8 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 	const struct mdp5_cfg_hw *hw_cfg =
 					mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 	enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
+	struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
+	struct mdp5_ctl *ctl;
 	struct drm_encoder *encoder;
 	int ret = 0;
 
@@ -246,8 +248,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 		if (!priv->edp)
 			break;
 
+		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		if (!ctl) {
+			ret = -EINVAL;
+			break;
+		}
+
 		encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num,
-					MDP5_INTF_MODE_NONE);
+					MDP5_INTF_MODE_NONE, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -259,8 +267,14 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 		if (!priv->hdmi)
 			break;
 
+		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		if (!ctl) {
+			ret = -EINVAL;
+			break;
+		}
+
 		encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num,
-					MDP5_INTF_MODE_NONE);
+					MDP5_INTF_MODE_NONE, ctl);
 		if (IS_ERR(encoder)) {
 			ret = PTR_ERR(encoder);
 			break;
@@ -285,14 +299,20 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
 		if (!priv->dsi[dsi_id])
 			break;
 
+		ctl = mdp5_ctlm_request(ctlm, intf_num);
+		if (!ctl) {
+			ret = -EINVAL;
+			break;
+		}
+
 		for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
 			mode = (i == MSM_DSI_CMD_ENCODER_ID) ?
 				MDP5_INTF_DSI_MODE_COMMAND :
 				MDP5_INTF_DSI_MODE_VIDEO;
 			dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI,
-							intf_num, mode);
-			if (IS_ERR(dsi_encs)) {
-				ret = PTR_ERR(dsi_encs);
+							intf_num, mode, ctl);
+			if (IS_ERR(dsi_encs[i])) {
+				ret = PTR_ERR(dsi_encs[i]);
 				break;
 			}
 		}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e0eb245..6833557 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -234,21 +234,21 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc);
-struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
 void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
-void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf);
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 		struct drm_plane *plane, int id);
 
 struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
-		struct mdp5_interface *intf);
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 					struct drm_encoder *slave_encoder);
 
 #ifdef CONFIG_DRM_MSM_DSI
 struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev,
-				struct mdp5_interface *intf);
+		struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
 					struct drm_encoder *slave_encoder);
 #else
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation


* [PATCH 2/2] drm/msm/mdp5: Allocate CTL0/1 for dual DSI single FLUSH
  2015-06-26 20:03 [PATCH 0/2] drm/msm/mdp5: Refactor CTL code Hai Li
@ 2015-06-26 20:03   ` Hai Li
  2015-06-26 20:03   ` Hai Li
  1 sibling, 0 replies; 4+ messages in thread
From: Hai Li @ 2015-06-26 20:03 UTC (permalink / raw)
  To: dri-devel; +Cc: linux-arm-msm, linux-kernel

This change takes advantage of a HW feature that synchronizes the
flush operation on CTL1 with CTL0, to keep the dual DSI pipes in
sync.
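
The pairing behavior can be pictured with a small standalone sketch
(not the driver code; the commit() helper and the example masks are
invented for illustration). Each paired CTL defers its FLUSH bits
until the other half of the pair has committed, then the combined
mask is written once, addressed to the lower-numbered CTL:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct ctl {
	int id;
	bool flush_pending;
	struct ctl *pair;
};

static uint32_t single_flush_pending_mask;

/*
 * Accumulate this CTL's FLUSH bits; only when both halves of the
 * pair have committed does the combined mask go out, to the
 * lower-numbered CTL's FLUSH register.
 */
static uint32_t commit(struct ctl *ctl, uint32_t mask, int *flush_id)
{
	ctl->flush_pending = true;
	single_flush_pending_mask |= mask;

	if (!ctl->pair->flush_pending)
		return 0;	/* defer: other half not committed yet */

	*flush_id = ctl->id < ctl->pair->id ? ctl->id : ctl->pair->id;
	mask = single_flush_pending_mask;
	ctl->flush_pending = ctl->pair->flush_pending = false;
	single_flush_pending_mask = 0;
	return mask;
}

int main(void)
{
	struct ctl ctl0 = { .id = 0 }, ctl1 = { .id = 1 };
	int flush_id = -1;
	uint32_t m;

	ctl0.pair = &ctl1;
	ctl1.pair = &ctl0;

	m = commit(&ctl0, 0x11, &flush_id);	/* deferred, m == 0 */
	m = commit(&ctl1, 0x22, &flush_id);	/* m == 0x33, id == 0 */
	printf("write FLUSH(%d) = 0x%x\n", flush_id, (unsigned)m);
	return 0;
}

This mirrors fix_for_single_flush() below, which zeroes the per-CTL
mask and redirects the eventual write to REG_MDP5_CTL_FLUSH(flush_id).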

Signed-off-by: Hai Li <hali@codeaurora.org>
---
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c     | 140 ++++++++++++++++++++++++----
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h     |   4 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c |   6 +-
 drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c     |   2 +-
 4 files changed, 129 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index c968ec5..62dac5c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -17,7 +17,7 @@
 /*
  * CTL - MDP Control Pool Manager
  *
- * Controls are shared between all CRTCs.
+ * Controls are shared between all display interfaces.
  *
  * They are intended to be used for data path configuration.
  * The top level register programming describes the complete data path for
@@ -27,12 +27,11 @@
  *
  * In certain use cases (high-resolution dual pipe), one single CTL can be
  * shared across multiple CRTCs.
- *
- * Because the number of CTLs can be less than the number of CRTCs,
- * CTLs are dynamically allocated from a pool of CTLs, only once a CRTC is
- * requested by the client (in mdp5_crtc_mode_set()).
  */
 
+#define CTL_STAT_BUSY		0x1
+#define CTL_STAT_BOOKED	0x2
+
 struct op_mode {
 	struct mdp5_interface intf;
 
@@ -46,8 +45,8 @@ struct mdp5_ctl {
 	u32 id;
 	int lm;
 
-	/* whether this CTL has been allocated or not: */
-	bool busy;
+	/* CTL status bitmask */
+	u32 status;
 
 	/* Operation Mode Configuration for the Pipeline */
 	struct op_mode pipeline;
@@ -60,6 +59,11 @@ struct mdp5_ctl {
 	u32 pending_ctl_trigger;
 
 	bool cursor_on;
+
+	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
+	bool flush_pending;
+
+	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
 };
 
 struct mdp5_ctl_manager {
@@ -72,6 +76,10 @@ struct mdp5_ctl_manager {
 	/* to filter out non-present bits in the current hardware config */
 	u32 flush_hw_mask;
 
+	/* status for single FLUSH */
+	bool single_flush_supported;
+	u32 single_flush_pending_mask;
+
 	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
 	spinlock_t pool_lock;
 	struct mdp5_ctl ctls[MAX_CTL];
@@ -387,6 +395,31 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
 	return sw_mask;
 }
 
+static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
+		u32 *flush_id)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+
+	if (ctl->pair) {
+		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
+		ctl->flush_pending = true;
+		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
+		*flush_mask = 0;
+
+		if (ctl->pair->flush_pending) {
+			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
+			*flush_mask = ctl_mgr->single_flush_pending_mask;
+
+			ctl->flush_pending = false;
+			ctl->pair->flush_pending = false;
+			ctl_mgr->single_flush_pending_mask = 0;
+
+			DBG("Single FLUSH mask %x,ID %d", *flush_mask,
+				*flush_id);
+		}
+	}
+}
+
 /**
  * mdp5_ctl_commit() - Register Flush
  *
@@ -408,6 +441,8 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
 	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
 	struct op_mode *pipeline = &ctl->pipeline;
 	unsigned long flags;
+	u32 flush_id = ctl->id;
+	u32 curr_ctl_flush_mask;
 
 	pipeline->start_mask &= ~flush_mask;
 
@@ -423,9 +458,13 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
 
 	flush_mask &= ctl_mgr->flush_hw_mask;
 
+	curr_ctl_flush_mask = flush_mask;
+
+	fix_for_single_flush(ctl, &flush_mask, &flush_id);
+
 	if (flush_mask) {
 		spin_lock_irqsave(&ctl->hw_lock, flags);
-		ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
+		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
 		spin_unlock_irqrestore(&ctl->hw_lock, flags);
 	}
 
@@ -434,7 +473,7 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
 		refill_start_mask(ctl);
 	}
 
-	return flush_mask;
+	return curr_ctl_flush_mask;
 }
 
 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
@@ -448,32 +487,79 @@ int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
 }
 
 /*
+ * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
+ */
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
+{
+	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
+	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+
+	/* do nothing silently if hw doesn't support */
+	if (!ctl_mgr->single_flush_supported)
+		return 0;
+
+	if (!enable) {
+		ctlx->pair = NULL;
+		ctly->pair = NULL;
+		mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0);
+		return 0;
+	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
+		dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+		return -EINVAL;
+	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
+		dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+		return -EINVAL;
+	}
+
+	ctlx->pair = ctly;
+	ctly->pair = ctlx;
+
+	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
+		MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
+
+	return 0;
+}
+
+/*
  * mdp5_ctl_request() - CTL allocation
  *
- * @return first free CTL
+ * Try to return a booked CTL if @intf_num is 1 or 2, unbooked otherwise.
+ * If no CTL is available in the preferred category, allocate from the other.
+ *
+ * @return NULL if no CTL is available.
  */
 struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
 		int intf_num)
 {
 	struct mdp5_ctl *ctl = NULL;
+	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
+	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
 	unsigned long flags;
 	int c;
 
 	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
 
+	/* search the preferred */
 	for (c = 0; c < ctl_mgr->nctl; c++)
-		if (!ctl_mgr->ctls[c].busy)
-			break;
+		if ((ctl_mgr->ctls[c].status & checkm) == match)
+			goto found;
 
-	if (unlikely(c >= ctl_mgr->nctl)) {
-		dev_err(ctl_mgr->dev->dev, "No more CTL available!");
-		goto unlock;
-	}
+	dev_warn(ctl_mgr->dev->dev,
+		"fall back to the other CTL category for INTF %d!\n", intf_num);
+
+	match ^= CTL_STAT_BOOKED;
+	for (c = 0; c < ctl_mgr->nctl; c++)
+		if ((ctl_mgr->ctls[c].status & checkm) == match)
+			goto found;
 
+	dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+	goto unlock;
+
+found:
 	ctl = &ctl_mgr->ctls[c];
 	ctl->pipeline.intf.num = intf_num;
 	ctl->lm = -1;
-	ctl->busy = true;
+	ctl->status |= CTL_STAT_BUSY;
 	ctl->pending_ctl_trigger = 0;
 	DBG("CTL %d allocated", ctl->id);
 
@@ -502,9 +588,11 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
 }
 
 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
-		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
+		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
 {
 	struct mdp5_ctl_manager *ctl_mgr;
+	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
+	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
 	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
 	unsigned long flags;
 	int c, ret;
@@ -538,14 +626,28 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
 		if (WARN_ON(!ctl_cfg->base[c])) {
 			dev_err(dev->dev, "CTL_%d: base is null!\n", c);
 			ret = -EINVAL;
+			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 			goto fail;
 		}
 		ctl->ctlm = ctl_mgr;
 		ctl->id = c;
 		ctl->reg_offset = ctl_cfg->base[c];
-		ctl->busy = false;
+		ctl->status = 0;
 		spin_lock_init(&ctl->hw_lock);
 	}
+
+	/*
+	 * In Dual DSI case, CTL0 and CTL1 are always assigned to two DSI
+	 * interfaces to support single FLUSH feature (Flush CTL0 and CTL1 when
+	 * only write into CTL0's FLUSH register) to keep two DSI pipes in sync.
+	 * Single FLUSH is supported from hw rev v3.0.
+	 */
+	if (rev >= 3) {
+		ctl_mgr->single_flush_supported = true;
+		/* Reserve CTL0/1 for INTF1/2 */
+		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
+		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
+	}
 	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
 	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
 
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index ac1530e..cd82def 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -23,7 +23,7 @@
  */
 struct mdp5_ctl_manager;
 struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
-		void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
+		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd);
 void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
 void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
 
@@ -33,6 +33,7 @@ void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
  * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
  */
 struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
+
 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
 
 struct mdp5_interface;
@@ -41,6 +42,7 @@ int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
 int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
 
 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
+int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
 
 /*
  * blend_cfg (LM blender config):
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 3fa1913..c9e32b0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -297,6 +297,7 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 					struct drm_encoder *slave_encoder)
 {
 	struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+	struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder);
 	struct mdp5_kms *mdp5_kms;
 	int intf_num;
 	u32 data = 0;
@@ -319,12 +320,13 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
 
 	/* Make sure clocks are on when connectors calling this function. */
 	mdp5_enable(mdp5_kms);
-	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
-		MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
 	/* Dumb Panel, Sync mode */
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
 	mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
+
+	mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
+
 	mdp5_disable(mdp5_kms);
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 3a5cc80..3ade89a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -528,7 +528,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 		goto fail;
 	}
 
-	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
+	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
 	if (IS_ERR(mdp5_kms->ctlm)) {
 		ret = PTR_ERR(mdp5_kms->ctlm);
 		mdp5_kms->ctlm = NULL;
-- 
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
hosted by The Linux Foundation


