All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff
@ 2018-10-10 13:04 Ville Syrjala
  2018-10-10 13:04 ` [PATCH 01/12] drm/i915: Store all wm memory latency values in .1 usec units Ville Syrjala
                   ` (21 more replies)
  0 siblings, 22 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

Currently we store the watermark memory latency values in three
different units (.1 usec, .5 usec, and 1 usec). Let's make things
less confusing by picking .1 usec as the one true unit and unify
all platforms around that. And I've included some other cleanups
related to this latency stuff.

Ville Syrjälä (12):
  drm/i915: Store all wm memory latency values in .1 usec units
  drm/i915: Use the spr/cur latencies on vlv/chv/g4x
  drm/i915: Eliminate skl_latency[]
  drm/i915: Add dev_priv->wm.num_levels and use it everywhere
  drm/i915: Add DEFINE_SNPRINTF_ARRAY()
  drm/i915: Make the WM memory latency print more compact
  drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code
  drm/i915: Split skl+ and ilk+ read_wm_latency()
  drm/i915: Sanitize wm latency values for ilk+
  drm/i915: Drop the funky ilk wm setup
  drm/i915: Allow LP3 watermarks on ILK
  drm/i915: Remove the remnants of the ilk+ LP0 wm hack

 drivers/gpu/drm/i915/i915_debugfs.c  |  82 +---
 drivers/gpu/drm/i915/i915_drv.h      |  26 +-
 drivers/gpu/drm/i915/i915_utils.h    |  16 +
 drivers/gpu/drm/i915/intel_display.c |  12 +-
 drivers/gpu/drm/i915/intel_dp.c      |  17 +-
 drivers/gpu/drm/i915/intel_pm.c      | 587 ++++++++++++++-------------
 6 files changed, 349 insertions(+), 391 deletions(-)

-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH 01/12] drm/i915: Store all wm memory latency values in .1 usec units
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
@ 2018-10-10 13:04 ` Ville Syrjala
  2018-10-10 13:12   ` Chris Wilson
  2018-10-26 18:14   ` [PATCH v2 " Ville Syrjala
  2018-10-10 13:04 ` [PATCH 02/12] drm/i915: Use the spr/cur latencies on vlv/chv/g4x Ville Syrjala
                   ` (20 subsequent siblings)
  21 siblings, 2 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

In order to simplify the code let's store all memory latency values in
0.1 usec units. This limits the platform specific units to the initial
setup code for the most part.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c |  12 ---
 drivers/gpu/drm/i915/i915_drv.h     |  14 +--
 drivers/gpu/drm/i915/intel_pm.c     | 149 ++++++++++++++++------------
 3 files changed, 87 insertions(+), 88 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 00c551d3e409..9e0cb995801f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3807,18 +3807,6 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 	for (level = 0; level < num_levels; level++) {
 		unsigned int latency = wm[level];
 
-		/*
-		 * - WM1+ latency values in 0.5us units
-		 * - latencies are in us on gen9/vlv/chv
-		 */
-		if (INTEL_GEN(dev_priv) >= 9 ||
-		    IS_VALLEYVIEW(dev_priv) ||
-		    IS_CHERRYVIEW(dev_priv) ||
-		    IS_G4X(dev_priv))
-			latency *= 10;
-		else if (level > 0)
-			latency *= 5;
-
 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
 			   level, wm[level], latency / 10, latency % 10);
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 794a8a03c7e6..e57b8cb8fa4d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1906,22 +1906,10 @@ struct drm_i915_private {
 	} sagv_status;
 
 	struct {
-		/*
-		 * Raw watermark latency values:
-		 * in 0.1us units for WM0,
-		 * in 0.5us units for WM1+.
-		 */
-		/* primary */
+		/* Watermark memory latency values in 0.1 us units */
 		uint16_t pri_latency[5];
-		/* sprite */
 		uint16_t spr_latency[5];
-		/* cursor */
 		uint16_t cur_latency[5];
-		/*
-		 * Raw watermark memory latency values
-		 * for SKL for all 8 levels
-		 * in 1us units.
-		 */
 		uint16_t skl_latency[8];
 
 		/* current hardware state */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1392aa56a55a..f871a6f152c3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -801,6 +801,27 @@ static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
 	return dev_priv->wm.max_level + 1;
 }
 
+static int intel_plane_wm_latency(struct intel_plane *plane,
+				  int level)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		return dev_priv->wm.skl_latency[level];
+
+	if (HAS_GMCH_DISPLAY(dev_priv))
+		return dev_priv->wm.pri_latency[level];
+
+	switch (plane->id) {
+	case PLANE_PRIMARY:
+		return dev_priv->wm.pri_latency[level];
+	case PLANE_CURSOR:
+		return dev_priv->wm.cur_latency[level];
+	default:
+		return dev_priv->wm.spr_latency[level];
+	}
+}
+
 static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
 				   const struct intel_plane_state *plane_state)
 {
@@ -1039,10 +1060,10 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
 
 static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
-	/* all latencies in usec */
-	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
-	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
-	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+	/* all latencies in .1 usec */
+	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 50;
+	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 120;
+	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 350;
 
 	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
 }
@@ -1097,7 +1118,7 @@ static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->base.adjusted_mode;
-	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
+	int latency = intel_plane_wm_latency(plane, level);
 	unsigned int clock, htotal, cpp, width, wm;
 
 	if (latency == 0)
@@ -1586,14 +1607,14 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
 
 static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
-	/* all latencies in usec */
-	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+	/* all latencies in .1 usec */
+	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 30;
 
 	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
 
 	if (IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
-		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 120;
+		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 330;
 
 		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
 	}
@@ -1604,12 +1625,12 @@ static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
 				     int level)
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->base.adjusted_mode;
+	int latency = intel_plane_wm_latency(plane, level);
 	unsigned int clock, htotal, cpp, width, wm;
 
-	if (dev_priv->wm.pri_latency[level] == 0)
+	if (latency == 0)
 		return USHRT_MAX;
 
 	if (!intel_wm_plane_visible(crtc_state, plane_state))
@@ -1629,8 +1650,7 @@ static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
 		 */
 		wm = 63;
 	} else {
-		wm = vlv_wm_method2(clock, htotal, width, cpp,
-				    dev_priv->wm.pri_latency[level] * 10);
+		wm = vlv_wm_method2(clock, htotal, width, cpp, latency);
 	}
 
 	return min_t(unsigned int, wm, USHRT_MAX);
@@ -2481,15 +2501,12 @@ struct ilk_wm_maximums {
 	uint16_t fbc;
 };
 
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
 static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
 				   const struct intel_plane_state *pstate,
-				   uint32_t mem_value,
-				   bool is_lp)
+				   int level)
 {
+	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
+	int latency = intel_plane_wm_latency(plane, level);
 	uint32_t method1, method2;
 	int cpp;
 
@@ -2498,27 +2515,25 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
 
 	cpp = pstate->base.fb->format->cpp[0];
 
-	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
+	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, latency);
 
-	if (!is_lp)
+	if (level == 0)
 		return method1;
 
 	method2 = ilk_wm_method2(cstate->pixel_rate,
 				 cstate->base.adjusted_mode.crtc_htotal,
 				 drm_rect_width(&pstate->base.dst),
-				 cpp, mem_value);
+				 cpp, latency);
 
 	return min(method1, method2);
 }
 
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
 static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
 				   const struct intel_plane_state *pstate,
-				   uint32_t mem_value)
+				   int level)
 {
+	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
+	int latency = intel_plane_wm_latency(plane, level);
 	uint32_t method1, method2;
 	int cpp;
 
@@ -2527,22 +2542,20 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
 
 	cpp = pstate->base.fb->format->cpp[0];
 
-	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
+	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, latency);
 	method2 = ilk_wm_method2(cstate->pixel_rate,
 				 cstate->base.adjusted_mode.crtc_htotal,
 				 drm_rect_width(&pstate->base.dst),
-				 cpp, mem_value);
+				 cpp, latency);
 	return min(method1, method2);
 }
 
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
 static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 				   const struct intel_plane_state *pstate,
-				   uint32_t mem_value)
+				   int level)
 {
+	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
+	int latency = intel_plane_wm_latency(plane, level);
 	int cpp;
 
 	if (!intel_wm_plane_visible(cstate, pstate))
@@ -2552,7 +2565,7 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 
 	return ilk_wm_method2(cstate->pixel_rate,
 			      cstate->base.adjusted_mode.crtc_htotal,
-			      pstate->base.crtc_w, cpp, mem_value);
+			      pstate->base.crtc_w, cpp, latency);
 }
 
 /* Only for WM_LP. */
@@ -2743,28 +2756,16 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 				 const struct intel_plane_state *curstate,
 				 struct intel_wm_level *result)
 {
-	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
-	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
-	uint16_t cur_latency = dev_priv->wm.cur_latency[level];
-
-	/* WM1+ latency values stored in 0.5us units */
-	if (level > 0) {
-		pri_latency *= 5;
-		spr_latency *= 5;
-		cur_latency *= 5;
-	}
-
 	if (pristate) {
-		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
-						     pri_latency, level);
+		result->pri_val = ilk_compute_pri_wm(cstate, pristate, level);
 		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
 	}
 
 	if (sprstate)
-		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
+		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, level);
 
 	if (curstate)
-		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
+		result->cur_val = ilk_compute_cur_wm(cstate, curstate, level);
 
 	result->enable = true;
 }
@@ -2930,6 +2931,16 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
 		wm[0] = 13;
 }
 
+static void ilk_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
+				       u16 wm[5])
+{
+	int level, num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+	/* convert .5 usec to .1 usec units */
+	for (level = 1; level < num_levels; level++)
+		wm[level] *= 5;
+}
+
 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
 {
 	/* how many WM levels are we expecting */
@@ -2958,15 +2969,6 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
 			continue;
 		}
 
-		/*
-		 * - latencies are in us on gen9.
-		 * - before then, WM1+ latency values are in 0.5us units
-		 */
-		if (INTEL_GEN(dev_priv) >= 9)
-			latency *= 10;
-		else if (level > 0)
-			latency *= 5;
-
 		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
 			      name, level, wm[level],
 			      latency / 10, latency % 10);
@@ -2982,8 +2984,12 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 		return false;
 
 	wm[0] = max(wm[0], min);
+
+	/* WM1+ latencies must be multiples of .5 usec */
+	min = roundup(min, 5);
+
 	for (level = 1; level <= max_level; level++)
-		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+		wm[level] = max(wm[level], min);
 
 	return true;
 }
@@ -3013,6 +3019,8 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
 	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 
+	ilk_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
+
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
@@ -3029,9 +3037,22 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 		snb_wm_latency_quirk(dev_priv);
 }
 
+static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
+				       u16 wm[8])
+{
+	int level, num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+	/* convert usec to .1 usec units */
+	for (level = 0; level < num_levels; level++)
+		wm[level] *= 10;
+}
+
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
 	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
+
+	skl_fixup_wm_latency_units(dev_priv, dev_priv->wm.skl_latency);
+
 	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
 }
 
@@ -3303,7 +3324,8 @@ static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		return 2 * level;
 	else
-		return dev_priv->wm.pri_latency[level];
+		/* specified in .5 usec units */
+		return dev_priv->wm.pri_latency[level] / 5;
 }
 
 static void ilk_compute_wm_results(struct drm_device *dev,
@@ -3763,7 +3785,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 		     !wm->wm[level].plane_en; --level)
 		     { }
 
-		latency = dev_priv->wm.skl_latency[level];
+		latency = intel_plane_wm_latency(plane, level);
 
 		if (skl_needs_memory_bw_wa(intel_state) &&
 		    plane->base.state->fb->modifier ==
@@ -4636,7 +4658,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 				struct skl_wm_level *result /* out */)
 {
 	const struct drm_plane_state *pstate = &intel_pstate->base;
-	uint32_t latency = dev_priv->wm.skl_latency[level];
+	uint32_t latency = intel_plane_wm_latency(to_intel_plane(pstate->plane),
+						  level);
 	uint_fixed_16_16_t method1, method2;
 	uint_fixed_16_16_t selected_result;
 	uint32_t res_blocks, res_lines;
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 02/12] drm/i915: Use the spr/cur latencies on vlv/chv/g4x
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
  2018-10-10 13:04 ` [PATCH 01/12] drm/i915: Store all wm memory latency values in .1 usec units Ville Syrjala
@ 2018-10-10 13:04 ` Ville Syrjala
  2018-10-10 13:04 ` [PATCH 03/12] drm/i915: Eliminate skl_latency[] Ville Syrjala
                   ` (19 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

Reduce the differences between the platforms by using the spr/cur
watermark latency values on gmch platforms as well. We'll also
print the wm latencies the same way as we do for ilk+.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c |  4 +-
 drivers/gpu/drm/i915/intel_pm.c     | 63 ++++++++++++++++++-----------
 2 files changed, 41 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9e0cb995801f..35b4c49c9bca 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3873,7 +3873,7 @@ static int spr_wm_latency_open(struct inode *inode, struct file *file)
 {
 	struct drm_i915_private *dev_priv = inode->i_private;
 
-	if (HAS_GMCH_DISPLAY(dev_priv))
+	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
 		return -ENODEV;
 
 	return single_open(file, spr_wm_latency_show, dev_priv);
@@ -3883,7 +3883,7 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
 {
 	struct drm_i915_private *dev_priv = inode->i_private;
 
-	if (HAS_GMCH_DISPLAY(dev_priv))
+	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
 		return -ENODEV;
 
 	return single_open(file, cur_wm_latency_show, dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f871a6f152c3..fe522ceae97a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -801,6 +801,27 @@ static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
 	return dev_priv->wm.max_level + 1;
 }
 
+static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
+				   const char *name,
+				   const uint16_t wm[8])
+{
+	int level, max_level = ilk_wm_max_level(dev_priv);
+
+	for (level = 0; level <= max_level; level++) {
+		unsigned int latency = wm[level];
+
+		if (latency == 0) {
+			DRM_DEBUG_KMS("%s WM%d latency not provided\n",
+				      name, level);
+			continue;
+		}
+
+		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
+			      name, level, wm[level],
+			      latency / 10, latency % 10);
+	}
+}
+
 static int intel_plane_wm_latency(struct intel_plane *plane,
 				  int level)
 {
@@ -809,9 +830,6 @@ static int intel_plane_wm_latency(struct intel_plane *plane,
 	if (INTEL_GEN(dev_priv) >= 9)
 		return dev_priv->wm.skl_latency[level];
 
-	if (HAS_GMCH_DISPLAY(dev_priv))
-		return dev_priv->wm.pri_latency[level];
-
 	switch (plane->id) {
 	case PLANE_PRIMARY:
 		return dev_priv->wm.pri_latency[level];
@@ -1066,6 +1084,15 @@ static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
 	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 350;
 
 	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
+
+	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
+	       sizeof(dev_priv->wm.pri_latency));
+	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
+	       sizeof(dev_priv->wm.pri_latency));
+
+	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 }
 
 static int g4x_plane_fifo_size(enum plane_id plane_id, int level)
@@ -1618,6 +1645,15 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
 
 		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
 	}
+
+	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
+	       sizeof(dev_priv->wm.pri_latency));
+	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
+	       sizeof(dev_priv->wm.pri_latency));
+
+	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 }
 
 static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
@@ -2954,27 +2990,6 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
 		return 2;
 }
 
-static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
-				   const char *name,
-				   const uint16_t wm[8])
-{
-	int level, max_level = ilk_wm_max_level(dev_priv);
-
-	for (level = 0; level <= max_level; level++) {
-		unsigned int latency = wm[level];
-
-		if (latency == 0) {
-			DRM_DEBUG_KMS("%s WM%d latency not provided\n",
-				      name, level);
-			continue;
-		}
-
-		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
-			      name, level, wm[level],
-			      latency / 10, latency % 10);
-	}
-}
-
 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 				    uint16_t wm[5], uint16_t min)
 {
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 03/12] drm/i915: Eliminate skl_latency[]
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
  2018-10-10 13:04 ` [PATCH 01/12] drm/i915: Store all wm memory latency values in .1 usec units Ville Syrjala
  2018-10-10 13:04 ` [PATCH 02/12] drm/i915: Use the spr/cur latencies on vlv/chv/g4x Ville Syrjala
@ 2018-10-10 13:04 ` Ville Syrjala
  2018-10-10 13:04 ` [PATCH 04/12] drm/i915: Add dev_priv->wm.num_levels and use it everywhere Ville Syrjala
                   ` (18 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

Make SKL+ less special by eliminating the separate skl_latency[]
array and just using the normal pri/spr/cur latency arrays. The
result is 2 bytes larger (3*5+8 vs. 3*8 u16 entries)

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c | 42 +++++------------------------
 drivers/gpu/drm/i915/i915_drv.h     |  8 +++---
 drivers/gpu/drm/i915/intel_pm.c     | 16 ++++++-----
 3 files changed, 20 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 35b4c49c9bca..e4b668f28ec0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3817,12 +3817,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 static int pri_wm_latency_show(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = m->private;
-	const uint16_t *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.pri_latency;
+	const u16 *latencies = dev_priv->wm.pri_latency;
 
 	wm_latency_show(m, latencies);
 
@@ -3832,12 +3827,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
 static int spr_wm_latency_show(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = m->private;
-	const uint16_t *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.spr_latency;
+	const u16 *latencies = dev_priv->wm.spr_latency;
 
 	wm_latency_show(m, latencies);
 
@@ -3847,12 +3837,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
 static int cur_wm_latency_show(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = m->private;
-	const uint16_t *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.cur_latency;
+	const u16 *latencies = dev_priv->wm.cur_latency;
 
 	wm_latency_show(m, latencies);
 
@@ -3940,12 +3925,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
 {
 	struct seq_file *m = file->private_data;
 	struct drm_i915_private *dev_priv = m->private;
-	uint16_t *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.pri_latency;
+	u16 *latencies = dev_priv->wm.pri_latency;
 
 	return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -3955,12 +3935,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
 {
 	struct seq_file *m = file->private_data;
 	struct drm_i915_private *dev_priv = m->private;
-	uint16_t *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.spr_latency;
+	u16 *latencies = dev_priv->wm.spr_latency;
 
 	return wm_latency_write(file, ubuf, len, offp, latencies);
 }
@@ -3970,12 +3945,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
 {
 	struct seq_file *m = file->private_data;
 	struct drm_i915_private *dev_priv = m->private;
-	uint16_t *latencies;
-
-	if (INTEL_GEN(dev_priv) >= 9)
-		latencies = dev_priv->wm.skl_latency;
-	else
-		latencies = dev_priv->wm.cur_latency;
+	u16 *latencies = dev_priv->wm.cur_latency;
 
 	return wm_latency_write(file, ubuf, len, offp, latencies);
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e57b8cb8fa4d..4dc8826f24b2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1907,10 +1907,10 @@ struct drm_i915_private {
 
 	struct {
 		/* Watermark memory latency values in 0.1 us units */
-		uint16_t pri_latency[5];
-		uint16_t spr_latency[5];
-		uint16_t cur_latency[5];
-		uint16_t skl_latency[8];
+		/* Watermark memory latency values in .1 us units */
+		u16 pri_latency[8];
+		u16 spr_latency[8];
+		u16 cur_latency[8];
 
 		/* current hardware state */
 		union {
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index fe522ceae97a..4bb640acf291 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -827,9 +827,6 @@ static int intel_plane_wm_latency(struct intel_plane *plane,
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 
-	if (INTEL_GEN(dev_priv) >= 9)
-		return dev_priv->wm.skl_latency[level];
-
 	switch (plane->id) {
 	case PLANE_PRIMARY:
 		return dev_priv->wm.pri_latency[level];
@@ -3064,11 +3061,18 @@ static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
-	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
+	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 
-	skl_fixup_wm_latency_units(dev_priv, dev_priv->wm.skl_latency);
+	skl_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
 
-	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
+	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
+	       sizeof(dev_priv->wm.pri_latency));
+	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
+	       sizeof(dev_priv->wm.pri_latency));
+
+	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 }
 
 static bool ilk_validate_pipe_wm(struct drm_device *dev,
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 04/12] drm/i915: Add dev_priv->wm.num_levels and use it everywhere
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (2 preceding siblings ...)
  2018-10-10 13:04 ` [PATCH 03/12] drm/i915: Eliminate skl_latency[] Ville Syrjala
@ 2018-10-10 13:04 ` Ville Syrjala
  2018-10-26 18:27   ` [PATCH v2 " Ville Syrjala
  2018-10-10 13:04 ` [PATCH 05/12] drm/i915: Add DEFINE_SNPRINTF_ARRAY() Ville Syrjala
                   ` (17 subsequent siblings)
  21 siblings, 1 reply; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

Unify our approach to figuring out how many wm levels are supported by
having dev_priv->wm.num_levels. This replaces the older
dev_priv->wm.max_level which was used on some of the platforms. I think
num_levels is less confusing than max_level in most places. The +/-1 is
now mostly isolated to the memory latency init code.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c  |  24 +----
 drivers/gpu/drm/i915/i915_drv.h      |   4 +-
 drivers/gpu/drm/i915/intel_display.c |   6 +-
 drivers/gpu/drm/i915/intel_pm.c      | 132 +++++++++++++--------------
 4 files changed, 70 insertions(+), 96 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e4b668f28ec0..461edef71fc2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3790,17 +3790,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
 	struct drm_i915_private *dev_priv = m->private;
 	struct drm_device *dev = &dev_priv->drm;
-	int level;
-	int num_levels;
-
-	if (IS_CHERRYVIEW(dev_priv))
-		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev_priv))
-		num_levels = 1;
-	else if (IS_G4X(dev_priv))
-		num_levels = 3;
-	else
-		num_levels = ilk_wm_max_level(dev_priv) + 1;
+	int level, num_levels = dev_priv->wm.num_levels;
 
 	drm_modeset_lock_all(dev);
 
@@ -3881,20 +3871,10 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
 	struct drm_i915_private *dev_priv = m->private;
 	struct drm_device *dev = &dev_priv->drm;
 	uint16_t new[8] = { 0 };
-	int num_levels;
-	int level;
+	int level, num_levels = dev_priv->wm.num_levels;
 	int ret;
 	char tmp[32];
 
-	if (IS_CHERRYVIEW(dev_priv))
-		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev_priv))
-		num_levels = 1;
-	else if (IS_G4X(dev_priv))
-		num_levels = 3;
-	else
-		num_levels = ilk_wm_max_level(dev_priv) + 1;
-
 	if (len >= sizeof(tmp))
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4dc8826f24b2..3db45ddab925 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1920,8 +1920,6 @@ struct drm_i915_private {
 			struct g4x_wm_values g4x;
 		};
 
-		uint8_t max_level;
-
 		/*
 		 * Should be held around atomic WM register writing; also
 		 * protects * intel_crtc->wm.active and
@@ -1929,6 +1927,8 @@ struct drm_i915_private {
 		 */
 		struct mutex wm_mutex;
 
+		u8 num_levels;
+
 		/*
 		 * Set during HW readout of watermarks/DDB.  Some platforms
 		 * need to know when we're still using BIOS-provided values
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a145efba9157..eb324a784f8f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11737,7 +11737,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	const enum pipe pipe = intel_crtc->pipe;
-	int plane, level, max_level = ilk_wm_max_level(dev_priv);
+	int plane, level, num_levels = dev_priv->wm.num_levels;
 
 	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
 		return;
@@ -11759,7 +11759,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
 		sw_plane_wm = &sw_wm->planes[plane];
 
 		/* Watermarks */
-		for (level = 0; level <= max_level; level++) {
+		for (level = 0; level < num_levels; level++) {
 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
 						&sw_plane_wm->wm[level]))
 				continue;
@@ -11809,7 +11809,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
 		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
 
 		/* Watermarks */
-		for (level = 0; level <= max_level; level++) {
+		for (level = 0; level < num_levels; level++) {
 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
 						&sw_plane_wm->wm[level]))
 				continue;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4bb640acf291..c03d4835d0e0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -796,23 +796,18 @@ static bool is_enabling(int old, int new, int threshold)
 	return old < threshold && new >= threshold;
 }
 
-static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
-{
-	return dev_priv->wm.max_level + 1;
-}
-
 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
 				   const char *name,
 				   const uint16_t wm[8])
 {
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, num_levels = dev_priv->wm.num_levels;
 
-	for (level = 0; level <= max_level; level++) {
+	for (level = 0; level < num_levels; level++) {
 		unsigned int latency = wm[level];
 
 		if (latency == 0) {
-			DRM_DEBUG_KMS("%s WM%d latency not provided\n",
-				      name, level);
+			DRM_ERROR("%s WM%d latency not provided\n",
+				  name, level);
 			continue;
 		}
 
@@ -1080,7 +1075,7 @@ static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
 	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 120;
 	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 350;
 
-	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
+	dev_priv->wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
 
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
@@ -1204,7 +1199,7 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	bool dirty = false;
 
-	for (; level < intel_wm_num_levels(dev_priv); level++) {
+	for (; level < dev_priv->wm.num_levels; level++) {
 		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
 
 		dirty |= raw->plane[plane_id] != value;
@@ -1223,7 +1218,7 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
 	/* NORMAL level doesn't have an FBC watermark */
 	level = max(level, G4X_WM_LEVEL_SR);
 
-	for (; level < intel_wm_num_levels(dev_priv); level++) {
+	for (; level < dev_priv->wm.num_levels; level++) {
 		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
 
 		dirty |= raw->fbc != value;
@@ -1241,7 +1236,8 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
 				     const struct intel_plane_state *plane_state)
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	int num_levels = dev_priv->wm.num_levels;
 	enum plane_id plane_id = plane->id;
 	bool dirty = false;
 	int level;
@@ -1321,7 +1317,7 @@ static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 
-	if (level > dev_priv->wm.max_level)
+	if (level >= dev_priv->wm.num_levels)
 		return false;
 
 	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1634,13 +1630,13 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
 	/* all latencies in .1 usec */
 	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 30;
 
-	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
+	dev_priv->wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
 
 	if (IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 120;
 		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 330;
 
-		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
+		dev_priv->wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
 	}
 
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
@@ -1783,7 +1779,7 @@ static void vlv_invalidate_wms(struct intel_crtc *crtc,
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	for (; level < intel_wm_num_levels(dev_priv); level++) {
+	for (; level < dev_priv->wm.num_levels; level++) {
 		enum plane_id plane_id;
 
 		for_each_plane_id_on_crtc(crtc, plane_id)
@@ -1810,7 +1806,7 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
 				 int level, enum plane_id plane_id, u16 value)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	int num_levels = intel_wm_num_levels(dev_priv);
+	int num_levels = dev_priv->wm.num_levels;
 	bool dirty = false;
 
 	for (; level < num_levels; level++) {
@@ -1827,8 +1823,9 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
 				     const struct intel_plane_state *plane_state)
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum plane_id plane_id = plane->id;
-	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
+	int num_levels = dev_priv->wm.num_levels;
 	int level;
 	bool dirty = false;
 
@@ -1942,7 +1939,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 	}
 
 	/* initially allow all levels */
-	wm_state->num_levels = intel_wm_num_levels(dev_priv);
+	wm_state->num_levels = dev_priv->wm.num_levels;
 	/*
 	 * Note that enabling cxsr with no primary/sprite planes
 	 * enabled can wedge the pipe. Hence we only allow cxsr
@@ -2143,7 +2140,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
 	struct intel_crtc *crtc;
 	int num_active_crtcs = 0;
 
-	wm->level = dev_priv->wm.max_level;
+	wm->level = dev_priv->wm.num_levels - 1;
 	wm->cxsr = true;
 
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -2663,7 +2660,7 @@ static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
 }
 
 /* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+static unsigned int ilk_plane_wm_max(struct drm_device *dev,
 				     int level,
 				     const struct intel_wm_config *config,
 				     enum intel_ddb_partitioning ddb_partitioning,
@@ -2705,7 +2702,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
 }
 
 /* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+static unsigned int ilk_cursor_wm_max(struct drm_device *dev,
 				      int level,
 				      const struct intel_wm_config *config)
 {
@@ -2717,7 +2714,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
 	return ilk_cursor_wm_reg_max(to_i915(dev), level);
 }
 
-static void ilk_compute_wm_maximums(const struct drm_device *dev,
+static void ilk_compute_wm_maximums(struct drm_device *dev,
 				    int level,
 				    const struct intel_wm_config *config,
 				    enum intel_ddb_partitioning ddb_partitioning,
@@ -2837,7 +2834,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) >= 9) {
 		uint32_t val;
 		int ret, i;
-		int level, max_level = ilk_wm_max_level(dev_priv);
+		int level, num_levels = dev_priv->wm.num_levels;
 
 		/* read the first set of memory latencies[0:3] */
 		val = 0; /* data0 to be programmed to 0 for first set */
@@ -2885,9 +2882,9 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 		 * need to be disabled. We make sure to sanitize the values out
 		 * of the punit to satisfy this requirement.
 		 */
-		for (level = 1; level <= max_level; level++) {
+		for (level = 1; level < num_levels; level++) {
 			if (wm[level] == 0) {
-				for (i = level + 1; i <= max_level; i++)
+				for (i = level + 1; i < num_levels; i++)
 					wm[i] = 0;
 				break;
 			}
@@ -2902,7 +2899,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 		 */
 		if (wm[0] == 0) {
 			wm[0] += 2;
-			for (level = 1; level <= max_level; level++) {
+			for (level = 1; level < num_levels; level++) {
 				if (wm[level] == 0)
 					break;
 				wm[level] += 2;
@@ -2967,30 +2964,17 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
 static void ilk_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 				       u16 wm[5])
 {
-	int level, num_levels = ilk_wm_max_level(dev_priv) + 1;
+	int level, num_levels = dev_priv->wm.num_levels;
 
 	/* convert .5 usec to .1 usec units */
 	for (level = 1; level < num_levels; level++)
 		wm[level] *= 5;
 }
 
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
-{
-	/* how many WM levels are we expecting */
-	if (INTEL_GEN(dev_priv) >= 9)
-		return 7;
-	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		return 4;
-	else if (INTEL_GEN(dev_priv) >= 6)
-		return 3;
-	else
-		return 2;
-}
-
 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 				    uint16_t wm[5], uint16_t min)
 {
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, num_levels = dev_priv->wm.num_levels;
 
 	if (wm[0] >= min)
 		return false;
@@ -3000,7 +2984,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 	/* WM1+ latencies must be multiples of .5 usec */
 	min = roundup(min, 5);
 
-	for (level = 1; level <= max_level; level++)
+	for (level = 1; level < num_levels; level++)
 		wm[level] = max(wm[level], min);
 
 	return true;
@@ -3029,6 +3013,13 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
 
 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		dev_priv->wm.num_levels = 5;
+	else if (INTEL_GEN(dev_priv) >= 6)
+		dev_priv->wm.num_levels = 4;
+	else
+		dev_priv->wm.num_levels = 3;
+
 	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 
 	ilk_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
@@ -3052,7 +3043,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 				       u16 wm[8])
 {
-	int level, num_levels = ilk_wm_max_level(dev_priv) + 1;
+	int level, num_levels = dev_priv->wm.num_levels;
 
 	/* convert usec to .1 usec units */
 	for (level = 0; level < num_levels; level++)
@@ -3061,6 +3052,8 @@ static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
+	dev_priv->wm.num_levels = 8;
+
 	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 
 	skl_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
@@ -3111,7 +3104,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	const struct intel_plane_state *pristate = NULL;
 	const struct intel_plane_state *sprstate = NULL;
 	const struct intel_plane_state *curstate = NULL;
-	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
+	int level, num_levels = dev_priv->wm.num_levels, usable_level;
 	struct ilk_wm_maximums max;
 
 	pipe_wm = &cstate->wm.ilk.optimal;
@@ -3135,7 +3128,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 			 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
 	}
 
-	usable_level = max_level;
+	usable_level = num_levels - 1;
 
 	/* ILK/SNB: LP2+ watermarks only w/o sprites */
 	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
@@ -3186,13 +3179,14 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
 				       struct intel_crtc *intel_crtc,
 				       struct intel_crtc_state *newstate)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
 	struct intel_atomic_state *intel_state =
 		to_intel_atomic_state(newstate->base.state);
 	const struct intel_crtc_state *oldstate =
 		intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
 	const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
-	int level, max_level = ilk_wm_max_level(to_i915(dev));
+	int level, num_levels = dev_priv->wm.num_levels;
 
 	/*
 	 * Start with the final, target watermarks, then combine with the
@@ -3207,7 +3201,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
 	a->sprites_enabled |= b->sprites_enabled;
 	a->sprites_scaled |= b->sprites_scaled;
 
-	for (level = 0; level <= max_level; level++) {
+	for (level = 0; level < num_levels; level++) {
 		struct intel_wm_level *a_wm = &a->wm[level];
 		const struct intel_wm_level *b_wm = &b->wm[level];
 
@@ -3279,8 +3273,8 @@ static void ilk_wm_merge(struct drm_device *dev,
 			 struct intel_pipe_wm *merged)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int level, max_level = ilk_wm_max_level(dev_priv);
-	int last_enabled_level = max_level;
+	int level, num_levels = dev_priv->wm.num_levels;
+	int last_enabled_level = num_levels - 1;
 
 	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
 	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
@@ -3291,7 +3285,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
 
 	/* merge each WM1+ level */
-	for (level = 1; level <= max_level; level++) {
+	for (level = 1; level < num_levels; level++) {
 		struct intel_wm_level *wm = &merged->wm[level];
 
 		ilk_merge_wm_level(dev, level, wm);
@@ -3321,7 +3315,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 	 */
 	if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
 	    intel_fbc_is_active(dev_priv)) {
-		for (level = 2; level <= max_level; level++) {
+		for (level = 2; level < num_levels; level++) {
 			struct intel_wm_level *wm = &merged->wm[level];
 
 			wm->enable = false;
@@ -3421,10 +3415,11 @@ static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
 						  struct intel_pipe_wm *r1,
 						  struct intel_pipe_wm *r2)
 {
-	int level, max_level = ilk_wm_max_level(to_i915(dev));
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int level, num_levels = dev_priv->wm.num_levels;
 	int level1 = 0, level2 = 0;
 
-	for (level = 1; level <= max_level; level++) {
+	for (level = 1; level < num_levels; level++) {
 		if (r1->wm[level].enable)
 			level1 = level;
 		if (r2->wm[level].enable)
@@ -3800,7 +3795,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 			continue;
 
 		/* Find the highest enabled wm level for this plane */
-		for (level = ilk_wm_max_level(dev_priv);
+		for (level = dev_priv->wm.num_levels - 1;
 		     !wm->wm[level].plane_en; --level)
 		     { }
 
@@ -4831,7 +4826,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 	uint16_t ddb_blocks;
 	enum pipe pipe = intel_crtc->pipe;
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, num_levels = dev_priv->wm.num_levels;
 	enum plane_id intel_plane_id = intel_plane->id;
 	int ret;
 
@@ -4842,7 +4837,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 		     skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
 		     skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);
 
-	for (level = 0; level <= max_level; level++) {
+	for (level = 0; level < num_levels; level++) {
 		struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
 							  &wm->wm[level];
 		struct skl_wm_level *result_prev;
@@ -4976,6 +4971,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
 		uint16_t ddb_blocks;
 
 		wm = &pipe_wm->planes[plane_id];
+
 		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
 
 		ret = skl_compute_plane_wm_params(dev_priv, cstate,
@@ -5048,10 +5044,10 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, num_levels = dev_priv->wm.num_levels;
 	enum pipe pipe = intel_crtc->pipe;
 
-	for (level = 0; level <= max_level; level++) {
+	for (level = 0; level < num_levels; level++) {
 		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
 				   &wm->wm[level]);
 	}
@@ -5085,10 +5081,10 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, num_levels = dev_priv->wm.num_levels;
 	enum pipe pipe = intel_crtc->pipe;
 
-	for (level = 0; level <= max_level; level++) {
+	for (level = 0; level < num_levels; level++) {
 		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
 				   &wm->wm[level]);
 	}
@@ -5554,16 +5550,14 @@ void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
-	int level, max_level;
+	int level, num_levels = dev_priv->wm.num_levels;
 	enum plane_id plane_id;
 	uint32_t val;
 
-	max_level = ilk_wm_max_level(dev_priv);
-
 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
 		struct skl_plane_wm *wm = &out->planes[plane_id];
 
-		for (level = 0; level <= max_level; level++) {
+		for (level = 0; level < num_levels; level++) {
 			if (plane_id != PLANE_CURSOR)
 				val = I915_READ(PLANE_WM(pipe, plane_id, level));
 			else
@@ -5657,14 +5651,14 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
 		active->linetime = hw->wm_linetime[pipe];
 	} else {
-		int level, max_level = ilk_wm_max_level(dev_priv);
+		int level, num_levels = dev_priv->wm.num_levels;
 
 		/*
 		 * For inactive pipes, all watermark levels
 		 * should be marked as enabled but zeroed,
 		 * which is what we'd compute them to.
 		 */
-		for (level = 0; level <= max_level; level++)
+		for (level = 0; level < num_levels; level++)
 			active->wm[level].enable = true;
 	}
 
@@ -5955,7 +5949,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
 			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
 			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
 				      "assuming DDR DVFS is disabled\n");
-			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
+			dev_priv->wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
 		} else {
 			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
 			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 05/12] drm/i915: Add DEFINE_SNPRINTF_ARRAY()
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (3 preceding siblings ...)
  2018-10-10 13:04 ` [PATCH 04/12] drm/i915: Add dev_priv->wm.num_levels and use it everywhere Ville Syrjala
@ 2018-10-10 13:04 ` Ville Syrjala
  2018-10-11 12:14   ` Jani Nikula
  2018-10-10 13:04 ` [PATCH 06/12] drm/i915: Make the WM memory latency print more compact Ville Syrjala
                   ` (16 subsequent siblings)
  21 siblings, 1 reply; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

Templatize snprintf_int_array() to allow us to print
different kinds of arrays without having to type all
the boilerplate for the snprintf() loop.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_utils.h | 16 ++++++++++++++++
 drivers/gpu/drm/i915/intel_dp.c   | 17 ++---------------
 2 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 5858a43e19da..079aefa20bee 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -161,4 +161,20 @@ static inline const char *enableddisabled(bool v)
 	return v ? "enabled" : "disabled";
 }
 
+#define DEFINE_SNPRINTF_ARRAY(name, type, values, index, fmt, ...) \
+void name(char *_str, size_t _len, const type *values, int _nelems) \
+{ \
+	int index; \
+	if (_len) \
+		_str[0] = '\0'; \
+	for (index = 0; index < _nelems; index++) { \
+		int _r = snprintf(_str, _len, "%s" fmt, \
+				  index ? ", " : "", __VA_ARGS__); \
+		if (_r >= _len) \
+			return; \
+		_str += _r; \
+		_len -= _r; \
+	} \
+}
+
 #endif /* !__I915_UTILS_H */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 13ff89be6ad6..dd8634b40179 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1774,21 +1774,8 @@ intel_dp_set_clock(struct intel_encoder *encoder,
 	}
 }
 
-static void snprintf_int_array(char *str, size_t len,
-			       const int *array, int nelem)
-{
-	int i;
-
-	str[0] = '\0';
-
-	for (i = 0; i < nelem; i++) {
-		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
-		if (r >= len)
-			return;
-		str += r;
-		len -= r;
-	}
-}
+static DEFINE_SNPRINTF_ARRAY(snprintf_int_array,
+			     int, array, i, "%d", array[i]);
 
 static void intel_dp_print_rates(struct intel_dp *intel_dp)
 {
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 06/12] drm/i915: Make the WM memory latency print more compact
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (4 preceding siblings ...)
  2018-10-10 13:04 ` [PATCH 05/12] drm/i915: Add DEFINE_SNPRINTF_ARRAY() Ville Syrjala
@ 2018-10-10 13:04 ` Ville Syrjala
  2018-10-10 13:04 ` [PATCH 07/12] drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code Ville Syrjala
                   ` (15 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

Print all the latency values on a single line.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index c03d4835d0e0..9fe5a390caa9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -796,25 +796,22 @@ static bool is_enabling(int old, int new, int threshold)
 	return old < threshold && new >= threshold;
 }
 
+static DEFINE_SNPRINTF_ARRAY(snprintf_wm_array,
+			     u16, wm, level, "%d.%d",
+			     wm[level] / 10, wm[level] % 10);
+
 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
 				   const char *name,
 				   const uint16_t wm[8])
 {
-	int level, num_levels = dev_priv->wm.num_levels;
+	char str[64];
 
-	for (level = 0; level < num_levels; level++) {
-		unsigned int latency = wm[level];
+	if ((drm_debug & DRM_UT_KMS) == 0)
+		return;
 
-		if (latency == 0) {
-			DRM_ERROR("%s WM%d latency not provided\n",
-				  name, level);
-			continue;
-		}
+	snprintf_wm_array(str, sizeof(str), wm, dev_priv->wm.num_levels);
 
-		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
-			      name, level, wm[level],
-			      latency / 10, latency % 10);
-	}
+	DRM_DEBUG_KMS("%s: %s (usec)", name, str);
 }
 
 static int intel_plane_wm_latency(struct intel_plane *plane,
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 07/12] drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (5 preceding siblings ...)
  2018-10-10 13:04 ` [PATCH 06/12] drm/i915: Make the WM memory latency print more compact Ville Syrjala
@ 2018-10-10 13:04 ` Ville Syrjala
  2018-10-10 13:04 ` [PATCH 08/12] drm/i915: Split skl+ and ilk+ read_wm_latency() Ville Syrjala
                   ` (14 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

The functions to fix up the sprite and cursor watermarks on ilk are
identical. Unify them to one, and give it an ilk_ prefix to make it
clear where it should be used.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 18 +++++-------------
 1 file changed, 5 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 9fe5a390caa9..067dc1ac4521 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2942,18 +2942,10 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 	}
 }
 
-static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
-				       uint16_t wm[5])
+static void ilk_fixup_spr_cur_wm_latency(struct drm_i915_private *dev_priv,
+					 u16 wm[5])
 {
-	/* ILK sprite LP0 latency is 1300 ns */
-	if (IS_GEN5(dev_priv))
-		wm[0] = 13;
-}
-
-static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
-				       uint16_t wm[5])
-{
-	/* ILK cursor LP0 latency is 1300 ns */
+	/* ILK sprite/cursor LP0 latency is 1300 ns */
 	if (IS_GEN5(dev_priv))
 		wm[0] = 13;
 }
@@ -3026,8 +3018,8 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
 
-	intel_fixup_spr_wm_latency(dev_priv, dev_priv->wm.spr_latency);
-	intel_fixup_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
+	ilk_fixup_spr_cur_wm_latency(dev_priv, dev_priv->wm.spr_latency);
+	ilk_fixup_spr_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
 
 	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 08/12] drm/i915: Split skl+ and ilk+ read_wm_latency()
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (6 preceding siblings ...)
  2018-10-10 13:04 ` [PATCH 07/12] drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code Ville Syrjala
@ 2018-10-10 13:04 ` Ville Syrjala
  2018-10-26 18:45   ` [PATCH v2 " Ville Syrjala
  2018-10-10 13:04 ` [PATCH 09/12] drm/i915: Sanitize wm latency values for ilk+ Ville Syrjala
                   ` (13 subsequent siblings)
  21 siblings, 1 reply; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

There's no point in having the skl+ and ilk+ codepaths for reading
the wm latency values in the same function. Split them apart.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 197 +++++++++++++++++---------------
 1 file changed, 104 insertions(+), 93 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 067dc1ac4521..8289c6378db3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2825,95 +2825,10 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 	       PIPE_WM_LINETIME_TIME(linetime);
 }
 
-static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
-				  uint16_t wm[8])
+static void ilk_read_wm_latency(struct drm_i915_private *dev_priv,
+				u16 wm[5])
 {
-	if (INTEL_GEN(dev_priv) >= 9) {
-		uint32_t val;
-		int ret, i;
-		int level, num_levels = dev_priv->wm.num_levels;
-
-		/* read the first set of memory latencies[0:3] */
-		val = 0; /* data0 to be programmed to 0 for first set */
-		mutex_lock(&dev_priv->pcu_lock);
-		ret = sandybridge_pcode_read(dev_priv,
-					     GEN9_PCODE_READ_MEM_LATENCY,
-					     &val);
-		mutex_unlock(&dev_priv->pcu_lock);
-
-		if (ret) {
-			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
-			return;
-		}
-
-		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-
-		/* read the second set of memory latencies[4:7] */
-		val = 1; /* data0 to be programmed to 1 for second set */
-		mutex_lock(&dev_priv->pcu_lock);
-		ret = sandybridge_pcode_read(dev_priv,
-					     GEN9_PCODE_READ_MEM_LATENCY,
-					     &val);
-		mutex_unlock(&dev_priv->pcu_lock);
-		if (ret) {
-			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
-			return;
-		}
-
-		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-
-		/*
-		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
-		 * need to be disabled. We make sure to sanitize the values out
-		 * of the punit to satisfy this requirement.
-		 */
-		for (level = 1; level < num_levels; level++) {
-			if (wm[level] == 0) {
-				for (i = level + 1; i < num_levels; i++)
-					wm[i] = 0;
-				break;
-			}
-		}
-
-		/*
-		 * WaWmMemoryReadLatency:skl+,glk
-		 *
-		 * punit doesn't take into account the read latency so we need
-		 * to add 2us to the various latency levels we retrieve from the
-		 * punit when level 0 response data us 0us.
-		 */
-		if (wm[0] == 0) {
-			wm[0] += 2;
-			for (level = 1; level < num_levels; level++) {
-				if (wm[level] == 0)
-					break;
-				wm[level] += 2;
-			}
-		}
-
-		/*
-		 * WA Level-0 adjustment for 16GB DIMMs: SKL+
-		 * If we could not get dimm info enable this WA to prevent from
-		 * any underrun. If not able to get Dimm info assume 16GB dimm
-		 * to avoid any underrun.
-		 */
-		if (!dev_priv->dram_info.valid_dimm ||
-		    dev_priv->dram_info.is_16gb_dimm)
-			wm[0] += 1;
-
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
 		wm[0] = (sskpd >> 56) & 0xFF;
@@ -2930,7 +2845,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
 		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
 		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
-	} else if (INTEL_GEN(dev_priv) >= 5) {
+	} else if (IS_GEN5(dev_priv)) {
 		uint32_t mltr = I915_READ(MLTR_ILK);
 
 		/* ILK primary LP0 latency is 700 ns */
@@ -3009,8 +2924,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 	else
 		dev_priv->wm.num_levels = 3;
 
-	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
-
+	ilk_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 	ilk_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
 
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
@@ -3029,6 +2943,68 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 		snb_wm_latency_quirk(dev_priv);
 }
 
+static void skl_read_wm_latency(struct drm_i915_private *dev_priv,
+				u16 wm[8])
+{
+	int level, num_levels = dev_priv->wm.num_levels;
+	int ret, i;
+	u32 val;
+
+	/* read the first set of memory latencies[0:3] */
+	val = 0; /* data0 to be programmed to 0 for first set */
+	mutex_lock(&dev_priv->pcu_lock);
+	ret = sandybridge_pcode_read(dev_priv,
+				     GEN9_PCODE_READ_MEM_LATENCY,
+				     &val);
+	mutex_unlock(&dev_priv->pcu_lock);
+
+	if (ret) {
+		DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+		return;
+	}
+
+	wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+
+	/* read the second set of memory latencies[4:7] */
+	val = 1; /* data0 to be programmed to 1 for second set */
+	mutex_lock(&dev_priv->pcu_lock);
+	ret = sandybridge_pcode_read(dev_priv,
+				     GEN9_PCODE_READ_MEM_LATENCY,
+				     &val);
+	mutex_unlock(&dev_priv->pcu_lock);
+	if (ret) {
+		DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+		return;
+	}
+
+	wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+
+	/*
+	 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
+	 * need to be disabled. We make sure to sanitize the values out
+	 * of the punit to satisfy this requirement.
+	 */
+	for (level = 1; level < num_levels; level++) {
+		if (wm[level] == 0) {
+			for (i = level + 1; i < num_levels; i++)
+				wm[i] = 0;
+			break;
+		}
+	}
+}
+
 static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 				       u16 wm[8])
 {
@@ -3039,14 +3015,49 @@ static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 		wm[level] *= 10;
 }
 
+static void skl_wm_latency_wa(struct drm_i915_private *dev_priv,
+			      u16 wm[8])
+{
+	/*
+	 * WaWmMemoryReadLatency:skl,glk
+	 *
+	 * punit doesn't take into account the read latency so we need
+	 * to add 2us to the various latency levels we retrieve from the
+	 * punit when level 0 response data us 0us.
+	 */
+	if (wm[0] == 0) {
+		int level, num_levels = dev_priv->wm.num_levels;
+
+		wm[0] += 20;
+
+		for (level = 1; level < num_levels; level++) {
+			if (wm[level] == 0)
+				break;
+
+			wm[level] += 20;
+		}
+	}
+
+	/*
+	 * WA Level-0 adjustment for 16GB DIMMs: SKL+
+	 * If we could not get dimm info enable this WA to prevent from
+	 * any underrun. If not able to get Dimm info assume 16GB dimm
+	 * to avoid any underrun.
+	 */
+	if (!dev_priv->dram_info.valid_dimm ||
+	    dev_priv->dram_info.is_16gb_dimm)
+		wm[0] += 10;
+}
+
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
 	dev_priv->wm.num_levels = 8;
 
-	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
-
+	skl_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 	skl_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
 
+	skl_wm_latency_wa(dev_priv, dev_priv->wm.pri_latency);
+
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 09/12] drm/i915: Sanitize wm latency values for ilk+
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (7 preceding siblings ...)
  2018-10-10 13:04 ` [PATCH 08/12] drm/i915: Split skl+ and ilk+ read_wm_latency() Ville Syrjala
@ 2018-10-10 13:04 ` Ville Syrjala
  2018-10-26 19:11   ` [PATCH v2 " Ville Syrjala
  2018-10-10 13:04 ` [PATCH 10/12] drm/i915: Drop the funky ilk wm setup Ville Syrjala
                   ` (12 subsequent siblings)
  21 siblings, 1 reply; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

For skl+ we disable all wm levels with a decreasing memory latency
value. Let's generalize the same code to work for all platforms,
and let's use it for ilk-bdw as well since those platforms also
read the latency values from a scratch register.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 58 ++++++++++++++++++++++++---------
 1 file changed, 42 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8289c6378db3..62334e413220 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2825,6 +2825,37 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 	       PIPE_WM_LINETIME_TIME(linetime);
 }
 
+static void intel_sanitize_wm_latency(struct drm_i915_private *dev_priv,
+				      u16 *wm)
+{
+	int level, num_levels = dev_priv->wm.num_levels;
+
+	/*
+	 * If we don't have WM0 latency, assume
+	 * 5 usec and disable all WM1+ levels.
+	 * 5 usec seems like a safe(ish) fallback value.
+	 */
+	if (WARN(wm[0] == 0, "WM0 memory latency value is zero")) {
+		wm[0] = 50;
+
+		for (level = 1; level < num_levels; level++)
+			wm[level] = 0;
+		return;
+	}
+
+	/* Make sure the latencies are non-decreasing */
+	for (level = 1; level < num_levels; level++) {
+		if (wm[level] < wm[level - 1]) {
+			WARN(wm[level] != 0,
+			     "Decreasing WM memory latency value(s)");
+
+			for (; level < num_levels; level++)
+				wm[level] = 0;
+			break;
+		}
+	}
+}
+
 static void ilk_read_wm_latency(struct drm_i915_private *dev_priv,
 				u16 wm[5])
 {
@@ -2888,8 +2919,11 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 	/* WM1+ latencies must be multiples of .5 usec */
 	min = roundup(min, 5);
 
-	for (level = 1; level < num_levels; level++)
+	for (level = 1; level < num_levels; level++) {
+		if (wm[level] == 0)
+			break;
 		wm[level] = max(wm[level], min);
+	}
 
 	return true;
 }
@@ -2935,6 +2969,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 	ilk_fixup_spr_cur_wm_latency(dev_priv, dev_priv->wm.spr_latency);
 	ilk_fixup_spr_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
 
+	intel_sanitize_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+	intel_sanitize_wm_latency(dev_priv, dev_priv->wm.spr_latency);
+	intel_sanitize_wm_latency(dev_priv, dev_priv->wm.cur_latency);
+
 	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
@@ -2946,8 +2984,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 static void skl_read_wm_latency(struct drm_i915_private *dev_priv,
 				u16 wm[8])
 {
-	int level, num_levels = dev_priv->wm.num_levels;
-	int ret, i;
+	int ret;
 	u32 val;
 
 	/* read the first set of memory latencies[0:3] */
@@ -2990,19 +3027,6 @@ static void skl_read_wm_latency(struct drm_i915_private *dev_priv,
 		GEN9_MEM_LATENCY_LEVEL_MASK;
 	wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
 		GEN9_MEM_LATENCY_LEVEL_MASK;
-
-	/*
-	 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
-	 * need to be disabled. We make sure to sanitize the values out
-	 * of the punit to satisfy this requirement.
-	 */
-	for (level = 1; level < num_levels; level++) {
-		if (wm[level] == 0) {
-			for (i = level + 1; i < num_levels; i++)
-				wm[i] = 0;
-			break;
-		}
-	}
 }
 
 static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
@@ -3058,6 +3082,8 @@ static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
 
 	skl_wm_latency_wa(dev_priv, dev_priv->wm.pri_latency);
 
+	intel_sanitize_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 10/12] drm/i915: Drop the funky ilk wm setup
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (8 preceding siblings ...)
  2018-10-10 13:04 ` [PATCH 09/12] drm/i915: Sanitize wm latency values for ilk+ Ville Syrjala
@ 2018-10-10 13:04 ` Ville Syrjala
  2018-10-10 13:04 ` [PATCH 11/12] drm/i915: Allow LP3 watermarks on ILK Ville Syrjala
                   ` (11 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

Currently we try to disable the wm code entirely for ilk if the BIOS
doesn't provide proper latency values. Now that we have real fallbacks
in place we should be able to always rely on the wm code instead.
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_display.c |  6 ++----
 drivers/gpu/drm/i915/intel_pm.c      | 20 ++++----------------
 2 files changed, 6 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index eb324a784f8f..3493da7f102b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5590,8 +5590,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
 	 */
 	intel_color_load_luts(&pipe_config->base);
 
-	if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+	dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
 	intel_enable_pipe(pipe_config);
 
 	if (pipe_config->has_pch_encoder)
@@ -5738,8 +5737,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
 	if (!transcoder_is_dsi(cpu_transcoder))
 		intel_ddi_enable_transcoder_func(pipe_config);
 
-	if (dev_priv->display.initial_watermarks != NULL)
-		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+	dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
 
 	if (INTEL_GEN(dev_priv) >= 11)
 		icl_pipe_mbus_enable(intel_crtc);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 62334e413220..7bd29bba81c1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -9389,22 +9389,10 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
 		dev_priv->display.compute_global_watermarks = skl_compute_wm;
 	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		ilk_setup_wm_latency(dev_priv);
-
-		if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
-		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
-		    (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
-		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
-			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
-			dev_priv->display.compute_intermediate_wm =
-				ilk_compute_intermediate_wm;
-			dev_priv->display.initial_watermarks =
-				ilk_initial_watermarks;
-			dev_priv->display.optimize_watermarks =
-				ilk_optimize_watermarks;
-		} else {
-			DRM_DEBUG_KMS("Failed to read display plane latency. "
-				      "Disable CxSR\n");
-		}
+		dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
+		dev_priv->display.compute_intermediate_wm = ilk_compute_intermediate_wm;
+		dev_priv->display.initial_watermarks = ilk_initial_watermarks;
+		dev_priv->display.optimize_watermarks = ilk_optimize_watermarks;
 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 		vlv_setup_wm_latency(dev_priv);
 		dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 11/12] drm/i915: Allow LP3 watermarks on ILK
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (9 preceding siblings ...)
  2018-10-10 13:04 ` [PATCH 10/12] drm/i915: Drop the funky ilk wm setup Ville Syrjala
@ 2018-10-10 13:04 ` Ville Syrjala
  2018-10-10 13:04 ` [PATCH 12/12] drm/i915: Remove the remnants of the ilk+ LP0 wm hack Ville Syrjala
                   ` (10 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

ILK already has the watermark registers for the LP3 level so
bump num_levels to 4. The BIOS still does not provide memory
latency values for LP3 though so it will not be used
unless the latency values are overridden via debugfs.

This also requires that we check for latency==0 zero when
computing the watermarks so that we keep disabling any wm
level which isn't supposed to be used. The behaviour now matches
that of the g4x/vlv wm code.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 7bd29bba81c1..dd5edd984fb5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2537,6 +2537,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
 	uint32_t method1, method2;
 	int cpp;
 
+	if (latency == 0)
+		return USHRT_MAX;
+
 	if (!intel_wm_plane_visible(cstate, pstate))
 		return 0;
 
@@ -2564,6 +2567,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
 	uint32_t method1, method2;
 	int cpp;
 
+	if (latency == 0)
+		return USHRT_MAX;
+
 	if (!intel_wm_plane_visible(cstate, pstate))
 		return 0;
 
@@ -2585,6 +2591,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 	int latency = intel_plane_wm_latency(plane, level);
 	int cpp;
 
+	if (latency == 0)
+		return USHRT_MAX;
+
 	if (!intel_wm_plane_visible(cstate, pstate))
 		return 0;
 
@@ -2953,10 +2962,8 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		dev_priv->wm.num_levels = 5;
-	else if (INTEL_GEN(dev_priv) >= 6)
-		dev_priv->wm.num_levels = 4;
 	else
-		dev_priv->wm.num_levels = 3;
+		dev_priv->wm.num_levels = 4;
 
 	ilk_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 	ilk_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* [PATCH 12/12] drm/i915: Remove the remnants of the ilk+ LP0 wm hack
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (10 preceding siblings ...)
  2018-10-10 13:04 ` [PATCH 11/12] drm/i915: Allow LP3 watermarks on ILK Ville Syrjala
@ 2018-10-10 13:04 ` Ville Syrjala
  2018-10-10 14:34 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Clean up the wm mem latency stuff Patchwork
                   ` (9 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-10 13:04 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

In the past we couldn't properly reject the operation if the level 0
watermarks exceeded the limits, thus we had a hack to clamp the values.
commit 86c8bbbeb8d1 ("drm/i915: Calculate ILK-style watermarks during
atomic check (v3)") changed the behaviour to fail the operation
instead, but neglected to remove all the code for the hack.
Remove the leftovers.

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 28 +---------------------------
 1 file changed, 1 insertion(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index dd5edd984fb5..0dce22fcda2d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2746,8 +2746,6 @@ static bool ilk_validate_wm_level(int level,
 				  const struct ilk_wm_maximums *max,
 				  struct intel_wm_level *result)
 {
-	bool ret;
-
 	/* already determined to be invalid? */
 	if (!result->enable)
 		return false;
@@ -2756,31 +2754,7 @@ static bool ilk_validate_wm_level(int level,
 			 result->spr_val <= max->spr &&
 			 result->cur_val <= max->cur;
 
-	ret = result->enable;
-
-	/*
-	 * HACK until we can pre-compute everything,
-	 * and thus fail gracefully if LP0 watermarks
-	 * are exceeded...
-	 */
-	if (level == 0 && !result->enable) {
-		if (result->pri_val > max->pri)
-			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
-				      level, result->pri_val, max->pri);
-		if (result->spr_val > max->spr)
-			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
-				      level, result->spr_val, max->spr);
-		if (result->cur_val > max->cur)
-			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
-				      level, result->cur_val, max->cur);
-
-		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
-		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
-		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
-		result->enable = true;
-	}
-
-	return ret;
+	return result->enable;
 }
 
 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* Re: [PATCH 01/12] drm/i915: Store all wm memory latency values in .1 usec units
  2018-10-10 13:04 ` [PATCH 01/12] drm/i915: Store all wm memory latency values in .1 usec units Ville Syrjala
@ 2018-10-10 13:12   ` Chris Wilson
  2018-10-10 15:35     ` Ville Syrjälä
  2018-10-26 18:14   ` [PATCH v2 " Ville Syrjala
  1 sibling, 1 reply; 32+ messages in thread
From: Chris Wilson @ 2018-10-10 13:12 UTC (permalink / raw)
  To: Ville Syrjala, intel-gfx

Quoting Ville Syrjala (2018-10-10 14:04:43)
> From: Ville Syrjälä <ville.syrjala@linux.intel.com>
> 
> In order to simplify the code let's store all memory latency values in
> 0.1 usec units. This limits the platform specific units to the initial
> setup code for the most part.

Asking for a friend: is it 0.1us or 128ns?
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Clean up the wm mem latency stuff
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (11 preceding siblings ...)
  2018-10-10 13:04 ` [PATCH 12/12] drm/i915: Remove the remnants of the ilk+ LP0 wm hack Ville Syrjala
@ 2018-10-10 14:34 ` Patchwork
  2018-10-10 14:38 ` ✗ Fi.CI.SPARSE: " Patchwork
                   ` (8 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Patchwork @ 2018-10-10 14:34 UTC (permalink / raw)
  To: Ville Syrjala; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Clean up the wm mem latency stuff
URL   : https://patchwork.freedesktop.org/series/50802/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
3acd8b6f70a5 drm/i915: Store all wm memory latency values in .1 usec units
77e42913e7cf drm/i915: Use the spr/cur latencies on vlv/chv/g4x
720e1b59c0b9 drm/i915: Eliminate skl_latency[]
835ecb45c0fd drm/i915: Add dev_priv->wm.num_levels and use it everywhere
56e36d0fdf5f drm/i915: Add DEFINE_SNPRINTF_ARRAY()
-:23: CHECK:MACRO_ARG_PRECEDENCE: Macro argument 'values' may be better as '(values)' to avoid precedence issues
#23: FILE: drivers/gpu/drm/i915/i915_utils.h:164:
+#define DEFINE_SNPRINTF_ARRAY(name, type, values, index, fmt, ...) \
+void name(char *_str, size_t _len, const type *values, int _nelems) \
+{ \
+	int index; \
+	if (_len) \
+		_str[0] = '\0'; \
+	for (index = 0; index < _nelems; index++) { \
+		int _r = snprintf(_str, _len, "%s" fmt, \
+				  index ? ", " : "", __VA_ARGS__); \
+		if (_r >= _len) \
+			return; \
+		_str += _r; \
+		_len -= _r; \
+	} \
+}

-:23: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'index' - possible side-effects?
#23: FILE: drivers/gpu/drm/i915/i915_utils.h:164:
+#define DEFINE_SNPRINTF_ARRAY(name, type, values, index, fmt, ...) \
+void name(char *_str, size_t _len, const type *values, int _nelems) \
+{ \
+	int index; \
+	if (_len) \
+		_str[0] = '\0'; \
+	for (index = 0; index < _nelems; index++) { \
+		int _r = snprintf(_str, _len, "%s" fmt, \
+				  index ? ", " : "", __VA_ARGS__); \
+		if (_r >= _len) \
+			return; \
+		_str += _r; \
+		_len -= _r; \
+	} \
+}

-:23: WARNING:MACRO_WITH_FLOW_CONTROL: Macros with flow control statements should be avoided
#23: FILE: drivers/gpu/drm/i915/i915_utils.h:164:
+#define DEFINE_SNPRINTF_ARRAY(name, type, values, index, fmt, ...) \
+void name(char *_str, size_t _len, const type *values, int _nelems) \
+{ \
+	int index; \
+	if (_len) \
+		_str[0] = '\0'; \
+	for (index = 0; index < _nelems; index++) { \
+		int _r = snprintf(_str, _len, "%s" fmt, \
+				  index ? ", " : "", __VA_ARGS__); \
+		if (_r >= _len) \
+			return; \
+		_str += _r; \
+		_len -= _r; \
+	} \
+}

-:24: CHECK:SPACING: spaces preferred around that '*' (ctx:WxV)
#24: FILE: drivers/gpu/drm/i915/i915_utils.h:165:
+void name(char *_str, size_t _len, const type *values, int _nelems) \
                                               ^

total: 0 errors, 1 warnings, 3 checks, 43 lines checked
f3ddc18d8e10 drm/i915: Make the WM memory latency print more compact
3e81eb4b15b0 drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code
aaa6aab9e9f2 drm/i915: Split skl+ and ilk+ read_wm_latency()
2e6e43af1f55 drm/i915: Sanitize wm latency values for ilk+
4d7823f8cfea drm/i915: Drop the funky ilk wm setup
cc9ff8c60c9f drm/i915: Allow LP3 watermarks on ILK
9ca6798844ad drm/i915: Remove the remnants of the ilk+ LP0 wm hack

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* ✗ Fi.CI.SPARSE: warning for drm/i915: Clean up the wm mem latency stuff
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (12 preceding siblings ...)
  2018-10-10 14:34 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Clean up the wm mem latency stuff Patchwork
@ 2018-10-10 14:38 ` Patchwork
  2018-10-10 14:50 ` ✗ Fi.CI.BAT: failure " Patchwork
                   ` (7 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Patchwork @ 2018-10-10 14:38 UTC (permalink / raw)
  To: Ville Syrjala; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Clean up the wm mem latency stuff
URL   : https://patchwork.freedesktop.org/series/50802/
State : warning

== Summary ==

$ dim sparse origin/drm-tip
Sparse version: v0.5.2
Commit: drm/i915: Store all wm memory latency values in .1 usec units
-O:drivers/gpu/drm/i915/intel_pm.c:1636:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:1656:16: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2511:16: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2511:16: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2535:16: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2535:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2528:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2528:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2550:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2550:16: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2984:17: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2984:17: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2986:29: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2986:29: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2986:17: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2986:17: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2992:29: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2992:29: warning: expression using sizeof(void)
-drivers/gpu/drm/i915/selftests/../i915_drv.h:3725:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/selftests/../i915_drv.h:3713:16: warning: expression using sizeof(void)

Commit: drm/i915: Use the spr/cur latencies on vlv/chv/g4x
Okay!

Commit: drm/i915: Eliminate skl_latency[]
Okay!

Commit: drm/i915: Add dev_priv->wm.num_levels and use it everywhere
-O:drivers/gpu/drm/i915/intel_pm.c:1224:17: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:1219:17: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:3004:29: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:3004:29: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2988:29: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2988:29: warning: expression using sizeof(void)

Commit: drm/i915: Add DEFINE_SNPRINTF_ARRAY()
Okay!

Commit: drm/i915: Make the WM memory latency print more compact
Okay!

Commit: drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code
Okay!

Commit: drm/i915: Split skl+ and ilk+ read_wm_latency()
Okay!

Commit: drm/i915: Sanitize wm latency values for ilk+
-O:drivers/gpu/drm/i915/intel_pm.c:2892:29: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2892:29: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2925:29: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2925:29: warning: expression using sizeof(void)

Commit: drm/i915: Drop the funky ilk wm setup
Okay!

Commit: drm/i915: Allow LP3 watermarks on ILK
Okay!

Commit: drm/i915: Remove the remnants of the ilk+ LP0 wm hack
-O:drivers/gpu/drm/i915/intel_pm.c:2777:35: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2777:35: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2778:35: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2778:35: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2779:35: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2779:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6616:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6616:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6616:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6616:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6616:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6616:35: warning: expression using sizeof(void)

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (13 preceding siblings ...)
  2018-10-10 14:38 ` ✗ Fi.CI.SPARSE: " Patchwork
@ 2018-10-10 14:50 ` Patchwork
  2018-10-26 18:17 ` ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff (rev2) Patchwork
                   ` (6 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Patchwork @ 2018-10-10 14:50 UTC (permalink / raw)
  To: Ville Syrjala; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Clean up the wm mem latency stuff
URL   : https://patchwork.freedesktop.org/series/50802/
State : failure

== Summary ==

= CI Bug Log - changes from CI_DRM_4963 -> Patchwork_10414 =

== Summary - FAILURE ==

  Serious unknown changes coming with Patchwork_10414 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_10414, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/50802/revisions/1/mbox/

== Possible new issues ==

  Here are the unknown changes that may have been introduced in Patchwork_10414:

  === IGT changes ===

    ==== Possible regressions ====

    igt@drv_selftest@live_hugepages:
      fi-glk-dsi:         PASS -> DMESG-WARN +18

    igt@drv_selftest@live_sanitycheck:
      fi-glk-j4005:       PASS -> DMESG-WARN +18

    
== Known issues ==

  Here are the changes found in Patchwork_10414 that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@amdgpu/amd_cs_nop@sync-fork-gfx0:
      fi-kbl-8809g:       PASS -> DMESG-WARN (fdo#107762)

    
    ==== Possible fixes ====

    igt@amdgpu/amd_cs_nop@fork-gfx0:
      fi-kbl-8809g:       DMESG-WARN (fdo#107762) -> PASS

    igt@gem_exec_suspend@basic-s4-devices:
      fi-kbl-7500u:       DMESG-WARN (fdo#105128, fdo#107139) -> PASS

    
  fdo#105128 https://bugs.freedesktop.org/show_bug.cgi?id=105128
  fdo#107139 https://bugs.freedesktop.org/show_bug.cgi?id=107139
  fdo#107762 https://bugs.freedesktop.org/show_bug.cgi?id=107762


== Participating hosts (47 -> 42) ==

  Additional (1): fi-gdg-551 
  Missing    (6): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-icl-u2 fi-ctg-p8600 


== Build changes ==

    * Linux: CI_DRM_4963 -> Patchwork_10414

  CI_DRM_4963: bc57a8e99f2f81581a9657a02682902f80488bb3 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4673: 54cb1aeb4e50dea9f3abae632e317875d147c4ab @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_10414: 9ca6798844ad2db6a72afaf6ac3f6d4c9f36357b @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

9ca6798844ad drm/i915: Remove the remnants of the ilk+ LP0 wm hack
cc9ff8c60c9f drm/i915: Allow LP3 watermarks on ILK
4d7823f8cfea drm/i915: Drop the funky ilk wm setup
2e6e43af1f55 drm/i915: Sanitize wm latency values for ilk+
aaa6aab9e9f2 drm/i915: Split skl+ and ilk+ read_wm_latency()
3e81eb4b15b0 drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code
f3ddc18d8e10 drm/i915: Make the WM memory latency print more compact
56e36d0fdf5f drm/i915: Add DEFINE_SNPRINTF_ARRAY()
835ecb45c0fd drm/i915: Add dev_priv->wm.num_levels and use it everywhere
720e1b59c0b9 drm/i915: Eliminate skl_latency[]
77e42913e7cf drm/i915: Use the spr/cur latencies on vlv/chv/g4x
3acd8b6f70a5 drm/i915: Store all wm memory latency values in .1 usec units

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_10414/issues.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 01/12] drm/i915: Store all wm memory latency values in .1 usec units
  2018-10-10 13:12   ` Chris Wilson
@ 2018-10-10 15:35     ` Ville Syrjälä
  0 siblings, 0 replies; 32+ messages in thread
From: Ville Syrjälä @ 2018-10-10 15:35 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On Wed, Oct 10, 2018 at 02:12:30PM +0100, Chris Wilson wrote:
> Quoting Ville Syrjala (2018-10-10 14:04:43)
> > From: Ville Syrjälä <ville.syrjala@linux.intel.com>
> > 
> > In order to simplify the code let's store all memory latency values in
> > 0.1 usec units. This limits the platform specific units to the initial
> > setup code for the most part.
> 
> Asking for a friend: is it 0.1us or 128ns?

100ns

-- 
Ville Syrjälä
Intel
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 05/12] drm/i915: Add DEFINE_SNPRINTF_ARRAY()
  2018-10-10 13:04 ` [PATCH 05/12] drm/i915: Add DEFINE_SNPRINTF_ARRAY() Ville Syrjala
@ 2018-10-11 12:14   ` Jani Nikula
  2018-10-11 12:47     ` Ville Syrjälä
  0 siblings, 1 reply; 32+ messages in thread
From: Jani Nikula @ 2018-10-11 12:14 UTC (permalink / raw)
  To: Ville Syrjala, intel-gfx

On Wed, 10 Oct 2018, Ville Syrjala <ville.syrjala@linux.intel.com> wrote:
> From: Ville Syrjälä <ville.syrjala@linux.intel.com>
>
> Templatize snprintf_int_array() to allow us to print
> different kinds of arrays without having to type all
> the boilerplate for the snprintf() loop.

I might just feel happier about duplicating the boilerplate...

BR,
Jani.



>
> Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/i915_utils.h | 16 ++++++++++++++++
>  drivers/gpu/drm/i915/intel_dp.c   | 17 ++---------------
>  2 files changed, 18 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
> index 5858a43e19da..079aefa20bee 100644
> --- a/drivers/gpu/drm/i915/i915_utils.h
> +++ b/drivers/gpu/drm/i915/i915_utils.h
> @@ -161,4 +161,20 @@ static inline const char *enableddisabled(bool v)
>  	return v ? "enabled" : "disabled";
>  }
>  
> +#define DEFINE_SNPRINTF_ARRAY(name, type, values, index, fmt, ...) \
> +void name(char *_str, size_t _len, const type *values, int _nelems) \
> +{ \
> +	int index; \
> +	if (_len) \
> +		_str[0] = '\0'; \
> +	for (index = 0; index < _nelems; index++) { \
> +		int _r = snprintf(_str, _len, "%s" fmt, \
> +				  index ? ", " : "", __VA_ARGS__); \
> +		if (_r >= _len) \
> +			return; \
> +		_str += _r; \
> +		_len -= _r; \
> +	} \
> +}
> +
>  #endif /* !__I915_UTILS_H */
> diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
> index 13ff89be6ad6..dd8634b40179 100644
> --- a/drivers/gpu/drm/i915/intel_dp.c
> +++ b/drivers/gpu/drm/i915/intel_dp.c
> @@ -1774,21 +1774,8 @@ intel_dp_set_clock(struct intel_encoder *encoder,
>  	}
>  }
>  
> -static void snprintf_int_array(char *str, size_t len,
> -			       const int *array, int nelem)
> -{
> -	int i;
> -
> -	str[0] = '\0';
> -
> -	for (i = 0; i < nelem; i++) {
> -		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
> -		if (r >= len)
> -			return;
> -		str += r;
> -		len -= r;
> -	}
> -}
> +static DEFINE_SNPRINTF_ARRAY(snprintf_int_array,
> +			     int, array, i, "%d", array[i]);
>  
>  static void intel_dp_print_rates(struct intel_dp *intel_dp)
>  {

-- 
Jani Nikula, Intel Open Source Graphics Center
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 05/12] drm/i915: Add DEFINE_SNPRINTF_ARRAY()
  2018-10-11 12:14   ` Jani Nikula
@ 2018-10-11 12:47     ` Ville Syrjälä
  2018-10-11 16:07       ` Jani Nikula
  0 siblings, 1 reply; 32+ messages in thread
From: Ville Syrjälä @ 2018-10-11 12:47 UTC (permalink / raw)
  To: Jani Nikula; +Cc: intel-gfx

On Thu, Oct 11, 2018 at 03:14:41PM +0300, Jani Nikula wrote:
> On Wed, 10 Oct 2018, Ville Syrjala <ville.syrjala@linux.intel.com> wrote:
> > From: Ville Syrjälä <ville.syrjala@linux.intel.com>
> >
> > Templatize snprintf_int_array() to allow us to print
> > different kinds of arrays without having to type all
> > the boilerplate for the snprintf() loop.
> 
> I might just feel happier about duplicating the boilerplate...

How about when the third user appears? :)

Not sure I have a third user for this actually.
snprintf_output_types() is pretty close, and there are other
bitmask I'd probably like to decode. But I couldn't immediately
think of a nice way to handle bitmasks and arrays in the same
function.

> 
> BR,
> Jani.
> 
> 
> 
> >
> > Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
> > ---
> >  drivers/gpu/drm/i915/i915_utils.h | 16 ++++++++++++++++
> >  drivers/gpu/drm/i915/intel_dp.c   | 17 ++---------------
> >  2 files changed, 18 insertions(+), 15 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
> > index 5858a43e19da..079aefa20bee 100644
> > --- a/drivers/gpu/drm/i915/i915_utils.h
> > +++ b/drivers/gpu/drm/i915/i915_utils.h
> > @@ -161,4 +161,20 @@ static inline const char *enableddisabled(bool v)
> >  	return v ? "enabled" : "disabled";
> >  }
> >  
> > +#define DEFINE_SNPRINTF_ARRAY(name, type, values, index, fmt, ...) \
> > +void name(char *_str, size_t _len, const type *values, int _nelems) \
> > +{ \
> > +	int index; \
> > +	if (_len) \
> > +		_str[0] = '\0'; \
> > +	for (index = 0; index < _nelems; index++) { \
> > +		int _r = snprintf(_str, _len, "%s" fmt, \
> > +				  index ? ", " : "", __VA_ARGS__); \
> > +		if (_r >= _len) \
> > +			return; \
> > +		_str += _r; \
> > +		_len -= _r; \
> > +	} \
> > +}
> > +
> >  #endif /* !__I915_UTILS_H */
> > diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
> > index 13ff89be6ad6..dd8634b40179 100644
> > --- a/drivers/gpu/drm/i915/intel_dp.c
> > +++ b/drivers/gpu/drm/i915/intel_dp.c
> > @@ -1774,21 +1774,8 @@ intel_dp_set_clock(struct intel_encoder *encoder,
> >  	}
> >  }
> >  
> > -static void snprintf_int_array(char *str, size_t len,
> > -			       const int *array, int nelem)
> > -{
> > -	int i;
> > -
> > -	str[0] = '\0';
> > -
> > -	for (i = 0; i < nelem; i++) {
> > -		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
> > -		if (r >= len)
> > -			return;
> > -		str += r;
> > -		len -= r;
> > -	}
> > -}
> > +static DEFINE_SNPRINTF_ARRAY(snprintf_int_array,
> > +			     int, array, i, "%d", array[i]);
> >  
> >  static void intel_dp_print_rates(struct intel_dp *intel_dp)
> >  {
> 
> -- 
> Jani Nikula, Intel Open Source Graphics Center

-- 
Ville Syrjälä
Intel
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* Re: [PATCH 05/12] drm/i915: Add DEFINE_SNPRINTF_ARRAY()
  2018-10-11 12:47     ` Ville Syrjälä
@ 2018-10-11 16:07       ` Jani Nikula
  0 siblings, 0 replies; 32+ messages in thread
From: Jani Nikula @ 2018-10-11 16:07 UTC (permalink / raw)
  To: Ville Syrjälä; +Cc: intel-gfx

On Thu, 11 Oct 2018, Ville Syrjälä <ville.syrjala@linux.intel.com> wrote:
> On Thu, Oct 11, 2018 at 03:14:41PM +0300, Jani Nikula wrote:
>> On Wed, 10 Oct 2018, Ville Syrjala <ville.syrjala@linux.intel.com> wrote:
>> > From: Ville Syrjälä <ville.syrjala@linux.intel.com>
>> >
>> > Templatize snprintf_int_array() to allow us to print
>> > different kinds of arrays without having to type all
>> > the boilerplate for the snprintf() loop.
>> 
>> I might just feel happier about duplicating the boilerplate...
>
> How about when the third user appears? :)
>
> Not sure I have a third user for this actually.
> snprintf_output_types() is pretty close, and there are other
> bitmask I'd probably like to decode. But I couldn't immediately
> think of a nice way to handle bitmasks and arrays in the same
> function.

So here's the non-macro generic approach. It's not perfect, it just has
different wrinkles:

/* Element formatter for snprintf_array(): prints one int via "%d". */
static int snprintf_int(char *str, size_t len, const void *elem)
{
	const int *value = elem;

	return snprintf(str, len, "%d", *value);
}

/*
 * Print @nelem elements of @array (each @size bytes wide) into @str,
 * separated by @sep, formatting each element with the @print callback.
 *
 * @str is always NUL-terminated when @len is non-zero; output is
 * silently truncated if the buffer is too small.  @print must return
 * the number of characters it would have written (snprintf semantics);
 * a negative return aborts the loop.
 */
static void snprintf_array(char *str, size_t len, const void *array, int nelem,
			   size_t size, const char *sep,
			   int (*print)(char *str, size_t len, const void *elem))
{
	/*
	 * Step through the elements with a char pointer: arithmetic on
	 * 'const void *' is a GNU extension, not standard C.
	 */
	const char *bytes = array;
	int i, r;

	if (len)
		str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		const void *elem = bytes + (size_t)i * size;

		if (i) {
			r = snprintf(str, len, "%s", sep);
			/* negative r: encoding error; r >= len: truncated */
			if (r < 0 || (size_t)r >= len)
				return;
			str += r;
			len -= r;
		}

		r = print(str, len, elem);
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}

/* Convenience wrapper: render an int array as "a, b, c" into @str. */
static void snprintf_int_array(char *str, size_t len, const int *array, int nelem)
{
	snprintf_array(str, len, array, nelem,
		       sizeof(*array), ", ", snprintf_int);
}

Of course, this doesn't help with bitmasks either. You'd first have to
split the bitmask into an array.


BR,
Jani.



>
>> 
>> BR,
>> Jani.
>> 
>> 
>> 
>> >
>> > Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
>> > ---
>> >  drivers/gpu/drm/i915/i915_utils.h | 16 ++++++++++++++++
>> >  drivers/gpu/drm/i915/intel_dp.c   | 17 ++---------------
>> >  2 files changed, 18 insertions(+), 15 deletions(-)
>> >
>> > diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
>> > index 5858a43e19da..079aefa20bee 100644
>> > --- a/drivers/gpu/drm/i915/i915_utils.h
>> > +++ b/drivers/gpu/drm/i915/i915_utils.h
>> > @@ -161,4 +161,20 @@ static inline const char *enableddisabled(bool v)
>> >  	return v ? "enabled" : "disabled";
>> >  }
>> >  
>> > +#define DEFINE_SNPRINTF_ARRAY(name, type, values, index, fmt, ...) \
>> > +void name(char *_str, size_t _len, const type *values, int _nelems) \
>> > +{ \
>> > +	int index; \
>> > +	if (_len) \
>> > +		_str[0] = '\0'; \
>> > +	for (index = 0; index < _nelems; index++) { \
>> > +		int _r = snprintf(_str, _len, "%s" fmt, \
>> > +				  index ? ", " : "", __VA_ARGS__); \
>> > +		if (_r >= _len) \
>> > +			return; \
>> > +		_str += _r; \
>> > +		_len -= _r; \
>> > +	} \
>> > +}
>> > +
>> >  #endif /* !__I915_UTILS_H */
>> > diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
>> > index 13ff89be6ad6..dd8634b40179 100644
>> > --- a/drivers/gpu/drm/i915/intel_dp.c
>> > +++ b/drivers/gpu/drm/i915/intel_dp.c
>> > @@ -1774,21 +1774,8 @@ intel_dp_set_clock(struct intel_encoder *encoder,
>> >  	}
>> >  }
>> >  
>> > -static void snprintf_int_array(char *str, size_t len,
>> > -			       const int *array, int nelem)
>> > -{
>> > -	int i;
>> > -
>> > -	str[0] = '\0';
>> > -
>> > -	for (i = 0; i < nelem; i++) {
>> > -		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
>> > -		if (r >= len)
>> > -			return;
>> > -		str += r;
>> > -		len -= r;
>> > -	}
>> > -}
>> > +static DEFINE_SNPRINTF_ARRAY(snprintf_int_array,
>> > +			     int, array, i, "%d", array[i]);
>> >  
>> >  static void intel_dp_print_rates(struct intel_dp *intel_dp)
>> >  {
>> 
>> -- 
>> Jani Nikula, Intel Open Source Graphics Center

-- 
Jani Nikula, Intel Open Source Graphics Center
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH v2 01/12] drm/i915: Store all wm memory latency values in .1 usec units
  2018-10-10 13:04 ` [PATCH 01/12] drm/i915: Store all wm memory latency values in .1 usec units Ville Syrjala
  2018-10-10 13:12   ` Chris Wilson
@ 2018-10-26 18:14   ` Ville Syrjala
  1 sibling, 0 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-26 18:14 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

In order to simplify the code let's store all memory latency values in
0.1 usec units. This limits the platform specific units to the initial
setup code for the most part.

v2: Write the constants as '<usec> * 10' for clarity
    Fix up the SKL+ wm calculations to match the .1 usec latency units

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c |  12 --
 drivers/gpu/drm/i915/i915_drv.h     |  14 +--
 drivers/gpu/drm/i915/intel_pm.c     | 166 ++++++++++++++++------------
 3 files changed, 95 insertions(+), 97 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5cadfcd03ea9..189402f7164f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3806,18 +3806,6 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 	for (level = 0; level < num_levels; level++) {
 		unsigned int latency = wm[level];
 
-		/*
-		 * - WM1+ latency values in 0.5us units
-		 * - latencies are in us on gen9/vlv/chv
-		 */
-		if (INTEL_GEN(dev_priv) >= 9 ||
-		    IS_VALLEYVIEW(dev_priv) ||
-		    IS_CHERRYVIEW(dev_priv) ||
-		    IS_G4X(dev_priv))
-			latency *= 10;
-		else if (level > 0)
-			latency *= 5;
-
 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
 			   level, wm[level], latency / 10, latency % 10);
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2d7761b8ac07..2c6141590f2f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1888,22 +1888,10 @@ struct drm_i915_private {
 	} sagv_status;
 
 	struct {
-		/*
-		 * Raw watermark latency values:
-		 * in 0.1us units for WM0,
-		 * in 0.5us units for WM1+.
-		 */
-		/* primary */
+		/* Watermark memory latency values in 0.1 us units */
 		uint16_t pri_latency[5];
-		/* sprite */
 		uint16_t spr_latency[5];
-		/* cursor */
 		uint16_t cur_latency[5];
-		/*
-		 * Raw watermark memory latency values
-		 * for SKL for all 8 levels
-		 * in 1us units.
-		 */
 		uint16_t skl_latency[8];
 
 		/* current hardware state */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 688298cf1aaf..bdd47f74fc74 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -801,6 +801,27 @@ static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
 	return dev_priv->wm.max_level + 1;
 }
 
+static int intel_plane_wm_latency(struct intel_plane *plane,
+				  int level)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		return dev_priv->wm.skl_latency[level];
+
+	if (HAS_GMCH_DISPLAY(dev_priv))
+		return dev_priv->wm.pri_latency[level];
+
+	switch (plane->id) {
+	case PLANE_PRIMARY:
+		return dev_priv->wm.pri_latency[level];
+	case PLANE_CURSOR:
+		return dev_priv->wm.cur_latency[level];
+	default:
+		return dev_priv->wm.spr_latency[level];
+	}
+}
+
 static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
 				   const struct intel_plane_state *plane_state)
 {
@@ -1039,10 +1060,10 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
 
 static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
-	/* all latencies in usec */
-	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5;
-	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12;
-	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35;
+	/* all latencies in .1 usec */
+	dev_priv->wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5 * 10;
+	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12 * 10;
+	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35 * 10;
 
 	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
 }
@@ -1097,7 +1118,7 @@ static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->base.adjusted_mode;
-	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
+	int latency = intel_plane_wm_latency(plane, level);
 	unsigned int clock, htotal, cpp, width, wm;
 
 	if (latency == 0)
@@ -1586,14 +1607,14 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
 
 static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
-	/* all latencies in usec */
-	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+	/* all latencies in .1 usec */
+	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3 * 10;
 
 	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
 
 	if (IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
-		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12 * 10;
+		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33 * 10;
 
 		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
 	}
@@ -1604,12 +1625,12 @@ static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
 				     int level)
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->base.adjusted_mode;
+	int latency = intel_plane_wm_latency(plane, level);
 	unsigned int clock, htotal, cpp, width, wm;
 
-	if (dev_priv->wm.pri_latency[level] == 0)
+	if (latency == 0)
 		return USHRT_MAX;
 
 	if (!intel_wm_plane_visible(crtc_state, plane_state))
@@ -1629,8 +1650,7 @@ static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
 		 */
 		wm = 63;
 	} else {
-		wm = vlv_wm_method2(clock, htotal, width, cpp,
-				    dev_priv->wm.pri_latency[level] * 10);
+		wm = vlv_wm_method2(clock, htotal, width, cpp, latency);
 	}
 
 	return min_t(unsigned int, wm, USHRT_MAX);
@@ -2481,15 +2501,12 @@ struct ilk_wm_maximums {
 	uint16_t fbc;
 };
 
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
 static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
 				   const struct intel_plane_state *pstate,
-				   uint32_t mem_value,
-				   bool is_lp)
+				   int level)
 {
+	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
+	int latency = intel_plane_wm_latency(plane, level);
 	uint32_t method1, method2;
 	int cpp;
 
@@ -2498,27 +2515,25 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
 
 	cpp = pstate->base.fb->format->cpp[0];
 
-	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
+	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, latency);
 
-	if (!is_lp)
+	if (level == 0)
 		return method1;
 
 	method2 = ilk_wm_method2(cstate->pixel_rate,
 				 cstate->base.adjusted_mode.crtc_htotal,
 				 drm_rect_width(&pstate->base.dst),
-				 cpp, mem_value);
+				 cpp, latency);
 
 	return min(method1, method2);
 }
 
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
 static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
 				   const struct intel_plane_state *pstate,
-				   uint32_t mem_value)
+				   int level)
 {
+	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
+	int latency = intel_plane_wm_latency(plane, level);
 	uint32_t method1, method2;
 	int cpp;
 
@@ -2527,22 +2542,20 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
 
 	cpp = pstate->base.fb->format->cpp[0];
 
-	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
+	method1 = ilk_wm_method1(cstate->pixel_rate, cpp, latency);
 	method2 = ilk_wm_method2(cstate->pixel_rate,
 				 cstate->base.adjusted_mode.crtc_htotal,
 				 drm_rect_width(&pstate->base.dst),
-				 cpp, mem_value);
+				 cpp, latency);
 	return min(method1, method2);
 }
 
-/*
- * For both WM_PIPE and WM_LP.
- * mem_value must be in 0.1us units.
- */
 static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 				   const struct intel_plane_state *pstate,
-				   uint32_t mem_value)
+				   int level)
 {
+	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
+	int latency = intel_plane_wm_latency(plane, level);
 	int cpp;
 
 	if (!intel_wm_plane_visible(cstate, pstate))
@@ -2552,7 +2565,7 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 
 	return ilk_wm_method2(cstate->pixel_rate,
 			      cstate->base.adjusted_mode.crtc_htotal,
-			      pstate->base.crtc_w, cpp, mem_value);
+			      pstate->base.crtc_w, cpp, latency);
 }
 
 /* Only for WM_LP. */
@@ -2743,28 +2756,16 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 				 const struct intel_plane_state *curstate,
 				 struct intel_wm_level *result)
 {
-	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
-	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
-	uint16_t cur_latency = dev_priv->wm.cur_latency[level];
-
-	/* WM1+ latency values stored in 0.5us units */
-	if (level > 0) {
-		pri_latency *= 5;
-		spr_latency *= 5;
-		cur_latency *= 5;
-	}
-
 	if (pristate) {
-		result->pri_val = ilk_compute_pri_wm(cstate, pristate,
-						     pri_latency, level);
+		result->pri_val = ilk_compute_pri_wm(cstate, pristate, level);
 		result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
 	}
 
 	if (sprstate)
-		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
+		result->spr_val = ilk_compute_spr_wm(cstate, sprstate, level);
 
 	if (curstate)
-		result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
+		result->cur_val = ilk_compute_cur_wm(cstate, curstate, level);
 
 	result->enable = true;
 }
@@ -2929,6 +2930,16 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
 		wm[0] = 13;
 }
 
+static void ilk_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
+				       u16 wm[5])
+{
+	int level, num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+	/* convert .5 usec to .1 usec units */
+	for (level = 1; level < num_levels; level++)
+		wm[level] *= 5;
+}
+
 int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
 {
 	/* how many WM levels are we expecting */
@@ -2957,15 +2968,6 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
 			continue;
 		}
 
-		/*
-		 * - latencies are in us on gen9.
-		 * - before then, WM1+ latency values are in 0.5us units
-		 */
-		if (INTEL_GEN(dev_priv) >= 9)
-			latency *= 10;
-		else if (level > 0)
-			latency *= 5;
-
 		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
 			      name, level, wm[level],
 			      latency / 10, latency % 10);
@@ -2973,16 +2975,19 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
 }
 
 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
-				    uint16_t wm[5], uint16_t min)
+				    u16 wm[5], u16 min)
 {
 	int level, max_level = ilk_wm_max_level(dev_priv);
 
 	if (wm[0] >= min)
 		return false;
 
-	wm[0] = max(wm[0], min);
+	wm[0] = min;
+
+	/* WM1+ latencies must be multiples of .5 usec */
+	min = roundup(min, 5);
 	for (level = 1; level <= max_level; level++)
-		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+		wm[level] = max(wm[level], min);
 
 	return true;
 }
@@ -3012,6 +3017,8 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
 	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 
+	ilk_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
+
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
@@ -3028,9 +3035,22 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 		snb_wm_latency_quirk(dev_priv);
 }
 
+static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
+				       u16 wm[8])
+{
+	int level, num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+	/* convert usec to .1 usec units */
+	for (level = 0; level < num_levels; level++)
+		wm[level] *= 10;
+}
+
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
 	intel_read_wm_latency(dev_priv, dev_priv->wm.skl_latency);
+
+	skl_fixup_wm_latency_units(dev_priv, dev_priv->wm.skl_latency);
+
 	intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
 }
 
@@ -3302,7 +3322,8 @@ static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		return 2 * level;
 	else
-		return dev_priv->wm.pri_latency[level];
+		/* specified in .5 usec units */
+		return dev_priv->wm.pri_latency[level] / 5;
 }
 
 static void ilk_compute_wm_results(struct drm_device *dev,
@@ -3762,19 +3783,19 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 		     !wm->wm[level].plane_en; --level)
 		     { }
 
-		latency = dev_priv->wm.skl_latency[level];
+		latency = intel_plane_wm_latency(plane, level);
 
 		if (skl_needs_memory_bw_wa(intel_state) &&
 		    plane->base.state->fb->modifier ==
 		    I915_FORMAT_MOD_X_TILED)
-			latency += 15;
+			latency += 15 * 10;
 
 		/*
 		 * If any of the planes on this pipe don't enable wm levels that
 		 * incur memory latencies higher than sagv_block_time_us we
 		 * can't enable the SAGV.
 		 */
-		if (latency < sagv_block_time_us)
+		if (latency < sagv_block_time_us * 10)
 			return false;
 	}
 
@@ -4514,7 +4535,7 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
 		return FP_16_16_MAX;
 
 	wm_intermediate_val = latency * pixel_rate * cpp;
-	ret = div_fixed16(wm_intermediate_val, 1000 * dbuf_block_size);
+	ret = div_fixed16(wm_intermediate_val, 10000 * dbuf_block_size);
 
 	if (INTEL_GEN(dev_priv) >= 10)
 		ret = add_fixed16_u32(ret, 1);
@@ -4535,7 +4556,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
 
 	wm_intermediate_val = latency * pixel_rate;
 	wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
-					   pipe_htotal * 1000);
+					   pipe_htotal * 10000);
 	ret = mul_u32_fixed16(wm_intermediate_val, plane_blocks_per_line);
 	return ret;
 }
@@ -4701,7 +4722,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 				struct skl_wm_level *result /* out */)
 {
 	const struct drm_plane_state *pstate = &intel_pstate->base;
-	uint32_t latency = dev_priv->wm.skl_latency[level];
+	uint32_t latency = intel_plane_wm_latency(to_intel_plane(pstate->plane),
+						  level);
 	uint_fixed_16_16_t method1, method2;
 	uint_fixed_16_16_t selected_result;
 	uint32_t res_blocks, res_lines;
@@ -4720,10 +4742,10 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
 	    IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) &&
 	    dev_priv->ipc_enabled)
-		latency += 4;
+		latency += 4 * 10;
 
 	if (apply_memory_bw_wa && wp->x_tiled)
-		latency += 15;
+		latency += 15 * 10;
 
 	method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
 				 wp->cpp, latency, wp->dbuf_block_size);
@@ -4746,7 +4768,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 				selected_result = min_fixed16(method1, method2);
 			else
 				selected_result = method2;
-		} else if (latency >= wp->linetime_us) {
+		} else if (latency >= wp->linetime_us * 10) {
 			if (INTEL_GEN(dev_priv) == 9 &&
 			    !IS_GEMINILAKE(dev_priv))
 				selected_result = min_fixed16(method1, method2);
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff (rev2)
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (14 preceding siblings ...)
  2018-10-10 14:50 ` ✗ Fi.CI.BAT: failure " Patchwork
@ 2018-10-26 18:17 ` Patchwork
  2018-10-26 18:37 ` ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff (rev3) Patchwork
                   ` (5 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Patchwork @ 2018-10-26 18:17 UTC (permalink / raw)
  To: Ville Syrjala; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Clean up the wm mem latency stuff (rev2)
URL   : https://patchwork.freedesktop.org/series/50802/
State : failure

== Summary ==

Applying: drm/i915: Store all wm memory latency values in .1 usec units
Applying: drm/i915: Use the spr/cur latencies on vlv/chv/g4x
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/i915_debugfs.c
M	drivers/gpu/drm/i915/intel_pm.c
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/i915/intel_pm.c
Auto-merging drivers/gpu/drm/i915/i915_debugfs.c
Applying: drm/i915: Eliminate skl_latency[]
Applying: drm/i915: Add dev_priv->wm.num_levels and use it everywhere
error: sha1 information is lacking or useless (drivers/gpu/drm/i915/i915_drv.h).
error: could not build fake ancestor
Patch failed at 0004 drm/i915: Add dev_priv->wm.num_levels and use it everywhere
Use 'git am --show-current-patch' to see the failed patch
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH v2 04/12] drm/i915: Add dev_priv->wm.num_levels and use it everywhere
  2018-10-10 13:04 ` [PATCH 04/12] drm/i915: Add dev_priv->wm.num_levels and use it everywhere Ville Syrjala
@ 2018-10-26 18:27   ` Ville Syrjala
  0 siblings, 0 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-26 18:27 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

Unify our approach to figuring out how many wm levels are supported by
having dev_priv->wm.num_levels. This replaces the older
dev_priv->wm.max_level which was used on some of the platforms. I think
num_levels is less confusing than max_level in most places. The +/-1 is
now mostly isolated to the memory latency init code.

v2: Rebase

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c  |  24 +----
 drivers/gpu/drm/i915/i915_drv.h      |   4 +-
 drivers/gpu/drm/i915/intel_display.c |   6 +-
 drivers/gpu/drm/i915/intel_pm.c      | 131 +++++++++++++--------------
 4 files changed, 69 insertions(+), 96 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8fd783aa226e..d3250c674eb8 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3789,17 +3789,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
 {
 	struct drm_i915_private *dev_priv = m->private;
 	struct drm_device *dev = &dev_priv->drm;
-	int level;
-	int num_levels;
-
-	if (IS_CHERRYVIEW(dev_priv))
-		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev_priv))
-		num_levels = 1;
-	else if (IS_G4X(dev_priv))
-		num_levels = 3;
-	else
-		num_levels = ilk_wm_max_level(dev_priv) + 1;
+	int level, num_levels = dev_priv->wm.num_levels;
 
 	drm_modeset_lock_all(dev);
 
@@ -3880,20 +3870,10 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
 	struct drm_i915_private *dev_priv = m->private;
 	struct drm_device *dev = &dev_priv->drm;
 	uint16_t new[8] = { 0 };
-	int num_levels;
-	int level;
+	int level, num_levels = dev_priv->wm.num_levels;
 	int ret;
 	char tmp[32];
 
-	if (IS_CHERRYVIEW(dev_priv))
-		num_levels = 3;
-	else if (IS_VALLEYVIEW(dev_priv))
-		num_levels = 1;
-	else if (IS_G4X(dev_priv))
-		num_levels = 3;
-	else
-		num_levels = ilk_wm_max_level(dev_priv) + 1;
-
 	if (len >= sizeof(tmp))
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 406f4123eab6..e6d6ec2e0e71 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1902,8 +1902,6 @@ struct drm_i915_private {
 			struct g4x_wm_values g4x;
 		};
 
-		uint8_t max_level;
-
 		/*
 		 * Should be held around atomic WM register writing; also
 		 * protects * intel_crtc->wm.active and
@@ -1911,6 +1909,8 @@ struct drm_i915_private {
 		 */
 		struct mutex wm_mutex;
 
+		u8 num_levels;
+
 		/*
 		 * Set during HW readout of watermarks/DDB.  Some platforms
 		 * need to know when we're still using BIOS-provided values
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fe045abb6472..f4dd297d4804 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11802,7 +11802,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	const enum pipe pipe = intel_crtc->pipe;
-	int plane, level, max_level = ilk_wm_max_level(dev_priv);
+	int plane, level, num_levels = dev_priv->wm.num_levels;
 
 	if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
 		return;
@@ -11824,7 +11824,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
 		sw_plane_wm = &sw_wm->planes[plane];
 
 		/* Watermarks */
-		for (level = 0; level <= max_level; level++) {
+		for (level = 0; level < num_levels; level++) {
 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
 						&sw_plane_wm->wm[level]))
 				continue;
@@ -11874,7 +11874,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
 		sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
 
 		/* Watermarks */
-		for (level = 0; level <= max_level; level++) {
+		for (level = 0; level < num_levels; level++) {
 			if (skl_wm_level_equals(&hw_plane_wm->wm[level],
 						&sw_plane_wm->wm[level]))
 				continue;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 22cbb8e25f5b..4ffaa17fc4e0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -796,23 +796,18 @@ static bool is_enabling(int old, int new, int threshold)
 	return old < threshold && new >= threshold;
 }
 
-static int intel_wm_num_levels(struct drm_i915_private *dev_priv)
-{
-	return dev_priv->wm.max_level + 1;
-}
-
 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
 				   const char *name,
 				   const uint16_t wm[8])
 {
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, num_levels = dev_priv->wm.num_levels;
 
-	for (level = 0; level <= max_level; level++) {
+	for (level = 0; level < num_levels; level++) {
 		unsigned int latency = wm[level];
 
 		if (latency == 0) {
-			DRM_DEBUG_KMS("%s WM%d latency not provided\n",
-				      name, level);
+			DRM_ERROR("%s WM%d latency not provided\n",
+				  name, level);
 			continue;
 		}
 
@@ -1080,7 +1075,7 @@ static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv)
 	dev_priv->wm.pri_latency[G4X_WM_LEVEL_SR] = 12 * 10;
 	dev_priv->wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35 * 10;
 
-	dev_priv->wm.max_level = G4X_WM_LEVEL_HPLL;
+	dev_priv->wm.num_levels = G4X_WM_LEVEL_HPLL + 1;
 
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
@@ -1204,7 +1199,7 @@ static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 	bool dirty = false;
 
-	for (; level < intel_wm_num_levels(dev_priv); level++) {
+	for (; level < dev_priv->wm.num_levels; level++) {
 		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
 
 		dirty |= raw->plane[plane_id] != value;
@@ -1223,7 +1218,7 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
 	/* NORMAL level doesn't have an FBC watermark */
 	level = max(level, G4X_WM_LEVEL_SR);
 
-	for (; level < intel_wm_num_levels(dev_priv); level++) {
+	for (; level < dev_priv->wm.num_levels; level++) {
 		struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level];
 
 		dirty |= raw->fbc != value;
@@ -1241,7 +1236,8 @@ static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
 				     const struct intel_plane_state *plane_state)
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
-	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	int num_levels = dev_priv->wm.num_levels;
 	enum plane_id plane_id = plane->id;
 	bool dirty = false;
 	int level;
@@ -1321,7 +1317,7 @@ static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state,
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
 
-	if (level > dev_priv->wm.max_level)
+	if (level >= dev_priv->wm.num_levels)
 		return false;
 
 	return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
@@ -1634,13 +1630,13 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
 	/* all latencies in .1 usec */
 	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3 * 10;
 
-	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
+	dev_priv->wm.num_levels = VLV_WM_LEVEL_PM2 + 1;
 
 	if (IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12 * 10;
 		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33 * 10;
 
-		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
+		dev_priv->wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1;
 	}
 
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
@@ -1783,7 +1779,7 @@ static void vlv_invalidate_wms(struct intel_crtc *crtc,
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 
-	for (; level < intel_wm_num_levels(dev_priv); level++) {
+	for (; level < dev_priv->wm.num_levels; level++) {
 		enum plane_id plane_id;
 
 		for_each_plane_id_on_crtc(crtc, plane_id)
@@ -1810,7 +1806,7 @@ static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
 				 int level, enum plane_id plane_id, u16 value)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-	int num_levels = intel_wm_num_levels(dev_priv);
+	int num_levels = dev_priv->wm.num_levels;
 	bool dirty = false;
 
 	for (; level < num_levels; level++) {
@@ -1827,8 +1823,9 @@ static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
 				     const struct intel_plane_state *plane_state)
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	enum plane_id plane_id = plane->id;
-	int num_levels = intel_wm_num_levels(to_i915(plane->base.dev));
+	int num_levels = dev_priv->wm.num_levels;
 	int level;
 	bool dirty = false;
 
@@ -1942,7 +1939,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 	}
 
 	/* initially allow all levels */
-	wm_state->num_levels = intel_wm_num_levels(dev_priv);
+	wm_state->num_levels = dev_priv->wm.num_levels;
 	/*
 	 * Note that enabling cxsr with no primary/sprite planes
 	 * enabled can wedge the pipe. Hence we only allow cxsr
@@ -2143,7 +2140,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
 	struct intel_crtc *crtc;
 	int num_active_crtcs = 0;
 
-	wm->level = dev_priv->wm.max_level;
+	wm->level = dev_priv->wm.num_levels - 1;
 	wm->cxsr = true;
 
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -2663,7 +2660,7 @@ static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
 }
 
 /* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+static unsigned int ilk_plane_wm_max(struct drm_device *dev,
 				     int level,
 				     const struct intel_wm_config *config,
 				     enum intel_ddb_partitioning ddb_partitioning,
@@ -2705,7 +2702,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
 }
 
 /* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+static unsigned int ilk_cursor_wm_max(struct drm_device *dev,
 				      int level,
 				      const struct intel_wm_config *config)
 {
@@ -2717,7 +2714,7 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
 	return ilk_cursor_wm_reg_max(to_i915(dev), level);
 }
 
-static void ilk_compute_wm_maximums(const struct drm_device *dev,
+static void ilk_compute_wm_maximums(struct drm_device *dev,
 				    int level,
 				    const struct intel_wm_config *config,
 				    enum intel_ddb_partitioning ddb_partitioning,
@@ -2837,7 +2834,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) >= 9) {
 		uint32_t val;
 		int ret, i;
-		int level, max_level = ilk_wm_max_level(dev_priv);
+		int level, num_levels = dev_priv->wm.num_levels;
 
 		/* read the first set of memory latencies[0:3] */
 		val = 0; /* data0 to be programmed to 0 for first set */
@@ -2885,9 +2882,9 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 		 * need to be disabled. We make sure to sanitize the values out
 		 * of the punit to satisfy this requirement.
 		 */
-		for (level = 1; level <= max_level; level++) {
+		for (level = 1; level < num_levels; level++) {
 			if (wm[level] == 0) {
-				for (i = level + 1; i <= max_level; i++)
+				for (i = level + 1; i < num_levels; i++)
 					wm[i] = 0;
 				break;
 			}
@@ -2902,7 +2899,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 		 */
 		if (wm[0] == 0) {
 			wm[0] += 2;
-			for (level = 1; level <= max_level; level++) {
+			for (level = 1; level < num_levels; level++) {
 				if (wm[level] == 0)
 					break;
 				wm[level] += 2;
@@ -2966,30 +2963,17 @@ static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
 static void ilk_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 				       u16 wm[5])
 {
-	int level, num_levels = ilk_wm_max_level(dev_priv) + 1;
+	int level, num_levels = dev_priv->wm.num_levels;
 
 	/* convert .5 usec to .1 usec units */
 	for (level = 1; level < num_levels; level++)
 		wm[level] *= 5;
 }
 
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
-{
-	/* how many WM levels are we expecting */
-	if (INTEL_GEN(dev_priv) >= 9)
-		return 7;
-	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		return 4;
-	else if (INTEL_GEN(dev_priv) >= 6)
-		return 3;
-	else
-		return 2;
-}
-
 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 				    u16 wm[5], u16 min)
 {
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, num_levels = dev_priv->wm.num_levels;
 
 	if (wm[0] >= min)
 		return false;
@@ -2998,7 +2982,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 
 	/* WM1+ latencies must be multiples of .5 usec */
 	min = roundup(min, 5);
-	for (level = 1; level <= max_level; level++)
+	for (level = 1; level < num_levels; level++)
 		wm[level] = max(wm[level], min);
 
 	return true;
@@ -3027,6 +3011,13 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
 
 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		dev_priv->wm.num_levels = 5;
+	else if (INTEL_GEN(dev_priv) >= 6)
+		dev_priv->wm.num_levels = 4;
+	else
+		dev_priv->wm.num_levels = 3;
+
 	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 
 	ilk_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
@@ -3050,7 +3041,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 				       u16 wm[8])
 {
-	int level, num_levels = ilk_wm_max_level(dev_priv) + 1;
+	int level, num_levels = dev_priv->wm.num_levels;
 
 	/* convert usec to .1 usec units */
 	for (level = 0; level < num_levels; level++)
@@ -3059,6 +3050,8 @@ static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
+	dev_priv->wm.num_levels = 8;
+
 	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 
 	skl_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
@@ -3109,7 +3102,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	const struct intel_plane_state *pristate = NULL;
 	const struct intel_plane_state *sprstate = NULL;
 	const struct intel_plane_state *curstate = NULL;
-	int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
+	int level, num_levels = dev_priv->wm.num_levels, usable_level;
 	struct ilk_wm_maximums max;
 
 	pipe_wm = &cstate->wm.ilk.optimal;
@@ -3133,7 +3126,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 			 drm_rect_height(&sprstate->base.dst) != drm_rect_height(&sprstate->base.src) >> 16);
 	}
 
-	usable_level = max_level;
+	usable_level = num_levels - 1;
 
 	/* ILK/SNB: LP2+ watermarks only w/o sprites */
 	if (INTEL_GEN(dev_priv) <= 6 && pipe_wm->sprites_enabled)
@@ -3184,13 +3177,14 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
 				       struct intel_crtc *intel_crtc,
 				       struct intel_crtc_state *newstate)
 {
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
 	struct intel_atomic_state *intel_state =
 		to_intel_atomic_state(newstate->base.state);
 	const struct intel_crtc_state *oldstate =
 		intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
 	const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
-	int level, max_level = ilk_wm_max_level(to_i915(dev));
+	int level, num_levels = dev_priv->wm.num_levels;
 
 	/*
 	 * Start with the final, target watermarks, then combine with the
@@ -3205,7 +3199,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
 	a->sprites_enabled |= b->sprites_enabled;
 	a->sprites_scaled |= b->sprites_scaled;
 
-	for (level = 0; level <= max_level; level++) {
+	for (level = 0; level < num_levels; level++) {
 		struct intel_wm_level *a_wm = &a->wm[level];
 		const struct intel_wm_level *b_wm = &b->wm[level];
 
@@ -3277,8 +3271,8 @@ static void ilk_wm_merge(struct drm_device *dev,
 			 struct intel_pipe_wm *merged)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int level, max_level = ilk_wm_max_level(dev_priv);
-	int last_enabled_level = max_level;
+	int level, num_levels = dev_priv->wm.num_levels;
+	int last_enabled_level = num_levels - 1;
 
 	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
 	if ((INTEL_GEN(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
@@ -3289,7 +3283,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 	merged->fbc_wm_enabled = INTEL_GEN(dev_priv) >= 6;
 
 	/* merge each WM1+ level */
-	for (level = 1; level <= max_level; level++) {
+	for (level = 1; level < num_levels; level++) {
 		struct intel_wm_level *wm = &merged->wm[level];
 
 		ilk_merge_wm_level(dev, level, wm);
@@ -3319,7 +3313,7 @@ static void ilk_wm_merge(struct drm_device *dev,
 	 */
 	if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
 	    intel_fbc_is_active(dev_priv)) {
-		for (level = 2; level <= max_level; level++) {
+		for (level = 2; level < num_levels; level++) {
 			struct intel_wm_level *wm = &merged->wm[level];
 
 			wm->enable = false;
@@ -3419,10 +3413,11 @@ static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
 						  struct intel_pipe_wm *r1,
 						  struct intel_pipe_wm *r2)
 {
-	int level, max_level = ilk_wm_max_level(to_i915(dev));
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	int level, num_levels = dev_priv->wm.num_levels;
 	int level1 = 0, level2 = 0;
 
-	for (level = 1; level <= max_level; level++) {
+	for (level = 1; level < num_levels; level++) {
 		if (r1->wm[level].enable)
 			level1 = level;
 		if (r2->wm[level].enable)
@@ -3798,7 +3793,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
 			continue;
 
 		/* Find the highest enabled wm level for this plane */
-		for (level = ilk_wm_max_level(dev_priv);
+		for (level = dev_priv->wm.num_levels - 1;
 		     !wm->wm[level].plane_en; --level)
 		     { }
 
@@ -4900,14 +4895,14 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 		      struct skl_plane_wm *wm,
 		      struct skl_wm_level *levels)
 {
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, num_levels = dev_priv->wm.num_levels;
 	struct skl_wm_level *result_prev = &levels[0];
 	int ret;
 
 	if (WARN_ON(!intel_pstate->base.fb))
 		return -EINVAL;
 
-	for (level = 0; level <= max_level; level++) {
+	for (level = 0; level < num_levels; level++) {
 		struct skl_wm_level *result = &levels[level];
 
 		ret = skl_compute_plane_wm(dev_priv,
@@ -5181,10 +5176,10 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, num_levels = dev_priv->wm.num_levels;
 	enum pipe pipe = intel_crtc->pipe;
 
-	for (level = 0; level <= max_level; level++) {
+	for (level = 0; level < num_levels; level++) {
 		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
 				   &wm->wm[level]);
 	}
@@ -5212,10 +5207,10 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
 	struct drm_crtc *crtc = &intel_crtc->base;
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	int level, max_level = ilk_wm_max_level(dev_priv);
+	int level, num_levels = dev_priv->wm.num_levels;
 	enum pipe pipe = intel_crtc->pipe;
 
-	for (level = 0; level <= max_level; level++) {
+	for (level = 0; level < num_levels; level++) {
 		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
 				   &wm->wm[level]);
 	}
@@ -5691,16 +5686,14 @@ void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
-	int level, max_level;
+	int level, num_levels = dev_priv->wm.num_levels;
 	enum plane_id plane_id;
 	uint32_t val;
 
-	max_level = ilk_wm_max_level(dev_priv);
-
 	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
 		struct skl_plane_wm *wm = &out->planes[plane_id];
 
-		for (level = 0; level <= max_level; level++) {
+		for (level = 0; level < num_levels; level++) {
 			if (plane_id != PLANE_CURSOR)
 				val = I915_READ(PLANE_WM(pipe, plane_id, level));
 			else
@@ -5794,14 +5787,14 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
 		active->linetime = hw->wm_linetime[pipe];
 	} else {
-		int level, max_level = ilk_wm_max_level(dev_priv);
+		int level, num_levels = dev_priv->wm.num_levels;
 
 		/*
 		 * For inactive pipes, all watermark levels
 		 * should be marked as enabled but zeroed,
 		 * which is what we'd compute them to.
 		 */
-		for (level = 0; level <= max_level; level++)
+		for (level = 0; level < num_levels; level++)
 			active->wm[level].enable = true;
 	}
 
@@ -6092,7 +6085,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
 			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
 			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
 				      "assuming DDR DVFS is disabled\n");
-			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
+			dev_priv->wm.num_levels = VLV_WM_LEVEL_PM5 + 1;
 		} else {
 			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
 			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff (rev3)
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (15 preceding siblings ...)
  2018-10-26 18:17 ` ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff (rev2) Patchwork
@ 2018-10-26 18:37 ` Patchwork
  2018-10-26 19:01 ` ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff (rev4) Patchwork
                   ` (4 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Patchwork @ 2018-10-26 18:37 UTC (permalink / raw)
  To: Ville Syrjala; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Clean up the wm mem latency stuff (rev3)
URL   : https://patchwork.freedesktop.org/series/50802/
State : failure

== Summary ==

Applying: drm/i915: Store all wm memory latency values in .1 usec units
Applying: drm/i915: Use the spr/cur latencies on vlv/chv/g4x
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/i915_debugfs.c
M	drivers/gpu/drm/i915/intel_pm.c
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/i915/intel_pm.c
Auto-merging drivers/gpu/drm/i915/i915_debugfs.c
Applying: drm/i915: Eliminate skl_latency[]
Applying: drm/i915: Add dev_priv->wm.num_levels and use it everywhere
Applying: drm/i915: Add DEFINE_SNPRINTF_ARRAY()
Applying: drm/i915: Make the WM memory latency print more compact
Applying: drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code
Applying: drm/i915: Split skl+ and ilk+ read_wm_latency()
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/intel_pm.c
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/i915/intel_pm.c
CONFLICT (content): Merge conflict in drivers/gpu/drm/i915/intel_pm.c
error: Failed to merge in the changes.
Patch failed at 0008 drm/i915: Split skl+ and ilk+ read_wm_latency()
Use 'git am --show-current-patch' to see the failed patch
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH v2 08/12] drm/i915: Split skl+ and ilk+ read_wm_latency()
  2018-10-10 13:04 ` [PATCH 08/12] drm/i915: Split skl+ and ilk+ read_wm_latency() Ville Syrjala
@ 2018-10-26 18:45   ` Ville Syrjala
  0 siblings, 0 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-26 18:45 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

There's no point it having the skl+ and ilk+ codepaths for reading
the wm latency values in the same function. Split them apart.

v2: <usec> * 10

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 195 +++++++++++++++++---------------
 1 file changed, 103 insertions(+), 92 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 74ac9c98e6d0..8820c59c56e4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2825,94 +2825,10 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 	       PIPE_WM_LINETIME_TIME(linetime);
 }
 
-static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
-				  uint16_t wm[8])
+static void ilk_read_wm_latency(struct drm_i915_private *dev_priv,
+				u16 wm[5])
 {
-	if (INTEL_GEN(dev_priv) >= 9) {
-		uint32_t val;
-		int ret, i;
-		int level, num_levels = dev_priv->wm.num_levels;
-
-		/* read the first set of memory latencies[0:3] */
-		val = 0; /* data0 to be programmed to 0 for first set */
-		mutex_lock(&dev_priv->pcu_lock);
-		ret = sandybridge_pcode_read(dev_priv,
-					     GEN9_PCODE_READ_MEM_LATENCY,
-					     &val);
-		mutex_unlock(&dev_priv->pcu_lock);
-
-		if (ret) {
-			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
-			return;
-		}
-
-		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-
-		/* read the second set of memory latencies[4:7] */
-		val = 1; /* data0 to be programmed to 1 for second set */
-		mutex_lock(&dev_priv->pcu_lock);
-		ret = sandybridge_pcode_read(dev_priv,
-					     GEN9_PCODE_READ_MEM_LATENCY,
-					     &val);
-		mutex_unlock(&dev_priv->pcu_lock);
-		if (ret) {
-			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
-			return;
-		}
-
-		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
-				GEN9_MEM_LATENCY_LEVEL_MASK;
-
-		/*
-		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
-		 * need to be disabled. We make sure to sanitize the values out
-		 * of the punit to satisfy this requirement.
-		 */
-		for (level = 1; level < num_levels; level++) {
-			if (wm[level] == 0) {
-				for (i = level + 1; i < num_levels; i++)
-					wm[i] = 0;
-				break;
-			}
-		}
-
-		/*
-		 * WaWmMemoryReadLatency:skl+,glk
-		 *
-		 * punit doesn't take into account the read latency so we need
-		 * to add 2us to the various latency levels we retrieve from the
-		 * punit when level 0 response data us 0us.
-		 */
-		if (wm[0] == 0) {
-			wm[0] += 2;
-			for (level = 1; level < num_levels; level++) {
-				if (wm[level] == 0)
-					break;
-				wm[level] += 2;
-			}
-		}
-
-		/*
-		 * WA Level-0 adjustment for 16GB DIMMs: SKL+
-		 * If we could not get dimm info enable this WA to prevent from
-		 * any underrun. If not able to get Dimm info assume 16GB dimm
-		 * to avoid any underrun.
-		 */
-		if (dev_priv->dram_info.is_16gb_dimm)
-			wm[0] += 1;
-
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		uint64_t sskpd = I915_READ64(MCH_SSKPD);
 
 		wm[0] = (sskpd >> 56) & 0xFF;
@@ -2929,7 +2845,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
 		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
 		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
-	} else if (INTEL_GEN(dev_priv) >= 5) {
+	} else if (IS_GEN5(dev_priv)) {
 		uint32_t mltr = I915_READ(MLTR_ILK);
 
 		/* ILK primary LP0 latency is 700 ns */
@@ -3007,8 +2923,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 	else
 		dev_priv->wm.num_levels = 3;
 
-	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
-
+	ilk_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 	ilk_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
 
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
@@ -3027,6 +2942,68 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 		snb_wm_latency_quirk(dev_priv);
 }
 
+static void skl_read_wm_latency(struct drm_i915_private *dev_priv,
+				u16 wm[8])
+{
+	int level, num_levels = dev_priv->wm.num_levels;
+	int ret, i;
+	u32 val;
+
+	/* read the first set of memory latencies[0:3] */
+	val = 0; /* data0 to be programmed to 0 for first set */
+	mutex_lock(&dev_priv->pcu_lock);
+	ret = sandybridge_pcode_read(dev_priv,
+				     GEN9_PCODE_READ_MEM_LATENCY,
+				     &val);
+	mutex_unlock(&dev_priv->pcu_lock);
+
+	if (ret) {
+		DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+		return;
+	}
+
+	wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+
+	/* read the second set of memory latencies[4:7] */
+	val = 1; /* data0 to be programmed to 1 for second set */
+	mutex_lock(&dev_priv->pcu_lock);
+	ret = sandybridge_pcode_read(dev_priv,
+				     GEN9_PCODE_READ_MEM_LATENCY,
+				     &val);
+	mutex_unlock(&dev_priv->pcu_lock);
+	if (ret) {
+		DRM_ERROR("SKL Mailbox read error = %d\n", ret);
+		return;
+	}
+
+	wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+	wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+		GEN9_MEM_LATENCY_LEVEL_MASK;
+
+	/*
+	 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
+	 * need to be disabled. We make sure to sanitize the values out
+	 * of the punit to satisfy this requirement.
+	 */
+	for (level = 1; level < num_levels; level++) {
+		if (wm[level] == 0) {
+			for (i = level + 1; i < num_levels; i++)
+				wm[i] = 0;
+			break;
+		}
+	}
+}
+
 static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 				       u16 wm[8])
 {
@@ -3037,14 +3014,48 @@ static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
 		wm[level] *= 10;
 }
 
+static void skl_wm_latency_wa(struct drm_i915_private *dev_priv,
+			      u16 wm[8])
+{
+	/*
+	 * WaWmMemoryReadLatency:skl,glk
+	 *
+	 * punit doesn't take into account the read latency so we need
+	 * to add 2us to the various latency levels we retrieve from the
+	 * punit when level 0 response data us 0us.
+	 */
+	if (wm[0] == 0) {
+		int level, num_levels = dev_priv->wm.num_levels;
+
+		wm[0] += 2 * 10;
+
+		for (level = 1; level < num_levels; level++) {
+			if (wm[level] == 0)
+				break;
+
+			wm[level] += 2 * 10;
+		}
+	}
+
+	/*
+	 * WA Level-0 adjustment for 16GB DIMMs: SKL+
+	 * If we could not get dimm info enable this WA to prevent from
+	 * any underrun. If not able to get Dimm info assume 16GB dimm
+	 * to avoid any underrun.
+	 */
+	if (dev_priv->dram_info.is_16gb_dimm)
+		wm[0] += 1 * 10;
+}
+
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
 	dev_priv->wm.num_levels = 8;
 
-	intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
-
+	skl_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
 	skl_fixup_wm_latency_units(dev_priv, dev_priv->wm.pri_latency);
 
+	skl_wm_latency_wa(dev_priv, dev_priv->wm.pri_latency);
+
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff (rev4)
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (16 preceding siblings ...)
  2018-10-26 18:37 ` ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff (rev3) Patchwork
@ 2018-10-26 19:01 ` Patchwork
  2018-10-26 19:29 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Clean up the wm mem latency stuff (rev5) Patchwork
                   ` (3 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Patchwork @ 2018-10-26 19:01 UTC (permalink / raw)
  To: Ville Syrjala; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Clean up the wm mem latency stuff (rev4)
URL   : https://patchwork.freedesktop.org/series/50802/
State : failure

== Summary ==

Applying: drm/i915: Store all wm memory latency values in .1 usec units
Applying: drm/i915: Use the spr/cur latencies on vlv/chv/g4x
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/i915_debugfs.c
M	drivers/gpu/drm/i915/intel_pm.c
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/i915/intel_pm.c
Auto-merging drivers/gpu/drm/i915/i915_debugfs.c
Applying: drm/i915: Eliminate skl_latency[]
Applying: drm/i915: Add dev_priv->wm.num_levels and use it everywhere
Applying: drm/i915: Add DEFINE_SNPRINTF_ARRAY()
Applying: drm/i915: Make the WM memory latency print more compact
Applying: drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code
Applying: drm/i915: Split skl+ and ilk+ read_wm_latency()
Applying: drm/i915: Sanitize wm latency values for ilk+
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/intel_pm.c
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/i915/intel_pm.c
CONFLICT (content): Merge conflict in drivers/gpu/drm/i915/intel_pm.c
error: Failed to merge in the changes.
Patch failed at 0009 drm/i915: Sanitize wm latency values for ilk+
Use 'git am --show-current-patch' to see the failed patch
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* [PATCH v2 09/12] drm/i915: Sanitize wm latency values for ilk+
  2018-10-10 13:04 ` [PATCH 09/12] drm/i915: Sanitize wm latency values for ilk+ Ville Syrjala
@ 2018-10-26 19:11   ` Ville Syrjala
  0 siblings, 0 replies; 32+ messages in thread
From: Ville Syrjala @ 2018-10-26 19:11 UTC (permalink / raw)
  To: intel-gfx

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

For skl+ we disable all wm levels with a decreasing memory latency
value. Let's generalize the same code to work for all platforms,
and let's use it for ilk-bdw as well since those platforms also
read the latency values from a scratch register.

v2: n*10 usec, rebase

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 57 ++++++++++++++++++++++++---------
 1 file changed, 42 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8820c59c56e4..c72d3fd71b6f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2825,6 +2825,41 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 	       PIPE_WM_LINETIME_TIME(linetime);
 }
 
+static void intel_sanitize_wm_latency(struct drm_i915_private *dev_priv,
+				      u16 *wm)
+{
+	int level, num_levels = dev_priv->wm.num_levels;
+
+	/*
+	 * If we don't have WM0 latency, assume
+	 * 5 usec and disable all WM1+ levels.
+	 * 5 usec seems like a safe(ish) fallback value.
+	 */
+	if (WARN(wm[0] == 0, "WM0 memory latency value is zero")) {
+		intel_print_wm_latency(dev_priv, "Bad", wm);
+
+		wm[0] = 5 * 10;
+
+		for (level = 1; level < num_levels; level++)
+			wm[level] = 0;
+		return;
+	}
+
+	/* Make sure the latencies are non-decreasing */
+	for (level = 1; level < num_levels; level++) {
+		if (wm[level] >= wm[level - 1])
+			continue;
+
+		if (WARN(wm[level] != 0,
+			 "Decreasing WM memory latency value(s)"))
+			intel_print_wm_latency(dev_priv, "Bad", wm);
+
+		for (; level < num_levels; level++)
+			wm[level] = 0;
+		break;
+	}
+}
+
 static void ilk_read_wm_latency(struct drm_i915_private *dev_priv,
 				u16 wm[5])
 {
@@ -2934,6 +2969,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 	ilk_fixup_spr_cur_wm_latency(dev_priv, dev_priv->wm.spr_latency);
 	ilk_fixup_spr_cur_wm_latency(dev_priv, dev_priv->wm.cur_latency);
 
+	intel_sanitize_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+	intel_sanitize_wm_latency(dev_priv, dev_priv->wm.spr_latency);
+	intel_sanitize_wm_latency(dev_priv, dev_priv->wm.cur_latency);
+
 	intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
 	intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
 	intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
@@ -2945,8 +2984,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 static void skl_read_wm_latency(struct drm_i915_private *dev_priv,
 				u16 wm[8])
 {
-	int level, num_levels = dev_priv->wm.num_levels;
-	int ret, i;
+	int ret;
 	u32 val;
 
 	/* read the first set of memory latencies[0:3] */
@@ -2989,19 +3027,6 @@ static void skl_read_wm_latency(struct drm_i915_private *dev_priv,
 		GEN9_MEM_LATENCY_LEVEL_MASK;
 	wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
 		GEN9_MEM_LATENCY_LEVEL_MASK;
-
-	/*
-	 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
-	 * need to be disabled. We make sure to sanitize the values out
-	 * of the punit to satisfy this requirement.
-	 */
-	for (level = 1; level < num_levels; level++) {
-		if (wm[level] == 0) {
-			for (i = level + 1; i < num_levels; i++)
-				wm[i] = 0;
-			break;
-		}
-	}
 }
 
 static void skl_fixup_wm_latency_units(struct drm_i915_private *dev_priv,
@@ -3056,6 +3081,8 @@ static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
 
 	skl_wm_latency_wa(dev_priv, dev_priv->wm.pri_latency);
 
+	intel_sanitize_wm_latency(dev_priv, dev_priv->wm.pri_latency);
+
 	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
 	       sizeof(dev_priv->wm.pri_latency));
 	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
-- 
2.18.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 32+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Clean up the wm mem latency stuff (rev5)
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (17 preceding siblings ...)
  2018-10-26 19:01 ` ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff (rev4) Patchwork
@ 2018-10-26 19:29 ` Patchwork
  2018-10-26 19:34 ` ✗ Fi.CI.SPARSE: " Patchwork
                   ` (2 subsequent siblings)
  21 siblings, 0 replies; 32+ messages in thread
From: Patchwork @ 2018-10-26 19:29 UTC (permalink / raw)
  To: Ville Syrjala; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Clean up the wm mem latency stuff (rev5)
URL   : https://patchwork.freedesktop.org/series/50802/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
220f3eac5592 drm/i915: Store all wm memory latency values in .1 usec units
488b0f8bd38a drm/i915: Use the spr/cur latencies on vlv/chv/g4x
85fb6da04007 drm/i915: Eliminate skl_latency[]
d203e7e82c7a drm/i915: Add dev_priv->wm.num_levels and use it everywhere
1d35686d5190 drm/i915: Add DEFINE_SNPRINTF_ARRAY()
-:23: CHECK:MACRO_ARG_PRECEDENCE: Macro argument 'values' may be better as '(values)' to avoid precedence issues
#23: FILE: drivers/gpu/drm/i915/i915_utils.h:164:
+#define DEFINE_SNPRINTF_ARRAY(name, type, values, index, fmt, ...) \
+void name(char *_str, size_t _len, const type *values, int _nelems) \
+{ \
+	int index; \
+	if (_len) \
+		_str[0] = '\0'; \
+	for (index = 0; index < _nelems; index++) { \
+		int _r = snprintf(_str, _len, "%s" fmt, \
+				  index ? ", " : "", __VA_ARGS__); \
+		if (_r >= _len) \
+			return; \
+		_str += _r; \
+		_len -= _r; \
+	} \
+}

-:23: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'index' - possible side-effects?
#23: FILE: drivers/gpu/drm/i915/i915_utils.h:164:
+#define DEFINE_SNPRINTF_ARRAY(name, type, values, index, fmt, ...) \
+void name(char *_str, size_t _len, const type *values, int _nelems) \
+{ \
+	int index; \
+	if (_len) \
+		_str[0] = '\0'; \
+	for (index = 0; index < _nelems; index++) { \
+		int _r = snprintf(_str, _len, "%s" fmt, \
+				  index ? ", " : "", __VA_ARGS__); \
+		if (_r >= _len) \
+			return; \
+		_str += _r; \
+		_len -= _r; \
+	} \
+}

-:23: WARNING:MACRO_WITH_FLOW_CONTROL: Macros with flow control statements should be avoided
#23: FILE: drivers/gpu/drm/i915/i915_utils.h:164:
+#define DEFINE_SNPRINTF_ARRAY(name, type, values, index, fmt, ...) \
+void name(char *_str, size_t _len, const type *values, int _nelems) \
+{ \
+	int index; \
+	if (_len) \
+		_str[0] = '\0'; \
+	for (index = 0; index < _nelems; index++) { \
+		int _r = snprintf(_str, _len, "%s" fmt, \
+				  index ? ", " : "", __VA_ARGS__); \
+		if (_r >= _len) \
+			return; \
+		_str += _r; \
+		_len -= _r; \
+	} \
+}

-:24: CHECK:SPACING: spaces preferred around that '*' (ctx:WxV)
#24: FILE: drivers/gpu/drm/i915/i915_utils.h:165:
+void name(char *_str, size_t _len, const type *values, int _nelems) \
                                               ^

total: 0 errors, 1 warnings, 3 checks, 43 lines checked
77b004210eef drm/i915: Make the WM memory latency print more compact
6f3fc5d3ba43 drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code
10560badd17a drm/i915: Split skl+ and ilk+ read_wm_latency()
9ccb87dce343 drm/i915: Sanitize wm latency values for ilk+
6e843686038c drm/i915: Drop the funky ilk wm setup
d470fc61e2ab drm/i915: Allow LP3 watermarks on ILK
d0ae9981700d drm/i915: Remove the remnants of the ilk+ LP0 wm hack

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* ✗ Fi.CI.SPARSE: warning for drm/i915: Clean up the wm mem latency stuff (rev5)
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (18 preceding siblings ...)
  2018-10-26 19:29 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Clean up the wm mem latency stuff (rev5) Patchwork
@ 2018-10-26 19:34 ` Patchwork
  2018-10-26 19:47 ` ✓ Fi.CI.BAT: success " Patchwork
  2018-10-27  5:17 ` ✓ Fi.CI.IGT: " Patchwork
  21 siblings, 0 replies; 32+ messages in thread
From: Patchwork @ 2018-10-26 19:34 UTC (permalink / raw)
  To: Ville Syrjala; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Clean up the wm mem latency stuff (rev5)
URL   : https://patchwork.freedesktop.org/series/50802/
State : warning

== Summary ==

$ dim sparse origin/drm-tip
Sparse version: v0.5.2
Commit: drm/i915: Store all wm memory latency values in .1 usec units
-O:drivers/gpu/drm/i915/intel_pm.c:1636:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:1656:16: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2511:16: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2511:16: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2535:16: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2535:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2528:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2528:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2550:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2550:16: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2983:17: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2983:17: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2985:29: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2985:29: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2990:29: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2990:29: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6733:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6733:35: warning: expression using sizeof(void)
-drivers/gpu/drm/i915/selftests/../i915_drv.h:3707:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/selftests/../i915_drv.h:3695:16: warning: expression using sizeof(void)

Commit: drm/i915: Use the spr/cur latencies on vlv/chv/g4x
Okay!

Commit: drm/i915: Eliminate skl_latency[]
Okay!

Commit: drm/i915: Add dev_priv->wm.num_levels and use it everywhere
-O:drivers/gpu/drm/i915/intel_pm.c:1224:17: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:1219:17: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:3002:29: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:3002:29: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2986:29: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:2986:29: warning: expression using sizeof(void)

Commit: drm/i915: Add DEFINE_SNPRINTF_ARRAY()
Okay!

Commit: drm/i915: Make the WM memory latency print more compact
Okay!

Commit: drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code
Okay!

Commit: drm/i915: Split skl+ and ilk+ read_wm_latency()
Okay!

Commit: drm/i915: Sanitize wm latency values for ilk+
Okay!

Commit: drm/i915: Drop the funky ilk wm setup
Okay!

Commit: drm/i915: Allow LP3 watermarks on ILK
Okay!

Commit: drm/i915: Remove the remnants of the ilk+ LP0 wm hack
-O:drivers/gpu/drm/i915/intel_pm.c:2777:35: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2777:35: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2778:35: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2778:35: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2779:35: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/intel_pm.c:2779:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6753:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6753:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6753:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6753:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6753:35: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/intel_pm.c:6753:35: warning: expression using sizeof(void)

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* ✓ Fi.CI.BAT: success for drm/i915: Clean up the wm mem latency stuff (rev5)
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (19 preceding siblings ...)
  2018-10-26 19:34 ` ✗ Fi.CI.SPARSE: " Patchwork
@ 2018-10-26 19:47 ` Patchwork
  2018-10-27  5:17 ` ✓ Fi.CI.IGT: " Patchwork
  21 siblings, 0 replies; 32+ messages in thread
From: Patchwork @ 2018-10-26 19:47 UTC (permalink / raw)
  To: Ville Syrjala; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Clean up the wm mem latency stuff (rev5)
URL   : https://patchwork.freedesktop.org/series/50802/
State : success

== Summary ==

= CI Bug Log - changes from CI_DRM_5044 -> Patchwork_10610 =

== Summary - SUCCESS ==

  No regressions found.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/50802/revisions/5/mbox/

== Known issues ==

  Here are the changes found in Patchwork_10610 that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@kms_flip@basic-flip-vs-wf_vblank:
      fi-glk-j4005:       PASS -> FAIL (fdo#100368)

    igt@kms_flip@basic-plain-flip:
      fi-glk-j4005:       PASS -> DMESG-WARN (fdo#106097)

    
    ==== Possible fixes ====

    igt@gem_exec_suspend@basic-s3:
      fi-blb-e6850:       INCOMPLETE (fdo#107718) -> PASS

    igt@gem_sync@basic-each:
      fi-glk-j4005:       DMESG-WARN (fdo#105719) -> PASS

    igt@kms_flip@basic-flip-vs-modeset:
      fi-glk-j4005:       DMESG-WARN (fdo#106000) -> PASS

    igt@kms_pipe_crc_basic@read-crc-pipe-b-frame-sequence:
      fi-byt-clapper:     FAIL (fdo#107362, fdo#103191) -> PASS

    igt@kms_pipe_crc_basic@read-crc-pipe-c-frame-sequence:
      fi-glk-j4005:       DMESG-FAIL (fdo#106000) -> PASS

    
  fdo#100368 https://bugs.freedesktop.org/show_bug.cgi?id=100368
  fdo#103191 https://bugs.freedesktop.org/show_bug.cgi?id=103191
  fdo#105719 https://bugs.freedesktop.org/show_bug.cgi?id=105719
  fdo#106000 https://bugs.freedesktop.org/show_bug.cgi?id=106000
  fdo#106097 https://bugs.freedesktop.org/show_bug.cgi?id=106097
  fdo#107362 https://bugs.freedesktop.org/show_bug.cgi?id=107362
  fdo#107718 https://bugs.freedesktop.org/show_bug.cgi?id=107718


== Participating hosts (47 -> 43) ==

  Additional (1): fi-gdg-551 
  Missing    (5): fi-ilk-m540 fi-hsw-4200u fi-bsw-cyan fi-ctg-p8600 fi-icl-u 


== Build changes ==

    * Linux: CI_DRM_5044 -> Patchwork_10610

  CI_DRM_5044: c4487dca27970879bf67f331614142c749984d65 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4698: af57164fcb16950187ad402ed31f565e88c42a78 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_10610: d0ae9981700dd232e2e7bd08389fd8fc4343a343 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

d0ae9981700d drm/i915: Remove the remnants of the ilk+ LP0 wm hack
d470fc61e2ab drm/i915: Allow LP3 watermarks on ILK
6e843686038c drm/i915: Drop the funky ilk wm setup
9ccb87dce343 drm/i915: Sanitize wm latency values for ilk+
10560badd17a drm/i915: Split skl+ and ilk+ read_wm_latency()
6f3fc5d3ba43 drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code
77b004210eef drm/i915: Make the WM memory latency print more compact
1d35686d5190 drm/i915: Add DEFINE_SNPRINTF_ARRAY()
d203e7e82c7a drm/i915: Add dev_priv->wm.num_levels and use it everywhere
85fb6da04007 drm/i915: Eliminate skl_latency[]
488b0f8bd38a drm/i915: Use the spr/cur latencies on vlv/chv/g4x
220f3eac5592 drm/i915: Store all wm memory latency values in .1 usec units

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_10610/issues.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

* ✓ Fi.CI.IGT: success for drm/i915: Clean up the wm mem latency stuff (rev5)
  2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
                   ` (20 preceding siblings ...)
  2018-10-26 19:47 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2018-10-27  5:17 ` Patchwork
  21 siblings, 0 replies; 32+ messages in thread
From: Patchwork @ 2018-10-27  5:17 UTC (permalink / raw)
  To: Ville Syrjala; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Clean up the wm mem latency stuff (rev5)
URL   : https://patchwork.freedesktop.org/series/50802/
State : success

== Summary ==

= CI Bug Log - changes from CI_DRM_5044_full -> Patchwork_10610_full =

== Summary - WARNING ==

  Minor unknown changes coming with Patchwork_10610_full need to be verified
  manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_10610_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

== Possible new issues ==

  Here are the unknown changes that may have been introduced in Patchwork_10610_full:

  === IGT changes ===

    ==== Warnings ====

    igt@pm_rc6_residency@rc6-accuracy:
      shard-snb:          SKIP -> PASS

    
== Known issues ==

  Here are the changes found in Patchwork_10610_full that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@debugfs_test@emon_crash:
      shard-hsw:          PASS -> DMESG-WARN (fdo#102614)

    igt@gem_ctx_isolation@bcs0-s3:
      shard-kbl:          PASS -> INCOMPLETE (fdo#103665)

    igt@gem_exec_schedule@pi-ringfull-blt:
      shard-skl:          NOTRUN -> FAIL (fdo#103158) +1

    igt@kms_busy@extended-modeset-hang-newfb-with-reset-render-c:
      shard-skl:          NOTRUN -> DMESG-WARN (fdo#107956) +1

    igt@kms_ccs@pipe-a-crc-primary-basic:
      shard-skl:          NOTRUN -> FAIL (fdo#107725)

    igt@kms_cursor_crc@cursor-256x85-random:
      shard-apl:          PASS -> FAIL (fdo#103232)

    igt@kms_cursor_crc@cursor-64x21-offscreen:
      shard-skl:          NOTRUN -> FAIL (fdo#103232)

    igt@kms_flip@2x-flip-vs-wf_vblank-interruptible:
      shard-hsw:          PASS -> DMESG-FAIL (fdo#102614)

    igt@kms_flip@dpms-vs-vblank-race-interruptible:
      shard-apl:          PASS -> DMESG-WARN (fdo#108549) +11

    igt@kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-pwrite:
      shard-glk:          PASS -> FAIL (fdo#103167) +1

    igt@kms_frontbuffer_tracking@fbcpsr-rgb565-draw-pwrite:
      shard-snb:          SKIP -> INCOMPLETE (fdo#105411)

    igt@kms_plane@pixel-format-pipe-c-planes:
      shard-kbl:          NOTRUN -> FAIL (fdo#103166)

    igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes:
      shard-skl:          NOTRUN -> INCOMPLETE (fdo#107773, fdo#104108)

    igt@kms_plane@plane-position-covered-pipe-a-planes:
      shard-apl:          PASS -> FAIL (fdo#103166)

    igt@kms_plane_alpha_blend@pipe-a-alpha-7efc:
      shard-skl:          NOTRUN -> FAIL (fdo#107815, fdo#108145) +1

    igt@kms_plane_alpha_blend@pipe-a-alpha-opaque-fb:
      shard-skl:          NOTRUN -> FAIL (fdo#108145) +3

    igt@kms_plane_lowres@pipe-c-tiling-y:
      shard-kbl:          PASS -> DMESG-WARN (fdo#105345)

    igt@kms_plane_multiple@atomic-pipe-c-tiling-y:
      shard-skl:          NOTRUN -> FAIL (fdo#107815, fdo#103166)

    igt@kms_setmode@basic:
      shard-kbl:          PASS -> FAIL (fdo#99912)

    igt@kms_vblank@pipe-c-ts-continuation-dpms-suspend:
      shard-apl:          PASS -> DMESG-FAIL (fdo#108549)

    
    ==== Possible fixes ====

    igt@gem_workarounds@suspend-resume-context:
      shard-kbl:          INCOMPLETE (fdo#103665) -> PASS

    igt@kms_ccs@pipe-b-crc-sprite-planes-basic:
      shard-glk:          FAIL (fdo#108145) -> PASS

    igt@kms_color@pipe-b-ctm-max:
      shard-apl:          DMESG-WARN (fdo#108549) -> PASS +14

    igt@kms_cursor_crc@cursor-256x85-onscreen:
      shard-glk:          FAIL (fdo#103232) -> PASS

    igt@kms_cursor_crc@cursor-64x64-onscreen:
      shard-apl:          FAIL (fdo#103232) -> PASS

    igt@kms_cursor_legacy@cursora-vs-flipa-toggle:
      shard-glk:          DMESG-WARN (fdo#106538, fdo#105763) -> PASS

    igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-pwrite:
      shard-apl:          FAIL (fdo#103167) -> PASS +1

    igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-fullscreen:
      shard-glk:          FAIL (fdo#103167) -> PASS

    igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes:
      shard-apl:          FAIL (fdo#103375) -> PASS

    igt@kms_plane@plane-panning-top-left-pipe-b-planes:
      shard-apl:          INCOMPLETE (fdo#103927) -> PASS

    igt@kms_plane_multiple@atomic-pipe-b-tiling-yf:
      shard-apl:          FAIL (fdo#103166) -> PASS

    igt@kms_vblank@pipe-b-ts-continuation-dpms-suspend:
      shard-snb:          DMESG-WARN (fdo#102365) -> PASS

    igt@kms_vblank@pipe-c-ts-continuation-dpms-rpm:
      shard-apl:          DMESG-FAIL (fdo#108549) -> PASS

    
    ==== Warnings ====

    igt@kms_cursor_crc@cursor-64x21-sliding:
      shard-apl:          FAIL (fdo#103232) -> DMESG-WARN (fdo#108549)

    igt@kms_cursor_crc@cursor-64x64-sliding:
      shard-apl:          DMESG-FAIL (fdo#103232, fdo#108549) -> DMESG-WARN (fdo#108549)

    igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-mmap-cpu:
      shard-glk:          DMESG-WARN (fdo#106538, fdo#105763) -> FAIL (fdo#103167)

    
  fdo#102365 https://bugs.freedesktop.org/show_bug.cgi?id=102365
  fdo#102614 https://bugs.freedesktop.org/show_bug.cgi?id=102614
  fdo#103158 https://bugs.freedesktop.org/show_bug.cgi?id=103158
  fdo#103166 https://bugs.freedesktop.org/show_bug.cgi?id=103166
  fdo#103167 https://bugs.freedesktop.org/show_bug.cgi?id=103167
  fdo#103232 https://bugs.freedesktop.org/show_bug.cgi?id=103232
  fdo#103375 https://bugs.freedesktop.org/show_bug.cgi?id=103375
  fdo#103665 https://bugs.freedesktop.org/show_bug.cgi?id=103665
  fdo#103927 https://bugs.freedesktop.org/show_bug.cgi?id=103927
  fdo#104108 https://bugs.freedesktop.org/show_bug.cgi?id=104108
  fdo#105345 https://bugs.freedesktop.org/show_bug.cgi?id=105345
  fdo#105411 https://bugs.freedesktop.org/show_bug.cgi?id=105411
  fdo#105763 https://bugs.freedesktop.org/show_bug.cgi?id=105763
  fdo#106538 https://bugs.freedesktop.org/show_bug.cgi?id=106538
  fdo#107725 https://bugs.freedesktop.org/show_bug.cgi?id=107725
  fdo#107773 https://bugs.freedesktop.org/show_bug.cgi?id=107773
  fdo#107815 https://bugs.freedesktop.org/show_bug.cgi?id=107815
  fdo#107956 https://bugs.freedesktop.org/show_bug.cgi?id=107956
  fdo#108145 https://bugs.freedesktop.org/show_bug.cgi?id=108145
  fdo#108549 https://bugs.freedesktop.org/show_bug.cgi?id=108549
  fdo#99912 https://bugs.freedesktop.org/show_bug.cgi?id=99912


== Participating hosts (6 -> 6) ==

  No changes in participating hosts


== Build changes ==

    * Linux: CI_DRM_5044 -> Patchwork_10610

  CI_DRM_5044: c4487dca27970879bf67f331614142c749984d65 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4698: af57164fcb16950187ad402ed31f565e88c42a78 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_10610: d0ae9981700dd232e2e7bd08389fd8fc4343a343 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_10610/shards.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 32+ messages in thread

end of thread, other threads:[~2018-10-27  5:17 UTC | newest]

Thread overview: 32+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-10-10 13:04 [PATCH 00/12] drm/i915: Clean up the wm mem latency stuff Ville Syrjala
2018-10-10 13:04 ` [PATCH 01/12] drm/i915: Store all wm memory latency values in .1 usec units Ville Syrjala
2018-10-10 13:12   ` Chris Wilson
2018-10-10 15:35     ` Ville Syrjälä
2018-10-26 18:14   ` [PATCH v2 " Ville Syrjala
2018-10-10 13:04 ` [PATCH 02/12] drm/i915: Use the spr/cur latencies on vlv/chv/g4x Ville Syrjala
2018-10-10 13:04 ` [PATCH 03/12] drm/i915: Eliminate skl_latency[] Ville Syrjala
2018-10-10 13:04 ` [PATCH 04/12] drm/i915: Add dev_priv->wm.num_levels and use it everywhere Ville Syrjala
2018-10-26 18:27   ` [PATCH v2 " Ville Syrjala
2018-10-10 13:04 ` [PATCH 05/12] drm/i915: Add DEFINE_SNPRINTF_ARRAY() Ville Syrjala
2018-10-11 12:14   ` Jani Nikula
2018-10-11 12:47     ` Ville Syrjälä
2018-10-11 16:07       ` Jani Nikula
2018-10-10 13:04 ` [PATCH 06/12] drm/i915: Make the WM memory latency print more compact Ville Syrjala
2018-10-10 13:04 ` [PATCH 07/12] drm/i915: Eliminate redundant ilk sprite/cursor wm fixup code Ville Syrjala
2018-10-10 13:04 ` [PATCH 08/12] drm/i915: Split skl+ and ilk+ read_wm_latency() Ville Syrjala
2018-10-26 18:45   ` [PATCH v2 " Ville Syrjala
2018-10-10 13:04 ` [PATCH 09/12] drm/i915: Sanitize wm latency values for ilk+ Ville Syrjala
2018-10-26 19:11   ` [PATCH v2 " Ville Syrjala
2018-10-10 13:04 ` [PATCH 10/12] drm/i915: Drop the funky ilk wm setup Ville Syrjala
2018-10-10 13:04 ` [PATCH 11/12] drm/i915: Allow LP3 watermarks on ILK Ville Syrjala
2018-10-10 13:04 ` [PATCH 12/12] drm/i915: Remove the remnants of the ilk+ LP0 wm hack Ville Syrjala
2018-10-10 14:34 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Clean up the wm mem latency stuff Patchwork
2018-10-10 14:38 ` ✗ Fi.CI.SPARSE: " Patchwork
2018-10-10 14:50 ` ✗ Fi.CI.BAT: failure " Patchwork
2018-10-26 18:17 ` ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff (rev2) Patchwork
2018-10-26 18:37 ` ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff (rev3) Patchwork
2018-10-26 19:01 ` ✗ Fi.CI.BAT: failure for drm/i915: Clean up the wm mem latency stuff (rev4) Patchwork
2018-10-26 19:29 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Clean up the wm mem latency stuff (rev5) Patchwork
2018-10-26 19:34 ` ✗ Fi.CI.SPARSE: " Patchwork
2018-10-26 19:47 ` ✓ Fi.CI.BAT: success " Patchwork
2018-10-27  5:17 ` ✓ Fi.CI.IGT: " Patchwork

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.