All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v2] drm/i915: Enable second dbuf slice for ICL
@ 2019-10-09  7:39 Stanislav Lisovskiy
  2019-10-09 10:03 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
                   ` (4 more replies)
  0 siblings, 5 replies; 7+ messages in thread
From: Stanislav Lisovskiy @ 2019-10-09  7:39 UTC (permalink / raw)
  To: intel-gfx; +Cc: martin.peres

Also implemented an algorithm for choosing the DBuf slice configuration
based on active pipes and pipe ratio, as stated in BSpec 12716.

Pipe allocation still stays proportional to pipe width as before,
however it is now contained within the DBuf slice(s) allowed for this
particular configuration.

v2: Removed an unneeded check from the commit, as the ddb enabled slices
    might now differ from the hw state.

Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
---
 drivers/gpu/drm/i915/display/intel_display.c |   6 -
 drivers/gpu/drm/i915/intel_pm.c              | 208 ++++++++++++++++++-
 2 files changed, 202 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 1a533ccdb54f..4683731ac1ca 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -12989,12 +12989,6 @@ static void verify_wm_state(struct intel_crtc *crtc,
 	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
 
-	if (INTEL_GEN(dev_priv) >= 11 &&
-	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
-		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
-			  sw_ddb->enabled_slices,
-			  hw->ddb.enabled_slices);
-
 	/* planes */
 	for_each_universal_plane(dev_priv, pipe, plane) {
 		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index bfcf03ab5245..0fbeea61299f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3616,7 +3616,7 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
 	 * only that 1 slice enabled until we have a proper way for on-demand
 	 * toggling of the second slice.
 	 */
-	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
+	if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
 		enabled_slices++;
 
 	return enabled_slices;
@@ -3821,7 +3821,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 	 * - plane straddling both slices is illegal in multi-pipe scenarios
 	 * - should validate we stay within the hw bandwidth limits
 	 */
-	if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
+	if ((num_active > 1 || total_data_bw >= GBps(12))) {
 		ddb->enabled_slices = 2;
 	} else {
 		ddb->enabled_slices = 1;
@@ -3831,6 +3831,35 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 	return ddb_size;
 }
 
+/*
+ * Calculate initial DBuf slice offset, based on slice size
+ * and mask(i.e if slice size is 1024 and second slice is enabled
+ * offset would be 1024)
+ */
+static u32 skl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
+					   u32 slice_size, u32 ddb_size)
+{
+	u32 offset = 0;
+
+	if (!dbuf_slice_mask)
+		return 0;
+
+	while (!(dbuf_slice_mask & 1)) {
+		dbuf_slice_mask >>= 1;
+		offset += slice_size;
+		if (offset >= ddb_size)
+			break;
+	}
+	return offset;
+}
+
+static u32 i915_get_allowed_dbuf_mask(struct drm_i915_private *dev_priv,
+				      int pipe, u32 active_pipes,
+				      uint_fixed_16_16_t pipe_ratio);
+
+static uint_fixed_16_16_t
+skl_get_pipe_ratio(const struct intel_crtc_state *crtc_state);
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 				   const struct intel_crtc_state *crtc_state,
@@ -3846,7 +3875,13 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
 	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
 	u16 ddb_size;
+	u16 ddb_range_size;
 	u32 i;
+	u32 dbuf_slice_mask;
+	u32 active_pipes;
+	u32 offset;
+	u32 slice_size;
+	uint_fixed_16_16_t pipe_ratio;
 
 	if (WARN_ON(!state) || !crtc_state->base.active) {
 		alloc->start = 0;
@@ -3855,14 +3890,23 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	if (intel_state->active_pipe_changes)
+	if (intel_state->active_pipe_changes) {
 		*num_active = hweight8(intel_state->active_pipes);
-	else
+		active_pipes = intel_state->active_pipes;
+	} else {
 		*num_active = hweight8(dev_priv->active_pipes);
+		active_pipes = dev_priv->active_pipes;
+	}
 
 	ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
 				      *num_active, ddb);
 
+	DRM_DEBUG_KMS("Got total ddb size %d\n", ddb_size);
+
+	slice_size = ddb_size / ddb->enabled_slices;
+
+	DRM_DEBUG_KMS("Got DBuf slice size %d\n", slice_size);
+
 	/*
 	 * If the state doesn't change the active CRTC's or there is no
 	 * modeset request, then there's no need to recalculate;
@@ -3880,6 +3924,39 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 		return;
 	}
 
+	/*
+	 * Calculate pipe ratio as stated in BSpec 28692
+	 */
+	pipe_ratio = skl_get_pipe_ratio(crtc_state);
+
+	/*
+	 * Get allowed DBuf slices for correspondent pipe and platform.
+	 */
+	dbuf_slice_mask = i915_get_allowed_dbuf_mask(dev_priv, for_pipe,
+						     active_pipes, pipe_ratio);
+
+	DRM_DEBUG_KMS("DBuf slice mask %x pipe %d active pipes %x\n",
+		      dbuf_slice_mask,
+		      for_pipe, active_pipes);
+
+	/*
+	 * Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
+	 * and slice size is 1024, the offset would be 1024
+	 */
+	offset = skl_get_first_dbuf_slice_offset(dbuf_slice_mask,
+						 slice_size, ddb_size);
+
+	/*
+	 * Figure out total size of allowed DBuf slices, which is basically
+	 * a number of allowed slices for that pipe multiplied by slice size.
+	 * We might still have some dbuf slices disabled in case if those
+	 * are not needed based on bandwidth requirements and num_active pipes,
+	 * so stick to real ddb size if it happens to be less. Inside of this
+	 * range ddb entries are still allocated in proportion to display width.
+	 */
+	ddb_range_size = min(hweight8(dbuf_slice_mask) * slice_size,
+			     (unsigned int)ddb_size);
+
 	/*
 	 * Watermark/ddb requirement highly depends upon width of the
 	 * framebuffer, So instead of allocating DDB equally among pipes
@@ -3890,10 +3967,23 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 			&crtc_state->base.adjusted_mode;
 		enum pipe pipe = crtc->pipe;
 		int hdisplay, vdisplay;
+		uint_fixed_16_16_t ratio = skl_get_pipe_ratio(crtc_state);
+		u32 pipe_dbuf_slice_mask = i915_get_allowed_dbuf_mask(dev_priv,
+								      pipe,
+								      active_pipes,
+								      ratio);
 
 		if (!crtc_state->base.enable)
 			continue;
 
+		/*
+		 * According to BSpec pipe can share one dbuf slice with another pipes or pipe can use
+		 * multiple dbufs, in both cases we account for other pipes only if they have
+		 * exactly same mask.
+		 */
+		if (dbuf_slice_mask != pipe_dbuf_slice_mask)
+			continue;
+
 		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
 		total_width += hdisplay;
 
@@ -3903,8 +3993,11 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 			pipe_width = hdisplay;
 	}
 
-	alloc->start = ddb_size * width_before_pipe / total_width;
-	alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
+	alloc->start = offset + ddb_range_size * width_before_pipe / total_width;
+	alloc->end = offset + ddb_range_size * (width_before_pipe + pipe_width) / total_width;
+
+	DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
+		      alloc->start, alloc->end);
 }
 
 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
@@ -4255,6 +4348,109 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
 	return total_data_rate;
 }
 
+uint_fixed_16_16_t
+skl_get_pipe_ratio(const struct intel_crtc_state *crtc_state)
+{
+	struct drm_plane *plane;
+	const struct drm_plane_state *drm_plane_state;
+	uint_fixed_16_16_t pipe_downscale;
+	uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
+
+	if (!crtc_state->base.enable)
+		return max_downscale;
+
+	drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
+		uint_fixed_16_16_t plane_downscale;
+		const struct intel_plane_state *plane_state =
+			to_intel_plane_state(drm_plane_state);
+
+		if (!intel_wm_plane_visible(crtc_state, plane_state))
+			continue;
+
+		plane_downscale = skl_plane_downscale_amount(crtc_state, plane_state);
+
+		max_downscale = max_fixed16(plane_downscale, max_downscale);
+	}
+
+	pipe_downscale = skl_pipe_downscale_amount(crtc_state);
+
+	pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
+
+	return pipe_downscale;
+}
+
+#define DBUF_S1_BIT 1
+#define DBUF_S2_BIT 2
+
+struct dbuf_slice_conf_entry {
+	u32 dbuf_mask[I915_MAX_PIPES];
+	u32 active_pipes;
+};
+
+
+/*
+ * Table taken from Bspec 12716
+ * Pipes do have some preferred DBuf slice affinity,
+ * plus there are some hardcoded requirements on how
+ * those should be distributed for multipipe scenarios.
+ * For more DBuf slices algorithm can get even more messy
+ * and less readable, so decided to use a table almost
+ * as is from BSpec itself - that way it is at least easier
+ * to compare, change and check.
+ */
+struct dbuf_slice_conf_entry icl_allowed_dbufs[] = {
+{ { 0, 0, 0, 0 }, 0 },
+{ { DBUF_S1_BIT | DBUF_S2_BIT, 0, 0, 0 }, BIT(PIPE_A) },
+{ { DBUF_S1_BIT, 0, 0, 0 }, BIT(PIPE_A) },
+{ { 0, DBUF_S1_BIT | DBUF_S2_BIT, 0, 0 }, BIT(PIPE_B) },
+{ { 0, DBUF_S1_BIT, 0, 0 }, BIT(PIPE_B) },
+{ { 0, 0, DBUF_S1_BIT | DBUF_S2_BIT }, BIT(PIPE_C) },
+{ { 0, 0, DBUF_S2_BIT, 0 }, BIT(PIPE_C) },
+{ { DBUF_S1_BIT, DBUF_S2_BIT, 0, 0 }, BIT(PIPE_A) | BIT(PIPE_B) },
+{ { DBUF_S1_BIT, 0, DBUF_S2_BIT, 0 }, BIT(PIPE_A) | BIT(PIPE_C) },
+{ { 0, DBUF_S1_BIT, DBUF_S2_BIT, 0 }, BIT(PIPE_B) | BIT(PIPE_C) },
+{ { DBUF_S1_BIT, DBUF_S1_BIT, DBUF_S2_BIT, 0 }, BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) },
+};
+
+/*
+ * This function finds an entry with same enabled pipe configuration and
+ * returns correspondent DBuf slice mask as stated in BSpec for particular
+ * platform.
+ */
+static u32 icl_get_allowed_dbuf_mask(int pipe,
+				     u32 active_pipes,
+				     uint_fixed_16_16_t pipe_ratio)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(icl_allowed_dbufs); i++) {
+		if (icl_allowed_dbufs[i].active_pipes == active_pipes) {
+			/*
+			 * According to BSpec 12716: if pipe_ratio >= 88.8
+			 * use single pipe, even if two are accessible.
+			 */
+			if (pipe_ratio.val >= div_fixed16(888, 10).val)
+				++i;
+			return icl_allowed_dbufs[i].dbuf_mask[pipe];
+		}
+	}
+	return 0;
+}
+
+u32 i915_get_allowed_dbuf_mask(struct drm_i915_private *dev_priv,
+				      int pipe, u32 active_pipes,
+				      uint_fixed_16_16_t pipe_ratio)
+{
+	if (IS_GEN(dev_priv, 11))
+		return icl_get_allowed_dbuf_mask(pipe,
+						 active_pipes,
+						 pipe_ratio);
+	/*
+	 * For anything else just return one slice yet.
+	 * Should be extended for other platforms.
+	 */
+	return DBUF_S1_BIT;
+}
+
 static u64
 icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
 				 u64 *plane_data_rate)
-- 
2.17.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Enable second dbuf slice for ICL
  2019-10-09  7:39 [PATCH v2] drm/i915: Enable second dbuf slice for ICL Stanislav Lisovskiy
@ 2019-10-09 10:03 ` Patchwork
  2019-10-09 10:03 ` ✗ Fi.CI.SPARSE: " Patchwork
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2019-10-09 10:03 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Enable second dbuf slice for ICL
URL   : https://patchwork.freedesktop.org/series/67771/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
8c8c36e0a241 drm/i915: Enable second dbuf slice for ICL
-:52: CHECK:CAMELCASE: Avoid CamelCase: <GBps>
#52: FILE: drivers/gpu/drm/i915/intel_pm.c:3824:
+	if ((num_active > 1 || total_data_bw >= GBps(12))) {

-:186: WARNING:LONG_LINE_COMMENT: line over 100 characters
#186: FILE: drivers/gpu/drm/i915/intel_pm.c:3980:
+		 * According to BSpec pipe can share one dbuf slice with another pipes or pipe can use

-:253: CHECK:LINE_SPACING: Please don't use multiple blank lines
#253: FILE: drivers/gpu/drm/i915/intel_pm.c:4390:
+
+

-:288: WARNING:LINE_SPACING: Missing a blank line after declarations
#288: FILE: drivers/gpu/drm/i915/intel_pm.c:4425:
+	int i;
+	for (i = 0; i < ARRAY_SIZE(icl_allowed_dbufs); i++) {

-:303: CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis
#303: FILE: drivers/gpu/drm/i915/intel_pm.c:4440:
+u32 i915_get_allowed_dbuf_mask(struct drm_i915_private *dev_priv,
+				      int pipe, u32 active_pipes,

total: 0 errors, 2 warnings, 3 checks, 285 lines checked

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 7+ messages in thread

* ✗ Fi.CI.SPARSE: warning for drm/i915: Enable second dbuf slice for ICL
  2019-10-09  7:39 [PATCH v2] drm/i915: Enable second dbuf slice for ICL Stanislav Lisovskiy
  2019-10-09 10:03 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
@ 2019-10-09 10:03 ` Patchwork
  2019-10-09 10:27 ` ✓ Fi.CI.BAT: success " Patchwork
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2019-10-09 10:03 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Enable second dbuf slice for ICL
URL   : https://patchwork.freedesktop.org/series/67771/
State : warning

== Summary ==

$ dim sparse origin/drm-tip
Sparse version: v0.6.0
Commit: drm/i915: Enable second dbuf slice for ICL
+drivers/gpu/drm/i915/intel_pm.c:4352:1: warning: symbol 'skl_get_pipe_ratio' was not declared. Should it be static?
+drivers/gpu/drm/i915/intel_pm.c:4401:30: warning: symbol 'icl_allowed_dbufs' was not declared. Should it be static?
+drivers/gpu/drm/i915/intel_pm.c:4439:5: warning: symbol 'i915_get_allowed_dbuf_mask' was not declared. Should it be static?

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 7+ messages in thread

* ✓ Fi.CI.BAT: success for drm/i915: Enable second dbuf slice for ICL
  2019-10-09  7:39 [PATCH v2] drm/i915: Enable second dbuf slice for ICL Stanislav Lisovskiy
  2019-10-09 10:03 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
  2019-10-09 10:03 ` ✗ Fi.CI.SPARSE: " Patchwork
@ 2019-10-09 10:27 ` Patchwork
  2019-10-09 14:07 ` ✓ Fi.CI.IGT: " Patchwork
  2019-10-16 23:41 ` [PATCH v2] " Matt Roper
  4 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2019-10-09 10:27 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Enable second dbuf slice for ICL
URL   : https://patchwork.freedesktop.org/series/67771/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7039 -> Patchwork_14719
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/index.html

Known issues
------------

  Here are the changes found in Patchwork_14719 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_selftest@live_hangcheck:
    - fi-icl-u2:          [PASS][1] -> [DMESG-FAIL][2] ([fdo#111678])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/fi-icl-u2/igt@i915_selftest@live_hangcheck.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/fi-icl-u2/igt@i915_selftest@live_hangcheck.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [PASS][3] -> [FAIL][4] ([fdo#111407])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  
#### Possible fixes ####

  * igt@gem_mmap_gtt@basic-write-no-prefault:
    - fi-icl-u3:          [DMESG-WARN][5] ([fdo#107724]) -> [PASS][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/fi-icl-u3/igt@gem_mmap_gtt@basic-write-no-prefault.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/fi-icl-u3/igt@gem_mmap_gtt@basic-write-no-prefault.html

  * igt@i915_pm_rpm@basic-pci-d3-state:
    - {fi-icl-dsi}:       [INCOMPLETE][7] ([fdo#107713] / [fdo#108840]) -> [PASS][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/fi-icl-dsi/igt@i915_pm_rpm@basic-pci-d3-state.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/fi-icl-dsi/igt@i915_pm_rpm@basic-pci-d3-state.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108840]: https://bugs.freedesktop.org/show_bug.cgi?id=108840
  [fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407
  [fdo#111678]: https://bugs.freedesktop.org/show_bug.cgi?id=111678


Participating hosts (44 -> 46)
------------------------------

  Additional (7): fi-cml-u2 fi-cml-s fi-tgl-u2 fi-cfl-guc fi-apl-guc fi-cfl-8109u fi-kbl-8809g 
  Missing    (5): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7039 -> Patchwork_14719

  CI-20190529: 20190529
  CI_DRM_7039: dda653c9731fe2ba4898964ffdb723cd35a633a5 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5218: 869ed1ee0b71ce17f0a864512488f8b1a6cb8545 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14719: 8c8c36e0a241d46244693b31f7d3ab428fda2c1f @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

8c8c36e0a241 drm/i915: Enable second dbuf slice for ICL

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 7+ messages in thread

* ✓ Fi.CI.IGT: success for drm/i915: Enable second dbuf slice for ICL
  2019-10-09  7:39 [PATCH v2] drm/i915: Enable second dbuf slice for ICL Stanislav Lisovskiy
                   ` (2 preceding siblings ...)
  2019-10-09 10:27 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2019-10-09 14:07 ` Patchwork
  2019-10-16 23:41 ` [PATCH v2] " Matt Roper
  4 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2019-10-09 14:07 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Enable second dbuf slice for ICL
URL   : https://patchwork.freedesktop.org/series/67771/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7039_full -> Patchwork_14719_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_14719_full:

### IGT changes ###

#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * igt@kms_rotation_crc@primary-yf-tiled-reflect-x-180:
    - {shard-tglb}:       NOTRUN -> [SKIP][1]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-tglb6/igt@kms_rotation_crc@primary-yf-tiled-reflect-x-180.html

  
Known issues
------------

  Here are the changes found in Patchwork_14719_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_shared@exec-single-timeline-bsd:
    - shard-iclb:         [PASS][2] -> [SKIP][3] ([fdo#110841])
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb5/igt@gem_ctx_shared@exec-single-timeline-bsd.html
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb1/igt@gem_ctx_shared@exec-single-timeline-bsd.html

  * igt@gem_exec_schedule@preempt-other-chain-bsd:
    - shard-iclb:         [PASS][4] -> [SKIP][5] ([fdo#111325]) +3 similar issues
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb5/igt@gem_exec_schedule@preempt-other-chain-bsd.html
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb1/igt@gem_exec_schedule@preempt-other-chain-bsd.html

  * igt@gem_exec_schedule@preempt-queue-bsd1:
    - shard-iclb:         [PASS][6] -> [SKIP][7] ([fdo#109276]) +24 similar issues
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb2/igt@gem_exec_schedule@preempt-queue-bsd1.html
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb8/igt@gem_exec_schedule@preempt-queue-bsd1.html

  * igt@gem_userptr_blits@dmabuf-sync:
    - shard-snb:          [PASS][8] -> [DMESG-WARN][9] ([fdo#111870]) +1 similar issue
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-snb2/igt@gem_userptr_blits@dmabuf-sync.html
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-snb5/igt@gem_userptr_blits@dmabuf-sync.html

  * igt@gem_workarounds@suspend-resume-context:
    - shard-apl:          [PASS][10] -> [DMESG-WARN][11] ([fdo#108566]) +5 similar issues
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-apl4/igt@gem_workarounds@suspend-resume-context.html
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-apl7/igt@gem_workarounds@suspend-resume-context.html

  * igt@i915_pm_rpm@debugfs-forcewake-user:
    - shard-apl:          [PASS][12] -> [INCOMPLETE][13] ([fdo#103927]) +1 similar issue
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-apl1/igt@i915_pm_rpm@debugfs-forcewake-user.html
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-apl4/igt@i915_pm_rpm@debugfs-forcewake-user.html

  * igt@i915_selftest@live_hangcheck:
    - shard-iclb:         [PASS][14] -> [DMESG-FAIL][15] ([fdo#111144] / [fdo#111678])
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb7/igt@i915_selftest@live_hangcheck.html
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb7/igt@i915_selftest@live_hangcheck.html

  * igt@i915_suspend@fence-restore-untiled:
    - shard-snb:          [PASS][16] -> [INCOMPLETE][17] ([fdo#105411])
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-snb2/igt@i915_suspend@fence-restore-untiled.html
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-snb1/igt@i915_suspend@fence-restore-untiled.html

  * igt@kms_cursor_crc@pipe-c-cursor-256x85-offscreen:
    - shard-skl:          [PASS][18] -> [FAIL][19] ([fdo#103232])
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-skl4/igt@kms_cursor_crc@pipe-c-cursor-256x85-offscreen.html
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-skl7/igt@kms_cursor_crc@pipe-c-cursor-256x85-offscreen.html

  * igt@kms_flip@flip-vs-expired-vblank:
    - shard-skl:          [PASS][20] -> [FAIL][21] ([fdo#105363]) +1 similar issue
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-skl6/igt@kms_flip@flip-vs-expired-vblank.html
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-skl8/igt@kms_flip@flip-vs-expired-vblank.html
    - shard-apl:          [PASS][22] -> [FAIL][23] ([fdo#105363])
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-apl6/igt@kms_flip@flip-vs-expired-vblank.html
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-apl3/igt@kms_flip@flip-vs-expired-vblank.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-onoff:
    - shard-iclb:         [PASS][24] -> [FAIL][25] ([fdo#103167]) +4 similar issues
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb8/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-onoff.html
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb2/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-onoff.html

  * igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min:
    - shard-skl:          [PASS][26] -> [FAIL][27] ([fdo#108145]) +1 similar issue
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-skl4/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min.html
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-skl6/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min.html

  * igt@kms_plane_alpha_blend@pipe-c-coverage-7efc:
    - shard-skl:          [PASS][28] -> [FAIL][29] ([fdo#108145] / [fdo#110403])
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-skl4/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-skl7/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html

  * igt@kms_plane_lowres@pipe-a-tiling-y:
    - shard-iclb:         [PASS][30] -> [FAIL][31] ([fdo#103166])
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb5/igt@kms_plane_lowres@pipe-a-tiling-y.html
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb1/igt@kms_plane_lowres@pipe-a-tiling-y.html

  * igt@kms_psr@psr2_cursor_mmap_cpu:
    - shard-iclb:         [PASS][32] -> [SKIP][33] ([fdo#109441]) +3 similar issues
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_cpu.html
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb8/igt@kms_psr@psr2_cursor_mmap_cpu.html

  * igt@kms_setmode@basic:
    - shard-hsw:          [PASS][34] -> [FAIL][35] ([fdo#99912])
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-hsw5/igt@kms_setmode@basic.html
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-hsw4/igt@kms_setmode@basic.html

  * igt@kms_vblank@pipe-b-ts-continuation-suspend:
    - shard-skl:          [PASS][36] -> [INCOMPLETE][37] ([fdo#104108])
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-skl7/igt@kms_vblank@pipe-b-ts-continuation-suspend.html
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-skl3/igt@kms_vblank@pipe-b-ts-continuation-suspend.html

  * igt@kms_vblank@pipe-c-wait-busy:
    - shard-iclb:         [PASS][38] -> [INCOMPLETE][39] ([fdo#107713])
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb7/igt@kms_vblank@pipe-c-wait-busy.html
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb7/igt@kms_vblank@pipe-c-wait-busy.html

  * igt@tools_test@sysfs_l3_parity:
    - shard-hsw:          [PASS][40] -> [SKIP][41] ([fdo#109271])
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-hsw8/igt@tools_test@sysfs_l3_parity.html
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-hsw7/igt@tools_test@sysfs_l3_parity.html

  
#### Possible fixes ####

  * igt@gem_ctx_isolation@vcs0-s3:
    - shard-skl:          [INCOMPLETE][42] ([fdo#104108]) -> [PASS][43]
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-skl5/igt@gem_ctx_isolation@vcs0-s3.html
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-skl4/igt@gem_ctx_isolation@vcs0-s3.html

  * igt@gem_eio@reset-stress:
    - shard-snb:          [FAIL][44] ([fdo#109661]) -> [PASS][45]
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-snb6/igt@gem_eio@reset-stress.html
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-snb6/igt@gem_eio@reset-stress.html

  * igt@gem_exec_balancer@smoke:
    - shard-iclb:         [SKIP][46] ([fdo#110854]) -> [PASS][47]
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb5/igt@gem_exec_balancer@smoke.html
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb1/igt@gem_exec_balancer@smoke.html

  * igt@gem_exec_flush@basic-wb-prw-default:
    - shard-skl:          [DMESG-WARN][48] -> [PASS][49]
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-skl3/igt@gem_exec_flush@basic-wb-prw-default.html
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-skl10/igt@gem_exec_flush@basic-wb-prw-default.html

  * igt@gem_exec_schedule@out-order-bsd2:
    - shard-iclb:         [SKIP][50] ([fdo#109276]) -> [PASS][51] +16 similar issues
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb6/igt@gem_exec_schedule@out-order-bsd2.html
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb4/igt@gem_exec_schedule@out-order-bsd2.html

  * igt@gem_exec_schedule@preempt-self-bsd:
    - shard-iclb:         [SKIP][52] ([fdo#111325]) -> [PASS][53] +6 similar issues
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb2/igt@gem_exec_schedule@preempt-self-bsd.html
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb8/igt@gem_exec_schedule@preempt-self-bsd.html

  * igt@gem_mocs_settings@mocs-rc6-vebox:
    - shard-apl:          [INCOMPLETE][54] ([fdo#103927]) -> [PASS][55]
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-apl3/igt@gem_mocs_settings@mocs-rc6-vebox.html
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-apl2/igt@gem_mocs_settings@mocs-rc6-vebox.html

  * igt@gem_tiled_swapping@non-threaded:
    - shard-iclb:         [INCOMPLETE][56] ([fdo#107713] / [fdo#108686]) -> [PASS][57]
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb1/igt@gem_tiled_swapping@non-threaded.html
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb8/igt@gem_tiled_swapping@non-threaded.html

  * igt@gem_userptr_blits@dmabuf-unsync:
    - shard-hsw:          [DMESG-WARN][58] ([fdo#111870]) -> [PASS][59]
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-hsw4/igt@gem_userptr_blits@dmabuf-unsync.html
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-hsw5/igt@gem_userptr_blits@dmabuf-unsync.html

  * igt@gem_wait@await-vecs0:
    - shard-skl:          [DMESG-WARN][60] ([fdo#106107]) -> [PASS][61]
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-skl2/igt@gem_wait@await-vecs0.html
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-skl9/igt@gem_wait@await-vecs0.html

  * igt@kms_busy@extended-modeset-hang-newfb-render-a:
    - shard-iclb:         [INCOMPLETE][62] ([fdo#107713]) -> [PASS][63]
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb7/igt@kms_busy@extended-modeset-hang-newfb-render-a.html
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb4/igt@kms_busy@extended-modeset-hang-newfb-render-a.html

  * igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic:
    - shard-hsw:          [FAIL][64] ([fdo#105767]) -> [PASS][65]
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-hsw1/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic.html
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-hsw2/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic.html

  * igt@kms_flip@flip-vs-suspend-interruptible:
    - shard-snb:          [INCOMPLETE][66] ([fdo#105411]) -> [PASS][67]
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-snb4/igt@kms_flip@flip-vs-suspend-interruptible.html
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-snb2/igt@kms_flip@flip-vs-suspend-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-suspend:
    - shard-apl:          [DMESG-WARN][68] ([fdo#108566]) -> [PASS][69] +2 similar issues
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-apl7/igt@kms_frontbuffer_tracking@fbc-suspend.html
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-apl7/igt@kms_frontbuffer_tracking@fbc-suspend.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render:
    - shard-iclb:         [FAIL][70] ([fdo#103167]) -> [PASS][71] +5 similar issues
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb2/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render.html
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb8/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render.html

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
    - shard-kbl:          [DMESG-WARN][72] ([fdo#103558] / [fdo#105602]) -> [PASS][73] +2 similar issues
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-kbl1/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-kbl4/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html

  * igt@kms_psr@psr2_sprite_render:
    - shard-iclb:         [SKIP][74] ([fdo#109441]) -> [PASS][75] +1 similar issue
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb7/igt@kms_psr@psr2_sprite_render.html
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb2/igt@kms_psr@psr2_sprite_render.html

  * igt@kms_setmode@basic:
    - shard-kbl:          [FAIL][76] ([fdo#99912]) -> [PASS][77]
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-kbl1/igt@kms_setmode@basic.html
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-kbl2/igt@kms_setmode@basic.html

  * igt@tools_test@tools_test:
    - shard-iclb:         [SKIP][78] ([fdo#109352]) -> [PASS][79]
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb3/igt@tools_test@tools_test.html
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb7/igt@tools_test@tools_test.html

  
#### Warnings ####

  * igt@gem_ctx_isolation@vcs1-nonpriv:
    - shard-iclb:         [SKIP][80] ([fdo#109276]) -> [FAIL][81] ([fdo#111329])
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb7/igt@gem_ctx_isolation@vcs1-nonpriv.html
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb2/igt@gem_ctx_isolation@vcs1-nonpriv.html

  * igt@gem_mocs_settings@mocs-rc6-bsd2:
    - shard-iclb:         [SKIP][82] ([fdo#109276]) -> [FAIL][83] ([fdo#111330])
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb8/igt@gem_mocs_settings@mocs-rc6-bsd2.html
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb4/igt@gem_mocs_settings@mocs-rc6-bsd2.html

  * igt@gem_mocs_settings@mocs-reset-bsd2:
    - shard-iclb:         [FAIL][84] ([fdo#111330]) -> [SKIP][85] ([fdo#109276])
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb4/igt@gem_mocs_settings@mocs-reset-bsd2.html
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb6/igt@gem_mocs_settings@mocs-reset-bsd2.html

  * igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-blt:
    - shard-kbl:          [SKIP][86] ([fdo#105602] / [fdo#109271]) -> [SKIP][87] ([fdo#109271])
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-kbl1/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-blt.html
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-kbl4/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-blt.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes:
    - shard-apl:          [DMESG-WARN][88] ([fdo#108566]) -> [INCOMPLETE][89] ([fdo#103927])
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-apl8/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-apl3/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html

  * igt@kms_psr@psr2_suspend:
    - shard-iclb:         [SKIP][90] ([fdo#109441]) -> [DMESG-WARN][91] ([fdo#107724])
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7039/shard-iclb8/igt@kms_psr@psr2_suspend.html
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/shard-iclb2/igt@kms_psr@psr2_suspend.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#103166]: https://bugs.freedesktop.org/show_bug.cgi?id=103166
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103232]: https://bugs.freedesktop.org/show_bug.cgi?id=103232
  [fdo#103558]: https://bugs.freedesktop.org/show_bug.cgi?id=103558
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#104108]: https://bugs.freedesktop.org/show_bug.cgi?id=104108
  [fdo#105363]: https://bugs.freedesktop.org/show_bug.cgi?id=105363
  [fdo#105411]: https://bugs.freedesktop.org/show_bug.cgi?id=105411
  [fdo#105602]: https://bugs.freedesktop.org/show_bug.cgi?id=105602
  [fdo#105767]: https://bugs.freedesktop.org/show_bug.cgi?id=105767
  [fdo#106107]: https://bugs.freedesktop.org/show_bug.cgi?id=106107
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#108686]: https://bugs.freedesktop.org/show_bug.cgi?id=108686
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109352]: https://bugs.freedesktop.org/show_bug.cgi?id=109352
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#109661]: https://bugs.freedesktop.org/show_bug.cgi?id=109661
  [fdo#110403]: https://bugs.freedesktop.org/show_bug.cgi?id=110403
  [fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841
  [fdo#110854]: https://bugs.freedesktop.org/show_bug.cgi?id=110854
  [fdo#111144]: https://bugs.freedesktop.org/show_bug.cgi?id=111144
  [fdo#111325]: https://bugs.freedesktop.org/show_bug.cgi?id=111325
  [fdo#111329]: https://bugs.freedesktop.org/show_bug.cgi?id=111329
  [fdo#111330]: https://bugs.freedesktop.org/show_bug.cgi?id=111330
  [fdo#111678]: https://bugs.freedesktop.org/show_bug.cgi?id=111678
  [fdo#111832]: https://bugs.freedesktop.org/show_bug.cgi?id=111832
  [fdo#111870]: https://bugs.freedesktop.org/show_bug.cgi?id=111870
  [fdo#99912]: https://bugs.freedesktop.org/show_bug.cgi?id=99912


Participating hosts (11 -> 11)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7039 -> Patchwork_14719

  CI-20190529: 20190529
  CI_DRM_7039: dda653c9731fe2ba4898964ffdb723cd35a633a5 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5218: 869ed1ee0b71ce17f0a864512488f8b1a6cb8545 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14719: 8c8c36e0a

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14719/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2] drm/i915: Enable second dbuf slice for ICL
  2019-10-09  7:39 [PATCH v2] drm/i915: Enable second dbuf slice for ICL Stanislav Lisovskiy
                   ` (3 preceding siblings ...)
  2019-10-09 14:07 ` ✓ Fi.CI.IGT: " Patchwork
@ 2019-10-16 23:41 ` Matt Roper
  2019-10-17  8:05   ` Lisovskiy, Stanislav
  4 siblings, 1 reply; 7+ messages in thread
From: Matt Roper @ 2019-10-16 23:41 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx, martin.peres

On Wed, Oct 09, 2019 at 10:39:08AM +0300, Stanislav Lisovskiy wrote:
> Also implemented algorithm for choosing DBuf slice configuration
> based on active pipes, pipe ratio as stated in BSpec 12716.
> 
> Now pipe allocation still stays proportional to pipe width as before,
> however within allowed DBuf slice for this particular configuration.
> 
> v2: Remove unneeded check from commit as ddb enabled slices might
>     now differ from hw state.

I can't seem to find v1 of this patch in my archives; can you elaborate
on this?  It looks like a bit of a mismatch in terms of how we're
interpreting 'ddb.enabled_slices' in different parts of the driver.
During hw readout we're treating the value as the number of powered up
slices (which will always be 2 since we always power them both up in
icl_display_core_init -> icl_dbuf_enable even when we're not going to
use both), whereas in the atomic check code we're interpreting the value
as the number of slices we want to distribute our plane allocations
over.  We may want to break these out into two separate fields (powered
slices vs utilized slices) that we track separately.

> 
> Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> ---
>  drivers/gpu/drm/i915/display/intel_display.c |   6 -
>  drivers/gpu/drm/i915/intel_pm.c              | 208 ++++++++++++++++++-
>  2 files changed, 202 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
> index 1a533ccdb54f..4683731ac1ca 100644
> --- a/drivers/gpu/drm/i915/display/intel_display.c
> +++ b/drivers/gpu/drm/i915/display/intel_display.c
> @@ -12989,12 +12989,6 @@ static void verify_wm_state(struct intel_crtc *crtc,
>  	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
>  	sw_ddb = &dev_priv->wm.skl_hw.ddb;
>  
> -	if (INTEL_GEN(dev_priv) >= 11 &&
> -	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
> -		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
> -			  sw_ddb->enabled_slices,
> -			  hw->ddb.enabled_slices);
> -

Related to the comment above, we probably do want to make sure that the
number of powered up dbuf slices matches what we expect (which today is
always 2, but that might change in the future if we try to be more
intelligent and power down the second slice when it isn't needed).

If we're already confirming that the planes' hw/sw DDB allocations
themselves match farther down this function, then we're effectively
already checking the distribution of planes between the two slices.

>  	/* planes */
>  	for_each_universal_plane(dev_priv, pipe, plane) {
>  		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index bfcf03ab5245..0fbeea61299f 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -3616,7 +3616,7 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
>  	 * only that 1 slice enabled until we have a proper way for on-demand
>  	 * toggling of the second slice.
>  	 */

The comment here is no longer correct now that we're actually using the
second slice.

> -	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
> +	if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
>  		enabled_slices++;
>  
>  	return enabled_slices;
> @@ -3821,7 +3821,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
>  	 * - plane straddling both slices is illegal in multi-pipe scenarios
>  	 * - should validate we stay within the hw bandwidth limits
>  	 */

This comment is/was also outdated.  Also note that this change is going
to impact TGL as well even though you haven't added the TGL handling for
slice assignment yet.

> -	if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
> +	if ((num_active > 1 || total_data_bw >= GBps(12))) {

Where does the 12 GBps number come from?  I know the comment above this
says that's the maximum BW supported by a single DBuf slice, but I can't
find where this is mentioned in the bspec (and I'm not sure if that
would apply to all gen11+ platforms or whether that was an ICL-specific
number from when the comment/code was first written).

>  		ddb->enabled_slices = 2;
>  	} else {
>  		ddb->enabled_slices = 1;
> @@ -3831,6 +3831,35 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
>  	return ddb_size;
>  }
>  
> +/*
> + * Calculate initial DBuf slice offset, based on slice size
> + * and mask(i.e if slice size is 1024 and second slice is enabled
> + * offset would be 1024)
> + */
> +static u32 skl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
> +					   u32 slice_size, u32 ddb_size)
> +{
> +	u32 offset = 0;
> +
> +	if (!dbuf_slice_mask)
> +		return 0;
> +
> +	while (!(dbuf_slice_mask & 1)) {
> +		dbuf_slice_mask >>= 1;
> +		offset += slice_size;
> +		if (offset >= ddb_size)

This should probably include a WARN_ON() since if there's any way we can
return an offset outside our DDB we're going to have problems.

> +			break;
> +	}
> +	return offset;
> +}
> +
> +static u32 i915_get_allowed_dbuf_mask(struct drm_i915_private *dev_priv,
> +				      int pipe, u32 active_pipes,
> +				      uint_fixed_16_16_t pipe_ratio);
> +
> +static uint_fixed_16_16_t
> +skl_get_pipe_ratio(const struct intel_crtc_state *crtc_state);
> +
>  static void
>  skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
>  				   const struct intel_crtc_state *crtc_state,
> @@ -3846,7 +3875,13 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
>  	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
>  	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
>  	u16 ddb_size;
> +	u16 ddb_range_size;
>  	u32 i;
> +	u32 dbuf_slice_mask;
> +	u32 active_pipes;
> +	u32 offset;
> +	u32 slice_size;
> +	uint_fixed_16_16_t pipe_ratio;
>  
>  	if (WARN_ON(!state) || !crtc_state->base.active) {
>  		alloc->start = 0;
> @@ -3855,14 +3890,23 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
>  		return;
>  	}
>  
> -	if (intel_state->active_pipe_changes)
> +	if (intel_state->active_pipe_changes) {
>  		*num_active = hweight8(intel_state->active_pipes);
> -	else
> +		active_pipes = intel_state->active_pipes;
> +	} else {
>  		*num_active = hweight8(dev_priv->active_pipes);
> +		active_pipes = dev_priv->active_pipes;
> +	}
>  
>  	ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
>  				      *num_active, ddb);
>  
> +	DRM_DEBUG_KMS("Got total ddb size %d\n", ddb_size);
> +
> +	slice_size = ddb_size / ddb->enabled_slices;
> +
> +	DRM_DEBUG_KMS("Got DBuf slice size %d\n", slice_size);
> +
>  	/*
>  	 * If the state doesn't change the active CRTC's or there is no
>  	 * modeset request, then there's no need to recalculate;
> @@ -3880,6 +3924,39 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
>  		return;
>  	}
>  
> +	/*
> +	 * Calculate pipe ratio as stated in BSpec 28692
> +	 */
> +	pipe_ratio = skl_get_pipe_ratio(crtc_state);

We can probably move this call down into the function below.  AFAICS,
pipe ratio doesn't matter anymore once we get to TGL and doesn't matter
on earlier platforms that only have one slice, so there's no need
to calculate it if we aren't going to use it.

> +
> +	/*
> +	 * Get allowed DBuf slices for correspondent pipe and platform.
> +	 */
> +	dbuf_slice_mask = i915_get_allowed_dbuf_mask(dev_priv, for_pipe,
> +						     active_pipes, pipe_ratio);
> +
> +	DRM_DEBUG_KMS("DBuf slice mask %x pipe %d active pipes %x\n",
> +		      dbuf_slice_mask,
> +		      for_pipe, active_pipes);
> +
> +	/*
> +	 * Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
> +	 * and slice size is 1024, the offset would be 1024
> +	 */
> +	offset = skl_get_first_dbuf_slice_offset(dbuf_slice_mask,
> +						 slice_size, ddb_size);
> +
> +	/*
> +	 * Figure out total size of allowed DBuf slices, which is basically
> +	 * a number of allowed slices for that pipe multiplied by slice size.
> +	 * We might still have some dbuf slices disabled in case if those
> +	 * are not needed based on bandwidth requirements and num_active pipes,
> +	 * so stick to real ddb size if it happens to be less. Inside of this
> +	 * range ddb entries are still allocated in proportion to display width.
> +	 */
> +	ddb_range_size = min(hweight8(dbuf_slice_mask) * slice_size,
> +			     (unsigned int)ddb_size);
> +
>  	/*
>  	 * Watermark/ddb requirement highly depends upon width of the
>  	 * framebuffer, So instead of allocating DDB equally among pipes
> @@ -3890,10 +3967,23 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
>  			&crtc_state->base.adjusted_mode;
>  		enum pipe pipe = crtc->pipe;
>  		int hdisplay, vdisplay;
> +		uint_fixed_16_16_t ratio = skl_get_pipe_ratio(crtc_state);
> +		u32 pipe_dbuf_slice_mask = i915_get_allowed_dbuf_mask(dev_priv,
> +								      pipe,
> +								      active_pipes,
> +								      ratio);

Minor nitpick, but the lines here are a bit over 80 characters.  You may
want to break the line after the equals sign.

>  
>  		if (!crtc_state->base.enable)
>  			continue;
>  
> +		/*
> +		 * According to BSpec pipe can share one dbuf slice with another pipes or pipe can use
> +		 * multiple dbufs, in both cases we account for other pipes only if they have
> +		 * exactly same mask.
> +		 */

Some more long lines that need to be wrapped.

> +		if (dbuf_slice_mask != pipe_dbuf_slice_mask)
> +			continue;
> +
>  		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
>  		total_width += hdisplay;
>  
> @@ -3903,8 +3993,11 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
>  			pipe_width = hdisplay;
>  	}
>  
> -	alloc->start = ddb_size * width_before_pipe / total_width;
> -	alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
> +	alloc->start = offset + ddb_range_size * width_before_pipe / total_width;
> +	alloc->end = offset + ddb_range_size * (width_before_pipe + pipe_width) / total_width;
> +
> +	DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
> +		      alloc->start, alloc->end);
>  }
>  
>  static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
> @@ -4255,6 +4348,109 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
>  	return total_data_rate;
>  }
>  
> +uint_fixed_16_16_t
> +skl_get_pipe_ratio(const struct intel_crtc_state *crtc_state)

I think this should be named icl_get_pipe_ratio() given that it comes
from a gen11-specific page of the bspec?

> +{
> +	struct drm_plane *plane;
> +	const struct drm_plane_state *drm_plane_state;
> +	uint_fixed_16_16_t pipe_downscale;
> +	uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
> +
> +	if (!crtc_state->base.enable)

This function only gets called from skl_ddb_get_pipe_allocation_limits()
which tests crtc_state->base.active at the beginning and bails out if
the CRTC isn't active.  active && !enable shouldn't be possible, so I'd
add a WARN_ON() here to make that assertion clear.

> +		return max_downscale;
> +
> +	drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
> +		uint_fixed_16_16_t plane_downscale;
> +		const struct intel_plane_state *plane_state =
> +			to_intel_plane_state(drm_plane_state);
> +
> +		if (!intel_wm_plane_visible(crtc_state, plane_state))
> +			continue;
> +
> +		plane_downscale = skl_plane_downscale_amount(crtc_state, plane_state);
> +
> +		max_downscale = max_fixed16(plane_downscale, max_downscale);
> +	}
> +
> +	pipe_downscale = skl_pipe_downscale_amount(crtc_state);
> +
> +	pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
> +
> +	return pipe_downscale;

Wouldn't the pipe ratio be 1/pipe_downscale?


> +}
> +
> +#define DBUF_S1_BIT 1
> +#define DBUF_S2_BIT 2
> +
> +struct dbuf_slice_conf_entry {
> +	u32 dbuf_mask[I915_MAX_PIPES];
> +	u32 active_pipes;
> +};
> +
> +
> +/*
> + * Table taken from Bspec 12716
> + * Pipes do have some preferred DBuf slice affinity,
> + * plus there are some hardcoded requirements on how
> + * those should be distributed for multipipe scenarios.
> + * For more DBuf slices algorithm can get even more messy
> + * and less readable, so decided to use a table almost
> + * as is from BSpec itself - that way it is at least easier
> + * to compare, change and check.
> + */
> +struct dbuf_slice_conf_entry icl_allowed_dbufs[] = {
> +{ { 0, 0, 0, 0 }, 0 },
> +{ { DBUF_S1_BIT | DBUF_S2_BIT, 0, 0, 0 }, BIT(PIPE_A) },
> +{ { DBUF_S1_BIT, 0, 0, 0 }, BIT(PIPE_A) },
> +{ { 0, DBUF_S1_BIT | DBUF_S2_BIT, 0, 0 }, BIT(PIPE_B) },
> +{ { 0, DBUF_S1_BIT, 0, 0 }, BIT(PIPE_B) },
> +{ { 0, 0, DBUF_S1_BIT | DBUF_S2_BIT }, BIT(PIPE_C) },
> +{ { 0, 0, DBUF_S2_BIT, 0 }, BIT(PIPE_C) },
> +{ { DBUF_S1_BIT, DBUF_S2_BIT, 0, 0 }, BIT(PIPE_A) | BIT(PIPE_B) },
> +{ { DBUF_S1_BIT, 0, DBUF_S2_BIT, 0 }, BIT(PIPE_A) | BIT(PIPE_C) },
> +{ { 0, DBUF_S1_BIT, DBUF_S2_BIT, 0 }, BIT(PIPE_B) | BIT(PIPE_C) },
> +{ { DBUF_S1_BIT, DBUF_S1_BIT, DBUF_S2_BIT, 0 }, BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) },

You might want to align some of the columns here to make it slightly
easier to read.  And even though the bspec puts the pipes in the final
column, I think it would be more natural for readability to move that
first and/or put it on a line by itself.  You've already got a line here
that exceeds 80 characters by a bit and once you add a TGL table with
four pipes you're going to have even longer lines.


> +};
> +
> +/*
> + * This function finds an entry with same enabled pipe configuration and
> + * returns correspondent DBuf slice mask as stated in BSpec for particular
> + * platform.
> + */
> +static u32 icl_get_allowed_dbuf_mask(int pipe,
> +				     u32 active_pipes,
> +				     uint_fixed_16_16_t pipe_ratio)
> +{
> +	int i;
> +	for (i = 0; i < ARRAY_SIZE(icl_allowed_dbufs); i++) {
> +		if (icl_allowed_dbufs[i].active_pipes == active_pipes) {
> +			/*
> +			 * According to BSpec 12716: if pipe_ratio >= 88.8
> +			 * use single pipe, even if two are accessible.
> +			 */
> +			if (pipe_ratio.val >= div_fixed16(888, 10).val)
> +				++i;
> +			return icl_allowed_dbufs[i].dbuf_mask[pipe];
> +		}
> +	}
> +	return 0;
> +}
> +
> +u32 i915_get_allowed_dbuf_mask(struct drm_i915_private *dev_priv,
> +				      int pipe, u32 active_pipes,
> +				      uint_fixed_16_16_t pipe_ratio)
> +{
> +	if (IS_GEN(dev_priv, 11))
> +		return icl_get_allowed_dbuf_mask(pipe,
> +						 active_pipes,
> +						 pipe_ratio);
> +	/*
> +	 * For anything else just return one slice yet.
> +	 * Should be extended for other platforms.
> +	 */

Note that you've already adjusted the DDB size for TGL in
intel_get_ddb_size(), so we probably want to add TGL's table at the same
time as the gen11 table.


Matt

> +	return DBUF_S1_BIT;
> +}
> +
>  static u64
>  icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
>  				 u64 *plane_data_rate)
> -- 
> 2.17.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Matt Roper
Graphics Software Engineer
VTT-OSGC Platform Enablement
Intel Corporation
(916) 356-2795
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2] drm/i915: Enable second dbuf slice for ICL
  2019-10-16 23:41 ` [PATCH v2] " Matt Roper
@ 2019-10-17  8:05   ` Lisovskiy, Stanislav
  0 siblings, 0 replies; 7+ messages in thread
From: Lisovskiy, Stanislav @ 2019-10-17  8:05 UTC (permalink / raw)
  To: Roper, Matthew D; +Cc: intel-gfx, Peres, Martin

On Wed, 2019-10-16 at 16:41 -0700, Matt Roper wrote:
> On Wed, Oct 09, 2019 at 10:39:08AM +0300, Stanislav Lisovskiy wrote:
> > Also implemented algorithm for choosing DBuf slice configuration
> > based on active pipes, pipe ratio as stated in BSpec 12716.
> > 
> > Now pipe allocation still stays proportional to pipe width as
> > before,
> > however within allowed DBuf slice for this particular
> > configuration.
> > 
> > v2: Remove unneeded check from commit as ddb enabled slices might
> >     now differ from hw state.
> 
> I can't seem to find v1 of this patch in my archives; can you
> elaborate
> on this? 

Hi, thanks for the good points in the review. I initially submitted v1 only to
the TryBot, then figured out some issue, and then sent the patch to the
mailing list.


>  It looks like a bit of a mismatch in terms of how we're
> interpreting 'ddb.enabled_slices' in different parts of the driver.
> During hw readout we're treating the value as the number of powered
> up
> slices (which will always be 2 since we always power them both up in
> icl_display_core_init -> icl_dbuf_enable even when we're not going to
> use both), whereas in the atomic check code we're interpreting the
> value
> as the number of slices we want to distribute our plane allocations
> over.  We may want to break these out into two separate fields
> (powered
> slices vs utilized slices) that we track separately.

Had exactly the same concern: I wanted to always treat this field as the
number of slices we have available; however, if we are going to enable/disable
them dynamically, probably the only way is to split it.

> 
> > 
> > Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> > ---
> >  drivers/gpu/drm/i915/display/intel_display.c |   6 -
> >  drivers/gpu/drm/i915/intel_pm.c              | 208
> > ++++++++++++++++++-
> >  2 files changed, 202 insertions(+), 12 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/display/intel_display.c
> > b/drivers/gpu/drm/i915/display/intel_display.c
> > index 1a533ccdb54f..4683731ac1ca 100644
> > --- a/drivers/gpu/drm/i915/display/intel_display.c
> > +++ b/drivers/gpu/drm/i915/display/intel_display.c
> > @@ -12989,12 +12989,6 @@ static void verify_wm_state(struct
> > intel_crtc *crtc,
> >  	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
> >  	sw_ddb = &dev_priv->wm.skl_hw.ddb;
> >  
> > -	if (INTEL_GEN(dev_priv) >= 11 &&
> > -	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
> > -		DRM_ERROR("mismatch in DBUF Slices (expected %u, got
> > %u)\n",
> > -			  sw_ddb->enabled_slices,
> > -			  hw->ddb.enabled_slices);
> > -
> 
> Related to the comment above, we probably do want to make sure that
> the
> number of powered up dbuf slices matches what we expect (which today
> is
> always 2, but that might change in the future if we try to be more
> intelligent and power down the second slice when it isn't needed)
> 
> If we're already confirming that the planes' hw/sw DDB allocations
> themselves match farther down this function, then we're effectively
> already checking the distribution of planes between the two slices.
> 
> >  	/* planes */
> >  	for_each_universal_plane(dev_priv, pipe, plane) {
> >  		struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
> > diff --git a/drivers/gpu/drm/i915/intel_pm.c
> > b/drivers/gpu/drm/i915/intel_pm.c
> > index bfcf03ab5245..0fbeea61299f 100644
> > --- a/drivers/gpu/drm/i915/intel_pm.c
> > +++ b/drivers/gpu/drm/i915/intel_pm.c
> > @@ -3616,7 +3616,7 @@ static u8
> > intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
> >  	 * only that 1 slice enabled until we have a proper way for on-
> > demand
> >  	 * toggling of the second slice.
> >  	 */
> 
> 
> This comment is/was also outdated.  Also note that this change is
> going
> to impact TGL as well even though you haven't added the TGL handling
> for
> slice assignment yet.
> 
> > -	if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
> > +	if ((num_active > 1 || total_data_bw >= GBps(12))) {
> 
> Where does the 12 GBps number come from?  I know the comment above
> this
> says that's the maximum BW supported by a single DBuf slice, but I
> can't
> find where this is mentioned in the bspec (and I'm not sure if that
> would apply to all gen11+ platforms or whether that was an ICL-
> specific
> number from when the comment/code was first written).

Had the same question actually - neither I nor Ville found any mention
of 12 GB/s in the bspec, so we decided to leave it as is, at least until
we figure out where it came from.

> 
> >  		ddb->enabled_slices = 2;
> >  	} else {
> >  		ddb->enabled_slices = 1;
> > @@ -3831,6 +3831,35 @@ static u16 intel_get_ddb_size(struct
> > drm_i915_private *dev_priv,
> >  	return ddb_size;
> >  }
> >  
> > +/*
> > + * Calculate initial DBuf slice offset, based on slice size
> > + * and mask(i.e if slice size is 1024 and second slice is enabled
> > + * offset would be 1024)
> > + */
> > +static u32 skl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
> > +					   u32 slice_size, u32
> > ddb_size)
> > +{
> > +	u32 offset = 0;
> > +
> > +	if (!dbuf_slice_mask)
> > +		return 0;
> > +
> > +	while (!(dbuf_slice_mask & 1)) {
> > +		dbuf_slice_mask >>= 1;
> > +		offset += slice_size;
> > +		if (offset >= ddb_size)
> 
> 
> We can probably move this call down into the function below.  AFAICS,
> pipe ratio doesn't matter anymore once we get to TGL and doesn't
> matter
> on earlier platforms that only have one slice, so there's no need
> to calculate it if we aren't going to use it.

Good point, agree it is probably not so common to be used here.
Also thanks for spotting that I forgot to invert pipe_ratio value.

> 
> > +
> > +	/*
> > +	 * Get allowed DBuf slices for correspondent pipe and platform.
> > +	 */
> > +	dbuf_slice_mask = i915_get_allowed_dbuf_mask(dev_priv,
> > for_pipe,
> > +						     active_pipes,
> > pipe_ratio);
> > +
> > +	DRM_DEBUG_KMS("DBuf slice mask %x pipe %d active pipes %x\n",
> > +		      dbuf_slice_mask,
> > +		      for_pipe, active_pipes);
> > +
> > +	/*
> > +	 * Figure out at which DBuf slice we start, i.e if we start at
> > Dbuf S2
> > +	 * and slice size is 1024, the offset would be 1024
> > +	 */
> > +	offset = skl_get_first_dbuf_slice_offset(dbuf_slice_mask,
> > +						 slice_size, ddb_size);
> > +
> > +	/*
> > +	 * Figure out total size of allowed DBuf slices, which is
> > basically
> > +	 * a number of allowed slices for that pipe multiplied by slice
> > size.
> > +	 * We might still have some dbuf slices disabled in case if
> > those
> > +	 * are not needed based on bandwidth requirements and
> > num_active pipes,
> > +	 * so stick to real ddb size if it happens to be less. Inside
> > of this
> > +	 * range ddb entries are still allocated in proportion to
> > display width.
> > +	 */
> > +	ddb_range_size = min(hweight8(dbuf_slice_mask) * slice_size,
> > +			     (unsigned int)ddb_size);
> > +
> >  	/*
> >  	 * Watermark/ddb requirement highly depends upon width of the
> >  	 * framebuffer, So instead of allocating DDB equally among
> > pipes
> > @@ -3890,10 +3967,23 @@ skl_ddb_get_pipe_allocation_limits(struct
> > drm_i915_private *dev_priv,
> >  			&crtc_state->base.adjusted_mode;
> >  		enum pipe pipe = crtc->pipe;
> >  		int hdisplay, vdisplay;
> > +		uint_fixed_16_16_t ratio = skl_get_pipe_ratio(crtc_state);
> > +		u32 pipe_dbuf_slice_mask = i915_get_allowed_dbuf_mask(dev_priv,
> > +								      pipe,
> > +								      active_pipes,
> > +								      ratio);
> 
> Minor nitpick, but the lines here are a bit over 80 characters.  You
> may
> want to break the line after the equals sign.
> 
> >  
> >  		if (!crtc_state->base.enable)
> >  			continue;
> >  
> > +		/*
> > +		 * According to BSpec pipe can share one dbuf slice
> > with another pipes or pipe can use
> > +		 * multiple dbufs, in both cases we account for other
> > pipes only if they have
> > +		 * exactly same mask.
> > +		 */
> 
> Some more long lines that need to be wrapped.
> 
> > +		if (dbuf_slice_mask != pipe_dbuf_slice_mask)
> > +			continue;
> > +
> >  		drm_mode_get_hv_timing(adjusted_mode, &hdisplay,
> > &vdisplay);
> >  		total_width += hdisplay;
> >  
> > @@ -3903,8 +3993,11 @@ skl_ddb_get_pipe_allocation_limits(struct
> > drm_i915_private *dev_priv,
> >  			pipe_width = hdisplay;
> >  	}
> >  
> > -	alloc->start = ddb_size * width_before_pipe / total_width;
> > -	alloc->end = ddb_size * (width_before_pipe + pipe_width) /
> > total_width;
> > +	alloc->start = offset + ddb_range_size * width_before_pipe /
> > total_width;
> > +	alloc->end = offset + ddb_range_size * (width_before_pipe +
> > pipe_width) / total_width;
> > +
> > +	DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
> > +		      alloc->start, alloc->end);
> >  }
> >  
> >  static int skl_compute_wm_params(const struct intel_crtc_state
> > *crtc_state,
> > @@ -4255,6 +4348,109 @@ skl_get_total_relative_data_rate(struct
> > intel_crtc_state *crtc_state,
> >  	return total_data_rate;
> >  }
> >  
> > +uint_fixed_16_16_t
> > +skl_get_pipe_ratio(const struct intel_crtc_state *crtc_state)
> 
> I think this should be named icl_get_pipe_ratio() given that it comes
> from a gen11-specific page of the bspec?
> 
> > +{
> > +	struct drm_plane *plane;
> > +	const struct drm_plane_state *drm_plane_state;
> > +	uint_fixed_16_16_t pipe_downscale;
> > +	uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
> > +
> > +	if (!crtc_state->base.enable)
> 
> This function only gets called from
> skl_ddb_get_pipe_allocation_limits()
> which tests crtc_state->base.active at the beginning and bails out if
> the CRTC isn't active.  active && !enable shouldn't be possible, so
> I'd
> add a WARN_ON() here to make that assertion clear.
> 
> > +		return max_downscale;
> > +
> > +	drm_atomic_crtc_state_for_each_plane_state(plane,
> > drm_plane_state, &crtc_state->base) {
> > +		uint_fixed_16_16_t plane_downscale;
> > +		const struct intel_plane_state *plane_state =
> > +			to_intel_plane_state(drm_plane_state);
> > +
> > +		if (!intel_wm_plane_visible(crtc_state, plane_state))
> > +			continue;
> > +
> > +		plane_downscale =
> > skl_plane_downscale_amount(crtc_state, plane_state);
> > +
> > +		max_downscale = max_fixed16(plane_downscale,
> > max_downscale);
> > +	}
> > +
> > +	pipe_downscale = skl_pipe_downscale_amount(crtc_state);
> > +
> > +	pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
> > +
> > +	return pipe_downscale;
> 
> Wouldn't the pipe ratio be 1/pipe_downscale?
> 
> 
> > +}
> > +
> > +#define DBUF_S1_BIT 1
> > +#define DBUF_S2_BIT 2
> > +
> > +struct dbuf_slice_conf_entry {
> > +	u32 dbuf_mask[I915_MAX_PIPES];
> > +	u32 active_pipes;
> > +};
> > +
> > +
> > +/*
> > + * Table taken from Bspec 12716
> > + * Pipes do have some preferred DBuf slice affinity,
> > + * plus there are some hardcoded requirements on how
> > + * those should be distributed for multipipe scenarios.
> > + * For more DBuf slices algorithm can get even more messy
> > + * and less readable, so decided to use a table almost
> > + * as is from BSpec itself - that way it is at least easier
> > + * to compare, change and check.
> > + */
> > +struct dbuf_slice_conf_entry icl_allowed_dbufs[] = {
> > +{ { 0, 0, 0, 0 }, 0 },
> > +{ { DBUF_S1_BIT | DBUF_S2_BIT, 0, 0, 0 }, BIT(PIPE_A) },
> > +{ { DBUF_S1_BIT, 0, 0, 0 }, BIT(PIPE_A) },
> > +{ { 0, DBUF_S1_BIT | DBUF_S2_BIT, 0, 0 }, BIT(PIPE_B) },
> > +{ { 0, DBUF_S1_BIT, 0, 0 }, BIT(PIPE_B) },
> > +{ { 0, 0, DBUF_S1_BIT | DBUF_S2_BIT }, BIT(PIPE_C) },
> > +{ { 0, 0, DBUF_S2_BIT, 0 }, BIT(PIPE_C) },
> > +{ { DBUF_S1_BIT, DBUF_S2_BIT, 0, 0 }, BIT(PIPE_A) | BIT(PIPE_B) },
> > +{ { DBUF_S1_BIT, 0, DBUF_S2_BIT, 0 }, BIT(PIPE_A) | BIT(PIPE_C) },
> > +{ { 0, DBUF_S1_BIT, DBUF_S2_BIT, 0 }, BIT(PIPE_B) | BIT(PIPE_C) },
> > +{ { DBUF_S1_BIT, DBUF_S1_BIT, DBUF_S2_BIT, 0 }, BIT(PIPE_A) |
> > BIT(PIPE_B) | BIT(PIPE_C) },
> 
> You might want to align some of the columns here to make it slightly
> easier to read.  And even though the bspec puts the pipes in the
> final
> column, I think it would be more natural for readability to move that
> first and/or put it on a line by itself.  You've already got a line
> here
> that exceeds 80 characters by a bit and once you add a TGL table with
> four pipes you're going to have even longer lines.
> 
> 
> > +};
> > +
> > +/*
> > + * This function finds an entry with the same enabled pipe
> > + * configuration and returns the corresponding DBuf slice mask,
> > + * as stated in BSpec for a particular platform.
> > + */
> > +static u32 icl_get_allowed_dbuf_mask(int pipe,
> > +				     u32 active_pipes,
> > +				     uint_fixed_16_16_t pipe_ratio)
> > +{
> > +	int i;
> > +	for (i = 0; i < ARRAY_SIZE(icl_allowed_dbufs); i++) {
> > +		if (icl_allowed_dbufs[i].active_pipes == active_pipes)
> > {
> > +			/*
> > +			 * According to BSpec 12716: if pipe_ratio >=
> > 88.8
> > +			 * use single pipe, even if two are accessible.
> > +			 */
> > +			if (pipe_ratio.val >= div_fixed16(888, 10).val)
> > +				++i;
> > +			return icl_allowed_dbufs[i].dbuf_mask[pipe];
> > +		}
> > +	}
> > +	return 0;
> > +}
> > +
> > +u32 i915_get_allowed_dbuf_mask(struct drm_i915_private *dev_priv,
> > +				      int pipe, u32 active_pipes,
> > +				      uint_fixed_16_16_t pipe_ratio)
> > +{
> > +	if (IS_GEN(dev_priv, 11))
> > +		return icl_get_allowed_dbuf_mask(pipe,
> > +						 active_pipes,
> > +						 pipe_ratio);
> > +	/*
> > +	 * For anything else just return one slice yet.
> > +	 * Should be extended for other platforms.
> > +	 */
> 
> Note that you've already adjusted the DDB size for TGL in
> intel_get_ddb_size(), so we probably want to add TGL's table at the
> same
> time as the gen11 table.
> 
> 
> Matt
> 
> > +	return DBUF_S1_BIT;
> > +}
> > +
> >  static u64
> >  icl_get_total_relative_data_rate(struct intel_crtc_state
> > *crtc_state,
> >  				 u64 *plane_data_rate)
> > -- 
> > 2.17.1
> > 
> > _______________________________________________
> > Intel-gfx mailing list
> > Intel-gfx@lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2019-10-17  8:05 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-10-09  7:39 [PATCH v2] drm/i915: Enable second dbuf slice for ICL Stanislav Lisovskiy
2019-10-09 10:03 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
2019-10-09 10:03 ` ✗ Fi.CI.SPARSE: " Patchwork
2019-10-09 10:27 ` ✓ Fi.CI.BAT: success " Patchwork
2019-10-09 14:07 ` ✓ Fi.CI.IGT: " Patchwork
2019-10-16 23:41 ` [PATCH v2] " Matt Roper
2019-10-17  8:05   ` Lisovskiy, Stanislav

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.