* [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL
@ 2020-01-24  8:44 Stanislav Lisovskiy
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 1/7] drm/i915: Remove skl_ddl_allocation struct Stanislav Lisovskiy
                   ` (10 more replies)
  0 siblings, 11 replies; 25+ messages in thread
From: Stanislav Lisovskiy @ 2020-01-24  8:44 UTC (permalink / raw)
  To: intel-gfx

This patch series does some initial preparatory cleanup of the DBuf
manipulating code, i.e. removes redundant structures/code, switches to
mask-based DBuf manipulation, and puts the BSpec DBuf assignment rules
into use.

Stanislav Lisovskiy (7):
  drm/i915: Remove skl_ddl_allocation struct
  drm/i915: Move dbuf slice update to proper place
  drm/i915: Introduce parameterized DBUF_CTL
  drm/i915: Manipulate DBuf slices properly
  drm/i915: Correctly map DBUF slices to pipes
  drm/i915: Protect intel_dbuf_slices_update with mutex
  drm/i915: Update dbuf slices only with full modeset

 drivers/gpu/drm/i915/display/intel_display.c  |  54 ++-
 .../drm/i915/display/intel_display_power.c    |  95 ++--
 .../drm/i915/display/intel_display_power.h    |   5 +
 .../drm/i915/display/intel_display_types.h    |   3 +
 drivers/gpu/drm/i915/i915_drv.h               |   7 +-
 drivers/gpu/drm/i915/i915_pci.c               |   5 +-
 drivers/gpu/drm/i915/i915_reg.h               |   7 +-
 drivers/gpu/drm/i915/intel_device_info.h      |   1 +
 drivers/gpu/drm/i915/intel_pm.c               | 449 +++++++++++++++---
 drivers/gpu/drm/i915/intel_pm.h               |   5 +-
 10 files changed, 480 insertions(+), 151 deletions(-)

-- 
2.24.1.485.gad05a3d8e5


* [Intel-gfx] [PATCH v16 1/7] drm/i915: Remove skl_ddl_allocation struct
  2020-01-24  8:44 [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL Stanislav Lisovskiy
@ 2020-01-24  8:44 ` Stanislav Lisovskiy
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 2/7] drm/i915: Move dbuf slice update to proper place Stanislav Lisovskiy
                   ` (9 subsequent siblings)
  10 siblings, 0 replies; 25+ messages in thread
From: Stanislav Lisovskiy @ 2020-01-24  8:44 UTC (permalink / raw)
  To: intel-gfx

The current consensus is that it is redundant, as
we already have the skl_ddb_values struct out there;
also this struct contains only a single member,
which makes it unnecessary.

v2: As dirty_pipes is soon going to be nuked away
    from skl_ddb_values, evacuate enabled_slices
    to a safer place in dev_priv.

v3: Changed "enabled_slices" to be "enabled_dbuf_slices_num"
    (Matt Roper)

v4: - Wrapped the line getting the number of dbuf slices (Matt Roper)
    - Removed the indeed redundant skl_ddb_values declaration (Matt Roper)

Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
---
 drivers/gpu/drm/i915/display/intel_display.c  | 16 +++----
 .../drm/i915/display/intel_display_power.c    |  8 ++--
 .../drm/i915/display/intel_display_types.h    |  3 ++
 drivers/gpu/drm/i915/i915_drv.h               |  7 +--
 drivers/gpu/drm/i915/intel_pm.c               | 45 +++++++++----------
 drivers/gpu/drm/i915/intel_pm.h               |  5 +--
 6 files changed, 39 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 878d331b9e8c..ae0e932962ee 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -13745,14 +13745,13 @@ static void verify_wm_state(struct intel_crtc *crtc,
 	struct skl_hw_state {
 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
-		struct skl_ddb_allocation ddb;
 		struct skl_pipe_wm wm;
 	} *hw;
-	struct skl_ddb_allocation *sw_ddb;
 	struct skl_pipe_wm *sw_wm;
 	struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
 	const enum pipe pipe = crtc->pipe;
 	int plane, level, max_level = ilk_wm_max_level(dev_priv);
+	u8 hw_enabled_slices;
 
 	if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->hw.active)
 		return;
@@ -13766,14 +13765,13 @@ static void verify_wm_state(struct intel_crtc *crtc,
 
 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
 
-	skl_ddb_get_hw_state(dev_priv, &hw->ddb);
-	sw_ddb = &dev_priv->wm.skl_hw.ddb;
+	hw_enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
 
 	if (INTEL_GEN(dev_priv) >= 11 &&
-	    hw->ddb.enabled_slices != sw_ddb->enabled_slices)
+	    hw_enabled_slices != dev_priv->enabled_dbuf_slices_num)
 		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
-			  sw_ddb->enabled_slices,
-			  hw->ddb.enabled_slices);
+			  dev_priv->enabled_dbuf_slices_num,
+			  hw_enabled_slices);
 
 	/* planes */
 	for_each_universal_plane(dev_priv, pipe, plane) {
@@ -15101,8 +15099,8 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc *crtc;
 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
-	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
-	u8 required_slices = state->wm_results.ddb.enabled_slices;
+	u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
+	u8 required_slices = state->enabled_dbuf_slices_num;
 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
 	const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
 	u8 update_pipes = 0, modeset_pipes = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 21561acfa3ac..5e1c601f0f99 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -4406,7 +4406,7 @@ static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 			    u8 req_slices)
 {
-	const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+	const u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
 	bool ret;
 
 	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
@@ -4423,7 +4423,7 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
 
 	if (ret)
-		dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
+		dev_priv->enabled_dbuf_slices_num = req_slices;
 }
 
 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
@@ -4442,7 +4442,7 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 		 * FIXME: for now pretend that we only have 1 slice, see
 		 * intel_enabled_dbuf_slices_num().
 		 */
-		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+		dev_priv->enabled_dbuf_slices_num = 1;
 }
 
 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
@@ -4461,7 +4461,7 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
 		 * FIXME: for now pretend that the first slice is always
 		 * enabled, see intel_enabled_dbuf_slices_num().
 		 */
-		dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+		dev_priv->enabled_dbuf_slices_num = 1;
 }
 
 static void icl_mbus_init(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 33ba93863488..793c5271c5ad 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -521,6 +521,9 @@ struct intel_atomic_state {
 	/* Gen9+ only */
 	struct skl_ddb_values wm_results;
 
+	/* Number of enabled DBuf slices */
+	u8 enabled_dbuf_slices_num;
+
 	struct i915_sw_fence commit_ready;
 
 	struct llist_node freed;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a8a08c63278e..3d888527897b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -797,13 +797,8 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
 	return false;
 }
 
-struct skl_ddb_allocation {
-	u8 enabled_slices; /* GEN11 has configurable 2 slices */
-};
-
 struct skl_ddb_values {
 	unsigned dirty_pipes;
-	struct skl_ddb_allocation ddb;
 };
 
 struct skl_wm_level {
@@ -1211,6 +1206,8 @@ struct drm_i915_private {
 		bool distrust_bios_wm;
 	} wm;
 
+	u8 enabled_dbuf_slices_num; /* GEN11 has configurable 2 slices */
+
 	struct dram_info {
 		bool valid;
 		bool is_16gb_dimm;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 36d158d6c5b2..04f94057d6b3 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3644,16 +3644,16 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
 
-static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
+u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
 {
-	u8 enabled_slices;
+	u8 enabled_dbuf_slices_num;
 
 	/* Slice 1 will always be enabled */
-	enabled_slices = 1;
+	enabled_dbuf_slices_num = 1;
 
 	/* Gen prior to GEN11 have only one DBuf slice */
 	if (INTEL_GEN(dev_priv) < 11)
-		return enabled_slices;
+		return enabled_dbuf_slices_num;
 
 	/*
 	 * FIXME: for now we'll only ever use 1 slice; pretend that we have
@@ -3661,9 +3661,9 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
 	 * toggling of the second slice.
 	 */
 	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
-		enabled_slices++;
+		enabled_dbuf_slices_num++;
 
-	return enabled_slices;
+	return enabled_dbuf_slices_num;
 }
 
 /*
@@ -3867,9 +3867,10 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
 static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 			      const struct intel_crtc_state *crtc_state,
 			      const u64 total_data_rate,
-			      const int num_active,
-			      struct skl_ddb_allocation *ddb)
+			      const int num_active)
 {
+	struct drm_atomic_state *state = crtc_state->uapi.state;
+	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	const struct drm_display_mode *adjusted_mode;
 	u64 total_data_bw;
 	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
@@ -3891,9 +3892,9 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 	 * - should validate we stay within the hw bandwidth limits
 	 */
 	if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
-		ddb->enabled_slices = 2;
+		intel_state->enabled_dbuf_slices_num = 2;
 	} else {
-		ddb->enabled_slices = 1;
+		intel_state->enabled_dbuf_slices_num = 1;
 		ddb_size /= 2;
 	}
 
@@ -3904,7 +3905,6 @@ static void
 skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 				   const struct intel_crtc_state *crtc_state,
 				   const u64 total_data_rate,
-				   struct skl_ddb_allocation *ddb,
 				   struct skl_ddb_entry *alloc, /* out */
 				   int *num_active /* out */)
 {
@@ -3930,7 +3930,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 		*num_active = hweight8(dev_priv->active_pipes);
 
 	ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
-				      *num_active, ddb);
+				      *num_active);
 
 	/*
 	 * If the state doesn't change the active CRTC's or there is no
@@ -4091,10 +4091,10 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
 	intel_display_power_put(dev_priv, power_domain, wakeref);
 }
 
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
-			  struct skl_ddb_allocation *ddb /* out */)
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
 {
-	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+	dev_priv->enabled_dbuf_slices_num =
+				intel_enabled_dbuf_slices_num(dev_priv);
 }
 
 /*
@@ -4271,8 +4271,7 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
 }
 
 static int
-skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
-		      struct skl_ddb_allocation *ddb /* out */)
+skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
 {
 	struct drm_atomic_state *state = crtc_state->uapi.state;
 	struct drm_crtc *crtc = crtc_state->uapi.crtc;
@@ -4314,7 +4313,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 
 
 	skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
-					   ddb, alloc, &num_active);
+					   alloc, &num_active);
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0)
 		return 0;
@@ -5233,18 +5232,17 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
 static int
 skl_compute_ddb(struct intel_atomic_state *state)
 {
-	const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc_state *old_crtc_state;
 	struct intel_crtc_state *new_crtc_state;
 	struct intel_crtc *crtc;
 	int ret, i;
 
-	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+	state->enabled_dbuf_slices_num = dev_priv->enabled_dbuf_slices_num;
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
-		ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
+		ret = skl_allocate_pipe_ddb(new_crtc_state);
 		if (ret)
 			return ret;
 
@@ -5721,11 +5719,10 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
 void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 {
 	struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
-	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
 	struct intel_crtc *crtc;
 	struct intel_crtc_state *crtc_state;
 
-	skl_ddb_get_hw_state(dev_priv, ddb);
+	skl_ddb_get_hw_state(dev_priv);
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
 		crtc_state = to_intel_crtc_state(crtc->base.state);
 
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index c06c6a846d9a..22fd2daf608e 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -17,7 +17,6 @@ struct intel_atomic_state;
 struct intel_crtc;
 struct intel_crtc_state;
 struct intel_plane;
-struct skl_ddb_allocation;
 struct skl_ddb_entry;
 struct skl_pipe_wm;
 struct skl_wm_level;
@@ -33,11 +32,11 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
+u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv);
 void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
 			       struct skl_ddb_entry *ddb_y,
 			       struct skl_ddb_entry *ddb_uv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
-			  struct skl_ddb_allocation *ddb /* out */);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
 void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
 			      struct skl_pipe_wm *out);
 void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
-- 
2.24.1.485.gad05a3d8e5


* [Intel-gfx] [PATCH v16 2/7] drm/i915: Move dbuf slice update to proper place
  2020-01-24  8:44 [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL Stanislav Lisovskiy
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 1/7] drm/i915: Remove skl_ddl_allocation struct Stanislav Lisovskiy
@ 2020-01-24  8:44 ` Stanislav Lisovskiy
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 3/7] drm/i915: Introduce parameterized DBUF_CTL Stanislav Lisovskiy
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 25+ messages in thread
From: Stanislav Lisovskiy @ 2020-01-24  8:44 UTC (permalink / raw)
  To: intel-gfx

The current DBuf slice update wasn't done in the proper
place, especially its "post" part, which should
disable slices only once the vblank has passed and
all other changes have been committed.
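
In rough terms, the resulting ordering in the commit tail is (a
simplified sketch of the flow, not the actual driver code):

  icl_dbuf_slice_pre_update(state);   /* power up any extra slices first */
  /* planes/pipes start using the new slices */
  dev_priv->display.commit_modeset_enables(state);
  /* ... vblanks pass, flips complete, watermarks are optimized ... */
  icl_dbuf_slice_post_update(state);  /* only now power off unused slices */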

v2: Fix to use dev_priv and intel_atomic_state
    instead of skl_ddb_values
    (to be nuked in Ville's patch)

v3: Renamed "enabled_slices" to "enabled_dbuf_slices_num"
    (Matt Roper)

v4: - Rebase against drm-tip.
    - Move post_update closer to optimize_watermarks,
      to prevent unneeded noise from underrun reporting
      (Ville Syrjälä)

Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
---
 drivers/gpu/drm/i915/display/intel_display.c | 37 +++++++++++++++-----
 1 file changed, 28 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index ae0e932962ee..66016c641cdd 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -15094,13 +15094,33 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
 				       state);
 }
 
+static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
+	u8 required_slices = state->enabled_dbuf_slices_num;
+
+	/* If 2nd DBuf slice required, enable it here */
+	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
+		icl_dbuf_slices_update(dev_priv, required_slices);
+}
+
+static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
+	u8 required_slices = state->enabled_dbuf_slices_num;
+
+	/* If 2nd DBuf slice is no more required disable it */
+	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
+		icl_dbuf_slices_update(dev_priv, required_slices);
+}
+
 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc *crtc;
 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
-	u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
-	u8 required_slices = state->enabled_dbuf_slices_num;
 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
 	const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
 	u8 update_pipes = 0, modeset_pipes = 0;
@@ -15121,10 +15141,6 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 		}
 	}
 
-	/* If 2nd DBuf slice required, enable it here */
-	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
-		icl_dbuf_slices_update(dev_priv, required_slices);
-
 	/*
 	 * Whenever the number of active pipes changes, we need to make sure we
 	 * update the pipes in the right order so that their ddb allocations
@@ -15226,9 +15242,6 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
 
 	WARN_ON(modeset_pipes);
 
-	/* If 2nd DBuf slice is no more required disable it */
-	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
-		icl_dbuf_slices_update(dev_priv, required_slices);
 }
 
 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
@@ -15358,6 +15371,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 	if (state->modeset)
 		intel_encoders_update_prepare(state);
 
+	/* Enable all new slices, we might need */
+	icl_dbuf_slice_pre_update(state);
+
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	dev_priv->display.commit_modeset_enables(state);
 
@@ -15414,6 +15430,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 			dev_priv->display.optimize_watermarks(state, crtc);
 	}
 
+	/* Disable all slices, we don't need */
+	icl_dbuf_slice_post_update(state);
+
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		intel_post_plane_update(state, crtc);
 
-- 
2.24.1.485.gad05a3d8e5


* [Intel-gfx] [PATCH v16 3/7] drm/i915: Introduce parameterized DBUF_CTL
  2020-01-24  8:44 [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL Stanislav Lisovskiy
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 1/7] drm/i915: Remove skl_ddl_allocation struct Stanislav Lisovskiy
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 2/7] drm/i915: Move dbuf slice update to proper place Stanislav Lisovskiy
@ 2020-01-24  8:44 ` Stanislav Lisovskiy
  2020-01-28 17:35   ` Ville Syrjälä
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 4/7] drm/i915: Manipulate DBuf slices properly Stanislav Lisovskiy
                   ` (7 subsequent siblings)
  10 siblings, 1 reply; 25+ messages in thread
From: Stanislav Lisovskiy @ 2020-01-24  8:44 UTC (permalink / raw)
  To: intel-gfx

Now start using a parameterized DBUF_CTL instead
of the hardcoded registers; this allows shorter access
functions when reading or storing the entire state.

I tried to implement it in an MMIO_PIPE manner; however,
the DBUF_CTL1 address is higher than DBUF_CTL2's, which
implies that we now have to subtract from the base
rather than add.
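
For reference, the _PICK_EVEN()-style parameterization boils down
to base + index * stride, with a negative stride in this case. A
standalone sketch (the addresses are the DBUF_CTL offsets from this
patch; the helper function is made up purely for illustration):

  #include <stdint.h>
  #include <stdio.h>

  #define DBUF_CTL_ADDR1 0x45008u  /* DBUF_CTL_S1 */
  #define DBUF_CTL_ADDR2 0x44FE8u  /* DBUF_CTL_S2, lower than S1 */

  /* Roughly what _PICK_EVEN(slice, a, b) evaluates to:
   * a + slice * (b - a). Since ADDR2 < ADDR1 the effective
   * stride is -0x20.
   */
  static uint32_t dbuf_ctl_addr(uint32_t slice)
  {
      return DBUF_CTL_ADDR1 + slice * (DBUF_CTL_ADDR2 - DBUF_CTL_ADDR1);
  }

  int main(void)
  {
      printf("slice 0 -> 0x%x\n", (unsigned int)dbuf_ctl_addr(0)); /* 0x45008 */
      printf("slice 1 -> 0x%x\n", (unsigned int)dbuf_ctl_addr(1)); /* 0x44fe8 */
      return 0;
  }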

v2: - Removed unneeded DBUF_CTL_DIST and DBUF_CTL_ADDR
      macros. Started to use _PICK construct as suggested
      by Matt Roper.

v3: - DBUF_CTL_S* to _DBUF_CTL_S*, changed X to "slice"
      in the macro (Ville Syrjälä)
    - Introduced an enum for enumerating DBUF slices (Ville Syrjälä)

Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
---
 .../drm/i915/display/intel_display_power.c    | 30 +++++++++++--------
 .../drm/i915/display/intel_display_power.h    |  5 ++++
 drivers/gpu/drm/i915/i915_reg.h               |  7 +++--
 drivers/gpu/drm/i915/intel_pm.c               |  2 +-
 4 files changed, 28 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 5e1c601f0f99..a59efb24be92 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -4418,9 +4418,11 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 		return;
 
 	if (req_slices > hw_enabled_slices)
-		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
+		ret = intel_dbuf_slice_set(dev_priv,
+					   _DBUF_CTL_S(DBUF_S2), true);
 	else
-		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+		ret = intel_dbuf_slice_set(dev_priv,
+					   _DBUF_CTL_S(DBUF_S2), false);
 
 	if (ret)
 		dev_priv->enabled_dbuf_slices_num = req_slices;
@@ -4428,14 +4430,16 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 
 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
-	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL_S2);
+	I915_WRITE(_DBUF_CTL_S(DBUF_S1),
+		   I915_READ(_DBUF_CTL_S(DBUF_S1)) | DBUF_POWER_REQUEST);
+	I915_WRITE(_DBUF_CTL_S(DBUF_S2),
+		   I915_READ(_DBUF_CTL_S(DBUF_S2)) | DBUF_POWER_REQUEST);
+	POSTING_READ(_DBUF_CTL_S(DBUF_S2));
 
 	udelay(10);
 
-	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
-	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
+	if (!(I915_READ(_DBUF_CTL_S(DBUF_S1)) & DBUF_POWER_STATE) ||
+	    !(I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE))
 		DRM_ERROR("DBuf power enable timeout\n");
 	else
 		/*
@@ -4447,14 +4451,16 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 
 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
-	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
-	POSTING_READ(DBUF_CTL_S2);
+	I915_WRITE(_DBUF_CTL_S(DBUF_S1),
+		   I915_READ(_DBUF_CTL_S(DBUF_S1)) & ~DBUF_POWER_REQUEST);
+	I915_WRITE(_DBUF_CTL_S(DBUF_S2),
+		   I915_READ(_DBUF_CTL_S(DBUF_S2)) & ~DBUF_POWER_REQUEST);
+	POSTING_READ(_DBUF_CTL_S(DBUF_S2));
 
 	udelay(10);
 
-	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
-	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
+	if ((I915_READ(_DBUF_CTL_S(DBUF_S1)) & DBUF_POWER_STATE) ||
+	    (I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE))
 		DRM_ERROR("DBuf power disable timeout!\n");
 	else
 		/*
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 2608a65af7fa..601e000ffd0d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -307,6 +307,11 @@ intel_display_power_put_async(struct drm_i915_private *i915,
 }
 #endif
 
+enum dbuf_slice {
+	DBUF_S1,
+	DBUF_S2,
+};
+
 #define with_intel_display_power(i915, domain, wf) \
 	for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
 	     intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b93c4c18f05c..625be54d3eae 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7748,9 +7748,10 @@ enum {
 #define DISP_ARB_CTL2	_MMIO(0x45004)
 #define  DISP_DATA_PARTITION_5_6	(1 << 6)
 #define  DISP_IPC_ENABLE		(1 << 3)
-#define DBUF_CTL	_MMIO(0x45008)
-#define DBUF_CTL_S1	_MMIO(0x45008)
-#define DBUF_CTL_S2	_MMIO(0x44FE8)
+#define DBUF_CTL_ADDR1			0x45008
+#define DBUF_CTL_ADDR2			0x44FE8
+#define _DBUF_CTL_S(X)			_MMIO(_PICK_EVEN(X, DBUF_CTL_ADDR1, DBUF_CTL_ADDR2))
+#define DBUF_CTL			_DBUF_CTL_S(0)
 #define  DBUF_POWER_REQUEST		(1 << 31)
 #define  DBUF_POWER_STATE		(1 << 30)
 #define GEN7_MSG_CTL	_MMIO(0x45010)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 04f94057d6b3..b8d78e26515c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3660,7 +3660,7 @@ u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
 	 * only that 1 slice enabled until we have a proper way for on-demand
 	 * toggling of the second slice.
 	 */
-	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
+	if (0 && I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE)
 		enabled_dbuf_slices_num++;
 
 	return enabled_dbuf_slices_num;
-- 
2.24.1.485.gad05a3d8e5


* [Intel-gfx] [PATCH v16 4/7] drm/i915: Manipulate DBuf slices properly
  2020-01-24  8:44 [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL Stanislav Lisovskiy
                   ` (2 preceding siblings ...)
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 3/7] drm/i915: Introduce parameterized DBUF_CTL Stanislav Lisovskiy
@ 2020-01-24  8:44 ` Stanislav Lisovskiy
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 5/7] drm/i915: Correctly map DBUF slices to pipes Stanislav Lisovskiy
                   ` (6 subsequent siblings)
  10 siblings, 0 replies; 25+ messages in thread
From: Stanislav Lisovskiy @ 2020-01-24  8:44 UTC (permalink / raw)
  To: intel-gfx

Start manipulating DBuf slices as a mask
rather than as a total number, as the current approach
doesn't give us full control over all combinations
of slices, which we might need (e.g. enabling S2
only can't be expressed by setting enabled_slices=1).
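
As a minimal illustration of that point (a sketch, not driver code;
BIT() is the usual kernel helper, redefined here only to keep the
snippet standalone):

  #define BIT(n) (1u << (n))

  enum dbuf_slice { DBUF_S1, DBUF_S2 };

  /* Old scheme: a count. "1" cannot say whether S1 or S2 is meant. */
  unsigned int enabled_dbuf_slices_num = 1;

  /* New scheme: a mask. "S2 only" is simply BIT(DBUF_S2), something
   * the count could never express.
   */
  unsigned int enabled_dbuf_slices_mask = BIT(DBUF_S2);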

Removed wrong code from intel_get_ddb_size as
it doesn't match the BSpec. For now still just
use one DBuf slice until the proper algorithm is implemented.

Other minor code refactoring to get prepared
for the major DBuf assignment changes to land:
- As the enabled slices now contain a mask,
  we still need some value which should
  reflect how many DBuf slices are supported
  by the platform; device info now contains
  num_supported_dbuf_slices.
- Removed an unneeded assertion as we are now
  manipulating slices in a more proper way.

v2: Start using enabled_slices in dev_priv

v3: "enabled_slices" is now "enabled_dbuf_slices_mask",
    as this now sits in dev_priv independently.

v4: - Fixed debug print formatting to hex(Matt Roper)
    - Optimized dbuf slice updates to be used only
      if slice union is different from current conf(Matt Roper)
    - Fixed some functions to be static(Matt Roper)
    - Created a parameterized version for DBUF_CTL to
      simplify DBuf programming cycle(Matt Roper)
    - Removed unneeded field from GEN10_FEATURES (Matt Roper)

v5: - Removed the redundant dbuf slice programming helper (Ville Syrjälä)
    - Started to use parameterized loop for hw readout to get slices
      (Ville Syrjälä)
    - Added back the assertion checking the number of DBUF slices enabled
      after the DC states 5/6 transition; also added a new assertion,
      as starting from ICL the DMC seems to restore the last DBuf
      power state set, rather than power up all dbuf slices
      as the assertion was previously expecting (Ville Syrjälä)

v6: - Now using an enum for DBuf slices in this patch (Ville Syrjälä)
    - Removed gen11_assert_dbuf_enabled and put gen9_assert_dbuf_enabled
      back, as we really need to have a single unified assert here.
      However, always enabling slice 1 is currently enforced by BSpec,
      so we will have to OR the enabled slices mask with 1 in order
      to be consistent with BSpec; that way we can unify that
      assertion and check against the actual state from the driver,
      not some hardcoded value (concluded with Ville).
    - Remove the parameterized DBUF_CTL version, to extract it to another
      patch (Ville Syrjälä)
v7:
    - Removed the unneeded hardcoded return value for older gens from
      intel_enabled_dbuf_slices_mask - this is now handled in a
      unified manner since device info anyway returns max dbuf slices
      as 1 for older platforms (Matthew Roper)
    - Now using INTEL_INFO(dev_priv)->num_supported_dbuf_slices instead
      of the intel_dbuf_max_slices function as it is trivial (Matthew Roper)

v8: - Fixed icl_dbuf_disable to still disable all dbufs (Ville Syrjälä)

Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
---
 drivers/gpu/drm/i915/display/intel_display.c  | 23 ++---
 .../drm/i915/display/intel_display_power.c    | 89 ++++++-------------
 .../drm/i915/display/intel_display_types.h    |  2 +-
 drivers/gpu/drm/i915/i915_drv.h               |  2 +-
 drivers/gpu/drm/i915/i915_pci.c               |  5 +-
 drivers/gpu/drm/i915/intel_device_info.h      |  1 +
 drivers/gpu/drm/i915/intel_pm.c               | 53 +++--------
 drivers/gpu/drm/i915/intel_pm.h               |  2 +-
 8 files changed, 60 insertions(+), 117 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 66016c641cdd..1c957df5c28c 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -13765,12 +13765,12 @@ static void verify_wm_state(struct intel_crtc *crtc,
 
 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
 
-	hw_enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
 
 	if (INTEL_GEN(dev_priv) >= 11 &&
-	    hw_enabled_slices != dev_priv->enabled_dbuf_slices_num)
-		DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
-			  dev_priv->enabled_dbuf_slices_num,
+	    hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
+		DRM_ERROR("mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+			  dev_priv->enabled_dbuf_slices_mask,
 			  hw_enabled_slices);
 
 	/* planes */
@@ -15097,22 +15097,23 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
 static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
-	u8 required_slices = state->enabled_dbuf_slices_num;
+	u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
+	u8 required_slices = state->enabled_dbuf_slices_mask;
+	u8 slices_union = hw_enabled_slices | required_slices;
 
 	/* If 2nd DBuf slice required, enable it here */
-	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
-		icl_dbuf_slices_update(dev_priv, required_slices);
+	if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
+		icl_dbuf_slices_update(dev_priv, slices_union);
 }
 
 static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
-	u8 required_slices = state->enabled_dbuf_slices_num;
+	u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
+	u8 required_slices = state->enabled_dbuf_slices_mask;
 
 	/* If 2nd DBuf slice is no more required disable it */
-	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
+	if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
 		icl_dbuf_slices_update(dev_priv, required_slices);
 }
 
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index a59efb24be92..96b38252578b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -15,6 +15,7 @@
 #include "intel_display_types.h"
 #include "intel_dpio_phy.h"
 #include "intel_hotplug.h"
+#include "intel_pm.h"
 #include "intel_sideband.h"
 #include "intel_tc.h"
 #include "intel_vga.h"
@@ -1028,11 +1029,13 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
 
 static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
 {
-	u32 tmp = I915_READ(DBUF_CTL);
+	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+	u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
 
-	WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
-	     (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
-	     "Unexpected DBuf power power state (0x%08x)\n", tmp);
+	WARN(hw_enabled_dbuf_slices != enabled_dbuf_slices,
+	     "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
+	     hw_enabled_dbuf_slices,
+	     enabled_dbuf_slices);
 }
 
 static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
@@ -4388,86 +4391,46 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
 
 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
 {
-	intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
+	icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
 }
 
 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
 {
-	intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
-}
-
-static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
-{
-	if (INTEL_GEN(dev_priv) < 11)
-		return 1;
-	return 2;
+	icl_dbuf_slices_update(dev_priv, 0);
 }
 
 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 			    u8 req_slices)
 {
-	const u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
-	bool ret;
+	int i;
+	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
 
-	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
-		DRM_ERROR("Invalid number of dbuf slices requested\n");
-		return;
-	}
+	WARN(hweight8(req_slices) > max_slices,
+	     "Invalid number of dbuf slices requested\n");
 
-	if (req_slices == hw_enabled_slices || req_slices == 0)
-		return;
+	DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
 
-	if (req_slices > hw_enabled_slices)
-		ret = intel_dbuf_slice_set(dev_priv,
-					   _DBUF_CTL_S(DBUF_S2), true);
-	else
-		ret = intel_dbuf_slice_set(dev_priv,
-					   _DBUF_CTL_S(DBUF_S2), false);
+	for (i = 0; i < max_slices; i++) {
+		intel_dbuf_slice_set(dev_priv,
+				     _DBUF_CTL_S(i),
+				     (req_slices & BIT(i)) != 0);
+	}
 
-	if (ret)
-		dev_priv->enabled_dbuf_slices_num = req_slices;
+	dev_priv->enabled_dbuf_slices_mask = req_slices;
 }
 
 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(_DBUF_CTL_S(DBUF_S1),
-		   I915_READ(_DBUF_CTL_S(DBUF_S1)) | DBUF_POWER_REQUEST);
-	I915_WRITE(_DBUF_CTL_S(DBUF_S2),
-		   I915_READ(_DBUF_CTL_S(DBUF_S2)) | DBUF_POWER_REQUEST);
-	POSTING_READ(_DBUF_CTL_S(DBUF_S2));
-
-	udelay(10);
-
-	if (!(I915_READ(_DBUF_CTL_S(DBUF_S1)) & DBUF_POWER_STATE) ||
-	    !(I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE))
-		DRM_ERROR("DBuf power enable timeout\n");
-	else
-		/*
-		 * FIXME: for now pretend that we only have 1 slice, see
-		 * intel_enabled_dbuf_slices_num().
-		 */
-		dev_priv->enabled_dbuf_slices_num = 1;
+	/*
+	 * Just power up 1 slice, we will
+	 * figure out later which slices we have and what we need.
+	 */
+	icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
 }
 
 static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(_DBUF_CTL_S(DBUF_S1),
-		   I915_READ(_DBUF_CTL_S(DBUF_S1)) & ~DBUF_POWER_REQUEST);
-	I915_WRITE(_DBUF_CTL_S(DBUF_S2),
-		   I915_READ(_DBUF_CTL_S(DBUF_S2)) & ~DBUF_POWER_REQUEST);
-	POSTING_READ(_DBUF_CTL_S(DBUF_S2));
-
-	udelay(10);
-
-	if ((I915_READ(_DBUF_CTL_S(DBUF_S1)) & DBUF_POWER_STATE) ||
-	    (I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE))
-		DRM_ERROR("DBuf power disable timeout!\n");
-	else
-		/*
-		 * FIXME: for now pretend that the first slice is always
-		 * enabled, see intel_enabled_dbuf_slices_num().
-		 */
-		dev_priv->enabled_dbuf_slices_num = 1;
+	icl_dbuf_slices_update(dev_priv, 0);
 }
 
 static void icl_mbus_init(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 793c5271c5ad..297d33a69b65 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -522,7 +522,7 @@ struct intel_atomic_state {
 	struct skl_ddb_values wm_results;
 
 	/* Number of enabled DBuf slices */
-	u8 enabled_dbuf_slices_num;
+	u8 enabled_dbuf_slices_mask;
 
 	struct i915_sw_fence commit_ready;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3d888527897b..2678ac84f107 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1206,7 +1206,7 @@ struct drm_i915_private {
 		bool distrust_bios_wm;
 	} wm;
 
-	u8 enabled_dbuf_slices_num; /* GEN11 has configurable 2 slices */
+	u8 enabled_dbuf_slices_mask; /* GEN11 has configurable 2 slices */
 
 	struct dram_info {
 		bool valid;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6fbec2e7068d..c91aaccdb3d6 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -615,7 +615,8 @@ static const struct intel_device_info chv_info = {
 	.has_gt_uc = 1, \
 	.display.has_hdcp = 1, \
 	.display.has_ipc = 1, \
-	.ddb_size = 896
+	.ddb_size = 896, \
+	.num_supported_dbuf_slices = 1
 
 #define SKL_PLATFORM \
 	GEN9_FEATURES, \
@@ -650,6 +651,7 @@ static const struct intel_device_info skl_gt4_info = {
 #define GEN9_LP_FEATURES \
 	GEN(9), \
 	.is_lp = 1, \
+	.num_supported_dbuf_slices = 1, \
 	.display.has_hotplug = 1, \
 	.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
 	.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
@@ -774,6 +776,7 @@ static const struct intel_device_info cnl_info = {
 	}, \
 	GEN(11), \
 	.ddb_size = 2048, \
+	.num_supported_dbuf_slices = 2, \
 	.has_logical_ring_elsq = 1, \
 	.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
 
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 2725cb7fc169..7d4d122d2182 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -180,6 +180,7 @@ struct intel_device_info {
 	} display;
 
 	u16 ddb_size; /* in blocks */
+	u8 num_supported_dbuf_slices; /* number of DBuf slices */
 
 	/* Register offsets for the various display pipes and transcoders */
 	int pipe_offsets[I915_MAX_TRANSCODERS];
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b8d78e26515c..ca5b34d297d9 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3644,26 +3644,18 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
 
-u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
 {
-	u8 enabled_dbuf_slices_num;
-
-	/* Slice 1 will always be enabled */
-	enabled_dbuf_slices_num = 1;
-
-	/* Gen prior to GEN11 have only one DBuf slice */
-	if (INTEL_GEN(dev_priv) < 11)
-		return enabled_dbuf_slices_num;
+	int i;
+	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+	u8 enabled_slices_mask = 0;
 
-	/*
-	 * FIXME: for now we'll only ever use 1 slice; pretend that we have
-	 * only that 1 slice enabled until we have a proper way for on-demand
-	 * toggling of the second slice.
-	 */
-	if (0 && I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE)
-		enabled_dbuf_slices_num++;
+	for (i = 0; i < max_slices; i++) {
+		if (I915_READ(_DBUF_CTL_S(i)) & DBUF_POWER_STATE)
+			enabled_slices_mask |= BIT(i);
+	}
 
-	return enabled_dbuf_slices_num;
+	return enabled_slices_mask;
 }
 
 /*
@@ -3871,8 +3863,6 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 {
 	struct drm_atomic_state *state = crtc_state->uapi.state;
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	const struct drm_display_mode *adjusted_mode;
-	u64 total_data_bw;
 	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
 
 	drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
@@ -3880,23 +3870,8 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) < 11)
 		return ddb_size - 4; /* 4 blocks for bypass path allocation */
 
-	adjusted_mode = &crtc_state->hw.adjusted_mode;
-	total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
-
-	/*
-	 * 12GB/s is maximum BW supported by single DBuf slice.
-	 *
-	 * FIXME dbuf slice code is broken:
-	 * - must wait for planes to stop using the slice before powering it off
-	 * - plane straddling both slices is illegal in multi-pipe scenarios
-	 * - should validate we stay within the hw bandwidth limits
-	 */
-	if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
-		intel_state->enabled_dbuf_slices_num = 2;
-	} else {
-		intel_state->enabled_dbuf_slices_num = 1;
-		ddb_size /= 2;
-	}
+	intel_state->enabled_dbuf_slices_mask = BIT(DBUF_S1);
+	ddb_size /= 2;
 
 	return ddb_size;
 }
@@ -4093,8 +4068,8 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
 
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
 {
-	dev_priv->enabled_dbuf_slices_num =
-				intel_enabled_dbuf_slices_num(dev_priv);
+	dev_priv->enabled_dbuf_slices_mask =
+				intel_enabled_dbuf_slices_mask(dev_priv);
 }
 
 /*
@@ -5238,7 +5213,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
 	struct intel_crtc *crtc;
 	int ret, i;
 
-	state->enabled_dbuf_slices_num = dev_priv->enabled_dbuf_slices_num;
+	state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask;
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
 					    new_crtc_state, i) {
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 22fd2daf608e..d60a85421c5a 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -32,7 +32,7 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
-u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv);
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
 void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
 			       struct skl_ddb_entry *ddb_y,
 			       struct skl_ddb_entry *ddb_uv);
-- 
2.24.1.485.gad05a3d8e5


* [Intel-gfx] [PATCH v16 5/7] drm/i915: Correctly map DBUF slices to pipes
  2020-01-24  8:44 [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL Stanislav Lisovskiy
                   ` (3 preceding siblings ...)
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 4/7] drm/i915: Manipulate DBuf slices properly Stanislav Lisovskiy
@ 2020-01-24  8:44 ` Stanislav Lisovskiy
  2020-01-28 23:15   ` Matt Roper
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 6/7] drm/i915: Protect intel_dbuf_slices_update with mutex Stanislav Lisovskiy
                   ` (5 subsequent siblings)
  10 siblings, 1 reply; 25+ messages in thread
From: Stanislav Lisovskiy @ 2020-01-24  8:44 UTC (permalink / raw)
  To: intel-gfx

Added proper DBuf slice mapping to the corresponding
pipes, depending on the pipe configuration, as stated
in BSpec.
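
As a worked example of the resulting allocation math (the numbers are
hypothetical, using the gen11+ ddb_size of 2048 blocks and two slices;
the standalone snippet below mirrors, in simplified form,
icl_get_first_dbuf_slice_offset() and the proportional split done in
skl_ddb_get_pipe_allocation_limits()):

  #include <stdio.h>

  #define BIT(n) (1u << (n))
  enum dbuf_slice { DBUF_S1, DBUF_S2 };

  int main(void)
  {
      unsigned int ddb_size = 2048;            /* blocks, gen11+ */
      unsigned int slice_size = ddb_size / 2;  /* 1024 per slice */

      /* Assume the table gave this pipe DBUF_S2 only, shared with one
       * other pipe of the same mask (both 1920 px wide), and that the
       * other pipe comes first in pipe order.
       */
      unsigned int dbuf_slice_mask = BIT(DBUF_S2);
      unsigned int total_width = 1920 + 1920;
      unsigned int width_before = 1920, pipe_width = 1920;

      /* kernel ffs()/hweight8() equivalents */
      unsigned int offset = (__builtin_ffs(dbuf_slice_mask) - 1) * slice_size;
      unsigned int range = __builtin_popcount(dbuf_slice_mask) * slice_size;

      unsigned int start = offset + range * width_before / total_width;
      unsigned int end = offset +
              range * (width_before + pipe_width) / total_width;

      printf("ddb %u-%u\n", start, end);        /* ddb 1536-2048 */
      return 0;
  }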

v2:
    - Remove unneeded braces
    - Stop using macro for DBuf assignments as
      it seems to reduce readability.

v3: Start using enabled slices mask in dev_priv

v4: Renamed "enabled_slices" used in dev_priv
    to "enabled_dbuf_slices_mask"(Matt Roper)

v5: - Removed redundant parameters from
      intel_get_ddb_size function.(Matt Roper)
    - Made i915_possible_dbuf_slices static(Matt Roper)
    - Renamed total_width into total_width_in_range
      so that it now reflects that this is not
      a total pipe width but the one in current
      dbuf slice allowed range for pipe.(Matt Roper)
    - Removed 4th pipe for ICL in DBuf assignment
      table(Matt Roper)
    - Fixed wrong DBuf slice in DBuf table for TGL
      (Matt Roper)
    - Added a comment regarding why we are currently not
      using the pipe ratio for DBuf assignment on ICL

v6: - Changed u32 to unsigned int in
      icl_get_first_dbuf_slice_offset function signature
      (Ville Syrjälä)
    - Changed also u32 to u8 in dbuf slice mask structure
      (Ville Syrjälä)
    - Switched from DBUF_S1_BIT to enum + explicit
      BIT(DBUF_S1) access(Ville Syrjälä)
    - Switched to named initializers in DBuf assignment
      arrays(Ville Syrjälä)
    - DBuf assignment arrays now use autogeneration tool
      from
      https://patchwork.freedesktop.org/series/70493/
      to avoid typos.
    - Renamed i915_find_pipe_conf to *_compute_dbuf_slices
      (Ville Syrjälä)
    - Changed platforms ordering in skl_compute_dbuf_slices
      to be from newest to oldest(Ville Syrjälä)

v7: - Now ORing assigned DBuf slice config always with DBUF_S1
      because slice 1 has to be constantly powered on.
      (Ville Syrjälä)

v8: - Added pipe_name for neater printing(Ville Syrjälä)
    - Renamed width_before_pipe to width_before_pipe_in_range,
      to better reflect that now all the calculations are happening
      inside DBuf range allowed by current pipe configuration mask
      (Ville Syrjälä)
    - Shortened FIXME comment message, regarding constant ORing with
      DBUF_S1(Ville Syrjälä)
    - Added .dbuf_mask named initializer to pipe assignment array
      (Ville Syrjälä)
    - Edited pipe assignment array to use only single DBuf slice
      for gen11 single pipe configurations, until "pipe ratio"
      thing is finally sorted out(Ville Syrjälä)
    - Removed the unused parameter crtc_state from the
      icl/tgl_compute_dbuf_slices function for now (Ville Syrjälä)

Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
---
 drivers/gpu/drm/i915/intel_pm.c | 385 ++++++++++++++++++++++++++++++--
 1 file changed, 366 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ca5b34d297d9..92c4d4624092 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3856,13 +3856,29 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
 	return true;
 }
 
-static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
-			      const struct intel_crtc_state *crtc_state,
-			      const u64 total_data_rate,
-			      const int num_active)
+/*
+ * Calculate initial DBuf slice offset, based on slice size
+ * and mask(i.e if slice size is 1024 and second slice is enabled
+ * offset would be 1024)
+ */
+static unsigned int
+icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
+				u32 slice_size,
+				u32 ddb_size)
+{
+	unsigned int offset = 0;
+
+	if (!dbuf_slice_mask)
+		return 0;
+
+	offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
+
+	WARN_ON(offset >= ddb_size);
+	return offset;
+}
+
+static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
 {
-	struct drm_atomic_state *state = crtc_state->uapi.state;
-	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
 
 	drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
@@ -3870,12 +3886,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) < 11)
 		return ddb_size - 4; /* 4 blocks for bypass path allocation */
 
-	intel_state->enabled_dbuf_slices_mask = BIT(DBUF_S1);
-	ddb_size /= 2;
-
 	return ddb_size;
 }
 
+static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
+				  u32 active_pipes);
+
 static void
 skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 				   const struct intel_crtc_state *crtc_state,
@@ -3887,10 +3903,17 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
 	const struct intel_crtc *crtc;
-	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
+	u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
 	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
 	u16 ddb_size;
+	u32 ddb_range_size;
 	u32 i;
+	u32 dbuf_slice_mask;
+	u32 active_pipes;
+	u32 offset;
+	u32 slice_size;
+	u32 total_slice_mask;
+	u32 start, end;
 
 	if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) {
 		alloc->start = 0;
@@ -3900,12 +3923,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 	}
 
 	if (intel_state->active_pipe_changes)
-		*num_active = hweight8(intel_state->active_pipes);
+		active_pipes = intel_state->active_pipes;
 	else
-		*num_active = hweight8(dev_priv->active_pipes);
+		active_pipes = dev_priv->active_pipes;
+
+	*num_active = hweight8(active_pipes);
+
+	ddb_size = intel_get_ddb_size(dev_priv);
 
-	ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
-				      *num_active);
+	slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
 
 	/*
 	 * If the state doesn't change the active CRTC's or there is no
@@ -3924,31 +3950,96 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 		return;
 	}
 
+	/*
+	 * Get allowed DBuf slices for correspondent pipe and platform.
+	 */
+	dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
+
+	DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n",
+		      dbuf_slice_mask,
+		      pipe_name(for_pipe), active_pipes);
+
+	/*
+	 * Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
+	 * and slice size is 1024, the offset would be 1024
+	 */
+	offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
+						 slice_size, ddb_size);
+
+	/*
+	 * Figure out total size of allowed DBuf slices, which is basically
+	 * a number of allowed slices for that pipe multiplied by slice size.
+	 * Inside of this
+	 * range ddb entries are still allocated in proportion to display width.
+	 */
+	ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
+
 	/*
 	 * Watermark/ddb requirement highly depends upon width of the
 	 * framebuffer, So instead of allocating DDB equally among pipes
 	 * distribute DDB based on resolution/width of the display.
 	 */
+	total_slice_mask = dbuf_slice_mask;
 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
 		const struct drm_display_mode *adjusted_mode =
 			&crtc_state->hw.adjusted_mode;
 		enum pipe pipe = crtc->pipe;
 		int hdisplay, vdisplay;
+		u32 pipe_dbuf_slice_mask;
 
-		if (!crtc_state->hw.enable)
+		if (!crtc_state->hw.active)
+			continue;
+
+		pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
+							       active_pipes);
+
+		/*
+		 * According to BSpec pipe can share one dbuf slice with another
+		 * pipes or pipe can use multiple dbufs, in both cases we
+		 * account for other pipes only if they have exactly same mask.
+		 * However we need to account how many slices we should enable
+		 * in total.
+		 */
+		total_slice_mask |= pipe_dbuf_slice_mask;
+
+		/*
+		 * Do not account pipes using other slice sets
+		 * luckily as of current BSpec slice sets do not partially
+		 * intersect(pipes share either same one slice or same slice set
+		 * i.e no partial intersection), so it is enough to check for
+		 * equality for now.
+		 */
+		if (dbuf_slice_mask != pipe_dbuf_slice_mask)
 			continue;
 
 		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
-		total_width += hdisplay;
+
+		total_width_in_range += hdisplay;
 
 		if (pipe < for_pipe)
-			width_before_pipe += hdisplay;
+			width_before_pipe_in_range += hdisplay;
 		else if (pipe == for_pipe)
 			pipe_width = hdisplay;
 	}
 
-	alloc->start = ddb_size * width_before_pipe / total_width;
-	alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
+	/*
+	 * FIXME: For now we always enable slice S1 as per
+	 * the Bspec display initialization sequence.
+	 */
+	intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1);
+
+	start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
+	end = ddb_range_size *
+		(width_before_pipe_in_range + pipe_width) / total_width_in_range;
+
+	alloc->start = offset + start;
+	alloc->end = offset + end;
+
+	DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
+		      alloc->start, alloc->end);
+	DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
+		      intel_state->enabled_dbuf_slices_mask,
+		      INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
 }
 
 static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
@@ -4119,6 +4210,262 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
 	return mul_fixed16(downscale_w, downscale_h);
 }
 
+struct dbuf_slice_conf_entry {
+	u8 active_pipes;
+	u8 dbuf_mask[I915_MAX_PIPES];
+};
+
+/*
+ * Table taken from Bspec 12716
+ * Pipes do have some preferred DBuf slice affinity,
+ * plus there are some hardcoded requirements on how
+ * those should be distributed for multipipe scenarios.
+ * For more DBuf slices algorithm can get even more messy
+ * and less readable, so decided to use a table almost
+ * as is from BSpec itself - that way it is at least easier
+ * to compare, change and check.
+ */
+static struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+	{
+		.active_pipes = BIT(PIPE_A),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_B),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S1)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_B] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_C] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2)
+		}
+	},
+};
+
+/*
+ * Table taken from Bspec 49255.
+ * Pipes have some preferred DBuf slice affinity, and there are
+ * hardcoded requirements on how those slices should be distributed
+ * in multi-pipe scenarios. With more DBuf slices the algorithm would
+ * only get messier and less readable, so the table is used almost
+ * as-is from the BSpec itself - that way it is at least easier
+ * to compare, change and check.
+ */
+static struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+	{
+		.active_pipes = BIT(PIPE_A),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_B),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S2),
+			[PIPE_B] = BIT(DBUF_S1)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_D] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_D] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_D] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_C] = BIT(DBUF_S1),
+			[PIPE_D] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2),
+			[PIPE_D] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2),
+			[PIPE_D] = BIT(DBUF_S2)
+		}
+	},
+	{
+		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1),
+			[PIPE_B] = BIT(DBUF_S1),
+			[PIPE_C] = BIT(DBUF_S2),
+			[PIPE_D] = BIT(DBUF_S2)
+		}
+	},
+};
+
+static u8 compute_dbuf_slices(enum pipe pipe,
+			      u32 active_pipes,
+			      const struct dbuf_slice_conf_entry *dbuf_slices,
+			      int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++) {
+		if (dbuf_slices[i].active_pipes == active_pipes)
+			return dbuf_slices[i].dbuf_mask[pipe];
+	}
+	return 0;
+}
+
+/*
+ * This function finds the entry with the same enabled pipe configuration
+ * and returns the corresponding DBuf slice mask as stated in the BSpec
+ * for the particular platform.
+ */
+static u32 icl_compute_dbuf_slices(enum pipe pipe,
+				   u32 active_pipes)
+{
+	/*
+	 * FIXME: For ICL this is still a bit unclear, as a previous BSpec
+	 * revision required calculating a "pipe ratio" to determine whether
+	 * one or two slices can be used for single pipe configurations,
+	 * as an additional constraint on top of the existing table.
+	 * However, based on recent info, it should not be the "pipe ratio"
+	 * but rather the ratio between pixel_rate and cdclk with additional
+	 * constants, so for now only the table is used, until this is
+	 * clarified. This is also the reason the crtc_state parameter is
+	 * still around - it will be needed once those additional
+	 * constraints pop up.
+	 */
+	return compute_dbuf_slices(pipe, active_pipes,
+				   icl_allowed_dbufs,
+				   ARRAY_SIZE(icl_allowed_dbufs));
+}
+
+static u32 tgl_compute_dbuf_slices(enum pipe pipe,
+				   u32 active_pipes)
+{
+	return compute_dbuf_slices(pipe, active_pipes,
+				   tgl_allowed_dbufs,
+				   ARRAY_SIZE(tgl_allowed_dbufs));
+}
+
+static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
+				  u32 active_pipes)
+{
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum pipe pipe = crtc->pipe;
+
+	if (IS_GEN(dev_priv, 12))
+		return tgl_compute_dbuf_slices(pipe,
+					       active_pipes);
+	else if (IS_GEN(dev_priv, 11))
+		return icl_compute_dbuf_slices(pipe,
+					       active_pipes);
+	/*
+	 * For anything else just return one slice for now.
+	 * This should be extended for other platforms.
+	 */
+	return BIT(DBUF_S1);
+}
+
 static u64
 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
 			     const struct intel_plane_state *plane_state,
-- 
2.24.1.485.gad05a3d8e5
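
For illustration, the proportional DDB split computed above can be
reproduced stand-alone; the pipe widths and range size below are made-up
numbers, not values from the patch:

#include <stdio.h>

int main(void)
{
	/*
	 * Hypothetical setup: two pipes share the same 512-block DBuf
	 * range starting at offset 0. The first pipe is 1920 pixels
	 * wide, the second 2560 pixels wide.
	 */
	unsigned int ddb_range_size = 512, offset = 0;
	unsigned int widths[2] = { 1920, 2560 };
	unsigned int total_width = widths[0] + widths[1];
	unsigned int width_before = 0;

	for (int i = 0; i < 2; i++) {
		unsigned int start = ddb_range_size * width_before / total_width;
		unsigned int end = ddb_range_size *
			(width_before + widths[i]) / total_width;

		/* prints "pipe 0 ddb 0-219" and "pipe 1 ddb 219-512" */
		printf("pipe %d ddb %u-%u\n", i, offset + start, offset + end);
		width_before += widths[i];
	}

	return 0;
}

Each pipe gets a share of the range proportional to its horizontal
resolution, exactly as in the alloc->start/alloc->end computation above.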

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [Intel-gfx] [PATCH v16 6/7] drm/i915: Protect intel_dbuf_slices_update with mutex
  2020-01-24  8:44 [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL Stanislav Lisovskiy
                   ` (4 preceding siblings ...)
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 5/7] drm/i915: Correctly map DBUF slices to pipes Stanislav Lisovskiy
@ 2020-01-24  8:44 ` Stanislav Lisovskiy
  2020-01-28 23:33   ` Matt Roper
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 7/7] drm/i915: Update dbuf slices only with full modeset Stanislav Lisovskiy
                   ` (4 subsequent siblings)
  10 siblings, 1 reply; 25+ messages in thread
From: Stanislav Lisovskiy @ 2020-01-24  8:44 UTC (permalink / raw)
  To: intel-gfx

Use the power_domains mutex to protect against a race condition:
intel_dbuf_slices_update might run in parallel to
gen9_dc_off_power_well_enable being called from intel_dp_detect,
for instance. That triggers an assertion, as
gen9_assert_dbuf_enabled can preempt the update after the registers
have already been written but before dev_priv has been updated.

Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
---
 drivers/gpu/drm/i915/display/intel_display_power.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 96b38252578b..99ddc21e004c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -4404,12 +4404,22 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 {
 	int i;
 	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+	struct i915_power_domains *power_domains = &dev_priv->power_domains;
 
 	WARN(hweight8(req_slices) > max_slices,
 	     "Invalid number of dbuf slices requested\n");
 
 	DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
 
+	/*
+	 * This might run in parallel to gen9_dc_off_power_well_enable
+	 * being called from intel_dp_detect, for instance, which triggers
+	 * an assertion, as gen9_assert_dbuf_enabled might preempt this
+	 * update after the registers have already been written but before
+	 * dev_priv has been updated.
+	 */
+	mutex_lock(&power_domains->lock);
+
 	for (i = 0; i < max_slices; i++) {
 		intel_dbuf_slice_set(dev_priv,
 				     _DBUF_CTL_S(i),
@@ -4417,6 +4427,8 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 	}
 
 	dev_priv->enabled_dbuf_slices_mask = req_slices;
+
+	mutex_unlock(&power_domains->lock);
 }
 
 static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
-- 
2.24.1.485.gad05a3d8e5
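
A minimal user-space sketch of the locking pattern applied above, with
hypothetical names and pthreads used purely for illustration (this is not
the i915 code): the "registers" and the cached mask are only ever written
and checked under the same lock, so the checker can never observe the
registers updated while the cached mask is still stale.

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t power_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int hw_dbuf_ctl[2];      /* stands in for the DBUF_CTL_S* registers */
static unsigned int enabled_slices_mask; /* stands in for the cached dev_priv state */

static void slices_update(unsigned int req_slices)
{
	pthread_mutex_lock(&power_lock);

	for (int i = 0; i < 2; i++)
		hw_dbuf_ctl[i] = !!(req_slices & (1u << i));

	/* registers and cached mask change atomically w.r.t. the checker */
	enabled_slices_mask = req_slices;

	pthread_mutex_unlock(&power_lock);
}

static void assert_dbuf_enabled(void)
{
	unsigned int hw_mask;

	pthread_mutex_lock(&power_lock);
	hw_mask = (hw_dbuf_ctl[1] << 1) | hw_dbuf_ctl[0];
	assert(hw_mask == enabled_slices_mask); /* never sees a half-done update */
	pthread_mutex_unlock(&power_lock);
}

int main(void)
{
	slices_update(0x3);
	assert_dbuf_enabled();
	return 0;
}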

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [Intel-gfx] [PATCH v16 7/7] drm/i915: Update dbuf slices only with full modeset
  2020-01-24  8:44 [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL Stanislav Lisovskiy
                   ` (5 preceding siblings ...)
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 6/7] drm/i915: Protect intel_dbuf_slices_update with mutex Stanislav Lisovskiy
@ 2020-01-24  8:44 ` Stanislav Lisovskiy
  2020-01-28 23:37   ` Matt Roper
  2020-01-24  9:52 ` [Intel-gfx] ✓ Fi.CI.BAT: success for Enable second DBuf slice for ICL and TGL (rev21) Patchwork
                   ` (3 subsequent siblings)
  10 siblings, 1 reply; 25+ messages in thread
From: Stanislav Lisovskiy @ 2020-01-24  8:44 UTC (permalink / raw)
  To: intel-gfx

During a full modeset the global state (i.e. dev_priv) is protected
by locking the crtcs in the state; otherwise global state is not
serialized. Also, when it is not a full modeset, there is no need to
change the DBuf slice configuration anyway, as the pipe configuration
does not change.

Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
---
 drivers/gpu/drm/i915/display/intel_display.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 1c957df5c28c..888a9e94032e 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -15373,7 +15373,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 		intel_encoders_update_prepare(state);
 
 	/* Enable all the new slices we might need */
-	icl_dbuf_slice_pre_update(state);
+	if (state->modeset)
+		icl_dbuf_slice_pre_update(state);
 
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	dev_priv->display.commit_modeset_enables(state);
@@ -15432,7 +15433,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 	}
 
 	/* Disable all the slices we don't need */
-	icl_dbuf_slice_post_update(state);
+	if (state->modeset)
+		icl_dbuf_slice_post_update(state);
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		intel_post_plane_update(state, crtc);
-- 
2.24.1.485.gad05a3d8e5

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 25+ messages in thread

* [Intel-gfx] ✓ Fi.CI.BAT: success for Enable second DBuf slice for ICL and TGL (rev21)
  2020-01-24  8:44 [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL Stanislav Lisovskiy
                   ` (6 preceding siblings ...)
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 7/7] drm/i915: Update dbuf slices only with full modeset Stanislav Lisovskiy
@ 2020-01-24  9:52 ` Patchwork
  2020-01-26  9:19 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
                   ` (2 subsequent siblings)
  10 siblings, 0 replies; 25+ messages in thread
From: Patchwork @ 2020-01-24  9:52 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

== Series Details ==

Series: Enable second DBuf slice for ICL and TGL (rev21)
URL   : https://patchwork.freedesktop.org/series/70059/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7806 -> Patchwork_16248
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/index.html

Known issues
------------

  Here are the changes found in Patchwork_16248 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_exec_parallel@fds:
    - fi-byt-n2820:       [PASS][1] -> [INCOMPLETE][2] ([i915#45])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/fi-byt-n2820/igt@gem_exec_parallel@fds.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/fi-byt-n2820/igt@gem_exec_parallel@fds.html

  * igt@gem_exec_suspend@basic-s4-devices:
    - fi-tgl-y:           [PASS][3] -> [FAIL][4] ([CI#94])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/fi-tgl-y/igt@gem_exec_suspend@basic-s4-devices.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/fi-tgl-y/igt@gem_exec_suspend@basic-s4-devices.html

  * igt@i915_module_load@reload-with-fault-injection:
    - fi-cfl-8700k:       [PASS][5] -> [INCOMPLETE][6] ([i915#505])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/fi-cfl-8700k/igt@i915_module_load@reload-with-fault-injection.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/fi-cfl-8700k/igt@i915_module_load@reload-with-fault-injection.html

  * igt@kms_addfb_basic@bad-pitch-128:
    - fi-tgl-y:           [PASS][7] -> [DMESG-WARN][8] ([CI#94] / [i915#402]) +1 similar issue
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/fi-tgl-y/igt@kms_addfb_basic@bad-pitch-128.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/fi-tgl-y/igt@kms_addfb_basic@bad-pitch-128.html

  
#### Possible fixes ####

  * igt@gem_exec_parallel@contexts:
    - fi-byt-n2820:       [FAIL][9] ([i915#694]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/fi-byt-n2820/igt@gem_exec_parallel@contexts.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/fi-byt-n2820/igt@gem_exec_parallel@contexts.html

  * igt@i915_selftest@live_blt:
    - fi-bsw-nick:        [DMESG-FAIL][11] ([i915#723]) -> [PASS][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/fi-bsw-nick/igt@i915_selftest@live_blt.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/fi-bsw-nick/igt@i915_selftest@live_blt.html
    - fi-hsw-4770r:       [DMESG-FAIL][13] ([i915#563]) -> [PASS][14]
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/fi-hsw-4770r/igt@i915_selftest@live_blt.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/fi-hsw-4770r/igt@i915_selftest@live_blt.html
    - fi-ivb-3770:        [DMESG-FAIL][15] ([i915#725]) -> [PASS][16]
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/fi-ivb-3770/igt@i915_selftest@live_blt.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/fi-ivb-3770/igt@i915_selftest@live_blt.html
    - fi-hsw-4770:        [DMESG-FAIL][17] ([i915#770]) -> [PASS][18]
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/fi-hsw-4770/igt@i915_selftest@live_blt.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/fi-hsw-4770/igt@i915_selftest@live_blt.html

  * igt@prime_self_import@basic-with_one_bo_two_files:
    - fi-tgl-y:           [DMESG-WARN][19] ([CI#94] / [i915#402]) -> [PASS][20]
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/fi-tgl-y/igt@prime_self_import@basic-with_one_bo_two_files.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/fi-tgl-y/igt@prime_self_import@basic-with_one_bo_two_files.html

  
#### Warnings ####

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [FAIL][21] ([fdo#111407]) -> [FAIL][22] ([fdo#111096] / [i915#323])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  
  [CI#94]: https://gitlab.freedesktop.org/gfx-ci/i915-infra/issues/94
  [fdo#111096]: https://bugs.freedesktop.org/show_bug.cgi?id=111096
  [fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407
  [i915#323]: https://gitlab.freedesktop.org/drm/intel/issues/323
  [i915#402]: https://gitlab.freedesktop.org/drm/intel/issues/402
  [i915#45]: https://gitlab.freedesktop.org/drm/intel/issues/45
  [i915#505]: https://gitlab.freedesktop.org/drm/intel/issues/505
  [i915#563]: https://gitlab.freedesktop.org/drm/intel/issues/563
  [i915#694]: https://gitlab.freedesktop.org/drm/intel/issues/694
  [i915#723]: https://gitlab.freedesktop.org/drm/intel/issues/723
  [i915#725]: https://gitlab.freedesktop.org/drm/intel/issues/725
  [i915#770]: https://gitlab.freedesktop.org/drm/intel/issues/770


Participating hosts (52 -> 40)
------------------------------

  Additional (1): fi-snb-2520m 
  Missing    (13): fi-ilk-m540 fi-bdw-samus fi-bdw-5557u fi-kbl-7560u fi-hsw-4200u fi-byt-j1900 fi-byt-squawks fi-bsw-cyan fi-ctg-p8600 fi-gdg-551 fi-blb-e6850 fi-byt-clapper fi-skl-6600u 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7806 -> Patchwork_16248

  CI-20190529: 20190529
  CI_DRM_7806: 0b551226df5e5b84044705d5fd76571da70f3163 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5382: 8dbe5ce61baa2d563d4dd7c56a018bb1e1077467 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_16248: 095e63821d5eb9749b1ff6d9a7a49e5c2a45d4bb @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

095e63821d5e drm/i915: Update dbuf slices only with full modeset
bd18f5ca4979 drm/i915: Protect intel_dbuf_slices_update with mutex
8136353a9f78 drm/i915: Correctly map DBUF slices to pipes
b5e4833a86d9 drm/i915: Manipulate DBuf slices properly
05c38bd73760 drm/i915: Introduce parameterized DBUF_CTL
cb268e2956dc drm/i915: Move dbuf slice update to proper place
ac4dbc686a5e drm/i915: Remove skl_ddl_allocation struct

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [Intel-gfx] ✗ Fi.CI.IGT: failure for Enable second DBuf slice for ICL and TGL (rev21)
  2020-01-24  8:44 [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL Stanislav Lisovskiy
                   ` (7 preceding siblings ...)
  2020-01-24  9:52 ` [Intel-gfx] ✓ Fi.CI.BAT: success for Enable second DBuf slice for ICL and TGL (rev21) Patchwork
@ 2020-01-26  9:19 ` Patchwork
  2020-01-27  7:48   ` Lisovskiy, Stanislav
  2020-01-27 13:01 ` [Intel-gfx] ✓ Fi.CI.IGT: success " Patchwork
  2020-01-27 13:07 ` Patchwork
  10 siblings, 1 reply; 25+ messages in thread
From: Patchwork @ 2020-01-26  9:19 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

== Series Details ==

Series: Enable second DBuf slice for ICL and TGL (rev21)
URL   : https://patchwork.freedesktop.org/series/70059/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_7806_full -> Patchwork_16248_full
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_16248_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_16248_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_16248_full:

### IGT changes ###

#### Possible regressions ####

  * igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive:
    - shard-skl:          [PASS][1] -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl1/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html

  
Known issues
------------

  Here are the changes found in Patchwork_16248_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_busy@close-race:
    - shard-hsw:          [PASS][3] -> [TIMEOUT][4] ([fdo#112271])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw8/igt@gem_busy@close-race.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw2/igt@gem_busy@close-race.html

  * igt@gem_ctx_isolation@vcs1-dirty-create:
    - shard-iclb:         [PASS][5] -> [SKIP][6] ([fdo#109276] / [fdo#112080]) +1 similar issue
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb2/igt@gem_ctx_isolation@vcs1-dirty-create.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb7/igt@gem_ctx_isolation@vcs1-dirty-create.html

  * igt@gem_exec_schedule@pi-common-bsd:
    - shard-iclb:         [PASS][7] -> [SKIP][8] ([i915#677])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb5/igt@gem_exec_schedule@pi-common-bsd.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_exec_schedule@pi-common-bsd.html

  * igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd:
    - shard-iclb:         [PASS][9] -> [SKIP][10] ([fdo#112146]) +1 similar issue
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb6/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd.html

  * igt@gem_exec_schedule@promotion-bsd1:
    - shard-iclb:         [PASS][11] -> [SKIP][12] ([fdo#109276]) +6 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_exec_schedule@promotion-bsd1.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_exec_schedule@promotion-bsd1.html

  * igt@gem_persistent_relocs@forked-faulting-reloc-thrashing:
    - shard-hsw:          [PASS][13] -> [INCOMPLETE][14] ([i915#530] / [i915#61])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw7/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw7/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
    - shard-iclb:         [PASS][15] -> [INCOMPLETE][16] ([i915#140])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb8/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
    - shard-snb:          [PASS][17] -> [FAIL][18] ([i915#520])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-snb5/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-snb2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-interruptible-thrash-inactive:
    - shard-iclb:         [PASS][19] -> [INCOMPLETE][20] ([fdo#109100] / [i915#140])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb6/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb3/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html

  * igt@gem_persistent_relocs@forked-interruptible-thrashing:
    - shard-skl:          [PASS][21] -> [INCOMPLETE][22] ([i915#530])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl6/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@gem_persistent_relocs@forked-interruptible-thrashing.html

  * igt@i915_pm_rps@reset:
    - shard-tglb:         [PASS][23] -> [FAIL][24] ([i915#413])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@i915_pm_rps@reset.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@i915_pm_rps@reset.html

  * igt@i915_selftest@mock_requests:
    - shard-skl:          [PASS][25] -> [INCOMPLETE][26] ([i915#198])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl10/igt@i915_selftest@mock_requests.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl4/igt@i915_selftest@mock_requests.html
    - shard-apl:          [PASS][27] -> [INCOMPLETE][28] ([fdo#103927]) +1 similar issue
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl7/igt@i915_selftest@mock_requests.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl8/igt@i915_selftest@mock_requests.html

  * igt@i915_suspend@debugfs-reader:
    - shard-apl:          [PASS][29] -> [DMESG-WARN][30] ([i915#180])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl2/igt@i915_suspend@debugfs-reader.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl4/igt@i915_suspend@debugfs-reader.html

  * igt@kms_color@pipe-b-ctm-0-25:
    - shard-skl:          [PASS][31] -> [DMESG-WARN][32] ([i915#109]) +2 similar issues
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl6/igt@kms_color@pipe-b-ctm-0-25.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl7/igt@kms_color@pipe-b-ctm-0-25.html

  * igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent:
    - shard-skl:          [PASS][33] -> [FAIL][34] ([i915#54]) +1 similar issue
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl3/igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl5/igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent.html

  * igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled:
    - shard-skl:          [PASS][35] -> [FAIL][36] ([i915#52] / [i915#54])
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl4/igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl8/igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible:
    - shard-skl:          [PASS][37] -> [FAIL][38] ([i915#79])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl1/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
    - shard-glk:          [PASS][39] -> [FAIL][40] ([i915#79])
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk8/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk9/igt@kms_flip@flip-vs-expired-vblank-interruptible.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc:
    - shard-tglb:         [PASS][41] -> [FAIL][42] ([i915#49]) +3 similar issues
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc.html

  * igt@kms_plane_alpha_blend@pipe-a-coverage-7efc:
    - shard-skl:          [PASS][43] -> [FAIL][44] ([fdo#108145]) +1 similar issue
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl5/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl1/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_plane_move:
    - shard-iclb:         [PASS][45] -> [SKIP][46] ([fdo#109441]) +2 similar issues
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb2/igt@kms_psr@psr2_cursor_plane_move.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb1/igt@kms_psr@psr2_cursor_plane_move.html

  * igt@kms_setmode@basic:
    - shard-apl:          [PASS][47] -> [FAIL][48] ([i915#31])
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl4/igt@kms_setmode@basic.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl6/igt@kms_setmode@basic.html

  * igt@kms_vblank@pipe-a-ts-continuation-suspend:
    - shard-kbl:          [PASS][49] -> [DMESG-WARN][50] ([i915#180]) +6 similar issues
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-kbl2/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-kbl4/igt@kms_vblank@pipe-a-ts-continuation-suspend.html

  * igt@perf_pmu@busy-vcs1:
    - shard-iclb:         [PASS][51] -> [SKIP][52] ([fdo#112080]) +4 similar issues
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@perf_pmu@busy-vcs1.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb6/igt@perf_pmu@busy-vcs1.html

  
#### Possible fixes ####

  * igt@gem_ctx_isolation@rcs0-s3:
    - shard-kbl:          [DMESG-WARN][53] ([i915#180]) -> [PASS][54] +7 similar issues
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-kbl6/igt@gem_ctx_isolation@rcs0-s3.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-kbl2/igt@gem_ctx_isolation@rcs0-s3.html

  * igt@gem_ctx_isolation@vcs1-none:
    - shard-iclb:         [SKIP][55] ([fdo#109276] / [fdo#112080]) -> [PASS][56] +1 similar issue
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@gem_ctx_isolation@vcs1-none.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_ctx_isolation@vcs1-none.html

  * igt@gem_ctx_persistence@vecs0-mixed-process:
    - shard-glk:          [FAIL][57] ([i915#679]) -> [PASS][58]
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk3/igt@gem_ctx_persistence@vecs0-mixed-process.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk3/igt@gem_ctx_persistence@vecs0-mixed-process.html

  * igt@gem_ctx_shared@exec-single-timeline-bsd:
    - shard-iclb:         [SKIP][59] ([fdo#110841]) -> [PASS][60]
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_ctx_shared@exec-single-timeline-bsd.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_ctx_shared@exec-single-timeline-bsd.html

  * igt@gem_exec_parallel@vcs1-fds:
    - shard-iclb:         [SKIP][61] ([fdo#112080]) -> [PASS][62] +6 similar issues
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_exec_parallel@vcs1-fds.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_exec_parallel@vcs1-fds.html

  * igt@gem_exec_schedule@in-order-bsd2:
    - shard-iclb:         [SKIP][63] ([fdo#109276]) -> [PASS][64] +15 similar issues
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@gem_exec_schedule@in-order-bsd2.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_exec_schedule@in-order-bsd2.html

  * igt@gem_exec_schedule@pi-shared-iova-bsd:
    - shard-iclb:         [SKIP][65] ([i915#677]) -> [PASS][66]
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb4/igt@gem_exec_schedule@pi-shared-iova-bsd.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb8/igt@gem_exec_schedule@pi-shared-iova-bsd.html

  * igt@gem_exec_schedule@preemptive-hang-bsd:
    - shard-iclb:         [SKIP][67] ([fdo#112146]) -> [PASS][68] +6 similar issues
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_exec_schedule@preemptive-hang-bsd.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_exec_schedule@preemptive-hang-bsd.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
    - shard-hsw:          [INCOMPLETE][69] ([i915#530] / [i915#61]) -> [PASS][70] +1 similar issue
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-thrashing:
    - shard-apl:          [INCOMPLETE][71] ([fdo#103927]) -> [PASS][72]
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl8/igt@gem_persistent_relocs@forked-thrashing.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl8/igt@gem_persistent_relocs@forked-thrashing.html

  * igt@i915_pm_rps@waitboost:
    - shard-iclb:         [FAIL][73] ([i915#413]) -> [PASS][74]
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb3/igt@i915_pm_rps@waitboost.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@i915_pm_rps@waitboost.html

  * igt@i915_selftest@mock_requests:
    - shard-glk:          [INCOMPLETE][75] ([i915#58] / [k.org#198133]) -> [PASS][76]
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk7/igt@i915_selftest@mock_requests.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk2/igt@i915_selftest@mock_requests.html
    - shard-tglb:         [INCOMPLETE][77] ([i915#472]) -> [PASS][78]
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@i915_selftest@mock_requests.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@i915_selftest@mock_requests.html

  * igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic:
    - shard-glk:          [FAIL][79] ([i915#72]) -> [PASS][80]
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk3/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk2/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html

  * igt@kms_cursor_legacy@flip-vs-cursor-toggle:
    - shard-skl:          [FAIL][81] ([IGT#5] / [i915#697]) -> [PASS][82]
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl3/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl5/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html

  * igt@kms_flip@flip-vs-suspend:
    - shard-apl:          [DMESG-WARN][83] ([i915#180]) -> [PASS][84] +1 similar issue
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl8/igt@kms_flip@flip-vs-suspend.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl2/igt@kms_flip@flip-vs-suspend.html

  * igt@kms_flip@modeset-vs-vblank-race-interruptible:
    - shard-glk:          [FAIL][85] ([i915#407]) -> [PASS][86]
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk6/igt@kms_flip@modeset-vs-vblank-race-interruptible.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk5/igt@kms_flip@modeset-vs-vblank-race-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack:
    - shard-tglb:         [FAIL][87] ([i915#49]) -> [PASS][88] +5 similar issues
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb2/igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb1/igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack.html

  * igt@kms_plane_alpha_blend@pipe-b-coverage-7efc:
    - shard-skl:          [FAIL][89] ([fdo#108145] / [i915#265]) -> [PASS][90]
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl5/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl1/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_mmap_cpu:
    - shard-iclb:         [SKIP][91] ([fdo#109441]) -> [PASS][92] +1 similar issue
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb5/igt@kms_psr@psr2_cursor_mmap_cpu.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_cpu.html

  * igt@prime_mmap_coherency@ioctl-errors:
    - shard-hsw:          [FAIL][93] ([i915#831]) -> [PASS][94]
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw2/igt@prime_mmap_coherency@ioctl-errors.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw5/igt@prime_mmap_coherency@ioctl-errors.html

  
#### Warnings ####

  * igt@gem_ctx_isolation@vcs1-nonpriv:
    - shard-iclb:         [SKIP][95] ([fdo#109276] / [fdo#112080]) -> [FAIL][96] ([IGT#28])
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_ctx_isolation@vcs1-nonpriv.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_ctx_isolation@vcs1-nonpriv.html

  * igt@gem_tiled_blits@interruptible:
    - shard-hsw:          [FAIL][97] ([i915#818]) -> [FAIL][98] ([i915#694])
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw1/igt@gem_tiled_blits@interruptible.html
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw5/igt@gem_tiled_blits@interruptible.html

  * igt@kms_atomic_transition@3x-modeset-transitions:
    - shard-hsw:          [SKIP][99] ([fdo#109271] / [i915#439]) -> [SKIP][100] ([fdo#109271])
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw5/igt@kms_atomic_transition@3x-modeset-transitions.html
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw8/igt@kms_atomic_transition@3x-modeset-transitions.html

  * igt@kms_dp_dsc@basic-dsc-enable-edp:
    - shard-iclb:         [SKIP][101] ([fdo#109349]) -> [DMESG-WARN][102] ([fdo#107724])
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@kms_dp_dsc@basic-dsc-enable-edp.html
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html

  
  [IGT#28]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/28
  [IGT#5]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/5
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109100]: https://bugs.freedesktop.org/show_bug.cgi?id=109100
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109349]: https://bugs.freedesktop.org/show_bug.cgi?id=109349
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841
  [fdo#112080]: https://bugs.freedesktop.org/show_bug.cgi?id=112080
  [fdo#112146]: https://bugs.freedesktop.org/show_bug.cgi?id=112146
  [fdo#112271]: https://bugs.freedesktop.org/show_bug.cgi?id=112271
  [i915#109]: https://gitlab.freedesktop.org/drm/intel/issues/109
  [i915#140]: https://gitlab.freedesktop.org/drm/intel/issues/140
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#198]: https://gitlab.freedesktop.org/drm/intel/issues/198
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#31]: https://gitlab.freedesktop.org/drm/intel/issues/31
  [i915#407]: https://gitlab.freedesktop.org/drm/intel/issues/407
  [i915#413]: https://gitlab.freedesktop.org/drm/intel/issues/413
  [i915#439]: https://gitlab.freedesktop.org/drm/intel/issues/439
  [i915#472]: https://gitlab.freedesktop.org/drm/intel/issues/472
  [i915#49]: https://gitlab.freedesktop.org/drm/intel/issues/49
  [i915#52]: https://gitlab.freedesktop.org/drm/intel/issues/52
  [i915#520]: https://gitlab.freedesktop.org/drm/intel/issues/520
  [i915#530]: https://gitlab.freedesktop.org/drm/intel/issues/530
  [i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
  [i915#58]: https://gitlab.freedesktop.org/drm/intel/issues/58
  [i915#61]: https://gitlab.freedesktop.org/drm/intel/issues/61
  [i915#677]: https://gitlab.freedesktop.org/drm/intel/issues/677
  [i915#679]: https://gitlab.freedesktop.org/drm/intel/issues/679
  [i915#694]: https://gitlab.freedesktop.org/drm/intel/issues/694
  [i915#697]: https://gitlab.freedesktop.org/drm/intel/issues/697
  [i915#72]: https://gitlab.freedesktop.org/drm/intel/issues/72
  [i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79
  [i915#818]: https://gitlab.freedesktop.org/drm/intel/issues/818
  [i915#831]: https://gitlab.freedesktop.org/drm/intel/issues/831
  [k.org#198133]: https://bugzilla.kernel.org/show_bug.cgi?id=198133


Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7806 -> Patchwork_16248

  CI-20190529: 20190529
  CI_DRM_7806: 0b551226df5e5b84044705d5fd76571da70f3163 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5382: 8dbe5ce61baa2d563d4dd7c56a018bb1e1077467 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_16248: 095e63821d5eb9749b1ff6d9a7a49e5c2a45d4bb @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [Intel-gfx]  ✗ Fi.CI.IGT: failure for Enable second DBuf slice for ICL and TGL (rev21)
  2020-01-26  9:19 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
@ 2020-01-27  7:48   ` Lisovskiy, Stanislav
  2020-01-27 12:29     ` Peres, Martin
  0 siblings, 1 reply; 25+ messages in thread
From: Lisovskiy, Stanislav @ 2020-01-27  7:48 UTC (permalink / raw)
  To: intel-gfx, Peres, Martin, Hiler, Arkadiusz, Vudum, Lakshminarayana


[-- Attachment #1.1: Type: text/plain, Size: 24016 bytes --]

Good morning :)


Yet another GEM-related issue, not caused by this patch.


Best Regards,

Lisovskiy Stanislav

Organization: Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160 Espoo
________________________________
From: Patchwork <patchwork@emeril.freedesktop.org>
Sent: Sunday, January 26, 2020 11:19:53 AM
To: Lisovskiy, Stanislav
Cc: intel-gfx@lists.freedesktop.org
Subject: ✗ Fi.CI.IGT: failure for Enable second DBuf slice for ICL and TGL (rev21)

== Series Details ==

Series: Enable second DBuf slice for ICL and TGL (rev21)
URL   : https://patchwork.freedesktop.org/series/70059/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_7806_full -> Patchwork_16248_full
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_16248_full absolutely need to be
  verified manually.

  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_16248_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.



Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_16248_full:

### IGT changes ###

#### Possible regressions ####

  * igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive:
    - shard-skl:          [PASS][1] -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl1/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html


Known issues
------------

  Here are the changes found in Patchwork_16248_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_busy@close-race:
    - shard-hsw:          [PASS][3] -> [TIMEOUT][4] ([fdo#112271])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw8/igt@gem_busy@close-race.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw2/igt@gem_busy@close-race.html

  * igt@gem_ctx_isolation@vcs1-dirty-create:
    - shard-iclb:         [PASS][5] -> [SKIP][6] ([fdo#109276] / [fdo#112080]) +1 similar issue
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb2/igt@gem_ctx_isolation@vcs1-dirty-create.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb7/igt@gem_ctx_isolation@vcs1-dirty-create.html

  * igt@gem_exec_schedule@pi-common-bsd:
    - shard-iclb:         [PASS][7] -> [SKIP][8] ([i915#677])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb5/igt@gem_exec_schedule@pi-common-bsd.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_exec_schedule@pi-common-bsd.html

  * igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd:
    - shard-iclb:         [PASS][9] -> [SKIP][10] ([fdo#112146]) +1 similar issue
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb6/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd.html

  * igt@gem_exec_schedule@promotion-bsd1:
    - shard-iclb:         [PASS][11] -> [SKIP][12] ([fdo#109276]) +6 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_exec_schedule@promotion-bsd1.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_exec_schedule@promotion-bsd1.html

  * igt@gem_persistent_relocs@forked-faulting-reloc-thrashing:
    - shard-hsw:          [PASS][13] -> [INCOMPLETE][14] ([i915#530] / [i915#61])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw7/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw7/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
    - shard-iclb:         [PASS][15] -> [INCOMPLETE][16] ([i915#140])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb8/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
    - shard-snb:          [PASS][17] -> [FAIL][18] ([i915#520])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-snb5/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-snb2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-interruptible-thrash-inactive:
    - shard-iclb:         [PASS][19] -> [INCOMPLETE][20] ([fdo#109100] / [i915#140])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb6/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb3/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html

  * igt@gem_persistent_relocs@forked-interruptible-thrashing:
    - shard-skl:          [PASS][21] -> [INCOMPLETE][22] ([i915#530])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl6/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@gem_persistent_relocs@forked-interruptible-thrashing.html

  * igt@i915_pm_rps@reset:
    - shard-tglb:         [PASS][23] -> [FAIL][24] ([i915#413])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@i915_pm_rps@reset.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@i915_pm_rps@reset.html

  * igt@i915_selftest@mock_requests:
    - shard-skl:          [PASS][25] -> [INCOMPLETE][26] ([i915#198])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl10/igt@i915_selftest@mock_requests.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl4/igt@i915_selftest@mock_requests.html
    - shard-apl:          [PASS][27] -> [INCOMPLETE][28] ([fdo#103927]) +1 similar issue
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl7/igt@i915_selftest@mock_requests.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl8/igt@i915_selftest@mock_requests.html

  * igt@i915_suspend@debugfs-reader:
    - shard-apl:          [PASS][29] -> [DMESG-WARN][30] ([i915#180])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl2/igt@i915_suspend@debugfs-reader.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl4/igt@i915_suspend@debugfs-reader.html

  * igt@kms_color@pipe-b-ctm-0-25:
    - shard-skl:          [PASS][31] -> [DMESG-WARN][32] ([i915#109]) +2 similar issues
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl6/igt@kms_color@pipe-b-ctm-0-25.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl7/igt@kms_color@pipe-b-ctm-0-25.html

  * igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent:
    - shard-skl:          [PASS][33] -> [FAIL][34] ([i915#54]) +1 similar issue
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl3/igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl5/igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent.html

  * igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled:
    - shard-skl:          [PASS][35] -> [FAIL][36] ([i915#52] / [i915#54])
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl4/igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl8/igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible:
    - shard-skl:          [PASS][37] -> [FAIL][38] ([i915#79])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl1/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
    - shard-glk:          [PASS][39] -> [FAIL][40] ([i915#79])
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk8/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk9/igt@kms_flip@flip-vs-expired-vblank-interruptible.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc:
    - shard-tglb:         [PASS][41] -> [FAIL][42] ([i915#49]) +3 similar issues
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc.html

  * igt@kms_plane_alpha_blend@pipe-a-coverage-7efc:
    - shard-skl:          [PASS][43] -> [FAIL][44] ([fdo#108145]) +1 similar issue
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl5/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl1/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_plane_move:
    - shard-iclb:         [PASS][45] -> [SKIP][46] ([fdo#109441]) +2 similar issues
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb2/igt@kms_psr@psr2_cursor_plane_move.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb1/igt@kms_psr@psr2_cursor_plane_move.html

  * igt@kms_setmode@basic:
    - shard-apl:          [PASS][47] -> [FAIL][48] ([i915#31])
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl4/igt@kms_setmode@basic.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl6/igt@kms_setmode@basic.html

  * igt@kms_vblank@pipe-a-ts-continuation-suspend:
    - shard-kbl:          [PASS][49] -> [DMESG-WARN][50] ([i915#180]) +6 similar issues
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-kbl2/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-kbl4/igt@kms_vblank@pipe-a-ts-continuation-suspend.html

  * igt@perf_pmu@busy-vcs1:
    - shard-iclb:         [PASS][51] -> [SKIP][52] ([fdo#112080]) +4 similar issues
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@perf_pmu@busy-vcs1.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb6/igt@perf_pmu@busy-vcs1.html


#### Possible fixes ####

  * igt@gem_ctx_isolation@rcs0-s3:
    - shard-kbl:          [DMESG-WARN][53] ([i915#180]) -> [PASS][54] +7 similar issues
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-kbl6/igt@gem_ctx_isolation@rcs0-s3.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-kbl2/igt@gem_ctx_isolation@rcs0-s3.html

  * igt@gem_ctx_isolation@vcs1-none:
    - shard-iclb:         [SKIP][55] ([fdo#109276] / [fdo#112080]) -> [PASS][56] +1 similar issue
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@gem_ctx_isolation@vcs1-none.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_ctx_isolation@vcs1-none.html

  * igt@gem_ctx_persistence@vecs0-mixed-process:
    - shard-glk:          [FAIL][57] ([i915#679]) -> [PASS][58]
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk3/igt@gem_ctx_persistence@vecs0-mixed-process.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk3/igt@gem_ctx_persistence@vecs0-mixed-process.html

  * igt@gem_ctx_shared@exec-single-timeline-bsd:
    - shard-iclb:         [SKIP][59] ([fdo#110841]) -> [PASS][60]
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_ctx_shared@exec-single-timeline-bsd.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_ctx_shared@exec-single-timeline-bsd.html

  * igt@gem_exec_parallel@vcs1-fds:
    - shard-iclb:         [SKIP][61] ([fdo#112080]) -> [PASS][62] +6 similar issues
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_exec_parallel@vcs1-fds.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_exec_parallel@vcs1-fds.html

  * igt@gem_exec_schedule@in-order-bsd2:
    - shard-iclb:         [SKIP][63] ([fdo#109276]) -> [PASS][64] +15 similar issues
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@gem_exec_schedule@in-order-bsd2.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_exec_schedule@in-order-bsd2.html

  * igt@gem_exec_schedule@pi-shared-iova-bsd:
    - shard-iclb:         [SKIP][65] ([i915#677]) -> [PASS][66]
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb4/igt@gem_exec_schedule@pi-shared-iova-bsd.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb8/igt@gem_exec_schedule@pi-shared-iova-bsd.html

  * igt@gem_exec_schedule@preemptive-hang-bsd:
    - shard-iclb:         [SKIP][67] ([fdo#112146]) -> [PASS][68] +6 similar issues
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_exec_schedule@preemptive-hang-bsd.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_exec_schedule@preemptive-hang-bsd.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
    - shard-hsw:          [INCOMPLETE][69] ([i915#530] / [i915#61]) -> [PASS][70] +1 similar issue
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-thrashing:
    - shard-apl:          [INCOMPLETE][71] ([fdo#103927]) -> [PASS][72]
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl8/igt@gem_persistent_relocs@forked-thrashing.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl8/igt@gem_persistent_relocs@forked-thrashing.html

  * igt@i915_pm_rps@waitboost:
    - shard-iclb:         [FAIL][73] ([i915#413]) -> [PASS][74]
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb3/igt@i915_pm_rps@waitboost.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@i915_pm_rps@waitboost.html

  * igt@i915_selftest@mock_requests:
    - shard-glk:          [INCOMPLETE][75] ([i915#58] / [k.org#198133]) -> [PASS][76]
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk7/igt@i915_selftest@mock_requests.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk2/igt@i915_selftest@mock_requests.html
    - shard-tglb:         [INCOMPLETE][77] ([i915#472]) -> [PASS][78]
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@i915_selftest@mock_requests.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@i915_selftest@mock_requests.html

  * igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic:
    - shard-glk:          [FAIL][79] ([i915#72]) -> [PASS][80]
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk3/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk2/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html

  * igt@kms_cursor_legacy@flip-vs-cursor-toggle:
    - shard-skl:          [FAIL][81] ([IGT#5] / [i915#697]) -> [PASS][82]
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl3/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl5/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html

  * igt@kms_flip@flip-vs-suspend:
    - shard-apl:          [DMESG-WARN][83] ([i915#180]) -> [PASS][84] +1 similar issue
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl8/igt@kms_flip@flip-vs-suspend.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl2/igt@kms_flip@flip-vs-suspend.html

  * igt@kms_flip@modeset-vs-vblank-race-interruptible:
    - shard-glk:          [FAIL][85] ([i915#407]) -> [PASS][86]
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk6/igt@kms_flip@modeset-vs-vblank-race-interruptible.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk5/igt@kms_flip@modeset-vs-vblank-race-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack:
    - shard-tglb:         [FAIL][87] ([i915#49]) -> [PASS][88] +5 similar issues
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb2/igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb1/igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack.html

  * igt@kms_plane_alpha_blend@pipe-b-coverage-7efc:
    - shard-skl:          [FAIL][89] ([fdo#108145] / [i915#265]) -> [PASS][90]
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl5/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl1/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_mmap_cpu:
    - shard-iclb:         [SKIP][91] ([fdo#109441]) -> [PASS][92] +1 similar issue
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb5/igt@kms_psr@psr2_cursor_mmap_cpu.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_cpu.html

  * igt@prime_mmap_coherency@ioctl-errors:
    - shard-hsw:          [FAIL][93] ([i915#831]) -> [PASS][94]
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw2/igt@prime_mmap_coherency@ioctl-errors.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw5/igt@prime_mmap_coherency@ioctl-errors.html


#### Warnings ####

  * igt@gem_ctx_isolation@vcs1-nonpriv:
    - shard-iclb:         [SKIP][95] ([fdo#109276] / [fdo#112080]) -> [FAIL][96] ([IGT#28])
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_ctx_isolation@vcs1-nonpriv.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_ctx_isolation@vcs1-nonpriv.html

  * igt@gem_tiled_blits@interruptible:
    - shard-hsw:          [FAIL][97] ([i915#818]) -> [FAIL][98] ([i915#694])
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw1/igt@gem_tiled_blits@interruptible.html
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw5/igt@gem_tiled_blits@interruptible.html

  * igt@kms_atomic_transition@3x-modeset-transitions:
    - shard-hsw:          [SKIP][99] ([fdo#109271] / [i915#439]) -> [SKIP][100] ([fdo#109271])
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw5/igt@kms_atomic_transition@3x-modeset-transitions.html
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw8/igt@kms_atomic_transition@3x-modeset-transitions.html

  * igt@kms_dp_dsc@basic-dsc-enable-edp:
    - shard-iclb:         [SKIP][101] ([fdo#109349]) -> [DMESG-WARN][102] ([fdo#107724])
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@kms_dp_dsc@basic-dsc-enable-edp.html
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html


  [IGT#28]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/28
  [IGT#5]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/5
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109100]: https://bugs.freedesktop.org/show_bug.cgi?id=109100
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109349]: https://bugs.freedesktop.org/show_bug.cgi?id=109349
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841
  [fdo#112080]: https://bugs.freedesktop.org/show_bug.cgi?id=112080
  [fdo#112146]: https://bugs.freedesktop.org/show_bug.cgi?id=112146
  [fdo#112271]: https://bugs.freedesktop.org/show_bug.cgi?id=112271
  [i915#109]: https://gitlab.freedesktop.org/drm/intel/issues/109
  [i915#140]: https://gitlab.freedesktop.org/drm/intel/issues/140
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#198]: https://gitlab.freedesktop.org/drm/intel/issues/198
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#31]: https://gitlab.freedesktop.org/drm/intel/issues/31
  [i915#407]: https://gitlab.freedesktop.org/drm/intel/issues/407
  [i915#413]: https://gitlab.freedesktop.org/drm/intel/issues/413
  [i915#439]: https://gitlab.freedesktop.org/drm/intel/issues/439
  [i915#472]: https://gitlab.freedesktop.org/drm/intel/issues/472
  [i915#49]: https://gitlab.freedesktop.org/drm/intel/issues/49
  [i915#52]: https://gitlab.freedesktop.org/drm/intel/issues/52
  [i915#520]: https://gitlab.freedesktop.org/drm/intel/issues/520
  [i915#530]: https://gitlab.freedesktop.org/drm/intel/issues/530
  [i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
  [i915#58]: https://gitlab.freedesktop.org/drm/intel/issues/58
  [i915#61]: https://gitlab.freedesktop.org/drm/intel/issues/61
  [i915#677]: https://gitlab.freedesktop.org/drm/intel/issues/677
  [i915#679]: https://gitlab.freedesktop.org/drm/intel/issues/679
  [i915#694]: https://gitlab.freedesktop.org/drm/intel/issues/694
  [i915#697]: https://gitlab.freedesktop.org/drm/intel/issues/697
  [i915#72]: https://gitlab.freedesktop.org/drm/intel/issues/72
  [i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79
  [i915#818]: https://gitlab.freedesktop.org/drm/intel/issues/818
  [i915#831]: https://gitlab.freedesktop.org/drm/intel/issues/831
  [k.org#198133]: https://bugzilla.kernel.org/show_bug.cgi?id=198133


Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7806 -> Patchwork_16248

  CI-20190529: 20190529
  CI_DRM_7806: 0b551226df5e5b84044705d5fd76571da70f3163 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5382: 8dbe5ce61baa2d563d4dd7c56a018bb1e1077467 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_16248: 095e63821d5eb9749b1ff6d9a7a49e5c2a45d4bb @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/index.html

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [Intel-gfx]  ✗ Fi.CI.IGT: failure for Enable second DBuf slice for ICL and TGL (rev21)
  2020-01-27  7:48   ` Lisovskiy, Stanislav
@ 2020-01-27 12:29     ` Peres, Martin
  0 siblings, 0 replies; 25+ messages in thread
From: Peres, Martin @ 2020-01-27 12:29 UTC (permalink / raw)
  To: Lisovskiy, Stanislav, intel-gfx, Hiler, Arkadiusz, Vudum,
	Lakshminarayana

On 27/01/2020 09:48, Lisovskiy, Stanislav wrote:
> Good morning :)
> 
> 
> Yet another GEM-related issue not caused by this patch...

Thanks! Lakshmi reported the bug and I made the filing a little more
generic and attached it to
https://gitlab.freedesktop.org/drm/intel/issues/530.

Thanks for flagging all these false positives! I queued a re-reporting
of the results.

Martin

> 
> 
> Best Regards,
> 
> Lisovskiy Stanislav
> 
> Organization: Intel Finland Oy - BIC 0357606-4 - Westendinkatu 7, 02160
> Espoo
> ------------------------------------------------------------------------
> *From:* Patchwork <patchwork@emeril.freedesktop.org>
> *Sent:* Sunday, January 26, 2020 11:19:53 AM
> *To:* Lisovskiy, Stanislav
> *Cc:* intel-gfx@lists.freedesktop.org
> *Subject:* ✗ Fi.CI.IGT: failure for Enable second DBuf slice for ICL and
> TGL (rev21)
>  
> == Series Details ==
> 
> Series: Enable second DBuf slice for ICL and TGL (rev21)
> URL   : https://patchwork.freedesktop.org/series/70059/
> State : failure
> 
> == Summary ==
> 
> CI Bug Log - changes from CI_DRM_7806_full -> Patchwork_16248_full
> ====================================================
> 
> Summary
> -------
> 
>   **FAILURE**
> 
>   Serious unknown changes coming with Patchwork_16248_full absolutely
> need to be
>   verified manually.
>  
>   If you think the reported changes have nothing to do with the changes
>   introduced in Patchwork_16248_full, please notify your bug team to
> allow them
>   to document this new failure mode, which will reduce false positives
> in CI.
> 
>  
> 
> Possible new issues
> -------------------
> 
>   Here are the unknown changes that may have been introduced in
> Patchwork_16248_full:
> 
> ### IGT changes ###
> 
> #### Possible regressions ####
> 
>   * igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive:
>     - shard-skl:          [PASS][1] -> [INCOMPLETE][2]
>    [1]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl1/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html
>    [2]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html
> 
>  
> Known issues
> ------------
> 
>   Here are the changes found in Patchwork_16248_full that come from
> known issues:
> 
> ### IGT changes ###
> 
> #### Issues hit ####
> 
>   * igt@gem_busy@close-race:
>     - shard-hsw:          [PASS][3] -> [TIMEOUT][4] ([fdo#112271])
>    [3]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw8/igt@gem_busy@close-race.html
>    [4]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw2/igt@gem_busy@close-race.html
> 
>   * igt@gem_ctx_isolation@vcs1-dirty-create:
>     - shard-iclb:         [PASS][5] -> [SKIP][6] ([fdo#109276] /
> [fdo#112080]) +1 similar issue
>    [5]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb2/igt@gem_ctx_isolation@vcs1-dirty-create.html
>    [6]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb7/igt@gem_ctx_isolation@vcs1-dirty-create.html
> 
>   * igt@gem_exec_schedule@pi-common-bsd:
>     - shard-iclb:         [PASS][7] -> [SKIP][8] ([i915#677])
>    [7]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb5/igt@gem_exec_schedule@pi-common-bsd.html
>    [8]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_exec_schedule@pi-common-bsd.html
> 
>   * igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd:
>     - shard-iclb:         [PASS][9] -> [SKIP][10] ([fdo#112146]) +1
> similar issue
>    [9]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb6/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd.html
>    [10]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd.html
> 
>   * igt@gem_exec_schedule@promotion-bsd1:
>     - shard-iclb:         [PASS][11] -> [SKIP][12] ([fdo#109276]) +6
> similar issues
>    [11]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_exec_schedule@promotion-bsd1.html
>    [12]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_exec_schedule@promotion-bsd1.html
> 
>   * igt@gem_persistent_relocs@forked-faulting-reloc-thrashing:
>     - shard-hsw:          [PASS][13] -> [INCOMPLETE][14] ([i915#530] /
> [i915#61])
>    [13]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw7/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
>    [14]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw7/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
>     - shard-iclb:         [PASS][15] -> [INCOMPLETE][16] ([i915#140])
>    [15]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
>    [16]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb8/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
> 
>   * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
>     - shard-snb:          [PASS][17] -> [FAIL][18] ([i915#520])
>    [17]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-snb5/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
>    [18]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-snb2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
> 
>   * igt@gem_persistent_relocs@forked-interruptible-thrash-inactive:
>     - shard-iclb:         [PASS][19] -> [INCOMPLETE][20] ([fdo#109100] /
> [i915#140])
>    [19]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb6/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html
>    [20]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb3/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html
> 
>   * igt@gem_persistent_relocs@forked-interruptible-thrashing:
>     - shard-skl:          [PASS][21] -> [INCOMPLETE][22] ([i915#530])
>    [21]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl6/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
>    [22]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
> 
>   * igt@i915_pm_rps@reset:
>     - shard-tglb:         [PASS][23] -> [FAIL][24] ([i915#413])
>    [23]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@i915_pm_rps@reset.html
>    [24]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@i915_pm_rps@reset.html
> 
>   * igt@i915_selftest@mock_requests:
>     - shard-skl:          [PASS][25] -> [INCOMPLETE][26] ([i915#198])
>    [25]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl10/igt@i915_selftest@mock_requests.html
>    [26]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl4/igt@i915_selftest@mock_requests.html
>     - shard-apl:          [PASS][27] -> [INCOMPLETE][28] ([fdo#103927])
> +1 similar issue
>    [27]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl7/igt@i915_selftest@mock_requests.html
>    [28]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl8/igt@i915_selftest@mock_requests.html
> 
>   * igt@i915_suspend@debugfs-reader:
>     - shard-apl:          [PASS][29] -> [DMESG-WARN][30] ([i915#180])
>    [29]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl2/igt@i915_suspend@debugfs-reader.html
>    [30]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl4/igt@i915_suspend@debugfs-reader.html
> 
>   * igt@kms_color@pipe-b-ctm-0-25:
>     - shard-skl:          [PASS][31] -> [DMESG-WARN][32] ([i915#109]) +2
> similar issues
>    [31]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl6/igt@kms_color@pipe-b-ctm-0-25.html
>    [32]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl7/igt@kms_color@pipe-b-ctm-0-25.html
> 
>   * igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent:
>     - shard-skl:          [PASS][33] -> [FAIL][34] ([i915#54]) +1
> similar issue
>    [33]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl3/igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent.html
>    [34]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl5/igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent.html
> 
>   * igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled:
>     - shard-skl:          [PASS][35] -> [FAIL][36] ([i915#52] / [i915#54])
>    [35]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl4/igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled.html
>    [36]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl8/igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled.html
> 
>   * igt@kms_flip@flip-vs-expired-vblank-interruptible:
>     - shard-skl:          [PASS][37] -> [FAIL][38] ([i915#79])
>    [37]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl1/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
>    [38]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
>     - shard-glk:          [PASS][39] -> [FAIL][40] ([i915#79])
>    [39]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk8/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
>    [40]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk9/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
> 
>   * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc:
>     - shard-tglb:         [PASS][41] -> [FAIL][42] ([i915#49]) +3
> similar issues
>    [41]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc.html
>    [42]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc.html
> 
>   * igt@kms_plane_alpha_blend@pipe-a-coverage-7efc:
>     - shard-skl:          [PASS][43] -> [FAIL][44] ([fdo#108145]) +1
> similar issue
>    [43]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl5/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html
>    [44]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl1/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html
> 
>   * igt@kms_psr@psr2_cursor_plane_move:
>     - shard-iclb:         [PASS][45] -> [SKIP][46] ([fdo#109441]) +2
> similar issues
>    [45]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb2/igt@kms_psr@psr2_cursor_plane_move.html
>    [46]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb1/igt@kms_psr@psr2_cursor_plane_move.html
> 
>   * igt@kms_setmode@basic:
>     - shard-apl:          [PASS][47] -> [FAIL][48] ([i915#31])
>    [47]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl4/igt@kms_setmode@basic.html
>    [48]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl6/igt@kms_setmode@basic.html
> 
>   * igt@kms_vblank@pipe-a-ts-continuation-suspend:
>     - shard-kbl:          [PASS][49] -> [DMESG-WARN][50] ([i915#180]) +6
> similar issues
>    [49]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-kbl2/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
>    [50]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-kbl4/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
> 
>   * igt@perf_pmu@busy-vcs1:
>     - shard-iclb:         [PASS][51] -> [SKIP][52] ([fdo#112080]) +4
> similar issues
>    [51]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@perf_pmu@busy-vcs1.html
>    [52]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb6/igt@perf_pmu@busy-vcs1.html
> 
>  
> #### Possible fixes ####
> 
>   * igt@gem_ctx_isolation@rcs0-s3:
>     - shard-kbl:          [DMESG-WARN][53] ([i915#180]) -> [PASS][54] +7
> similar issues
>    [53]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-kbl6/igt@gem_ctx_isolation@rcs0-s3.html
>    [54]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-kbl2/igt@gem_ctx_isolation@rcs0-s3.html
> 
>   * igt@gem_ctx_isolation@vcs1-none:
>     - shard-iclb:         [SKIP][55] ([fdo#109276] / [fdo#112080]) ->
> [PASS][56] +1 similar issue
>    [55]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@gem_ctx_isolation@vcs1-none.html
>    [56]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_ctx_isolation@vcs1-none.html
> 
>   * igt@gem_ctx_persistence@vecs0-mixed-process:
>     - shard-glk:          [FAIL][57] ([i915#679]) -> [PASS][58]
>    [57]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk3/igt@gem_ctx_persistence@vecs0-mixed-process.html
>    [58]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk3/igt@gem_ctx_persistence@vecs0-mixed-process.html
> 
>   * igt@gem_ctx_shared@exec-single-timeline-bsd:
>     - shard-iclb:         [SKIP][59] ([fdo#110841]) -> [PASS][60]
>    [59]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_ctx_shared@exec-single-timeline-bsd.html
>    [60]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_ctx_shared@exec-single-timeline-bsd.html
> 
>   * igt@gem_exec_parallel@vcs1-fds:
>     - shard-iclb:         [SKIP][61] ([fdo#112080]) -> [PASS][62] +6
> similar issues
>    [61]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_exec_parallel@vcs1-fds.html
>    [62]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_exec_parallel@vcs1-fds.html
> 
>   * igt@gem_exec_schedule@in-order-bsd2:
>     - shard-iclb:         [SKIP][63] ([fdo#109276]) -> [PASS][64] +15
> similar issues
>    [63]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@gem_exec_schedule@in-order-bsd2.html
>    [64]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_exec_schedule@in-order-bsd2.html
> 
>   * igt@gem_exec_schedule@pi-shared-iova-bsd:
>     - shard-iclb:         [SKIP][65] ([i915#677]) -> [PASS][66]
>    [65]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb4/igt@gem_exec_schedule@pi-shared-iova-bsd.html
>    [66]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb8/igt@gem_exec_schedule@pi-shared-iova-bsd.html
> 
>   * igt@gem_exec_schedule@preemptive-hang-bsd:
>     - shard-iclb:         [SKIP][67] ([fdo#112146]) -> [PASS][68] +6
> similar issues
>    [67]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_exec_schedule@preemptive-hang-bsd.html
>    [68]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_exec_schedule@preemptive-hang-bsd.html
> 
>   * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
>     - shard-hsw:          [INCOMPLETE][69] ([i915#530] / [i915#61]) ->
> [PASS][70] +1 similar issue
>    [69]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
>    [70]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
> 
>   * igt@gem_persistent_relocs@forked-thrashing:
>     - shard-apl:          [INCOMPLETE][71] ([fdo#103927]) -> [PASS][72]
>    [71]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl8/igt@gem_persistent_relocs@forked-thrashing.html
>    [72]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl8/igt@gem_persistent_relocs@forked-thrashing.html
> 
>   * igt@i915_pm_rps@waitboost:
>     - shard-iclb:         [FAIL][73] ([i915#413]) -> [PASS][74]
>    [73]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb3/igt@i915_pm_rps@waitboost.html
>    [74]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@i915_pm_rps@waitboost.html
> 
>   * igt@i915_selftest@mock_requests:
>     - shard-glk:          [INCOMPLETE][75] ([i915#58] / [k.org#198133])
> -> [PASS][76]
>    [75]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk7/igt@i915_selftest@mock_requests.html
>    [76]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk2/igt@i915_selftest@mock_requests.html
>     - shard-tglb:         [INCOMPLETE][77] ([i915#472]) -> [PASS][78]
>    [77]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@i915_selftest@mock_requests.html
>    [78]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@i915_selftest@mock_requests.html
> 
>   * igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic:
>     - shard-glk:          [FAIL][79] ([i915#72]) -> [PASS][80]
>    [79]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk3/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html
>    [80]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk2/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html
> 
>   * igt@kms_cursor_legacy@flip-vs-cursor-toggle:
>     - shard-skl:          [FAIL][81] ([IGT#5] / [i915#697]) -> [PASS][82]
>    [81]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl3/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html
>    [82]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl5/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html
> 
>   * igt@kms_flip@flip-vs-suspend:
>     - shard-apl:          [DMESG-WARN][83] ([i915#180]) -> [PASS][84] +1
> similar issue
>    [83]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl8/igt@kms_flip@flip-vs-suspend.html
>    [84]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl2/igt@kms_flip@flip-vs-suspend.html
> 
>   * igt@kms_flip@modeset-vs-vblank-race-interruptible:
>     - shard-glk:          [FAIL][85] ([i915#407]) -> [PASS][86]
>    [85]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk6/igt@kms_flip@modeset-vs-vblank-race-interruptible.html
>    [86]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk5/igt@kms_flip@modeset-vs-vblank-race-interruptible.html
> 
>   * igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack:
>     - shard-tglb:         [FAIL][87] ([i915#49]) -> [PASS][88] +5
> similar issues
>    [87]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb2/igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack.html
>    [88]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb1/igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack.html
> 
>   * igt@kms_plane_alpha_blend@pipe-b-coverage-7efc:
>     - shard-skl:          [FAIL][89] ([fdo#108145] / [i915#265]) ->
> [PASS][90]
>    [89]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl5/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
>    [90]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl1/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
> 
>   * igt@kms_psr@psr2_cursor_mmap_cpu:
>     - shard-iclb:         [SKIP][91] ([fdo#109441]) -> [PASS][92] +1
> similar issue
>    [91]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb5/igt@kms_psr@psr2_cursor_mmap_cpu.html
>    [92]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_cpu.html
> 
>   * igt@prime_mmap_coherency@ioctl-errors:
>     - shard-hsw:          [FAIL][93] ([i915#831]) -> [PASS][94]
>    [93]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw2/igt@prime_mmap_coherency@ioctl-errors.html
>    [94]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw5/igt@prime_mmap_coherency@ioctl-errors.html
> 
>  
> #### Warnings ####
> 
>   * igt@gem_ctx_isolation@vcs1-nonpriv:
>     - shard-iclb:         [SKIP][95] ([fdo#109276] / [fdo#112080]) ->
> [FAIL][96] ([IGT#28])
>    [95]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_ctx_isolation@vcs1-nonpriv.html
>    [96]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_ctx_isolation@vcs1-nonpriv.html
> 
>   * igt@gem_tiled_blits@interruptible:
>     - shard-hsw:          [FAIL][97] ([i915#818]) -> [FAIL][98] ([i915#694])
>    [97]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw1/igt@gem_tiled_blits@interruptible.html
>    [98]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw5/igt@gem_tiled_blits@interruptible.html
> 
>   * igt@kms_atomic_transition@3x-modeset-transitions:
>     - shard-hsw:          [SKIP][99] ([fdo#109271] / [i915#439]) ->
> [SKIP][100] ([fdo#109271])
>    [99]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw5/igt@kms_atomic_transition@3x-modeset-transitions.html
>    [100]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw8/igt@kms_atomic_transition@3x-modeset-transitions.html
> 
>   * igt@kms_dp_dsc@basic-dsc-enable-edp:
>     - shard-iclb:         [SKIP][101] ([fdo#109349]) ->
> [DMESG-WARN][102] ([fdo#107724])
>    [101]:
> https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@kms_dp_dsc@basic-dsc-enable-edp.html
>    [102]:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html
> 
>  
>   [IGT#28]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/28
>   [IGT#5]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/5
>   [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
>   [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
>   [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
>   [fdo#109100]: https://bugs.freedesktop.org/show_bug.cgi?id=109100
>   [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
>   [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
>   [fdo#109349]: https://bugs.freedesktop.org/show_bug.cgi?id=109349
>   [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
>   [fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841
>   [fdo#112080]: https://bugs.freedesktop.org/show_bug.cgi?id=112080
>   [fdo#112146]: https://bugs.freedesktop.org/show_bug.cgi?id=112146
>   [fdo#112271]: https://bugs.freedesktop.org/show_bug.cgi?id=112271
>   [i915#109]: https://gitlab.freedesktop.org/drm/intel/issues/109
>   [i915#140]: https://gitlab.freedesktop.org/drm/intel/issues/140
>   [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
>   [i915#198]: https://gitlab.freedesktop.org/drm/intel/issues/198
>   [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
>   [i915#31]: https://gitlab.freedesktop.org/drm/intel/issues/31
>   [i915#407]: https://gitlab.freedesktop.org/drm/intel/issues/407
>   [i915#413]: https://gitlab.freedesktop.org/drm/intel/issues/413
>   [i915#439]: https://gitlab.freedesktop.org/drm/intel/issues/439
>   [i915#472]: https://gitlab.freedesktop.org/drm/intel/issues/472
>   [i915#49]: https://gitlab.freedesktop.org/drm/intel/issues/49
>   [i915#52]: https://gitlab.freedesktop.org/drm/intel/issues/52
>   [i915#520]: https://gitlab.freedesktop.org/drm/intel/issues/520
>   [i915#530]: https://gitlab.freedesktop.org/drm/intel/issues/530
>   [i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
>   [i915#58]: https://gitlab.freedesktop.org/drm/intel/issues/58
>   [i915#61]: https://gitlab.freedesktop.org/drm/intel/issues/61
>   [i915#677]: https://gitlab.freedesktop.org/drm/intel/issues/677
>   [i915#679]: https://gitlab.freedesktop.org/drm/intel/issues/679
>   [i915#694]: https://gitlab.freedesktop.org/drm/intel/issues/694
>   [i915#697]: https://gitlab.freedesktop.org/drm/intel/issues/697
>   [i915#72]: https://gitlab.freedesktop.org/drm/intel/issues/72
>   [i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79
>   [i915#818]: https://gitlab.freedesktop.org/drm/intel/issues/818
>   [i915#831]: https://gitlab.freedesktop.org/drm/intel/issues/831
>   [k.org#198133]: https://bugzilla.kernel.org/show_bug.cgi?id=198133
> 
> 
> Participating hosts (10 -> 10)
> ------------------------------
> 
>   No changes in participating hosts
> 
> 
> Build changes
> -------------
> 
>   * CI: CI-20190529 -> None
>   * Linux: CI_DRM_7806 -> Patchwork_16248
> 
>   CI-20190529: 20190529
>   CI_DRM_7806: 0b551226df5e5b84044705d5fd76571da70f3163 @
> git://anongit.freedesktop.org/gfx-ci/linux
>   IGT_5382: 8dbe5ce61baa2d563d4dd7c56a018bb1e1077467 @
> git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
>   Patchwork_16248: 095e63821d5eb9749b1ff6d9a7a49e5c2a45d4bb @
> git://anongit.freedesktop.org/gfx-ci/linux
>   piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @
> git://anongit.freedesktop.org/piglit
> 
> == Logs ==
> 
> For more details see:
> https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/index.html

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 25+ messages in thread

* [Intel-gfx] ✓ Fi.CI.IGT: success for Enable second DBuf slice for ICL and TGL (rev21)
  2020-01-24  8:44 [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL Stanislav Lisovskiy
                   ` (8 preceding siblings ...)
  2020-01-26  9:19 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
@ 2020-01-27 13:01 ` Patchwork
  2020-01-27 13:07 ` Patchwork
  10 siblings, 0 replies; 25+ messages in thread
From: Patchwork @ 2020-01-27 13:01 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

== Series Details ==

Series: Enable second DBuf slice for ICL and TGL (rev21)
URL   : https://patchwork.freedesktop.org/series/70059/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7806_full -> Patchwork_16248_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_16248_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_busy@close-race:
    - shard-hsw:          [PASS][1] -> [TIMEOUT][2] ([fdo#112271])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw8/igt@gem_busy@close-race.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw2/igt@gem_busy@close-race.html

  * igt@gem_ctx_isolation@vcs1-dirty-create:
    - shard-iclb:         [PASS][3] -> [SKIP][4] ([fdo#109276] / [fdo#112080]) +1 similar issue
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb2/igt@gem_ctx_isolation@vcs1-dirty-create.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb7/igt@gem_ctx_isolation@vcs1-dirty-create.html

  * igt@gem_exec_schedule@pi-common-bsd:
    - shard-iclb:         [PASS][5] -> [SKIP][6] ([i915#677])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb5/igt@gem_exec_schedule@pi-common-bsd.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_exec_schedule@pi-common-bsd.html

  * igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd:
    - shard-iclb:         [PASS][7] -> [SKIP][8] ([fdo#112146]) +1 similar issue
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb6/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd.html

  * igt@gem_exec_schedule@promotion-bsd1:
    - shard-iclb:         [PASS][9] -> [SKIP][10] ([fdo#109276]) +6 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_exec_schedule@promotion-bsd1.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_exec_schedule@promotion-bsd1.html

  * igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive:
    - shard-skl:          [PASS][11] -> [INCOMPLETE][12] ([i915#1028])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl1/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html

  * igt@gem_persistent_relocs@forked-faulting-reloc-thrashing:
    - shard-hsw:          [PASS][13] -> [INCOMPLETE][14] ([i915#530] / [i915#61])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw7/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw7/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
    - shard-iclb:         [PASS][15] -> [INCOMPLETE][16] ([i915#140])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb8/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
    - shard-snb:          [PASS][17] -> [FAIL][18] ([i915#520])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-snb5/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-snb2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-interruptible-thrash-inactive:
    - shard-iclb:         [PASS][19] -> [INCOMPLETE][20] ([fdo#109100] / [i915#140])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb6/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb3/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html

  * igt@gem_persistent_relocs@forked-interruptible-thrashing:
    - shard-skl:          [PASS][21] -> [INCOMPLETE][22] ([i915#530])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl6/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@gem_persistent_relocs@forked-interruptible-thrashing.html

  * igt@i915_pm_rps@reset:
    - shard-tglb:         [PASS][23] -> [FAIL][24] ([i915#413])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@i915_pm_rps@reset.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@i915_pm_rps@reset.html

  * igt@i915_selftest@mock_requests:
    - shard-skl:          [PASS][25] -> [INCOMPLETE][26] ([i915#198])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl10/igt@i915_selftest@mock_requests.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl4/igt@i915_selftest@mock_requests.html
    - shard-apl:          [PASS][27] -> [INCOMPLETE][28] ([fdo#103927]) +1 similar issue
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl7/igt@i915_selftest@mock_requests.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl8/igt@i915_selftest@mock_requests.html

  * igt@i915_suspend@debugfs-reader:
    - shard-apl:          [PASS][29] -> [DMESG-WARN][30] ([i915#180])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl2/igt@i915_suspend@debugfs-reader.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl4/igt@i915_suspend@debugfs-reader.html

  * igt@kms_color@pipe-b-ctm-0-25:
    - shard-skl:          [PASS][31] -> [DMESG-WARN][32] ([i915#109]) +2 similar issues
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl6/igt@kms_color@pipe-b-ctm-0-25.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl7/igt@kms_color@pipe-b-ctm-0-25.html

  * igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent:
    - shard-skl:          [PASS][33] -> [FAIL][34] ([i915#54]) +1 similar issue
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl3/igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl5/igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent.html

  * igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled:
    - shard-skl:          [PASS][35] -> [FAIL][36] ([i915#52] / [i915#54])
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl4/igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl8/igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible:
    - shard-skl:          [PASS][37] -> [FAIL][38] ([i915#79])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl1/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
    - shard-glk:          [PASS][39] -> [FAIL][40] ([i915#79])
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk8/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk9/igt@kms_flip@flip-vs-expired-vblank-interruptible.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc:
    - shard-tglb:         [PASS][41] -> [FAIL][42] ([i915#49]) +3 similar issues
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc.html

  * igt@kms_plane_alpha_blend@pipe-a-coverage-7efc:
    - shard-skl:          [PASS][43] -> [FAIL][44] ([fdo#108145]) +1 similar issue
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl5/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl1/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_plane_move:
    - shard-iclb:         [PASS][45] -> [SKIP][46] ([fdo#109441]) +2 similar issues
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb2/igt@kms_psr@psr2_cursor_plane_move.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb1/igt@kms_psr@psr2_cursor_plane_move.html

  * igt@kms_setmode@basic:
    - shard-apl:          [PASS][47] -> [FAIL][48] ([i915#31])
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl4/igt@kms_setmode@basic.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl6/igt@kms_setmode@basic.html

  * igt@kms_vblank@pipe-a-ts-continuation-suspend:
    - shard-kbl:          [PASS][49] -> [DMESG-WARN][50] ([i915#180]) +6 similar issues
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-kbl2/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-kbl4/igt@kms_vblank@pipe-a-ts-continuation-suspend.html

  * igt@perf_pmu@busy-vcs1:
    - shard-iclb:         [PASS][51] -> [SKIP][52] ([fdo#112080]) +4 similar issues
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@perf_pmu@busy-vcs1.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb6/igt@perf_pmu@busy-vcs1.html

  
#### Possible fixes ####

  * igt@gem_ctx_isolation@rcs0-s3:
    - shard-kbl:          [DMESG-WARN][53] ([i915#180]) -> [PASS][54] +7 similar issues
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-kbl6/igt@gem_ctx_isolation@rcs0-s3.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-kbl2/igt@gem_ctx_isolation@rcs0-s3.html

  * igt@gem_ctx_isolation@vcs1-none:
    - shard-iclb:         [SKIP][55] ([fdo#109276] / [fdo#112080]) -> [PASS][56] +1 similar issue
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@gem_ctx_isolation@vcs1-none.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_ctx_isolation@vcs1-none.html

  * igt@gem_ctx_persistence@vecs0-mixed-process:
    - shard-glk:          [FAIL][57] ([i915#679]) -> [PASS][58]
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk3/igt@gem_ctx_persistence@vecs0-mixed-process.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk3/igt@gem_ctx_persistence@vecs0-mixed-process.html

  * igt@gem_ctx_shared@exec-single-timeline-bsd:
    - shard-iclb:         [SKIP][59] ([fdo#110841]) -> [PASS][60]
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_ctx_shared@exec-single-timeline-bsd.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_ctx_shared@exec-single-timeline-bsd.html

  * igt@gem_exec_parallel@vcs1-fds:
    - shard-iclb:         [SKIP][61] ([fdo#112080]) -> [PASS][62] +6 similar issues
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_exec_parallel@vcs1-fds.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_exec_parallel@vcs1-fds.html

  * igt@gem_exec_schedule@in-order-bsd2:
    - shard-iclb:         [SKIP][63] ([fdo#109276]) -> [PASS][64] +15 similar issues
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@gem_exec_schedule@in-order-bsd2.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_exec_schedule@in-order-bsd2.html

  * igt@gem_exec_schedule@pi-shared-iova-bsd:
    - shard-iclb:         [SKIP][65] ([i915#677]) -> [PASS][66]
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb4/igt@gem_exec_schedule@pi-shared-iova-bsd.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb8/igt@gem_exec_schedule@pi-shared-iova-bsd.html

  * igt@gem_exec_schedule@preemptive-hang-bsd:
    - shard-iclb:         [SKIP][67] ([fdo#112146]) -> [PASS][68] +6 similar issues
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_exec_schedule@preemptive-hang-bsd.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_exec_schedule@preemptive-hang-bsd.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
    - shard-hsw:          [INCOMPLETE][69] ([i915#530] / [i915#61]) -> [PASS][70] +1 similar issue
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-thrashing:
    - shard-apl:          [INCOMPLETE][71] ([fdo#103927]) -> [PASS][72]
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl8/igt@gem_persistent_relocs@forked-thrashing.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl8/igt@gem_persistent_relocs@forked-thrashing.html

  * igt@i915_pm_rps@waitboost:
    - shard-iclb:         [FAIL][73] ([i915#413]) -> [PASS][74]
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb3/igt@i915_pm_rps@waitboost.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@i915_pm_rps@waitboost.html

  * igt@i915_selftest@mock_requests:
    - shard-glk:          [INCOMPLETE][75] ([i915#58] / [k.org#198133]) -> [PASS][76]
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk7/igt@i915_selftest@mock_requests.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk2/igt@i915_selftest@mock_requests.html
    - shard-tglb:         [INCOMPLETE][77] ([i915#472]) -> [PASS][78]
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@i915_selftest@mock_requests.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@i915_selftest@mock_requests.html

  * igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic:
    - shard-glk:          [FAIL][79] ([i915#72]) -> [PASS][80]
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk3/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk2/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html

  * igt@kms_cursor_legacy@flip-vs-cursor-toggle:
    - shard-skl:          [FAIL][81] ([IGT#5] / [i915#697]) -> [PASS][82]
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl3/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl5/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html

  * igt@kms_flip@flip-vs-suspend:
    - shard-apl:          [DMESG-WARN][83] ([i915#180]) -> [PASS][84] +1 similar issue
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl8/igt@kms_flip@flip-vs-suspend.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl2/igt@kms_flip@flip-vs-suspend.html

  * igt@kms_flip@modeset-vs-vblank-race-interruptible:
    - shard-glk:          [FAIL][85] ([i915#407]) -> [PASS][86]
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk6/igt@kms_flip@modeset-vs-vblank-race-interruptible.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk5/igt@kms_flip@modeset-vs-vblank-race-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack:
    - shard-tglb:         [FAIL][87] ([i915#49]) -> [PASS][88] +5 similar issues
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb2/igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb1/igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack.html

  * igt@kms_plane_alpha_blend@pipe-b-coverage-7efc:
    - shard-skl:          [FAIL][89] ([fdo#108145] / [i915#265]) -> [PASS][90]
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl5/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl1/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_mmap_cpu:
    - shard-iclb:         [SKIP][91] ([fdo#109441]) -> [PASS][92] +1 similar issue
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb5/igt@kms_psr@psr2_cursor_mmap_cpu.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_cpu.html

  * igt@prime_mmap_coherency@ioctl-errors:
    - shard-hsw:          [FAIL][93] ([i915#831]) -> [PASS][94]
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw2/igt@prime_mmap_coherency@ioctl-errors.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw5/igt@prime_mmap_coherency@ioctl-errors.html

  
#### Warnings ####

  * igt@gem_ctx_isolation@vcs1-nonpriv:
    - shard-iclb:         [SKIP][95] ([fdo#109276] / [fdo#112080]) -> [FAIL][96] ([IGT#28])
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_ctx_isolation@vcs1-nonpriv.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_ctx_isolation@vcs1-nonpriv.html

  * igt@gem_tiled_blits@interruptible:
    - shard-hsw:          [FAIL][97] ([i915#818]) -> [FAIL][98] ([i915#694])
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw1/igt@gem_tiled_blits@interruptible.html
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw5/igt@gem_tiled_blits@interruptible.html

  * igt@kms_atomic_transition@3x-modeset-transitions:
    - shard-hsw:          [SKIP][99] ([fdo#109271] / [i915#439]) -> [SKIP][100] ([fdo#109271])
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw5/igt@kms_atomic_transition@3x-modeset-transitions.html
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw8/igt@kms_atomic_transition@3x-modeset-transitions.html

  * igt@kms_dp_dsc@basic-dsc-enable-edp:
    - shard-iclb:         [SKIP][101] ([fdo#109349]) -> [DMESG-WARN][102] ([fdo#107724])
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@kms_dp_dsc@basic-dsc-enable-edp.html
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html

  
  [IGT#28]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/28
  [IGT#5]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/5
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109100]: https://bugs.freedesktop.org/show_bug.cgi?id=109100
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109349]: https://bugs.freedesktop.org/show_bug.cgi?id=109349
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841
  [fdo#112080]: https://bugs.freedesktop.org/show_bug.cgi?id=112080
  [fdo#112146]: https://bugs.freedesktop.org/show_bug.cgi?id=112146
  [fdo#112271]: https://bugs.freedesktop.org/show_bug.cgi?id=112271
  [i915#1028]: https://gitlab.freedesktop.org/drm/intel/issues/1028
  [i915#109]: https://gitlab.freedesktop.org/drm/intel/issues/109
  [i915#140]: https://gitlab.freedesktop.org/drm/intel/issues/140
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#198]: https://gitlab.freedesktop.org/drm/intel/issues/198
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#31]: https://gitlab.freedesktop.org/drm/intel/issues/31
  [i915#407]: https://gitlab.freedesktop.org/drm/intel/issues/407
  [i915#413]: https://gitlab.freedesktop.org/drm/intel/issues/413
  [i915#439]: https://gitlab.freedesktop.org/drm/intel/issues/439
  [i915#472]: https://gitlab.freedesktop.org/drm/intel/issues/472
  [i915#49]: https://gitlab.freedesktop.org/drm/intel/issues/49
  [i915#52]: https://gitlab.freedesktop.org/drm/intel/issues/52
  [i915#520]: https://gitlab.freedesktop.org/drm/intel/issues/520
  [i915#530]: https://gitlab.freedesktop.org/drm/intel/issues/530
  [i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
  [i915#58]: https://gitlab.freedesktop.org/drm/intel/issues/58
  [i915#61]: https://gitlab.freedesktop.org/drm/intel/issues/61
  [i915#677]: https://gitlab.freedesktop.org/drm/intel/issues/677
  [i915#679]: https://gitlab.freedesktop.org/drm/intel/issues/679
  [i915#694]: https://gitlab.freedesktop.org/drm/intel/issues/694
  [i915#697]: https://gitlab.freedesktop.org/drm/intel/issues/697
  [i915#72]: https://gitlab.freedesktop.org/drm/intel/issues/72
  [i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79
  [i915#818]: https://gitlab.freedesktop.org/drm/intel/issues/818
  [i915#831]: https://gitlab.freedesktop.org/drm/intel/issues/831
  [k.org#198133]: https://bugzilla.kernel.org/show_bug.cgi?id=198133


Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7806 -> Patchwork_16248

  CI-20190529: 20190529
  CI_DRM_7806: 0b551226df5e5b84044705d5fd76571da70f3163 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5382: 8dbe5ce61baa2d563d4dd7c56a018bb1e1077467 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_16248: 095e63821d5eb9749b1ff6d9a7a49e5c2a45d4bb @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/index.html

* [Intel-gfx] ✓ Fi.CI.IGT: success for Enable second DBuf slice for ICL and TGL (rev21)
  2020-01-24  8:44 [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL Stanislav Lisovskiy
                   ` (9 preceding siblings ...)
  2020-01-27 13:01 ` [Intel-gfx] ✓ Fi.CI.IGT: success " Patchwork
@ 2020-01-27 13:07 ` Patchwork
  10 siblings, 0 replies; 25+ messages in thread
From: Patchwork @ 2020-01-27 13:07 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

== Series Details ==

Series: Enable second DBuf slice for ICL and TGL (rev21)
URL   : https://patchwork.freedesktop.org/series/70059/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7806_full -> Patchwork_16248_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_16248_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_busy@close-race:
    - shard-hsw:          [PASS][1] -> [TIMEOUT][2] ([fdo#112271])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw8/igt@gem_busy@close-race.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw2/igt@gem_busy@close-race.html

  * igt@gem_ctx_isolation@vcs1-dirty-create:
    - shard-iclb:         [PASS][3] -> [SKIP][4] ([fdo#109276] / [fdo#112080]) +1 similar issue
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb2/igt@gem_ctx_isolation@vcs1-dirty-create.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb7/igt@gem_ctx_isolation@vcs1-dirty-create.html

  * igt@gem_exec_schedule@pi-common-bsd:
    - shard-iclb:         [PASS][5] -> [SKIP][6] ([i915#677])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb5/igt@gem_exec_schedule@pi-common-bsd.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_exec_schedule@pi-common-bsd.html

  * igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd:
    - shard-iclb:         [PASS][7] -> [SKIP][8] ([fdo#112146]) +1 similar issue
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb6/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd.html

  * igt@gem_exec_schedule@promotion-bsd1:
    - shard-iclb:         [PASS][9] -> [SKIP][10] ([fdo#109276]) +6 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_exec_schedule@promotion-bsd1.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_exec_schedule@promotion-bsd1.html

  * igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive:
    - shard-skl:          [PASS][11] -> [INCOMPLETE][12] ([i915#1028])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl1/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html

  * igt@gem_persistent_relocs@forked-faulting-reloc-thrashing:
    - shard-hsw:          [PASS][13] -> [INCOMPLETE][14] ([i915#530] / [i915#61])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw7/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw7/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
    - shard-iclb:         [PASS][15] -> [INCOMPLETE][16] ([i915#140])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb8/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
    - shard-snb:          [PASS][17] -> [FAIL][18] ([i915#520])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-snb5/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-snb2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-interruptible-thrash-inactive:
    - shard-iclb:         [PASS][19] -> [INCOMPLETE][20] ([fdo#109100] / [i915#140])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb6/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb3/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html

  * igt@gem_persistent_relocs@forked-interruptible-thrashing:
    - shard-skl:          [PASS][21] -> [INCOMPLETE][22] ([i915#530])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl6/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@gem_persistent_relocs@forked-interruptible-thrashing.html

  * igt@i915_pm_rps@reset:
    - shard-tglb:         [PASS][23] -> [FAIL][24] ([i915#413])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@i915_pm_rps@reset.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@i915_pm_rps@reset.html

  * igt@i915_selftest@mock_requests:
    - shard-skl:          [PASS][25] -> [INCOMPLETE][26] ([i915#198])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl10/igt@i915_selftest@mock_requests.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl4/igt@i915_selftest@mock_requests.html
    - shard-apl:          [PASS][27] -> [INCOMPLETE][28] ([fdo#103927]) +1 similar issue
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl7/igt@i915_selftest@mock_requests.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl8/igt@i915_selftest@mock_requests.html

  * igt@i915_suspend@debugfs-reader:
    - shard-apl:          [PASS][29] -> [DMESG-WARN][30] ([i915#180])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl2/igt@i915_suspend@debugfs-reader.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl4/igt@i915_suspend@debugfs-reader.html

  * igt@kms_color@pipe-b-ctm-0-25:
    - shard-skl:          [PASS][31] -> [DMESG-WARN][32] ([i915#109]) +2 similar issues
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl6/igt@kms_color@pipe-b-ctm-0-25.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl7/igt@kms_color@pipe-b-ctm-0-25.html

  * igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent:
    - shard-skl:          [PASS][33] -> [FAIL][34] ([i915#54]) +1 similar issue
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl3/igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl5/igt@kms_cursor_crc@pipe-b-cursor-alpha-transparent.html

  * igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled:
    - shard-skl:          [PASS][35] -> [FAIL][36] ([i915#52] / [i915#54])
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl4/igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl8/igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible:
    - shard-skl:          [PASS][37] -> [FAIL][38] ([i915#79])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl1/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl9/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
    - shard-glk:          [PASS][39] -> [FAIL][40] ([i915#79])
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk8/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk9/igt@kms_flip@flip-vs-expired-vblank-interruptible.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc:
    - shard-tglb:         [PASS][41] -> [FAIL][42] ([i915#49]) +3 similar issues
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-mmap-wc.html

  * igt@kms_plane_alpha_blend@pipe-a-coverage-7efc:
    - shard-skl:          [PASS][43] -> [FAIL][44] ([fdo#108145]) +1 similar issue
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl5/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl1/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_plane_move:
    - shard-iclb:         [PASS][45] -> [SKIP][46] ([fdo#109441]) +2 similar issues
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb2/igt@kms_psr@psr2_cursor_plane_move.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb1/igt@kms_psr@psr2_cursor_plane_move.html

  * igt@kms_setmode@basic:
    - shard-apl:          [PASS][47] -> [FAIL][48] ([i915#31])
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl4/igt@kms_setmode@basic.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl6/igt@kms_setmode@basic.html

  * igt@kms_vblank@pipe-a-ts-continuation-suspend:
    - shard-kbl:          [PASS][49] -> [DMESG-WARN][50] ([i915#180]) +6 similar issues
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-kbl2/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-kbl4/igt@kms_vblank@pipe-a-ts-continuation-suspend.html

  * igt@perf_pmu@busy-vcs1:
    - shard-iclb:         [PASS][51] -> [SKIP][52] ([fdo#112080]) +4 similar issues
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@perf_pmu@busy-vcs1.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb6/igt@perf_pmu@busy-vcs1.html

  
#### Possible fixes ####

  * igt@gem_ctx_isolation@rcs0-s3:
    - shard-kbl:          [DMESG-WARN][53] ([i915#180]) -> [PASS][54] +7 similar issues
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-kbl6/igt@gem_ctx_isolation@rcs0-s3.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-kbl2/igt@gem_ctx_isolation@rcs0-s3.html

  * igt@gem_ctx_isolation@vcs1-none:
    - shard-iclb:         [SKIP][55] ([fdo#109276] / [fdo#112080]) -> [PASS][56] +1 similar issue
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@gem_ctx_isolation@vcs1-none.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_ctx_isolation@vcs1-none.html

  * igt@gem_ctx_persistence@vecs0-mixed-process:
    - shard-glk:          [FAIL][57] ([i915#679]) -> [PASS][58]
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk3/igt@gem_ctx_persistence@vecs0-mixed-process.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk3/igt@gem_ctx_persistence@vecs0-mixed-process.html

  * igt@gem_ctx_shared@exec-single-timeline-bsd:
    - shard-iclb:         [SKIP][59] ([fdo#110841]) -> [PASS][60]
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_ctx_shared@exec-single-timeline-bsd.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_ctx_shared@exec-single-timeline-bsd.html

  * igt@gem_exec_parallel@vcs1-fds:
    - shard-iclb:         [SKIP][61] ([fdo#112080]) -> [PASS][62] +6 similar issues
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_exec_parallel@vcs1-fds.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_exec_parallel@vcs1-fds.html

  * igt@gem_exec_schedule@in-order-bsd2:
    - shard-iclb:         [SKIP][63] ([fdo#109276]) -> [PASS][64] +15 similar issues
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@gem_exec_schedule@in-order-bsd2.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@gem_exec_schedule@in-order-bsd2.html

  * igt@gem_exec_schedule@pi-shared-iova-bsd:
    - shard-iclb:         [SKIP][65] ([i915#677]) -> [PASS][66]
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb4/igt@gem_exec_schedule@pi-shared-iova-bsd.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb8/igt@gem_exec_schedule@pi-shared-iova-bsd.html

  * igt@gem_exec_schedule@preemptive-hang-bsd:
    - shard-iclb:         [SKIP][67] ([fdo#112146]) -> [PASS][68] +6 similar issues
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb1/igt@gem_exec_schedule@preemptive-hang-bsd.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@gem_exec_schedule@preemptive-hang-bsd.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
    - shard-hsw:          [INCOMPLETE][69] ([i915#530] / [i915#61]) -> [PASS][70] +1 similar issue
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-thrashing:
    - shard-apl:          [INCOMPLETE][71] ([fdo#103927]) -> [PASS][72]
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl8/igt@gem_persistent_relocs@forked-thrashing.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl8/igt@gem_persistent_relocs@forked-thrashing.html

  * igt@i915_pm_rps@waitboost:
    - shard-iclb:         [FAIL][73] ([i915#413]) -> [PASS][74]
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb3/igt@i915_pm_rps@waitboost.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb5/igt@i915_pm_rps@waitboost.html

  * igt@i915_selftest@mock_requests:
    - shard-glk:          [INCOMPLETE][75] ([i915#58] / [k.org#198133]) -> [PASS][76]
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk7/igt@i915_selftest@mock_requests.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk2/igt@i915_selftest@mock_requests.html
    - shard-tglb:         [INCOMPLETE][77] ([i915#472]) -> [PASS][78]
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb6/igt@i915_selftest@mock_requests.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb4/igt@i915_selftest@mock_requests.html

  * igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic:
    - shard-glk:          [FAIL][79] ([i915#72]) -> [PASS][80]
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk3/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk2/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html

  * igt@kms_cursor_legacy@flip-vs-cursor-toggle:
    - shard-skl:          [FAIL][81] ([IGT#5] / [i915#697]) -> [PASS][82]
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl3/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl5/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html

  * igt@kms_flip@flip-vs-suspend:
    - shard-apl:          [DMESG-WARN][83] ([i915#180]) -> [PASS][84] +1 similar issue
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-apl8/igt@kms_flip@flip-vs-suspend.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-apl2/igt@kms_flip@flip-vs-suspend.html

  * igt@kms_flip@modeset-vs-vblank-race-interruptible:
    - shard-glk:          [FAIL][85] ([i915#407]) -> [PASS][86]
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-glk6/igt@kms_flip@modeset-vs-vblank-race-interruptible.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-glk5/igt@kms_flip@modeset-vs-vblank-race-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack:
    - shard-tglb:         [FAIL][87] ([i915#49]) -> [PASS][88] +5 similar issues
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-tglb2/igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-tglb1/igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack.html

  * igt@kms_plane_alpha_blend@pipe-b-coverage-7efc:
    - shard-skl:          [FAIL][89] ([fdo#108145] / [i915#265]) -> [PASS][90]
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-skl5/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-skl1/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_mmap_cpu:
    - shard-iclb:         [SKIP][91] ([fdo#109441]) -> [PASS][92] +1 similar issue
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb5/igt@kms_psr@psr2_cursor_mmap_cpu.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_cpu.html

  * igt@prime_mmap_coherency@ioctl-errors:
    - shard-hsw:          [FAIL][93] ([i915#831]) -> [PASS][94]
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw2/igt@prime_mmap_coherency@ioctl-errors.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw5/igt@prime_mmap_coherency@ioctl-errors.html

  
#### Warnings ####

  * igt@gem_ctx_isolation@vcs1-nonpriv:
    - shard-iclb:         [SKIP][95] ([fdo#109276] / [fdo#112080]) -> [FAIL][96] ([IGT#28])
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb8/igt@gem_ctx_isolation@vcs1-nonpriv.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb4/igt@gem_ctx_isolation@vcs1-nonpriv.html

  * igt@gem_tiled_blits@interruptible:
    - shard-hsw:          [FAIL][97] ([i915#818]) -> [FAIL][98] ([i915#694])
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw1/igt@gem_tiled_blits@interruptible.html
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw5/igt@gem_tiled_blits@interruptible.html

  * igt@kms_atomic_transition@3x-modeset-transitions:
    - shard-hsw:          [SKIP][99] ([fdo#109271] / [i915#439]) -> [SKIP][100] ([fdo#109271])
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-hsw5/igt@kms_atomic_transition@3x-modeset-transitions.html
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-hsw8/igt@kms_atomic_transition@3x-modeset-transitions.html

  * igt@kms_dp_dsc@basic-dsc-enable-edp:
    - shard-iclb:         [SKIP][101] ([fdo#109349]) -> [DMESG-WARN][102] ([fdo#107724])
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7806/shard-iclb7/igt@kms_dp_dsc@basic-dsc-enable-edp.html
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html

  
  [IGT#28]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/28
  [IGT#5]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/5
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109100]: https://bugs.freedesktop.org/show_bug.cgi?id=109100
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109349]: https://bugs.freedesktop.org/show_bug.cgi?id=109349
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841
  [fdo#112080]: https://bugs.freedesktop.org/show_bug.cgi?id=112080
  [fdo#112146]: https://bugs.freedesktop.org/show_bug.cgi?id=112146
  [fdo#112271]: https://bugs.freedesktop.org/show_bug.cgi?id=112271
  [i915#1028]: https://gitlab.freedesktop.org/drm/intel/issues/1028
  [i915#109]: https://gitlab.freedesktop.org/drm/intel/issues/109
  [i915#140]: https://gitlab.freedesktop.org/drm/intel/issues/140
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#198]: https://gitlab.freedesktop.org/drm/intel/issues/198
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#31]: https://gitlab.freedesktop.org/drm/intel/issues/31
  [i915#407]: https://gitlab.freedesktop.org/drm/intel/issues/407
  [i915#413]: https://gitlab.freedesktop.org/drm/intel/issues/413
  [i915#439]: https://gitlab.freedesktop.org/drm/intel/issues/439
  [i915#472]: https://gitlab.freedesktop.org/drm/intel/issues/472
  [i915#49]: https://gitlab.freedesktop.org/drm/intel/issues/49
  [i915#52]: https://gitlab.freedesktop.org/drm/intel/issues/52
  [i915#520]: https://gitlab.freedesktop.org/drm/intel/issues/520
  [i915#530]: https://gitlab.freedesktop.org/drm/intel/issues/530
  [i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
  [i915#58]: https://gitlab.freedesktop.org/drm/intel/issues/58
  [i915#61]: https://gitlab.freedesktop.org/drm/intel/issues/61
  [i915#677]: https://gitlab.freedesktop.org/drm/intel/issues/677
  [i915#679]: https://gitlab.freedesktop.org/drm/intel/issues/679
  [i915#694]: https://gitlab.freedesktop.org/drm/intel/issues/694
  [i915#697]: https://gitlab.freedesktop.org/drm/intel/issues/697
  [i915#72]: https://gitlab.freedesktop.org/drm/intel/issues/72
  [i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79
  [i915#818]: https://gitlab.freedesktop.org/drm/intel/issues/818
  [i915#831]: https://gitlab.freedesktop.org/drm/intel/issues/831
  [k.org#198133]: https://bugzilla.kernel.org/show_bug.cgi?id=198133


Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7806 -> Patchwork_16248

  CI-20190529: 20190529
  CI_DRM_7806: 0b551226df5e5b84044705d5fd76571da70f3163 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5382: 8dbe5ce61baa2d563d4dd7c56a018bb1e1077467 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_16248: 095e63821d5eb9749b1ff6d9a7a49e5c2a45d4bb @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16248/index.html

* Re: [Intel-gfx] [PATCH v16 3/7] drm/i915: Introduce parameterized DBUF_CTL
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 3/7] drm/i915: Introduce parameterized DBUF_CTL Stanislav Lisovskiy
@ 2020-01-28 17:35   ` Ville Syrjälä
  2020-01-29  8:41     ` Lisovskiy, Stanislav
  0 siblings, 1 reply; 25+ messages in thread
From: Ville Syrjälä @ 2020-01-28 17:35 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

On Fri, Jan 24, 2020 at 10:44:52AM +0200, Stanislav Lisovskiy wrote:
> Now start using the parameterized DBUF_CTL instead
> of the hardcoded registers; this allows shorter access
> functions when reading or storing the entire state.
> 
> Tried to implement it in a MMIO_PIPE manner, however
> DBUF_CTL1 address is higher than DBUF_CTL2, which
> implies that we have to now subtract from base
> rather than add.
> 
> v2: - Removed unneeded DBUF_CTL_DIST and DBUF_CTL_ADDR
>       macros. Started to use _PICK construct as suggested
>       by Matt Roper.
> 
> v3: - DBUF_CTL_S* to _DBUF_CTL_S*, changed X to "slice"
>       in macro(Ville Syrjälä)
>     - Introduced enum for enumerating DBUF slices(Ville Syrjälä)
> 
> Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
> Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
> Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> ---
>  .../drm/i915/display/intel_display_power.c    | 30 +++++++++++--------
>  .../drm/i915/display/intel_display_power.h    |  5 ++++
>  drivers/gpu/drm/i915/i915_reg.h               |  7 +++--
>  drivers/gpu/drm/i915/intel_pm.c               |  2 +-
>  4 files changed, 28 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
> index 5e1c601f0f99..a59efb24be92 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_power.c
> +++ b/drivers/gpu/drm/i915/display/intel_display_power.c
> @@ -4418,9 +4418,11 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
>  		return;
>  
>  	if (req_slices > hw_enabled_slices)
> -		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
> +		ret = intel_dbuf_slice_set(dev_priv,
> +					   _DBUF_CTL_S(DBUF_S2), true);
>  	else
> -		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
> +		ret = intel_dbuf_slice_set(dev_priv,
> +					   _DBUF_CTL_S(DBUF_S2), false);
>  
>  	if (ret)
>  		dev_priv->enabled_dbuf_slices_num = req_slices;
> @@ -4428,14 +4430,16 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
>  
>  static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
>  {
> -	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
> -	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
> -	POSTING_READ(DBUF_CTL_S2);
> +	I915_WRITE(_DBUF_CTL_S(DBUF_S1),
> +		   I915_READ(_DBUF_CTL_S(DBUF_S1)) | DBUF_POWER_REQUEST);
> +	I915_WRITE(_DBUF_CTL_S(DBUF_S2),
> +		   I915_READ(_DBUF_CTL_S(DBUF_S2)) | DBUF_POWER_REQUEST);
> +	POSTING_READ(_DBUF_CTL_S(DBUF_S2));
>  
>  	udelay(10);
>  
> -	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
> -	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
> +	if (!(I915_READ(_DBUF_CTL_S(DBUF_S1)) & DBUF_POWER_STATE) ||
> +	    !(I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE))
>  		DRM_ERROR("DBuf power enable timeout\n");
>  	else
>  		/*
> @@ -4447,14 +4451,16 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
>  
>  static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
>  {
> -	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
> -	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
> -	POSTING_READ(DBUF_CTL_S2);
> +	I915_WRITE(_DBUF_CTL_S(DBUF_S1),
> +		   I915_READ(_DBUF_CTL_S(DBUF_S1)) & ~DBUF_POWER_REQUEST);
> +	I915_WRITE(_DBUF_CTL_S(DBUF_S2),
> +		   I915_READ(_DBUF_CTL_S(DBUF_S2)) & ~DBUF_POWER_REQUEST);
> +	POSTING_READ(_DBUF_CTL_S(DBUF_S2));
>  
>  	udelay(10);
>  
> -	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
> -	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
> +	if ((I915_READ(_DBUF_CTL_S(DBUF_S1)) & DBUF_POWER_STATE) ||
> +	    (I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE))
>  		DRM_ERROR("DBuf power disable timeout!\n");
>  	else
>  		/*
> diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
> index 2608a65af7fa..601e000ffd0d 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_power.h
> +++ b/drivers/gpu/drm/i915/display/intel_display_power.h
> @@ -307,6 +307,11 @@ intel_display_power_put_async(struct drm_i915_private *i915,
>  }
>  #endif
>  
> +enum dbuf_slice {
> +	DBUF_S1,
> +	DBUF_S2,
> +};
> +
>  #define with_intel_display_power(i915, domain, wf) \
>  	for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
>  	     intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> index b93c4c18f05c..625be54d3eae 100644
> --- a/drivers/gpu/drm/i915/i915_reg.h
> +++ b/drivers/gpu/drm/i915/i915_reg.h
> @@ -7748,9 +7748,10 @@ enum {
>  #define DISP_ARB_CTL2	_MMIO(0x45004)
>  #define  DISP_DATA_PARTITION_5_6	(1 << 6)
>  #define  DISP_IPC_ENABLE		(1 << 3)
> -#define DBUF_CTL	_MMIO(0x45008)
> -#define DBUF_CTL_S1	_MMIO(0x45008)
> -#define DBUF_CTL_S2	_MMIO(0x44FE8)
> +#define DBUF_CTL_ADDR1			0x45008
> +#define DBUF_CTL_ADDR2			0x44FE8
> +#define _DBUF_CTL_S(X)			_MMIO(_PICK_EVEN(X, DBUF_CTL_ADDR1, DBUF_CTL_ADDR2))

That's not at all what I meant. Also the 'X' is still there despite what
the changelog says.

#define _DBUF_CTL_S1	0x45008
#define _DBUF_CTL_S2	0x44FE8
#define DBUF_CTL_S(slice)	_MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
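
As a quick illustration of how the parameterized form gets used (a
sketch only, mirroring the slice-update loop later in the series rather
than code from this patch; assumes dev_priv is in scope as in the
existing dbuf helpers):

	enum dbuf_slice slice;

	/* request power for both slices via the parameterized register */
	for (slice = DBUF_S1; slice <= DBUF_S2; slice++)
		I915_WRITE(DBUF_CTL_S(slice),
			   I915_READ(DBUF_CTL_S(slice)) | DBUF_POWER_REQUEST);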


> +#define DBUF_CTL			_DBUF_CTL_S(0)
>  #define  DBUF_POWER_REQUEST		(1 << 31)
>  #define  DBUF_POWER_STATE		(1 << 30)
>  #define GEN7_MSG_CTL	_MMIO(0x45010)
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 04f94057d6b3..b8d78e26515c 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -3660,7 +3660,7 @@ u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
>  	 * only that 1 slice enabled until we have a proper way for on-demand
>  	 * toggling of the second slice.
>  	 */
> -	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
> +	if (0 && I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE)
>  		enabled_dbuf_slices_num++;
>  
>  	return enabled_dbuf_slices_num;
> -- 
> 2.24.1.485.gad05a3d8e5

-- 
Ville Syrjälä
Intel

* Re: [Intel-gfx] [PATCH v16 5/7] drm/i915: Correctly map DBUF slices to pipes
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 5/7] drm/i915: Correctly map DBUF slices to pipes Stanislav Lisovskiy
@ 2020-01-28 23:15   ` Matt Roper
  2020-01-28 23:38     ` Matt Roper
  2020-01-29  9:03     ` Lisovskiy, Stanislav
  0 siblings, 2 replies; 25+ messages in thread
From: Matt Roper @ 2020-01-28 23:15 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

On Fri, Jan 24, 2020 at 10:44:54AM +0200, Stanislav Lisovskiy wrote:
> Added proper DBuf slice mapping to corresponding
> pipes, depending on pipe configuration as stated
> in BSpec.
> 
> v2:
>     - Remove unneeded braces
>     - Stop using macro for DBuf assignments as
>       it seems to reduce readability.
> 
> v3: Start using enabled slices mask in dev_priv
> 
> v4: Renamed "enabled_slices" used in dev_priv
>     to "enabled_dbuf_slices_mask"(Matt Roper)
> 
> v5: - Removed redundant parameters from
>       intel_get_ddb_size function.(Matt Roper)
>     - Made i915_possible_dbuf_slices static(Matt Roper)
>     - Renamed total_width into total_width_in_range
>       so that it now reflects that this is not
>       a total pipe width but the one in current
>       dbuf slice allowed range for pipe.(Matt Roper)
>     - Removed 4th pipe for ICL in DBuf assignment
>       table(Matt Roper)
>     - Fixed wrong DBuf slice in DBuf table for TGL
>       (Matt Roper)
>     - Added comment regarding why we currently not
>       using pipe ratio for DBuf assignment for ICL
> 
> v6: - Changed u32 to unsigned int in
>       icl_get_first_dbuf_slice_offset function signature
>       (Ville Syrjälä)
>     - Changed also u32 to u8 in dbuf slice mask structure
>       (Ville Syrjälä)
>     - Switched from DBUF_S1_BIT to enum + explicit
>       BIT(DBUF_S1) access(Ville Syrjälä)
>     - Switched to named initializers in DBuf assignment
>       arrays(Ville Syrjälä)
>     - DBuf assignment arrays now use autogeneration tool
>       from
>       https://patchwork.freedesktop.org/series/70493/
>       to avoid typos.
>     - Renamed i915_find_pipe_conf to *_compute_dbuf_slices
>       (Ville Syrjälä)
>     - Changed platforms ordering in skl_compute_dbuf_slices
>       to be from newest to oldest(Ville Syrjälä)
> 
> v7: - Now ORing assigned DBuf slice config always with DBUF_S1
>       because slice 1 has to be constantly powered on.
>       (Ville Syrjälä)
> 
> v8: - Added pipe_name for neater printing(Ville Syrjälä)
>     - Renamed width_before_pipe to width_before_pipe_in_range,
>       to better reflect that now all the calculations are happening
>       inside DBuf range allowed by current pipe configuration mask
>       (Ville Syrjälä)
>     - Shortened FIXME comment message, regarding constant ORing with
>       DBUF_S1(Ville Syrjälä)
>     - Added .dbuf_mask named initializer to pipe assignment array
>       (Ville Syrjälä)
>     - Edited pipe assignment array to use only single DBuf slice
>       for gen11 single pipe configurations, until "pipe ratio"
>       thing is finally sorted out(Ville Syrjälä)
>     - Removed unused parameter crtc_state for now(Ville Syrjälä)
>       from icl/tgl_compute_dbuf_slices function
> 
> Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> ---
>  drivers/gpu/drm/i915/intel_pm.c | 385 ++++++++++++++++++++++++++++++--
>  1 file changed, 366 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index ca5b34d297d9..92c4d4624092 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -3856,13 +3856,29 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
>  	return true;
>  }
>  
> -static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
> -			      const struct intel_crtc_state *crtc_state,
> -			      const u64 total_data_rate,
> -			      const int num_active)
> +/*
> + * Calculate initial DBuf slice offset, based on slice size
> + * and mask(i.e if slice size is 1024 and second slice is enabled
> + * offset would be 1024)
> + */
> +static unsigned int
> +icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
> +				u32 slice_size,
> +				u32 ddb_size)
> +{
> +	unsigned int offset = 0;
> +
> +	if (!dbuf_slice_mask)
> +		return 0;
> +
> +	offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
> +
> +	WARN_ON(offset >= ddb_size);
> +	return offset;
> +}
> +
> +static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
>  {
> -	struct drm_atomic_state *state = crtc_state->uapi.state;
> -	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
>  	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
>  
>  	drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
> @@ -3870,12 +3886,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
>  	if (INTEL_GEN(dev_priv) < 11)
>  		return ddb_size - 4; /* 4 blocks for bypass path allocation */
>  
> -	intel_state->enabled_dbuf_slices_mask = BIT(DBUF_S1);
> -	ddb_size /= 2;
> -
>  	return ddb_size;
>  }
>  
> +static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
> +				  u32 active_pipes);
> +
>  static void
>  skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
>  				   const struct intel_crtc_state *crtc_state,
> @@ -3887,10 +3903,17 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
>  	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
>  	struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
>  	const struct intel_crtc *crtc;
> -	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
> +	u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
>  	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
>  	u16 ddb_size;
> +	u32 ddb_range_size;
>  	u32 i;
> +	u32 dbuf_slice_mask;
> +	u32 active_pipes;
> +	u32 offset;
> +	u32 slice_size;
> +	u32 total_slice_mask;
> +	u32 start, end;
>  
>  	if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) {
>  		alloc->start = 0;
> @@ -3900,12 +3923,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
>  	}
>  
>  	if (intel_state->active_pipe_changes)
> -		*num_active = hweight8(intel_state->active_pipes);
> +		active_pipes = intel_state->active_pipes;
>  	else
> -		*num_active = hweight8(dev_priv->active_pipes);
> +		active_pipes = dev_priv->active_pipes;
> +
> +	*num_active = hweight8(active_pipes);
> +
> +	ddb_size = intel_get_ddb_size(dev_priv);
>  
> -	ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
> -				      *num_active);
> +	slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
>  
>  	/*
>  	 * If the state doesn't change the active CRTC's or there is no
> @@ -3924,31 +3950,96 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
>  		return;
>  	}
>  
> +	/*
> +	 * Get allowed DBuf slices for correspondent pipe and platform.
> +	 */
> +	dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
> +
> +	DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n",
> +		      dbuf_slice_mask,
> +		      pipe_name(for_pipe), active_pipes);
> +
> +	/*
> +	 * Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
> +	 * and slice size is 1024, the offset would be 1024
> +	 */
> +	offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
> +						 slice_size, ddb_size);
> +
> +	/*
> +	 * Figure out total size of allowed DBuf slices, which is basically
> +	 * a number of allowed slices for that pipe multiplied by slice size.
> +	 * Inside of this
> +	 * range ddb entries are still allocated in proportion to display width.
> +	 */
> +	ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
> +
>  	/*
>  	 * Watermark/ddb requirement highly depends upon width of the
>  	 * framebuffer, So instead of allocating DDB equally among pipes
>  	 * distribute DDB based on resolution/width of the display.
>  	 */
> +	total_slice_mask = dbuf_slice_mask;
>  	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
>  		const struct drm_display_mode *adjusted_mode =
>  			&crtc_state->hw.adjusted_mode;
>  		enum pipe pipe = crtc->pipe;
>  		int hdisplay, vdisplay;
> +		u32 pipe_dbuf_slice_mask;
>  
> -		if (!crtc_state->hw.enable)
> +		if (!crtc_state->hw.active)
> +			continue;
> +
> +		pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
> +							       active_pipes);
> +
> +		/*
> +		 * According to BSpec pipe can share one dbuf slice with another
> +		 * pipes or pipe can use multiple dbufs, in both cases we
> +		 * account for other pipes only if they have exactly same mask.
> +		 * However we need to account how many slices we should enable
> +		 * in total.
> +		 */
> +		total_slice_mask |= pipe_dbuf_slice_mask;
> +
> +		/*
> +		 * Do not account pipes using other slice sets
> +		 * luckily as of current BSpec slice sets do not partially
> +		 * intersect(pipes share either same one slice or same slice set
> +		 * i.e no partial intersection), so it is enough to check for
> +		 * equality for now.
> +		 */
> +		if (dbuf_slice_mask != pipe_dbuf_slice_mask)
>  			continue;
>  
>  		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
> -		total_width += hdisplay;
> +
> +		total_width_in_range += hdisplay;
>  
>  		if (pipe < for_pipe)
> -			width_before_pipe += hdisplay;
> +			width_before_pipe_in_range += hdisplay;
>  		else if (pipe == for_pipe)
>  			pipe_width = hdisplay;
>  	}
>  
> -	alloc->start = ddb_size * width_before_pipe / total_width;
> -	alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
> +	/*
> +	 * FIXME: For now we always enable slice S1 as per
> +	 * the Bspec display initialization sequence.
> +	 */
> +	intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1);
> +
> +	start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
> +	end = ddb_range_size *
> +		(width_before_pipe_in_range + pipe_width) / total_width_in_range;
> +
> +	alloc->start = offset + start;
> +	alloc->end = offset + end;
> +
> +	DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
> +		      alloc->start, alloc->end);
> +	DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
> +		      intel_state->enabled_dbuf_slices_mask,
> +		      INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
>  }
>  
>  static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
> @@ -4119,6 +4210,262 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
>  	return mul_fixed16(downscale_w, downscale_h);
>  }
>  
> +struct dbuf_slice_conf_entry {
> +	u8 active_pipes;
> +	u8 dbuf_mask[I915_MAX_PIPES];
> +};
> +
> +/*
> + * Table taken from Bspec 12716
> + * Pipes do have some preferred DBuf slice affinity,
> + * plus there are some hardcoded requirements on how
> + * those should be distributed for multipipe scenarios.
> + * For more DBuf slices algorithm can get even more messy
> + * and less readable, so decided to use a table almost
> + * as is from BSpec itself - that way it is at least easier
> + * to compare, change and check.
> + */
> +static struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
> +/* Autogenerated with igt/tools/intel_dbuf_map tool: */
> +{
> +	{
> +		.active_pipes = BIT(PIPE_A),
> +		.dbuf_mask = {
> +			[PIPE_A] = BIT(DBUF_S1)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_B),
> +		.dbuf_mask = {
> +			[PIPE_B] = BIT(DBUF_S1)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
> +		.dbuf_mask = {
> +			[PIPE_A] = BIT(DBUF_S1),
> +			[PIPE_B] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_C),
> +		.dbuf_mask = {
> +			[PIPE_C] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
> +		.dbuf_mask = {
> +			[PIPE_A] = BIT(DBUF_S1),
> +			[PIPE_C] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
> +		.dbuf_mask = {
> +			[PIPE_B] = BIT(DBUF_S1),
> +			[PIPE_C] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
> +		.dbuf_mask = {
> +			[PIPE_A] = BIT(DBUF_S1),
> +			[PIPE_B] = BIT(DBUF_S1),
> +			[PIPE_C] = BIT(DBUF_S2)
> +		}
> +	},
> +};
> +
> +/*
> + * Table taken from Bspec 49255
> + * Pipes do have some preferred DBuf slice affinity,
> + * plus there are some hardcoded requirements on how
> + * those should be distributed for multipipe scenarios.
> + * For more DBuf slices algorithm can get even more messy
> + * and less readable, so decided to use a table almost
> + * as is from BSpec itself - that way it is at least easier
> + * to compare, change and check.
> + */
> +static struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
> +/* Autogenerated with igt/tools/intel_dbuf_map tool: */
> +{
> +	{
> +		.active_pipes = BIT(PIPE_A),
> +		.dbuf_mask = {
> +			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_B),
> +		.dbuf_mask = {
> +			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
> +		.dbuf_mask = {
> +			[PIPE_A] = BIT(DBUF_S2),
> +			[PIPE_B] = BIT(DBUF_S1)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_C),
> +		.dbuf_mask = {
> +			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
> +		.dbuf_mask = {
> +			[PIPE_A] = BIT(DBUF_S1),
> +			[PIPE_C] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
> +		.dbuf_mask = {
> +			[PIPE_B] = BIT(DBUF_S1),
> +			[PIPE_C] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
> +		.dbuf_mask = {
> +			[PIPE_A] = BIT(DBUF_S1),
> +			[PIPE_B] = BIT(DBUF_S1),
> +			[PIPE_C] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_D),
> +		.dbuf_mask = {
> +			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
> +		.dbuf_mask = {
> +			[PIPE_A] = BIT(DBUF_S1),
> +			[PIPE_D] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
> +		.dbuf_mask = {
> +			[PIPE_B] = BIT(DBUF_S1),
> +			[PIPE_D] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
> +		.dbuf_mask = {
> +			[PIPE_A] = BIT(DBUF_S1),
> +			[PIPE_B] = BIT(DBUF_S1),
> +			[PIPE_D] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
> +		.dbuf_mask = {
> +			[PIPE_C] = BIT(DBUF_S1),
> +			[PIPE_D] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
> +		.dbuf_mask = {
> +			[PIPE_A] = BIT(DBUF_S1),
> +			[PIPE_C] = BIT(DBUF_S2),
> +			[PIPE_D] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
> +		.dbuf_mask = {
> +			[PIPE_B] = BIT(DBUF_S1),
> +			[PIPE_C] = BIT(DBUF_S2),
> +			[PIPE_D] = BIT(DBUF_S2)
> +		}
> +	},
> +	{
> +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
> +		.dbuf_mask = {
> +			[PIPE_A] = BIT(DBUF_S1),
> +			[PIPE_B] = BIT(DBUF_S1),
> +			[PIPE_C] = BIT(DBUF_S2),
> +			[PIPE_D] = BIT(DBUF_S2)
> +		}
> +	},
> +};
> +
> +static u8 compute_dbuf_slices(enum pipe pipe,
> +			      u32 active_pipes,
> +			      const struct dbuf_slice_conf_entry *dbuf_slices,
> +			      int size)
> +{
> +	int i;
> +
> +	for (i = 0; i < size; i++) {
> +		if (dbuf_slices[i].active_pipes == active_pipes)
> +			return dbuf_slices[i].dbuf_mask[pipe];
> +	}
> +	return 0;
> +}
> +
> +/*
> + * This function finds an entry with same enabled pipe configuration and
> + * returns correspondent DBuf slice mask as stated in BSpec for particular
> + * platform.
> + */
> +static u32 icl_compute_dbuf_slices(enum pipe pipe,
> +				   u32 active_pipes)
> +{
> +	/*
> +	 * FIXME: For ICL this is still a bit unclear as prev BSpec revision
> +	 * required calculating "pipe ratio" in order to determine
> +	 * if one or two slices can be used for single pipe configurations
> +	 * as additional constraint to the existing table.
> +	 * However based on recent info, it should be not "pipe ratio"
> +	 * but rather ratio between pixel_rate and cdclk with additional
> +	 * constants, so for now we are using only table until this is
> +	 * clarified. Also this is the reason why crtc_state param is
> +	 * still here - we will need it once those additional constraints
> +	 * pop up.

The last part of this comment no longer applies --- crtc_state is no
longer a parameter here.

I haven't heard any recent discussion with the hardware folks --- if the
bspec is still unclear in this area, is it safe to try to enable the
second dbuf slice at this time?  I'm worried that we might add
regressions due to the incomplete hardware documentation.  Should we
initially only enable it on TGL until the bspec gets clarified?  Or at
least only enable it on ICL/EHL as a completely separate patch that's
really easy to revert?  AFAIK, we don't yet have EHL machines in CI, so
even if CI results come back clean on ICL, I'd still be a little bit
nervous about regressing EHL/JSL.

> +	 */
> +	return compute_dbuf_slices(pipe, active_pipes,
> +				   icl_allowed_dbufs,
> +				   ARRAY_SIZE(icl_allowed_dbufs));
> +}
> +
> +static u32 tgl_compute_dbuf_slices(enum pipe pipe,
> +				   u32 active_pipes)
> +{
> +	return compute_dbuf_slices(pipe, active_pipes,
> +				   tgl_allowed_dbufs,
> +				   ARRAY_SIZE(tgl_allowed_dbufs));
> +}
> +
> +static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
> +				  u32 active_pipes)

Given that this is basically a common frontend function that just
dispatches to an appropriate per-platform handler, maybe we should
rename this to an intel_ prefix rather than skl_?  Up to you.
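
Something like the following, i.e. the body stays exactly as posted and
only the dispatcher's name changes (hypothetical sketch, not part of
the series):

	static u8 intel_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
					    u32 active_pipes)
	{
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
		struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
		enum pipe pipe = crtc->pipe;

		if (IS_GEN(dev_priv, 12))
			return tgl_compute_dbuf_slices(pipe, active_pipes);
		else if (IS_GEN(dev_priv, 11))
			return icl_compute_dbuf_slices(pipe, active_pipes);

		/* one slice for everything else, for now */
		return BIT(DBUF_S1);
	}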

Aside from this and the comments above,

Reviewed-by: Matt Roper <matthew.d.roper@intel.com>

> +{
> +	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
> +	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
> +	enum pipe pipe = crtc->pipe;
> +
> +	if (IS_GEN(dev_priv, 12))
> +		return tgl_compute_dbuf_slices(pipe,
> +					       active_pipes);
> +	else if (IS_GEN(dev_priv, 11))
> +		return icl_compute_dbuf_slices(pipe,
> +					       active_pipes);
> +	/*
> +	 * For anything else just return one slice yet.
> +	 * Should be extended for other platforms.
> +	 */
> +	return BIT(DBUF_S1);
> +}
> +
>  static u64
>  skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
>  			     const struct intel_plane_state *plane_state,
> -- 
> 2.24.1.485.gad05a3d8e5
> 

-- 
Matt Roper
Graphics Software Engineer
VTT-OSGC Platform Enablement
Intel Corporation
(916) 356-2795

* Re: [Intel-gfx] [PATCH v16 6/7] drm/i915: Protect intel_dbuf_slices_update with mutex
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 6/7] drm/i915: Protect intel_dbuf_slices_update with mutex Stanislav Lisovskiy
@ 2020-01-28 23:33   ` Matt Roper
  2020-01-29  9:22     ` Lisovskiy, Stanislav
  2020-01-31 15:22     ` Ville Syrjälä
  0 siblings, 2 replies; 25+ messages in thread
From: Matt Roper @ 2020-01-28 23:33 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

On Fri, Jan 24, 2020 at 10:44:55AM +0200, Stanislav Lisovskiy wrote:
> Now using the power_domain mutex to protect against a race condition:
> intel_dbuf_slices_update might run in parallel with
> gen9_dc_off_power_well_enable being called from intel_dp_detect,
> for instance, which triggers an assertion, as
> gen9_assert_dbuf_enabled might preempt this when the registers were
> already updated while dev_priv was not.

I may be overlooking something, but I think your next patch already
takes care of this by ensuring we only do dbuf updates during modesets.
We already had POWER_DOMAIN_MODESET in our various DC_OFF_POWER_DOMAINS
definitions which would ensure that the "DC off" power well is enabled
(and DC states themselves are disabled) for the entire duration of the
modeset process.
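
Concretely, whatever holds a POWER_DOMAIN_MODESET reference keeps the
"DC off" well enabled, and the commit path takes such a reference for a
full modeset; roughly (illustrative sketch only, using the existing
display power API):

	intel_wakeref_t wakeref;

	/* holding POWER_DOMAIN_MODESET keeps DC states disabled */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
	/* ... reprogram DBUF_CTL slices / run the modeset ... */
	intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);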

If we need this, I'm not sure whether it's a good idea to use
power_domains->lock rather than a new, dedicated lock.  Anything that
touches power domains in any manner grabs this lock, even though we only
really care about it for stopping races with the specific "DC off" power
well.
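
A dedicated lock would be a small addition, along these lines (sketch
only; "dbuf_lock" is a made-up name, and the enable expression is an
assumption based on the bool that intel_dbuf_slice_set() takes in the
earlier patch):

	/* hypothetical: in struct drm_i915_private, mutex_init()ed at driver load */
	struct mutex dbuf_lock; /* protects DBUF_CTL programming + enabled_dbuf_slices_mask */

	/* and in icl_dbuf_slices_update() */
	mutex_lock(&dev_priv->dbuf_lock);
	for (i = 0; i < max_slices; i++)
		intel_dbuf_slice_set(dev_priv, _DBUF_CTL_S(i),
				     (BIT(i) & req_slices) != 0);
	dev_priv->enabled_dbuf_slices_mask = req_slices;
	mutex_unlock(&dev_priv->dbuf_lock);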

Also, if we bisect to the point right before these last two patches,
don't we have a problem since there's a point in the git history where
we potentially face a race?


Matt

> 
> Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> ---
>  drivers/gpu/drm/i915/display/intel_display_power.c | 12 ++++++++++++
>  1 file changed, 12 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
> index 96b38252578b..99ddc21e004c 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_power.c
> +++ b/drivers/gpu/drm/i915/display/intel_display_power.c
> @@ -4404,12 +4404,22 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
>  {
>  	int i;
>  	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
> +	struct i915_power_domains *power_domains = &dev_priv->power_domains;
>  
>  	WARN(hweight8(req_slices) > max_slices,
>  	     "Invalid number of dbuf slices requested\n");
>  
>  	DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
>  
> +	/*
> +	 * Might be running this in parallel to gen9_dc_off_power_well_enable
> +	 * being called from intel_dp_detect for instance,
> +	 * which causes assertion triggered by race condition,
> +	 * as gen9_assert_dbuf_enabled might preempt this when registers
> +	 * were already updated, while dev_priv was not.
> +	 */
> +	mutex_lock(&power_domains->lock);
> +
>  	for (i = 0; i < max_slices; i++) {
>  		intel_dbuf_slice_set(dev_priv,
>  				     _DBUF_CTL_S(i),
> @@ -4417,6 +4427,8 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
>  	}
>  
>  	dev_priv->enabled_dbuf_slices_mask = req_slices;
> +
> +	mutex_unlock(&power_domains->lock);
>  }
>  
>  static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
> -- 
> 2.24.1.485.gad05a3d8e5
> 

-- 
Matt Roper
Graphics Software Engineer
VTT-OSGC Platform Enablement
Intel Corporation
(916) 356-2795

* Re: [Intel-gfx] [PATCH v16 7/7] drm/i915: Update dbuf slices only with full modeset
  2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 7/7] drm/i915: Update dbuf slices only with full modeset Stanislav Lisovskiy
@ 2020-01-28 23:37   ` Matt Roper
  2020-01-31 15:10     ` Ville Syrjälä
  0 siblings, 1 reply; 25+ messages in thread
From: Matt Roper @ 2020-01-28 23:37 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

On Fri, Jan 24, 2020 at 10:44:56AM +0200, Stanislav Lisovskiy wrote:
> During a full modeset, global state (i.e. dev_priv) is protected
> by locking the crtcs in the state; otherwise global state is not
> serialized. Also, if it is not a full modeset, we don't need to
> change the DBuf slice configuration anyway, as the pipe
> configuration doesn't change.

Looks correct, but don't we need this earlier so that we don't have a
bad bisection point in the git history (assuming we rely on this rather
than the extra locking from the previous patch to cover the DC off
race)?


Matt

> 
> Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> ---
>  drivers/gpu/drm/i915/display/intel_display.c | 6 ++++--
>  1 file changed, 4 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
> index 1c957df5c28c..888a9e94032e 100644
> --- a/drivers/gpu/drm/i915/display/intel_display.c
> +++ b/drivers/gpu/drm/i915/display/intel_display.c
> @@ -15373,7 +15373,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
>  		intel_encoders_update_prepare(state);
>  
>  	/* Enable all new slices, we might need */
> -	icl_dbuf_slice_pre_update(state);
> +	if (state->modeset)
> +		icl_dbuf_slice_pre_update(state);
>  
>  	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
>  	dev_priv->display.commit_modeset_enables(state);
> @@ -15432,7 +15433,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
>  	}
>  
>  	/* Disable all slices, we don't need */
> -	icl_dbuf_slice_post_update(state);
> +	if (state->modeset)
> +		icl_dbuf_slice_post_update(state);
>  
>  	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
>  		intel_post_plane_update(state, crtc);
> -- 
> 2.24.1.485.gad05a3d8e5
> 

-- 
Matt Roper
Graphics Software Engineer
VTT-OSGC Platform Enablement
Intel Corporation
(916) 356-2795

* Re: [Intel-gfx] [PATCH v16 5/7] drm/i915: Correctly map DBUF slices to pipes
  2020-01-28 23:15   ` Matt Roper
@ 2020-01-28 23:38     ` Matt Roper
  2020-01-29  9:03     ` Lisovskiy, Stanislav
  1 sibling, 0 replies; 25+ messages in thread
From: Matt Roper @ 2020-01-28 23:38 UTC (permalink / raw)
  To: Stanislav Lisovskiy; +Cc: intel-gfx

On Tue, Jan 28, 2020 at 03:15:30PM -0800, Matt Roper wrote:
> On Fri, Jan 24, 2020 at 10:44:54AM +0200, Stanislav Lisovskiy wrote:
> > Added proper DBuf slice mapping to correspondent
> > pipes, depending on pipe configuration as stated
> > in BSpec.
> > 
> > v2:
> >     - Remove unneeded braces
> >     - Stop using macro for DBuf assignments as
> >       it seems to reduce readability.
> > 
> > v3: Start using enabled slices mask in dev_priv
> > 
> > v4: Renamed "enabled_slices" used in dev_priv
> >     to "enabled_dbuf_slices_mask"(Matt Roper)
> > 
> > v5: - Removed redundant parameters from
> >       intel_get_ddb_size function.(Matt Roper)
> >     - Made i915_possible_dbuf_slices static(Matt Roper)
> >     - Renamed total_width into total_width_in_range
> >       so that it now reflects that this is not
> >       a total pipe width but the one in current
> >       dbuf slice allowed range for pipe.(Matt Roper)
> >     - Removed 4th pipe for ICL in DBuf assignment
> >       table(Matt Roper)
> >     - Fixed wrong DBuf slice in DBuf table for TGL
> >       (Matt Roper)
> >     - Added comment regarding why we currently not
> >       using pipe ratio for DBuf assignment for ICL
> > 
> > v6: - Changed u32 to unsigned int in
> >       icl_get_first_dbuf_slice_offset function signature
> >       (Ville Syrjälä)
> >     - Changed also u32 to u8 in dbuf slice mask structure
> >       (Ville Syrjälä)
> >     - Switched from DBUF_S1_BIT to enum + explicit
> >       BIT(DBUF_S1) access(Ville Syrjälä)
> >     - Switched to named initializers in DBuf assignment
> >       arrays(Ville Syrjälä)
> >     - DBuf assignment arrays now use autogeneration tool
> >       from
> >       https://patchwork.freedesktop.org/series/70493/
> >       to avoid typos.
> >     - Renamed i915_find_pipe_conf to *_compute_dbuf_slices
> >       (Ville Syrjälä)
> >     - Changed platforms ordering in skl_compute_dbuf_slices
> >       to be from newest to oldest(Ville Syrjälä)
> > 
> > v7: - Now ORing assigned DBuf slice config always with DBUF_S1
> >       because slice 1 has to be constantly powered on.
> >       (Ville Syrjälä)
> > 
> > v8: - Added pipe_name for neater printing(Ville Syrjälä)
> >     - Renamed width_before_pipe to width_before_pipe_in_range,
> >       to better reflect that now all the calculations are happening
> >       inside DBuf range allowed by current pipe configuration mask
> >       (Ville Syrjälä)
> >     - Shortened FIXME comment message, regarding constant ORing with
> >       DBUF_S1(Ville Syrjälä)
> >     - Added .dbuf_mask named initializer to pipe assignment array
> >       (Ville Syrjälä)
> >     - Edited pipe assignment array to use only single DBuf slice
> >       for gen11 single pipe configurations, until "pipe ratio"
> >       thing is finally sorted out(Ville Syrjälä)
> >     - Removed unused parameter crtc_state for now(Ville Syrjälä)
> >       from icl/tgl_compute_dbuf_slices function
> > 
> > Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> > ---
> >  drivers/gpu/drm/i915/intel_pm.c | 385 ++++++++++++++++++++++++++++++--
> >  1 file changed, 366 insertions(+), 19 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> > index ca5b34d297d9..92c4d4624092 100644
> > --- a/drivers/gpu/drm/i915/intel_pm.c
> > +++ b/drivers/gpu/drm/i915/intel_pm.c
> > @@ -3856,13 +3856,29 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
> >  	return true;
> >  }
> >  
> > -static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
> > -			      const struct intel_crtc_state *crtc_state,
> > -			      const u64 total_data_rate,
> > -			      const int num_active)
> > +/*
> > + * Calculate initial DBuf slice offset, based on slice size
> > + * and mask(i.e if slice size is 1024 and second slice is enabled
> > + * offset would be 1024)
> > + */
> > +static unsigned int
> > +icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
> > +				u32 slice_size,
> > +				u32 ddb_size)
> > +{
> > +	unsigned int offset = 0;
> > +
> > +	if (!dbuf_slice_mask)
> > +		return 0;
> > +
> > +	offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
> > +
> > +	WARN_ON(offset >= ddb_size);
> > +	return offset;
> > +}
> > +
> > +static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
> >  {
> > -	struct drm_atomic_state *state = crtc_state->uapi.state;
> > -	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
> >  	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
> >  
> >  	drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
> > @@ -3870,12 +3886,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
> >  	if (INTEL_GEN(dev_priv) < 11)
> >  		return ddb_size - 4; /* 4 blocks for bypass path allocation */
> >  
> > -	intel_state->enabled_dbuf_slices_mask = BIT(DBUF_S1);
> > -	ddb_size /= 2;
> > -
> >  	return ddb_size;
> >  }
> >  
> > +static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
> > +				  u32 active_pipes);
> > +
> >  static void
> >  skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
> >  				   const struct intel_crtc_state *crtc_state,
> > @@ -3887,10 +3903,17 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
> >  	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
> >  	struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
> >  	const struct intel_crtc *crtc;
> > -	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
> > +	u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
> >  	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
> >  	u16 ddb_size;
> > +	u32 ddb_range_size;
> >  	u32 i;
> > +	u32 dbuf_slice_mask;
> > +	u32 active_pipes;
> > +	u32 offset;
> > +	u32 slice_size;
> > +	u32 total_slice_mask;
> > +	u32 start, end;
> >  
> >  	if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) {
> >  		alloc->start = 0;
> > @@ -3900,12 +3923,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
> >  	}
> >  
> >  	if (intel_state->active_pipe_changes)
> > -		*num_active = hweight8(intel_state->active_pipes);
> > +		active_pipes = intel_state->active_pipes;
> >  	else
> > -		*num_active = hweight8(dev_priv->active_pipes);
> > +		active_pipes = dev_priv->active_pipes;
> > +
> > +	*num_active = hweight8(active_pipes);
> > +
> > +	ddb_size = intel_get_ddb_size(dev_priv);
> >  
> > -	ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
> > -				      *num_active);
> > +	slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
> >  
> >  	/*
> >  	 * If the state doesn't change the active CRTC's or there is no
> > @@ -3924,31 +3950,96 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
> >  		return;
> >  	}
> >  
> > +	/*
> > +	 * Get allowed DBuf slices for correspondent pipe and platform.
> > +	 */
> > +	dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
> > +
> > +	DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n",
> > +		      dbuf_slice_mask,
> > +		      pipe_name(for_pipe), active_pipes);
> > +
> > +	/*
> > +	 * Figure out at which DBuf slice we start, i.e if we start at Dbuf S2
> > +	 * and slice size is 1024, the offset would be 1024
> > +	 */
> > +	offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
> > +						 slice_size, ddb_size);
> > +
> > +	/*
> > +	 * Figure out total size of allowed DBuf slices, which is basically
> > +	 * a number of allowed slices for that pipe multiplied by slice size.
> > +	 * Inside of this
> > +	 * range ddb entries are still allocated in proportion to display width.
> > +	 */
> > +	ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
> > +
> >  	/*
> >  	 * Watermark/ddb requirement highly depends upon width of the
> >  	 * framebuffer, So instead of allocating DDB equally among pipes
> >  	 * distribute DDB based on resolution/width of the display.
> >  	 */
> > +	total_slice_mask = dbuf_slice_mask;
> >  	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
> >  		const struct drm_display_mode *adjusted_mode =
> >  			&crtc_state->hw.adjusted_mode;
> >  		enum pipe pipe = crtc->pipe;
> >  		int hdisplay, vdisplay;
> > +		u32 pipe_dbuf_slice_mask;
> >  
> > -		if (!crtc_state->hw.enable)
> > +		if (!crtc_state->hw.active)
> > +			continue;
> > +
> > +		pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
> > +							       active_pipes);
> > +
> > +		/*
> > +		 * According to BSpec pipe can share one dbuf slice with another
> > +		 * pipes or pipe can use multiple dbufs, in both cases we
> > +		 * account for other pipes only if they have exactly same mask.
> > +		 * However we need to account how many slices we should enable
> > +		 * in total.
> > +		 */
> > +		total_slice_mask |= pipe_dbuf_slice_mask;
> > +
> > +		/*
> > +		 * Do not account pipes using other slice sets
> > +		 * luckily as of current BSpec slice sets do not partially
> > +		 * intersect(pipes share either same one slice or same slice set
> > +		 * i.e no partial intersection), so it is enough to check for
> > +		 * equality for now.
> > +		 */
> > +		if (dbuf_slice_mask != pipe_dbuf_slice_mask)
> >  			continue;
> >  
> >  		drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
> > -		total_width += hdisplay;
> > +
> > +		total_width_in_range += hdisplay;
> >  
> >  		if (pipe < for_pipe)
> > -			width_before_pipe += hdisplay;
> > +			width_before_pipe_in_range += hdisplay;
> >  		else if (pipe == for_pipe)
> >  			pipe_width = hdisplay;
> >  	}
> >  
> > -	alloc->start = ddb_size * width_before_pipe / total_width;
> > -	alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
> > +	/*
> > +	 * FIXME: For now we always enable slice S1 as per
> > +	 * the Bspec display initialization sequence.
> > +	 */
> > +	intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1);
> > +
> > +	start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
> > +	end = ddb_range_size *
> > +		(width_before_pipe_in_range + pipe_width) / total_width_in_range;
> > +
> > +	alloc->start = offset + start;
> > +	alloc->end = offset + end;
> > +
> > +	DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
> > +		      alloc->start, alloc->end);
> > +	DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
> > +		      intel_state->enabled_dbuf_slices_mask,
> > +		      INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
> >  }
> >  
> >  static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
> > @@ -4119,6 +4210,262 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
> >  	return mul_fixed16(downscale_w, downscale_h);
> >  }
> >  
> > +struct dbuf_slice_conf_entry {
> > +	u8 active_pipes;
> > +	u8 dbuf_mask[I915_MAX_PIPES];
> > +};
> > +
> > +/*
> > + * Table taken from Bspec 12716
> > + * Pipes do have some preferred DBuf slice affinity,
> > + * plus there are some hardcoded requirements on how
> > + * those should be distributed for multipipe scenarios.
> > + * For more DBuf slices algorithm can get even more messy
> > + * and less readable, so decided to use a table almost
> > + * as is from BSpec itself - that way it is at least easier
> > + * to compare, change and check.
> > + */
> > +static struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
> > +/* Autogenerated with igt/tools/intel_dbuf_map tool: */
> > +{
> > +	{
> > +		.active_pipes = BIT(PIPE_A),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_B),
> > +		.dbuf_mask = {
> > +			[PIPE_B] = BIT(DBUF_S1)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_B] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +};
> > +
> > +/*
> > + * Table taken from Bspec 49255
> > + * Pipes do have some preferred DBuf slice affinity,
> > + * plus there are some hardcoded requirements on how
> > + * those should be distributed for multipipe scenarios.
> > + * For more DBuf slices algorithm can get even more messy
> > + * and less readable, so decided to use a table almost
> > + * as is from BSpec itself - that way it is at least easier
> > + * to compare, change and check.
> > + */
> > +static struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
> > +/* Autogenerated with igt/tools/intel_dbuf_map tool: */
> > +{
> > +	{
> > +		.active_pipes = BIT(PIPE_A),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_B),
> > +		.dbuf_mask = {
> > +			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S2),
> > +			[PIPE_B] = BIT(DBUF_S1)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_C] = BIT(DBUF_S1),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +};
> > +
> > +static u8 compute_dbuf_slices(enum pipe pipe,
> > +			      u32 active_pipes,
> > +			      const struct dbuf_slice_conf_entry *dbuf_slices,
> > +			      int size)
> > +{
> > +	int i;
> > +
> > +	for (i = 0; i < size; i++) {
> > +		if (dbuf_slices[i].active_pipes == active_pipes)
> > +			return dbuf_slices[i].dbuf_mask[pipe];
> > +	}
> > +	return 0;
> > +}
> > +
> > +/*
> > + * This function finds an entry with same enabled pipe configuration and
> > + * returns correspondent DBuf slice mask as stated in BSpec for particular
> > + * platform.
> > + */
> > +static u32 icl_compute_dbuf_slices(enum pipe pipe,
> > +				   u32 active_pipes)
> > +{
> > +	/*
> > +	 * FIXME: For ICL this is still a bit unclear as prev BSpec revision
> > +	 * required calculating "pipe ratio" in order to determine
> > +	 * if one or two slices can be used for single pipe configurations
> > +	 * as additional constraint to the existing table.
> > +	 * However based on recent info, it should be not "pipe ratio"
> > +	 * but rather ratio between pixel_rate and cdclk with additional
> > +	 * constants, so for now we are using only table until this is
> > +	 * clarified. Also this is the reason why crtc_state param is
> > +	 * still here - we will need it once those additional constraints
> > +	 * pop up.
> 
> The last part of this comment no longer applies --- crtc_state isn't
> still here.
> 
> I haven't heard any recent discussion with the hardware folks --- if the
> bspec is still unclear in this area, is it safe to try to enable the
> second dbuf slice at this time?  I'm worried that we might add
> regressions due to the incomplete hardware documentation.  Should we
> initially only enable it on TGL until the bspec gets clarified?  Or at
> least only enable it on ICL/EHL as a completely separate patch that's
> really easy to revert?  AFAIK, we don't yet have EHL machines in CI, so
> even if CI results come back clean on ICL, I'd still be a little bit
> nervous about regressing EHL/JSL.
> 
> > +	 */
> > +	return compute_dbuf_slices(pipe, active_pipes,
> > +				   icl_allowed_dbufs,
> > +				   ARRAY_SIZE(icl_allowed_dbufs));
> > +}
> > +
> > +static u32 tgl_compute_dbuf_slices(enum pipe pipe,
> > +				   u32 active_pipes)
> > +{
> > +	return compute_dbuf_slices(pipe, active_pipes,
> > +				   tgl_allowed_dbufs,
> > +				   ARRAY_SIZE(tgl_allowed_dbufs));
> > +}
> > +
> > +static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
> > +				  u32 active_pipes)
> 
> Given that this is basically a common frontend function that just
> dispatches to an appropriate per-platform handler, maybe we should
> rename this to an intel_ prefix rather than skl_?  Up to you.
> 
> Aside from this and the comments above,
> 
> Reviewed-by: Matt Roper <matthew.d.roper@intel.com>

BTW, I didn't re-check your *_allowed_dbufs[] tables this time since I
confirmed those on previous revisions.  I'm assuming they haven't been
altered since then.


Matt

> 
> > +{
> > +	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
> > +	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
> > +	enum pipe pipe = crtc->pipe;
> > +
> > +	if (IS_GEN(dev_priv, 12))
> > +		return tgl_compute_dbuf_slices(pipe,
> > +					       active_pipes);
> > +	else if (IS_GEN(dev_priv, 11))
> > +		return icl_compute_dbuf_slices(pipe,
> > +					       active_pipes);
> > +	/*
> > +	 * For anything else just return one slice yet.
> > +	 * Should be extended for other platforms.
> > +	 */
> > +	return BIT(DBUF_S1);
> > +}
> > +
> >  static u64
> >  skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
> >  			     const struct intel_plane_state *plane_state,
> > -- 
> > 2.24.1.485.gad05a3d8e5
> > 
> 
> -- 
> Matt Roper
> Graphics Software Engineer
> VTT-OSGC Platform Enablement
> Intel Corporation
> (916) 356-2795

-- 
Matt Roper
Graphics Software Engineer
VTT-OSGC Platform Enablement
Intel Corporation
(916) 356-2795

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [Intel-gfx] [PATCH v16 3/7] drm/i915: Introduce parameterized DBUF_CTL
  2020-01-28 17:35   ` Ville Syrjälä
@ 2020-01-29  8:41     ` Lisovskiy, Stanislav
  2020-01-29 11:47       ` Ville Syrjälä
  0 siblings, 1 reply; 25+ messages in thread
From: Lisovskiy, Stanislav @ 2020-01-29  8:41 UTC (permalink / raw)
  To: ville.syrjala; +Cc: intel-gfx

On Tue, 2020-01-28 at 19:35 +0200, Ville Syrjälä wrote:
> On Fri, Jan 24, 2020 at 10:44:52AM +0200, Stanislav Lisovskiy wrote:
> > Now start using parameterized DBUF_CTL instead
> > of hardcoded, this would allow shorter access
> > functions when reading or storing entire state.
> > 
> > Tried to implement it in a MMIO_PIPE manner, however
> > DBUF_CTL1 address is higher than DBUF_CTL2, which
> > implies that we have to now subtract from base
> > rather than add.
> > 
> > v2: - Removed unneeded DBUF_CTL_DIST and DBUF_CTL_ADDR
> >       macros. Started to use _PICK construct as suggested
> >       by Matt Roper.
> > 
> > v3: - DBUF_CTL_S* to _DBUF_CTL_S*, changed X to "slice"
> >       in macro(Ville Syrjälä)
> >     - Introduced enum for enumerating DBUF slices(Ville Syrjälä)
> > 
> > Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
> > Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
> > Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> > ---
> >  .../drm/i915/display/intel_display_power.c    | 30 +++++++++++--
> > ------
> >  .../drm/i915/display/intel_display_power.h    |  5 ++++
> >  drivers/gpu/drm/i915/i915_reg.h               |  7 +++--
> >  drivers/gpu/drm/i915/intel_pm.c               |  2 +-
> >  4 files changed, 28 insertions(+), 16 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c
> > b/drivers/gpu/drm/i915/display/intel_display_power.c
> > index 5e1c601f0f99..a59efb24be92 100644
> > --- a/drivers/gpu/drm/i915/display/intel_display_power.c
> > +++ b/drivers/gpu/drm/i915/display/intel_display_power.c
> > @@ -4418,9 +4418,11 @@ void icl_dbuf_slices_update(struct
> > drm_i915_private *dev_priv,
> >  		return;
> >  
> >  	if (req_slices > hw_enabled_slices)
> > -		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2,
> > true);
> > +		ret = intel_dbuf_slice_set(dev_priv,
> > +					   _DBUF_CTL_S(DBUF_S2), true);
> >  	else
> > -		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2,
> > false);
> > +		ret = intel_dbuf_slice_set(dev_priv,
> > +					   _DBUF_CTL_S(DBUF_S2),
> > false);
> >  
> >  	if (ret)
> >  		dev_priv->enabled_dbuf_slices_num = req_slices;
> > @@ -4428,14 +4430,16 @@ void icl_dbuf_slices_update(struct
> > drm_i915_private *dev_priv,
> >  
> >  static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
> >  {
> > -	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) |
> > DBUF_POWER_REQUEST);
> > -	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) |
> > DBUF_POWER_REQUEST);
> > -	POSTING_READ(DBUF_CTL_S2);
> > +	I915_WRITE(_DBUF_CTL_S(DBUF_S1),
> > +		   I915_READ(_DBUF_CTL_S(DBUF_S1)) |
> > DBUF_POWER_REQUEST);
> > +	I915_WRITE(_DBUF_CTL_S(DBUF_S2),
> > +		   I915_READ(_DBUF_CTL_S(DBUF_S2)) |
> > DBUF_POWER_REQUEST);
> > +	POSTING_READ(_DBUF_CTL_S(DBUF_S2));
> >  
> >  	udelay(10);
> >  
> > -	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
> > -	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
> > +	if (!(I915_READ(_DBUF_CTL_S(DBUF_S1)) & DBUF_POWER_STATE) ||
> > +	    !(I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE))
> >  		DRM_ERROR("DBuf power enable timeout\n");
> >  	else
> >  		/*
> > @@ -4447,14 +4451,16 @@ static void icl_dbuf_enable(struct
> > drm_i915_private *dev_priv)
> >  
> >  static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
> >  {
> > -	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) &
> > ~DBUF_POWER_REQUEST);
> > -	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) &
> > ~DBUF_POWER_REQUEST);
> > -	POSTING_READ(DBUF_CTL_S2);
> > +	I915_WRITE(_DBUF_CTL_S(DBUF_S1),
> > +		   I915_READ(_DBUF_CTL_S(DBUF_S1)) &
> > ~DBUF_POWER_REQUEST);
> > +	I915_WRITE(_DBUF_CTL_S(DBUF_S2),
> > +		   I915_READ(_DBUF_CTL_S(DBUF_S2)) &
> > ~DBUF_POWER_REQUEST);
> > +	POSTING_READ(_DBUF_CTL_S(DBUF_S2));
> >  
> >  	udelay(10);
> >  
> > -	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
> > -	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
> > +	if ((I915_READ(_DBUF_CTL_S(DBUF_S1)) & DBUF_POWER_STATE) ||
> > +	    (I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE))
> >  		DRM_ERROR("DBuf power disable timeout!\n");
> >  	else
> >  		/*
> > diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h
> > b/drivers/gpu/drm/i915/display/intel_display_power.h
> > index 2608a65af7fa..601e000ffd0d 100644
> > --- a/drivers/gpu/drm/i915/display/intel_display_power.h
> > +++ b/drivers/gpu/drm/i915/display/intel_display_power.h
> > @@ -307,6 +307,11 @@ intel_display_power_put_async(struct
> > drm_i915_private *i915,
> >  }
> >  #endif
> >  
> > +enum dbuf_slice {
> > +	DBUF_S1,
> > +	DBUF_S2,
> > +};
> > +
> >  #define with_intel_display_power(i915, domain, wf) \
> >  	for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
> >  	     intel_display_power_put_async((i915), (domain), (wf)),
> > (wf) = 0)
> > diff --git a/drivers/gpu/drm/i915/i915_reg.h
> > b/drivers/gpu/drm/i915/i915_reg.h
> > index b93c4c18f05c..625be54d3eae 100644
> > --- a/drivers/gpu/drm/i915/i915_reg.h
> > +++ b/drivers/gpu/drm/i915/i915_reg.h
> > @@ -7748,9 +7748,10 @@ enum {
> >  #define DISP_ARB_CTL2	_MMIO(0x45004)
> >  #define  DISP_DATA_PARTITION_5_6	(1 << 6)
> >  #define  DISP_IPC_ENABLE		(1 << 3)
> > -#define DBUF_CTL	_MMIO(0x45008)
> > -#define DBUF_CTL_S1	_MMIO(0x45008)
> > -#define DBUF_CTL_S2	_MMIO(0x44FE8)
> > +#define DBUF_CTL_ADDR1			0x45008
> > +#define DBUF_CTL_ADDR2			0x44FE8
> > +#define _DBUF_CTL_S(X)			_MMIO(_PICK_EVEN(X,
> > DBUF_CTL_ADDR1, DBUF_CTL_ADDR2))
> 
> That's not at all what I meant. Also the 'X' is still there despite
> what
> the changelog says.
> 
> #define _DBUF_CTL_S1	0x45008
> #define _DBUF_CTL_S2	0x44FE8
> #define DBUF_CTL_S(slice)	_MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1,
> _DBUF_CTL_S2))

My idea was to still be able to use DBUF_CTL_S1 and DBUF_CTL_S2. Of
course that is a bit redundant, but I thought the similar naming of
DBUF_CTL_S1 and DBUF_CTL_S(0) might confuse somebody into using the
wrong one. For example, with your version only the DBUF_CTL_S() macro
can be used, because _DBUF_CTL_S1/2 no longer use _MMIO.

But _now_ I get your point: I guess the leading "_" means it is not
meant to be used from outside, somewhat like private class members
in Python :)

I will change this once the rest of the patches are reviewed, since
that change does not affect the actual functionality and is purely
cosmetic.
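
To make that concrete, a minimal sketch of the convention being
suggested above (it assumes the existing _MMIO()/_PICK_EVEN() helpers
and the enum dbuf_slice introduced by this patch; the caller shown is
just an illustration based on icl_dbuf_enable()):

#define _DBUF_CTL_S1		0x45008
#define _DBUF_CTL_S2		0x44FE8
#define DBUF_CTL_S(slice)	_MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))

	/* only the _MMIO()-wrapped macro is meant for use outside i915_reg.h */
	if (!(I915_READ(DBUF_CTL_S(DBUF_S1)) & DBUF_POWER_STATE) ||
	    !(I915_READ(DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");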

Stan

> 
> 
> > +#define DBUF_CTL			_DBUF_CTL_S(0)
> >  #define  DBUF_POWER_REQUEST		(1 << 31)
> >  #define  DBUF_POWER_STATE		(1 << 30)
> >  #define GEN7_MSG_CTL	_MMIO(0x45010)
> > diff --git a/drivers/gpu/drm/i915/intel_pm.c
> > b/drivers/gpu/drm/i915/intel_pm.c
> > index 04f94057d6b3..b8d78e26515c 100644
> > --- a/drivers/gpu/drm/i915/intel_pm.c
> > +++ b/drivers/gpu/drm/i915/intel_pm.c
> > @@ -3660,7 +3660,7 @@ u8 intel_enabled_dbuf_slices_num(struct
> > drm_i915_private *dev_priv)
> >  	 * only that 1 slice enabled until we have a proper way for on-
> > demand
> >  	 * toggling of the second slice.
> >  	 */
> > -	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
> > +	if (0 && I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE)
> >  		enabled_dbuf_slices_num++;
> >  
> >  	return enabled_dbuf_slices_num;
> > -- 
> > 2.24.1.485.gad05a3d8e5
> 
> 

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [Intel-gfx] [PATCH v16 5/7] drm/i915: Correctly map DBUF slices to pipes
  2020-01-28 23:15   ` Matt Roper
  2020-01-28 23:38     ` Matt Roper
@ 2020-01-29  9:03     ` Lisovskiy, Stanislav
  1 sibling, 0 replies; 25+ messages in thread
From: Lisovskiy, Stanislav @ 2020-01-29  9:03 UTC (permalink / raw)
  To: Roper, Matthew D; +Cc: intel-gfx

On Tue, 2020-01-28 at 15:15 -0800, Matt Roper wrote:
> On Fri, Jan 24, 2020 at 10:44:54AM +0200, Stanislav Lisovskiy wrote:
> > Added proper DBuf slice mapping to correspondent
> > pipes, depending on pipe configuration as stated
> > in BSpec.
> > 
> > v2:
> >     - Remove unneeded braces
> >     - Stop using macro for DBuf assignments as
> >       it seems to reduce readability.
> > 
> > v3: Start using enabled slices mask in dev_priv
> > 
> > v4: Renamed "enabled_slices" used in dev_priv
> >     to "enabled_dbuf_slices_mask"(Matt Roper)
> > 
> > v5: - Removed redundant parameters from
> >       intel_get_ddb_size function.(Matt Roper)
> >     - Made i915_possible_dbuf_slices static(Matt Roper)
> >     - Renamed total_width into total_width_in_range
> >       so that it now reflects that this is not
> >       a total pipe width but the one in current
> >       dbuf slice allowed range for pipe.(Matt Roper)
> >     - Removed 4th pipe for ICL in DBuf assignment
> >       table(Matt Roper)
> >     - Fixed wrong DBuf slice in DBuf table for TGL
> >       (Matt Roper)
> >     - Added comment regarding why we currently not
> >       using pipe ratio for DBuf assignment for ICL
> > 
> > v6: - Changed u32 to unsigned int in
> >       icl_get_first_dbuf_slice_offset function signature
> >       (Ville Syrjälä)
> >     - Changed also u32 to u8 in dbuf slice mask structure
> >       (Ville Syrjälä)
> >     - Switched from DBUF_S1_BIT to enum + explicit
> >       BIT(DBUF_S1) access(Ville Syrjälä)
> >     - Switched to named initializers in DBuf assignment
> >       arrays(Ville Syrjälä)
> >     - DBuf assignment arrays now use autogeneration tool
> >       from
> >       https://patchwork.freedesktop.org/series/70493/
> >       to avoid typos.
> >     - Renamed i915_find_pipe_conf to *_compute_dbuf_slices
> >       (Ville Syrjälä)
> >     - Changed platforms ordering in skl_compute_dbuf_slices
> >       to be from newest to oldest(Ville Syrjälä)
> > 
> > v7: - Now ORing assigned DBuf slice config always with DBUF_S1
> >       because slice 1 has to be constantly powered on.
> >       (Ville Syrjälä)
> > 
> > v8: - Added pipe_name for neater printing(Ville Syrjälä)
> >     - Renamed width_before_pipe to width_before_pipe_in_range,
> >       to better reflect that now all the calculations are happening
> >       inside DBuf range allowed by current pipe configuration mask
> >       (Ville Syrjälä)
> >     - Shortened FIXME comment message, regarding constant ORing
> > with
> >       DBUF_S1(Ville Syrjälä)
> >     - Added .dbuf_mask named initializer to pipe assignment array
> >       (Ville Syrjälä)
> >     - Edited pipe assignment array to use only single DBuf slice
> >       for gen11 single pipe configurations, until "pipe ratio"
> >       thing is finally sorted out(Ville Syrjälä)
> >     - Removed unused parameter crtc_state for now(Ville Syrjälä)
> >       from icl/tgl_compute_dbuf_slices function
> > 
> > Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> > ---
> >  drivers/gpu/drm/i915/intel_pm.c | 385
> > ++++++++++++++++++++++++++++++--
> >  1 file changed, 366 insertions(+), 19 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/intel_pm.c
> > b/drivers/gpu/drm/i915/intel_pm.c
> > index ca5b34d297d9..92c4d4624092 100644
> > --- a/drivers/gpu/drm/i915/intel_pm.c
> > +++ b/drivers/gpu/drm/i915/intel_pm.c
> > @@ -3856,13 +3856,29 @@ bool intel_can_enable_sagv(struct
> > intel_atomic_state *state)
> >  	return true;
> >  }
> >  
> > -static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
> > -			      const struct intel_crtc_state
> > *crtc_state,
> > -			      const u64 total_data_rate,
> > -			      const int num_active)
> > +/*
> > + * Calculate initial DBuf slice offset, based on slice size
> > + * and mask(i.e if slice size is 1024 and second slice is enabled
> > + * offset would be 1024)
> > + */
> > +static unsigned int
> > +icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
> > +				u32 slice_size,
> > +				u32 ddb_size)
> > +{
> > +	unsigned int offset = 0;
> > +
> > +	if (!dbuf_slice_mask)
> > +		return 0;
> > +
> > +	offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
> > +
> > +	WARN_ON(offset >= ddb_size);
> > +	return offset;
> > +}
> > +
> > +static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
> >  {
> > -	struct drm_atomic_state *state = crtc_state->uapi.state;
> > -	struct intel_atomic_state *intel_state =
> > to_intel_atomic_state(state);
> >  	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
> >  
> >  	drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
> > @@ -3870,12 +3886,12 @@ static u16 intel_get_ddb_size(struct
> > drm_i915_private *dev_priv,
> >  	if (INTEL_GEN(dev_priv) < 11)
> >  		return ddb_size - 4; /* 4 blocks for bypass path
> > allocation */
> >  
> > -	intel_state->enabled_dbuf_slices_mask = BIT(DBUF_S1);
> > -	ddb_size /= 2;
> > -
> >  	return ddb_size;
> >  }
> >  
> > +static u8 skl_compute_dbuf_slices(const struct intel_crtc_state
> > *crtc_state,
> > +				  u32 active_pipes);
> > +
> >  static void
> >  skl_ddb_get_pipe_allocation_limits(struct drm_i915_private
> > *dev_priv,
> >  				   const struct intel_crtc_state
> > *crtc_state,
> > @@ -3887,10 +3903,17 @@ skl_ddb_get_pipe_allocation_limits(struct
> > drm_i915_private *dev_priv,
> >  	struct intel_atomic_state *intel_state =
> > to_intel_atomic_state(state);
> >  	struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
> >  	const struct intel_crtc *crtc;
> > -	u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
> > +	u32 pipe_width = 0, total_width_in_range = 0,
> > width_before_pipe_in_range = 0;
> >  	enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
> >  	u16 ddb_size;
> > +	u32 ddb_range_size;
> >  	u32 i;
> > +	u32 dbuf_slice_mask;
> > +	u32 active_pipes;
> > +	u32 offset;
> > +	u32 slice_size;
> > +	u32 total_slice_mask;
> > +	u32 start, end;
> >  
> >  	if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state-
> > >hw.active) {
> >  		alloc->start = 0;
> > @@ -3900,12 +3923,15 @@ skl_ddb_get_pipe_allocation_limits(struct
> > drm_i915_private *dev_priv,
> >  	}
> >  
> >  	if (intel_state->active_pipe_changes)
> > -		*num_active = hweight8(intel_state->active_pipes);
> > +		active_pipes = intel_state->active_pipes;
> >  	else
> > -		*num_active = hweight8(dev_priv->active_pipes);
> > +		active_pipes = dev_priv->active_pipes;
> > +
> > +	*num_active = hweight8(active_pipes);
> > +
> > +	ddb_size = intel_get_ddb_size(dev_priv);
> >  
> > -	ddb_size = intel_get_ddb_size(dev_priv, crtc_state,
> > total_data_rate,
> > -				      *num_active);
> > +	slice_size = ddb_size / INTEL_INFO(dev_priv)-
> > >num_supported_dbuf_slices;
> >  
> >  	/*
> >  	 * If the state doesn't change the active CRTC's or there is no
> > @@ -3924,31 +3950,96 @@ skl_ddb_get_pipe_allocation_limits(struct
> > drm_i915_private *dev_priv,
> >  		return;
> >  	}
> >  
> > +	/*
> > +	 * Get allowed DBuf slices for correspondent pipe and platform.
> > +	 */
> > +	dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
> > active_pipes);
> > +
> > +	DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n",
> > +		      dbuf_slice_mask,
> > +		      pipe_name(for_pipe), active_pipes);
> > +
> > +	/*
> > +	 * Figure out at which DBuf slice we start, i.e if we start at
> > Dbuf S2
> > +	 * and slice size is 1024, the offset would be 1024
> > +	 */
> > +	offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
> > +						 slice_size, ddb_size);
> > +
> > +	/*
> > +	 * Figure out total size of allowed DBuf slices, which is
> > basically
> > +	 * a number of allowed slices for that pipe multiplied by slice
> > size.
> > +	 * Inside of this
> > +	 * range ddb entries are still allocated in proportion to
> > display width.
> > +	 */
> > +	ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
> > +
> >  	/*
> >  	 * Watermark/ddb requirement highly depends upon width of the
> >  	 * framebuffer, So instead of allocating DDB equally among
> > pipes
> >  	 * distribute DDB based on resolution/width of the display.
> >  	 */
> > +	total_slice_mask = dbuf_slice_mask;
> >  	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state,
> > i) {
> >  		const struct drm_display_mode *adjusted_mode =
> >  			&crtc_state->hw.adjusted_mode;
> >  		enum pipe pipe = crtc->pipe;
> >  		int hdisplay, vdisplay;
> > +		u32 pipe_dbuf_slice_mask;
> >  
> > -		if (!crtc_state->hw.enable)
> > +		if (!crtc_state->hw.active)
> > +			continue;
> > +
> > +		pipe_dbuf_slice_mask =
> > skl_compute_dbuf_slices(crtc_state,
> > +							       active_p
> > ipes);
> > +
> > +		/*
> > +		 * According to BSpec pipe can share one dbuf slice
> > with another
> > +		 * pipes or pipe can use multiple dbufs, in both cases
> > we
> > +		 * account for other pipes only if they have exactly
> > same mask.
> > +		 * However we need to account how many slices we should
> > enable
> > +		 * in total.
> > +		 */
> > +		total_slice_mask |= pipe_dbuf_slice_mask;
> > +
> > +		/*
> > +		 * Do not account pipes using other slice sets
> > +		 * luckily as of current BSpec slice sets do not
> > partially
> > +		 * intersect(pipes share either same one slice or same
> > slice set
> > +		 * i.e no partial intersection), so it is enough to
> > check for
> > +		 * equality for now.
> > +		 */
> > +		if (dbuf_slice_mask != pipe_dbuf_slice_mask)
> >  			continue;
> >  
> >  		drm_mode_get_hv_timing(adjusted_mode, &hdisplay,
> > &vdisplay);
> > -		total_width += hdisplay;
> > +
> > +		total_width_in_range += hdisplay;
> >  
> >  		if (pipe < for_pipe)
> > -			width_before_pipe += hdisplay;
> > +			width_before_pipe_in_range += hdisplay;
> >  		else if (pipe == for_pipe)
> >  			pipe_width = hdisplay;
> >  	}
> >  
> > -	alloc->start = ddb_size * width_before_pipe / total_width;
> > -	alloc->end = ddb_size * (width_before_pipe + pipe_width) /
> > total_width;
> > +	/*
> > +	 * FIXME: For now we always enable slice S1 as per
> > +	 * the Bspec display initialization sequence.
> > +	 */
> > +	intel_state->enabled_dbuf_slices_mask = total_slice_mask |
> > BIT(DBUF_S1);
> > +
> > +	start = ddb_range_size * width_before_pipe_in_range /
> > total_width_in_range;
> > +	end = ddb_range_size *
> > +		(width_before_pipe_in_range + pipe_width) /
> > total_width_in_range;
> > +
> > +	alloc->start = offset + start;
> > +	alloc->end = offset + end;
> > +
> > +	DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
> > +		      alloc->start, alloc->end);
> > +	DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
> > +		      intel_state->enabled_dbuf_slices_mask,
> > +		      INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
> >  }
> >  
> >  static int skl_compute_wm_params(const struct intel_crtc_state
> > *crtc_state,
> > @@ -4119,6 +4210,262 @@ skl_plane_downscale_amount(const struct
> > intel_crtc_state *crtc_state,
> >  	return mul_fixed16(downscale_w, downscale_h);
> >  }
> >  
> > +struct dbuf_slice_conf_entry {
> > +	u8 active_pipes;
> > +	u8 dbuf_mask[I915_MAX_PIPES];
> > +};
> > +
> > +/*
> > + * Table taken from Bspec 12716
> > + * Pipes do have some preferred DBuf slice affinity,
> > + * plus there are some hardcoded requirements on how
> > + * those should be distributed for multipipe scenarios.
> > + * For more DBuf slices algorithm can get even more messy
> > + * and less readable, so decided to use a table almost
> > + * as is from BSpec itself - that way it is at least easier
> > + * to compare, change and check.
> > + */
> > +static struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
> > +/* Autogenerated with igt/tools/intel_dbuf_map tool: */
> > +{
> > +	{
> > +		.active_pipes = BIT(PIPE_A),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_B),
> > +		.dbuf_mask = {
> > +			[PIPE_B] = BIT(DBUF_S1)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_B] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) |
> > BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +};
> > +
> > +/*
> > + * Table taken from Bspec 49255
> > + * Pipes do have some preferred DBuf slice affinity,
> > + * plus there are some hardcoded requirements on how
> > + * those should be distributed for multipipe scenarios.
> > + * For more DBuf slices algorithm can get even more messy
> > + * and less readable, so decided to use a table almost
> > + * as is from BSpec itself - that way it is at least easier
> > + * to compare, change and check.
> > + */
> > +static struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
> > +/* Autogenerated with igt/tools/intel_dbuf_map tool: */
> > +{
> > +	{
> > +		.active_pipes = BIT(PIPE_A),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_B),
> > +		.dbuf_mask = {
> > +			[PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S2),
> > +			[PIPE_B] = BIT(DBUF_S1)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) |
> > BIT(PIPE_C),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) |
> > BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_C] = BIT(DBUF_S1),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_C) |
> > BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_B) | BIT(PIPE_C) |
> > BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +	{
> > +		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C)
> > | BIT(PIPE_D),
> > +		.dbuf_mask = {
> > +			[PIPE_A] = BIT(DBUF_S1),
> > +			[PIPE_B] = BIT(DBUF_S1),
> > +			[PIPE_C] = BIT(DBUF_S2),
> > +			[PIPE_D] = BIT(DBUF_S2)
> > +		}
> > +	},
> > +};
> > +
> > +static u8 compute_dbuf_slices(enum pipe pipe,
> > +			      u32 active_pipes,
> > +			      const struct dbuf_slice_conf_entry
> > *dbuf_slices,
> > +			      int size)
> > +{
> > +	int i;
> > +
> > +	for (i = 0; i < size; i++) {
> > +		if (dbuf_slices[i].active_pipes == active_pipes)
> > +			return dbuf_slices[i].dbuf_mask[pipe];
> > +	}
> > +	return 0;
> > +}
> > +
> > +/*
> > + * This function finds an entry with same enabled pipe
> > configuration and
> > + * returns correspondent DBuf slice mask as stated in BSpec for
> > particular
> > + * platform.
> > + */
> > +static u32 icl_compute_dbuf_slices(enum pipe pipe,
> > +				   u32 active_pipes)
> > +{
> > +	/*
> > +	 * FIXME: For ICL this is still a bit unclear as prev BSpec
> > revision
> > +	 * required calculating "pipe ratio" in order to determine
> > +	 * if one or two slices can be used for single pipe
> > configurations
> > +	 * as additional constraint to the existing table.
> > +	 * However based on recent info, it should be not "pipe ratio"
> > +	 * but rather ratio between pixel_rate and cdclk with
> > additional
> > +	 * constants, so for now we are using only table until this is
> > +	 * clarified. Also this is the reason why crtc_state param is
> > +	 * still here - we will need it once those additional
> > constraints
> > +	 * pop up.
> 
> The last part of this comment no longer applies --- crtc_state isn't
> still here.
> 
> I haven't heard any recent discussion with the hardware folks --- if
> the
> bspec is still unclear in this area, is it safe to try to enable the
> second dbuf slice at this time?  I'm worried that we might add
> regressions due to the incomplete hardware documentation.  Should we
> initially only enable it on TGL until the bspec gets clarified?  Or
> at
> least only enable it on ICL/EHL as a completely separate patch that's
> really easy to revert?  AFAIK, we don't yet have EHL machines in CI,
> so
> even if CI results come back clean on ICL, I'd still be a little bit
> nervous about regressing EHL/JSL.

It is safe to enable the second DBuf slice, and for multipipe
scenarios it is even preferable. For single-pipe configurations I have
changed the icl_allowed_dbufs table to use only the single nearest
slice, exactly so that we don't get into this situation. Otherwise,
the second DBuf slice makes us more robust against underruns.

> 
> > +	 */
> > +	return compute_dbuf_slices(pipe, active_pipes,
> > +				   icl_allowed_dbufs,
> > +				   ARRAY_SIZE(icl_allowed_dbufs));
> > +}
> > +
> > +static u32 tgl_compute_dbuf_slices(enum pipe pipe,
> > +				   u32 active_pipes)
> > +{
> > +	return compute_dbuf_slices(pipe, active_pipes,
> > +				   tgl_allowed_dbufs,
> > +				   ARRAY_SIZE(tgl_allowed_dbufs));
> > +}
> > +
> > +static u8 skl_compute_dbuf_slices(const struct intel_crtc_state
> > *crtc_state,
> > +				  u32 active_pipes)
> 
> Given that this is basically a common frontend function that just
> dispatches to an appropriate per-platform handler, maybe we should
> rename this to an intel_ prefix rather than skl_?  Up to you.

I think it was initially i915_possible_dbuf_slices, and I then renamed
it to this based on a request. As far as I can see we quite often use
the skl_ prefix for functionality that is valid from skl+ onwards,
like skl_compute_wm/ddb and so on. It could be intel_ as well, I'm
fine with that, but I would leave it as is for now unless someone is
_strongly_ against :)

> 
> Aside from this and the comments above,
> 
> Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
> 
> > +{
> > +	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
> > +	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
> > +	enum pipe pipe = crtc->pipe;
> > +
> > +	if (IS_GEN(dev_priv, 12))
> > +		return tgl_compute_dbuf_slices(pipe,
> > +					       active_pipes);
> > +	else if (IS_GEN(dev_priv, 11))
> > +		return icl_compute_dbuf_slices(pipe,
> > +					       active_pipes);
> > +	/*
> > +	 * For anything else just return one slice yet.
> > +	 * Should be extended for other platforms.
> > +	 */
> > +	return BIT(DBUF_S1);
> > +}
> > +
> >  static u64
> >  skl_plane_relative_data_rate(const struct intel_crtc_state
> > *crtc_state,
> >  			     const struct intel_plane_state
> > *plane_state,
> > -- 
> > 2.24.1.485.gad05a3d8e5
> > 
> 
> 

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [Intel-gfx] [PATCH v16 6/7] drm/i915: Protect intel_dbuf_slices_update with mutex
  2020-01-28 23:33   ` Matt Roper
@ 2020-01-29  9:22     ` Lisovskiy, Stanislav
  2020-01-31 15:22     ` Ville Syrjälä
  1 sibling, 0 replies; 25+ messages in thread
From: Lisovskiy, Stanislav @ 2020-01-29  9:22 UTC (permalink / raw)
  To: Roper, Matthew D; +Cc: intel-gfx

On Tue, 2020-01-28 at 15:33 -0800, Matt Roper wrote:
> On Fri, Jan 24, 2020 at 10:44:55AM +0200, Stanislav Lisovskiy wrote:
> > Now using power_domain mutex to protect from race condition, which
> > can occur because intel_dbuf_slices_update might be running in
> > parallel to gen9_dc_off_power_well_enable being called from
> > intel_dp_detect for instance, which causes assertion triggered by
> > race condition, as gen9_assert_dbuf_enabled might preempt this
> > when registers were already updated, while dev_priv was not.
> 
> I may be overlooking something, but I think your next patch already
> takes care of this by ensuring we only do dbuf updates during
> modesets.
> We already had POWER_DOMAIN_MODESET in our various
> DC_OFF_POWER_DOMAINS
> definitions which would ensure that the "DC off" power well is
> enabled
> (and DC states themselves are disabled) for the entire duration of
> the
> modeset process.

I probably should have clarified this better in the commit message.
With the previous patch series I ran into an assertion failure which
turned out to be a consequence of two bugs.

The first problem was that we tried to update the dbuf slices to 0 in
a non-modeset commit which didn't have any crtcs in the state. That
was wrong, so to prevent it the next patch now checks that we are
actually doing a modeset; otherwise we would occasionally update the
dbuf mask to 0, which according to BSpec should only be done by
icl_dbuf_disable. Also, if the commit doesn't have any crtcs in the
state, we should not update the global state in dev_priv, because
access is then not serialized: per Ville's scheme, to read global
state we need at least one crtc grabbed, and to write it we need to
grab all crtcs.
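
In code terms, that last rule is what the state->modeset check in
patch 7/7 (quoted earlier in the thread) enforces; a minimal sketch of
the relevant part of the commit path, with everything between the two
calls elided:

	/*
	 * Only a full modeset holds every crtc lock, so only then may
	 * the global DBuf state in dev_priv be written.
	 */
	if (state->modeset)
		icl_dbuf_slice_pre_update(state);

	/* ... commit_modeset_enables(), plane updates, etc. ... */

	if (state->modeset)
		icl_dbuf_slice_post_update(state);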

The second problem was a race condition in the driver, which this
patch takes care of. After the device was suspended/resumed we had a
no-op commit which, due to the previous problem, was updating the dbuf
slices to 0: it called icl_dbuf_slices_update, which first wrote that
value to the DBUF_CTL registers and only then updated dev_priv.
However, in that window intel_dp_detect could be called in parallel
from an hpd irq, which in turn enabled the DC off power well and
triggered the assertion in gen9_assert_dbuf_enabled, which now checks
whether the dev_priv slices mask matches the actual hardware. Because
icl_dbuf_slices_update had been preempted in the middle, the state
didn't match. I reproduced and confirmed this by adding an artificial
delay to the update plus some additional traces.
The most trivial solution, as discussed with Ville, was to take the
power domains lock here: that protects us against racing with the DC
off power well enabling, because that path also takes the same mutex
first.

Previously we didn't hit this issue, because icl_dbuf_slices_update
simply updated the registers themselves, there was no corresponding
global state in dev_priv, and we never updated the slice configuration
during a modeset.
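
Put together, the function from the hunk quoted below ends up looking
roughly like this; a sketch only, since the hunk does not show the
full body (in particular the slice-enable argument passed to
intel_dbuf_slice_set is an assumption here):

void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
			    u8 req_slices)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
	int i;

	/*
	 * Same mutex the DC off power well path takes, so
	 * gen9_assert_dbuf_enabled() cannot run between the register
	 * writes and the dev_priv update below.
	 */
	mutex_lock(&power_domains->lock);

	for (i = 0; i < max_slices; i++)
		intel_dbuf_slice_set(dev_priv, _DBUF_CTL_S(i),
				     req_slices & BIT(i));

	/* software state is updated under the same lock as the registers */
	dev_priv->enabled_dbuf_slices_mask = req_slices;

	mutex_unlock(&power_domains->lock);
}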

Stan

> 
> If we need this, I'm not sure whether it's a good idea to use
> power_domains->lock rather than a new, dedicated lock.  Anything that
> touches power domains in any manner grabs this lock, even though we
> only
> really care about it for stopping races with the specific "DC off"
> power
> well.
> 
> Also, if we bisect to the point right before these last two patches,
> don't we have a problem since there's a point in the git history
> where
> we potentially face a race?
> 
> 
> Matt
> 
> > 
> > Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> > ---
> >  drivers/gpu/drm/i915/display/intel_display_power.c | 12
> > ++++++++++++
> >  1 file changed, 12 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c
> > b/drivers/gpu/drm/i915/display/intel_display_power.c
> > index 96b38252578b..99ddc21e004c 100644
> > --- a/drivers/gpu/drm/i915/display/intel_display_power.c
> > +++ b/drivers/gpu/drm/i915/display/intel_display_power.c
> > @@ -4404,12 +4404,22 @@ void icl_dbuf_slices_update(struct
> > drm_i915_private *dev_priv,
> >  {
> >  	int i;
> >  	int max_slices = INTEL_INFO(dev_priv)-
> > >num_supported_dbuf_slices;
> > +	struct i915_power_domains *power_domains = &dev_priv-
> > >power_domains;
> >  
> >  	WARN(hweight8(req_slices) > max_slices,
> >  	     "Invalid number of dbuf slices requested\n");
> >  
> >  	DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
> >  
> > +	/*
> > +	 * Might be running this in parallel to
> > gen9_dc_off_power_well_enable
> > +	 * being called from intel_dp_detect for instance,
> > +	 * which causes assertion triggered by race condition,
> > +	 * as gen9_assert_dbuf_enabled might preempt this when
> > registers
> > +	 * were already updated, while dev_priv was not.
> > +	 */
> > +	mutex_lock(&power_domains->lock);
> > +
> >  	for (i = 0; i < max_slices; i++) {
> >  		intel_dbuf_slice_set(dev_priv,
> >  				     _DBUF_CTL_S(i),
> > @@ -4417,6 +4427,8 @@ void icl_dbuf_slices_update(struct
> > drm_i915_private *dev_priv,
> >  	}
> >  
> >  	dev_priv->enabled_dbuf_slices_mask = req_slices;
> > +
> > +	mutex_unlock(&power_domains->lock);
> >  }
> >  
> >  static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
> > -- 
> > 2.24.1.485.gad05a3d8e5
> > 
> 
> 

^ permalink raw reply	[flat|nested] 25+ messages in thread

* Re: [Intel-gfx] [PATCH v16 3/7] drm/i915: Introduce parameterized DBUF_CTL
  2020-01-29  8:41     ` Lisovskiy, Stanislav
@ 2020-01-29 11:47       ` Ville Syrjälä
  0 siblings, 0 replies; 25+ messages in thread
From: Ville Syrjälä @ 2020-01-29 11:47 UTC (permalink / raw)
  To: Lisovskiy, Stanislav; +Cc: intel-gfx

On Wed, Jan 29, 2020 at 08:41:34AM +0000, Lisovskiy, Stanislav wrote:
> On Tue, 2020-01-28 at 19:35 +0200, Ville Syrjälä wrote:
> > On Fri, Jan 24, 2020 at 10:44:52AM +0200, Stanislav Lisovskiy wrote:
> > > Now start using parameterized DBUF_CTL instead
> > > of hardcoded, this would allow shorter access
> > > functions when reading or storing entire state.
> > > 
> > > Tried to implement it in a MMIO_PIPE manner, however
> > > DBUF_CTL1 address is higher than DBUF_CTL2, which
> > > implies that we have to now subtract from base
> > > rather than add.
> > > 
> > > v2: - Removed unneeded DBUF_CTL_DIST and DBUF_CTL_ADDR
> > >       macros. Started to use _PICK construct as suggested
> > >       by Matt Roper.
> > > 
> > > v3: - DBUF_CTL_S* to _DBUF_CTL_S*, changed X to "slice"
> > >       in macro(Ville Syrjälä)
> > >     - Introduced enum for enumerating DBUF slices(Ville Syrjälä)
> > > 
> > > Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
> > > Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
> > > Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> > > ---
> > >  .../drm/i915/display/intel_display_power.c    | 30 +++++++++++--
> > > ------
> > >  .../drm/i915/display/intel_display_power.h    |  5 ++++
> > >  drivers/gpu/drm/i915/i915_reg.h               |  7 +++--
> > >  drivers/gpu/drm/i915/intel_pm.c               |  2 +-
> > >  4 files changed, 28 insertions(+), 16 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c
> > > b/drivers/gpu/drm/i915/display/intel_display_power.c
> > > index 5e1c601f0f99..a59efb24be92 100644
> > > --- a/drivers/gpu/drm/i915/display/intel_display_power.c
> > > +++ b/drivers/gpu/drm/i915/display/intel_display_power.c
> > > @@ -4418,9 +4418,11 @@ void icl_dbuf_slices_update(struct
> > > drm_i915_private *dev_priv,
> > >  		return;
> > >  
> > >  	if (req_slices > hw_enabled_slices)
> > > -		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
> > > +		ret = intel_dbuf_slice_set(dev_priv,
> > > +					   _DBUF_CTL_S(DBUF_S2), true);
> > >  	else
> > > -		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
> > > +		ret = intel_dbuf_slice_set(dev_priv,
> > > +					   _DBUF_CTL_S(DBUF_S2), false);
> > >  
> > >  	if (ret)
> > >  		dev_priv->enabled_dbuf_slices_num = req_slices;
> > > @@ -4428,14 +4430,16 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
> > >  
> > >  static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
> > >  {
> > > -	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
> > > -	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
> > > -	POSTING_READ(DBUF_CTL_S2);
> > > +	I915_WRITE(_DBUF_CTL_S(DBUF_S1),
> > > +		   I915_READ(_DBUF_CTL_S(DBUF_S1)) | DBUF_POWER_REQUEST);
> > > +	I915_WRITE(_DBUF_CTL_S(DBUF_S2),
> > > +		   I915_READ(_DBUF_CTL_S(DBUF_S2)) | DBUF_POWER_REQUEST);
> > > +	POSTING_READ(_DBUF_CTL_S(DBUF_S2));
> > >  
> > >  	udelay(10);
> > >  
> > > -	if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
> > > -	    !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
> > > +	if (!(I915_READ(_DBUF_CTL_S(DBUF_S1)) & DBUF_POWER_STATE) ||
> > > +	    !(I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE))
> > >  		DRM_ERROR("DBuf power enable timeout\n");
> > >  	else
> > >  		/*
> > > @@ -4447,14 +4451,16 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
> > >  
> > >  static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
> > >  {
> > > -	I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
> > > -	I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
> > > -	POSTING_READ(DBUF_CTL_S2);
> > > +	I915_WRITE(_DBUF_CTL_S(DBUF_S1),
> > > +		   I915_READ(_DBUF_CTL_S(DBUF_S1)) & ~DBUF_POWER_REQUEST);
> > > +	I915_WRITE(_DBUF_CTL_S(DBUF_S2),
> > > +		   I915_READ(_DBUF_CTL_S(DBUF_S2)) & ~DBUF_POWER_REQUEST);
> > > +	POSTING_READ(_DBUF_CTL_S(DBUF_S2));
> > >  
> > >  	udelay(10);
> > >  
> > > -	if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
> > > -	    (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
> > > +	if ((I915_READ(_DBUF_CTL_S(DBUF_S1)) & DBUF_POWER_STATE) ||
> > > +	    (I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE))
> > >  		DRM_ERROR("DBuf power disable timeout!\n");
> > >  	else
> > >  		/*
> > > diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
> > > index 2608a65af7fa..601e000ffd0d 100644
> > > --- a/drivers/gpu/drm/i915/display/intel_display_power.h
> > > +++ b/drivers/gpu/drm/i915/display/intel_display_power.h
> > > @@ -307,6 +307,11 @@ intel_display_power_put_async(struct drm_i915_private *i915,
> > >  }
> > >  #endif
> > >  
> > > +enum dbuf_slice {
> > > +	DBUF_S1,
> > > +	DBUF_S2,
> > > +};
> > > +
> > >  #define with_intel_display_power(i915, domain, wf) \
> > >  	for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
> > >  	     intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
> > > diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> > > index b93c4c18f05c..625be54d3eae 100644
> > > --- a/drivers/gpu/drm/i915/i915_reg.h
> > > +++ b/drivers/gpu/drm/i915/i915_reg.h
> > > @@ -7748,9 +7748,10 @@ enum {
> > >  #define DISP_ARB_CTL2	_MMIO(0x45004)
> > >  #define  DISP_DATA_PARTITION_5_6	(1 << 6)
> > >  #define  DISP_IPC_ENABLE		(1 << 3)
> > > -#define DBUF_CTL	_MMIO(0x45008)
> > > -#define DBUF_CTL_S1	_MMIO(0x45008)
> > > -#define DBUF_CTL_S2	_MMIO(0x44FE8)
> > > +#define DBUF_CTL_ADDR1			0x45008
> > > +#define DBUF_CTL_ADDR2			0x44FE8
> > > +#define _DBUF_CTL_S(X)			_MMIO(_PICK_EVEN(X, DBUF_CTL_ADDR1, DBUF_CTL_ADDR2))
> > 
> > That's not at all what I meant. Also the 'X' is still there despite
> > what the changelog says.
> > 
> > #define _DBUF_CTL_S1	0x45008
> > #define _DBUF_CTL_S2	0x44FE8
> > #define DBUF_CTL_S(slice)	_MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
> 
> My idea was to still be able to use DBUF_CTL_S1 and DBUF_CTL_S2. Of
> course this is a bit redundant, but the similar naming of DBUF_CTL_S1
> and DBUF_CTL_S(0) might confuse somebody into using the wrong one.

Just don't. Aliases are confusing.

> For example, in your version we can now only use the DBUF_CTL_S()
> macro, because _DBUF_CTL_S1/2 no longer use _MMIO.
> 
> But _now_ I get your point: I guess the leading "_" means it is not
> meant to be used from outside. Something like private class members
> in Python :)
> 
> I will change this once the rest of the patches are reviewed, because
> that change does not affect the actual functionality anyway; it is
> purely cosmetic.
> 
> Stan
> 
> > 
> > 
> > > +#define DBUF_CTL			_DBUF_CTL_S(0)
> > >  #define  DBUF_POWER_REQUEST		(1 << 31)
> > >  #define  DBUF_POWER_STATE		(1 << 30)
> > >  #define GEN7_MSG_CTL	_MMIO(0x45010)
> > > diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> > > index 04f94057d6b3..b8d78e26515c 100644
> > > --- a/drivers/gpu/drm/i915/intel_pm.c
> > > +++ b/drivers/gpu/drm/i915/intel_pm.c
> > > @@ -3660,7 +3660,7 @@ u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
> > >  	 * only that 1 slice enabled until we have a proper way for on-demand
> > >  	 * toggling of the second slice.
> > >  	 */
> > > -	if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
> > > +	if (0 && I915_READ(_DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE)
> > >  		enabled_dbuf_slices_num++;
> > >  
> > >  	return enabled_dbuf_slices_num;
> > > -- 
> > > 2.24.1.485.gad05a3d8e5
> > 
> > 

-- 
Ville Syrjälä
Intel
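
For reference, a minimal sketch of how the parameterized form suggested
above reads at a call site, assuming the DBUF_CTL_S()/_PICK_EVEN
definition quoted earlier plus the I915_READ/I915_WRITE/POSTING_READ
helpers and the DBUF_POWER_REQUEST/DBUF_POWER_STATE bits from the
quoted diff (illustrative only, not part of the series):

	enum dbuf_slice slice;

	/* Request power for every slice through the one parameterized macro. */
	for (slice = DBUF_S1; slice <= DBUF_S2; slice++)
		I915_WRITE(DBUF_CTL_S(slice),
			   I915_READ(DBUF_CTL_S(slice)) | DBUF_POWER_REQUEST);

	/* Flush the writes before polling DBUF_POWER_STATE. */
	POSTING_READ(DBUF_CTL_S(DBUF_S2));

This is the "shorter access functions" benefit from the commit message:
the loop bound can later come from num_supported_dbuf_slices instead of
naming each DBUF_CTL_Sn register explicitly.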

* Re: [Intel-gfx] [PATCH v16 7/7] drm/i915: Update dbuf slices only with full modeset
  2020-01-28 23:37   ` Matt Roper
@ 2020-01-31 15:10     ` Ville Syrjälä
  0 siblings, 0 replies; 25+ messages in thread
From: Ville Syrjälä @ 2020-01-31 15:10 UTC (permalink / raw)
  To: Matt Roper; +Cc: intel-gfx

On Tue, Jan 28, 2020 at 03:37:06PM -0800, Matt Roper wrote:
> On Fri, Jan 24, 2020 at 10:44:56AM +0200, Stanislav Lisovskiy wrote:
> > During a full modeset, global state (i.e. dev_priv) is protected
> > by locking the crtcs in the state; otherwise global state is not
> > serialized. Also, if it is not a full modeset, we don't need to
> > change the DBuf slice configuration anyway, as the pipe
> > configuration doesn't change.
> 
> Looks correct, but don't we need this earlier so that we don't have a
> bad bisection point in the git history (assuming we rely on this rather
> than the extra locking from the previous patch to cover the DC off
> race)?

Could perhaps just squash into the patch that moves these calls here.
Or move just after that patch in the series.

Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>

> 
> 
> Matt
> 
> > 
> > Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> > ---
> >  drivers/gpu/drm/i915/display/intel_display.c | 6 ++++--
> >  1 file changed, 4 insertions(+), 2 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
> > index 1c957df5c28c..888a9e94032e 100644
> > --- a/drivers/gpu/drm/i915/display/intel_display.c
> > +++ b/drivers/gpu/drm/i915/display/intel_display.c
> > @@ -15373,7 +15373,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
> >  		intel_encoders_update_prepare(state);
> >  
> >  	/* Enable all new slices, we might need */
> > -	icl_dbuf_slice_pre_update(state);
> > +	if (state->modeset)
> > +		icl_dbuf_slice_pre_update(state);
> >  
> >  	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
> >  	dev_priv->display.commit_modeset_enables(state);
> > @@ -15432,7 +15433,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
> >  	}
> >  
> >  	/* Disable all slices, we don't need */
> > -	icl_dbuf_slice_post_update(state);
> > +	if (state->modeset)
> > +		icl_dbuf_slice_post_update(state);
> >  
> >  	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
> >  		intel_post_plane_update(state, crtc);
> > -- 
> > 2.24.1.485.gad05a3d8e5
> > 
> 
> -- 
> Matt Roper
> Graphics Software Engineer
> VTT-OSGC Platform Enablement
> Intel Corporation
> (916) 356-2795

-- 
Ville Syrjälä
Intel

* Re: [Intel-gfx] [PATCH v16 6/7] drm/i915: Protect intel_dbuf_slices_update with mutex
  2020-01-28 23:33   ` Matt Roper
  2020-01-29  9:22     ` Lisovskiy, Stanislav
@ 2020-01-31 15:22     ` Ville Syrjälä
  1 sibling, 0 replies; 25+ messages in thread
From: Ville Syrjälä @ 2020-01-31 15:22 UTC (permalink / raw)
  To: Matt Roper; +Cc: intel-gfx

On Tue, Jan 28, 2020 at 03:33:11PM -0800, Matt Roper wrote:
> On Fri, Jan 24, 2020 at 10:44:55AM +0200, Stanislav Lisovskiy wrote:
> > Now using the power_domains mutex to protect against a race condition
> > which can occur because intel_dbuf_slices_update might be running in
> > parallel to gen9_dc_off_power_well_enable being called from
> > intel_dp_detect, for instance. The race triggers the assertion, as
> > gen9_assert_dbuf_enabled might preempt this when the registers were
> > already updated while dev_priv was not.
> 
> I may be overlooking something, but I think your next patch already
> takes care of this by ensuring we only do dbuf updates during modesets.
> We already had POWER_DOMAIN_MODESET in our various DC_OFF_POWER_DOMAINS
> definitions which would ensure that the "DC off" power well is enabled
> (and DC states themselves are disabled) for the entire duration of the
> modeset process.

Hmm. That's assuming we only do the dbuf assert from the dc off
power well hook. Can't remember if that's the case. If that's not
the only place then we probably miss the lock somewhere else too.

> 
> If we need this, I'm not sure whether it's a good idea to use
> power_domains->lock rather than a new, dedicated lock.  Anything that
> touches power domains in any manner grabs this lock, even though we only
> really care about it for stopping races with the specific "DC off" power
> well.

Separate lock feels a bit overkill to me for something small
like this.

> 
> Also, if we bisect to the point right before these last two patches,
> don't we have a problem since there's a point in the git history where
> we potentially face a race?

Yeah should be earlier in the series I guess. If we need it at all,
which as you point out maybe we don't with the state->modeset checks.
Though maybe we want to get rid of that state->modeset dependency.
I *think* we should start using the global state stuff for dbuf
management, but haven't really looked at the details to figure out
how to organize it in the end. So at that point we may not anymore
be holding the dc off reference (although one might argue that we
should always hold that for dbuf programming so the "wait for it
to enable" thing can't be perturbed by dc transitions).

Anyways for now this seems fine by me
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>

> 
> 
> Matt
> 
> > 
> > Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> > ---
> >  drivers/gpu/drm/i915/display/intel_display_power.c | 12 ++++++++++++
> >  1 file changed, 12 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
> > index 96b38252578b..99ddc21e004c 100644
> > --- a/drivers/gpu/drm/i915/display/intel_display_power.c
> > +++ b/drivers/gpu/drm/i915/display/intel_display_power.c
> > @@ -4404,12 +4404,22 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
> >  {
> >  	int i;
> >  	int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
> > +	struct i915_power_domains *power_domains = &dev_priv->power_domains;
> >  
> >  	WARN(hweight8(req_slices) > max_slices,
> >  	     "Invalid number of dbuf slices requested\n");
> >  
> >  	DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
> >  
> > +	/*
> > +	 * Might be running this in parallel to gen9_dc_off_power_well_enable
> > +	 * being called from intel_dp_detect for instance,
> > +	 * which causes assertion triggered by race condition,
> > +	 * as gen9_assert_dbuf_enabled might preempt this when registers
> > +	 * were already updated, while dev_priv was not.
> > +	 */
> > +	mutex_lock(&power_domains->lock);
> > +
> >  	for (i = 0; i < max_slices; i++) {
> >  		intel_dbuf_slice_set(dev_priv,
> >  				     _DBUF_CTL_S(i),
> > @@ -4417,6 +4427,8 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
> >  	}
> >  
> >  	dev_priv->enabled_dbuf_slices_mask = req_slices;
> > +
> > +	mutex_unlock(&power_domains->lock);
> >  }
> >  
> >  static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
> > -- 
> > 2.24.1.485.gad05a3d8e5
> > 
> 
> -- 
> Matt Roper
> Graphics Software Engineer
> VTT-OSGC Platform Enablement
> Intel Corporation
> (916) 356-2795

-- 
Ville Syrjälä
Intel
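
As a rough illustration of the "always hold the DC off reference for
dbuf programming" idea mentioned above, the caller could wrap the
update in the with_intel_display_power() helper quoted earlier. This
is only a sketch built from names in the quoted patches
(POWER_DOMAIN_MODESET, icl_dbuf_slices_update); the wakeref type is an
assumption and none of this is part of the series:

	intel_wakeref_t wakeref; /* assumed type returned by intel_display_power_get() */

	/*
	 * Keep DC states disabled while the slices are reprogrammed, so
	 * the power-state polling cannot be perturbed by a DC transition.
	 */
	with_intel_display_power(dev_priv, POWER_DOMAIN_MODESET, wakeref)
		icl_dbuf_slices_update(dev_priv, req_slices);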

end of thread [~2020-01-31 15:22 UTC]

Thread overview: 25+ messages
2020-01-24  8:44 [Intel-gfx] [PATCH v16 0/7] Enable second DBuf slice for ICL and TGL Stanislav Lisovskiy
2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 1/7] drm/i915: Remove skl_ddl_allocation struct Stanislav Lisovskiy
2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 2/7] drm/i915: Move dbuf slice update to proper place Stanislav Lisovskiy
2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 3/7] drm/i915: Introduce parameterized DBUF_CTL Stanislav Lisovskiy
2020-01-28 17:35   ` Ville Syrjälä
2020-01-29  8:41     ` Lisovskiy, Stanislav
2020-01-29 11:47       ` Ville Syrjälä
2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 4/7] drm/i915: Manipulate DBuf slices properly Stanislav Lisovskiy
2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 5/7] drm/i915: Correctly map DBUF slices to pipes Stanislav Lisovskiy
2020-01-28 23:15   ` Matt Roper
2020-01-28 23:38     ` Matt Roper
2020-01-29  9:03     ` Lisovskiy, Stanislav
2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 6/7] drm/i915: Protect intel_dbuf_slices_update with mutex Stanislav Lisovskiy
2020-01-28 23:33   ` Matt Roper
2020-01-29  9:22     ` Lisovskiy, Stanislav
2020-01-31 15:22     ` Ville Syrjälä
2020-01-24  8:44 ` [Intel-gfx] [PATCH v16 7/7] drm/i915: Update dbuf slices only with full modeset Stanislav Lisovskiy
2020-01-28 23:37   ` Matt Roper
2020-01-31 15:10     ` Ville Syrjälä
2020-01-24  9:52 ` [Intel-gfx] ✓ Fi.CI.BAT: success for Enable second DBuf slice for ICL and TGL (rev21) Patchwork
2020-01-26  9:19 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
2020-01-27  7:48   ` Lisovskiy, Stanislav
2020-01-27 12:29     ` Peres, Martin
2020-01-27 13:01 ` [Intel-gfx] ✓ Fi.CI.IGT: success " Patchwork
2020-01-27 13:07 ` Patchwork
