* [PATCH 0/3] Optimize use of DBuf slices
@ 2018-04-05 6:00 Mahesh Kumar
2018-04-05 6:00 ` [PATCH 1/3] drm/i915/icl: track dbuf slice-2 status Mahesh Kumar
` (4 more replies)
0 siblings, 5 replies; 13+ messages in thread
From: Mahesh Kumar @ 2018-04-05 6:00 UTC (permalink / raw)
To: intel-gfx; +Cc: lucas.demarchi, paulo.r.zanoni, rodrigo.vivi
Patches in this series were originally part of series:
https://patchwork.freedesktop.org/series/36993/
Reposting it here after rebase
Mahesh Kumar (3):
drm/i915/icl: track dbuf slice-2 status
drm/i915/icl: Enable 2nd DBuf slice only when needed
drm/i915/icl: update ddb entry start/end mask during hw ddb readout
drivers/gpu/drm/i915/i915_drv.h | 1 +
drivers/gpu/drm/i915/i915_reg.h | 3 ++
drivers/gpu/drm/i915/intel_display.c | 15 ++++++
drivers/gpu/drm/i915/intel_drv.h | 6 +++
drivers/gpu/drm/i915/intel_pm.c | 95 +++++++++++++++++++++++++++------
drivers/gpu/drm/i915/intel_runtime_pm.c | 69 +++++++++++++++++++-----
6 files changed, 160 insertions(+), 29 deletions(-)
--
2.16.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 1/3] drm/i915/icl: track dbuf slice-2 status
2018-04-05 6:00 [PATCH 0/3] Optimize use of DBuf slices Mahesh Kumar
@ 2018-04-05 6:00 ` Mahesh Kumar
2018-04-05 8:25 ` Jani Nikula
2018-04-05 6:00 ` [PATCH 2/3] drm/i915/icl: Enable 2nd DBuf slice only when needed Mahesh Kumar
` (3 subsequent siblings)
4 siblings, 1 reply; 13+ messages in thread
From: Mahesh Kumar @ 2018-04-05 6:00 UTC (permalink / raw)
To: intel-gfx; +Cc: lucas.demarchi, paulo.r.zanoni, rodrigo.vivi
This patch adds support for tracking the status of the DBUF slices.
This is the foundation for introducing support for enabling/disabling the
second DBUF slice dynamically on ICL.
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Reviewed-by: James Ausmus <james.ausmus@intel.com>
---
drivers/gpu/drm/i915/i915_drv.h | 1 +
drivers/gpu/drm/i915/intel_display.c | 5 +++++
drivers/gpu/drm/i915/intel_pm.c | 20 ++++++++++++++++++++
drivers/gpu/drm/i915/intel_runtime_pm.c | 4 ++++
4 files changed, 30 insertions(+)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5373b171bb96..e51da42297d8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1184,6 +1184,7 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
struct skl_ddb_allocation {
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
+ uint8_t enabled_slices; /* GEN11 has configurable 2 slices */
};
struct skl_wm_values {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 415fb8cf2cf4..96a1e6a7f69e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11385,6 +11385,11 @@ static void verify_wm_state(struct drm_crtc *crtc,
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
+ if (INTEL_GEN(dev_priv) >= 11)
+ if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
+ DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
+ sw_ddb->enabled_slices,
+ hw_ddb.enabled_slices);
/* planes */
for_each_universal_plane(dev_priv, pipe, plane) {
hw_plane_wm = &hw_wm.planes[plane];
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 19e82aaa9863..3ff37d5ba353 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3567,6 +3567,23 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
+static uint8_t intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
+{
+ uint8_t enabled_slices;
+
+ /* Slice 1 will always be enabled */
+ enabled_slices = 1;
+
+ /* Gen prior to GEN11 have only one DBuf slice */
+ if (INTEL_GEN(dev_priv) < 11)
+ return enabled_slices;
+
+ if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
+ enabled_slices++;
+
+ return enabled_slices;
+}
+
/*
* FIXME: We still don't have the proper code detect if we need to apply the WA,
* so assume we'll always need it in order to avoid underruns.
@@ -3832,6 +3849,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
memset(ddb, 0, sizeof(*ddb));
+ ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+
for_each_intel_crtc(&dev_priv->drm, crtc) {
enum intel_display_power_domain power_domain;
enum plane_id plane_id;
@@ -5050,6 +5069,7 @@ skl_copy_wm_for_pipe(struct skl_wm_values *dst,
sizeof(dst->ddb.y_plane[pipe]));
memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
sizeof(dst->ddb.plane[pipe]));
+ dst->ddb.enabled_slices = src->ddb.enabled_slices;
}
static void
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 53ea564f971e..58be542d660b 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2664,6 +2664,8 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
!(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
DRM_ERROR("DBuf power enable timeout\n");
+ else
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
}
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
@@ -2677,6 +2679,8 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
DRM_ERROR("DBuf power disable timeout!\n");
+ else
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
--
2.16.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 2/3] drm/i915/icl: Enable 2nd DBuf slice only when needed
2018-04-05 6:00 [PATCH 0/3] Optimize use of DBuf slices Mahesh Kumar
2018-04-05 6:00 ` [PATCH 1/3] drm/i915/icl: track dbuf slice-2 status Mahesh Kumar
@ 2018-04-05 6:00 ` Mahesh Kumar
2018-04-25 21:01 ` Rodrigo Vivi
2018-04-05 6:00 ` [PATCH 3/3] drm/i915/icl: update ddb entry start/end mask during hw ddb readout Mahesh Kumar
` (2 subsequent siblings)
4 siblings, 1 reply; 13+ messages in thread
From: Mahesh Kumar @ 2018-04-05 6:00 UTC (permalink / raw)
To: intel-gfx; +Cc: lucas.demarchi, paulo.r.zanoni, rodrigo.vivi
ICL has two slices of DBuf, each slice of size 1024 blocks.
We should not always enable slice-2. It should be enabled only if the
display's total required BW is > 12GBps OR more than one pipe is enabled.
Changes since V1:
- typecast total_data_rate to u64 before multiplication to solve any
possible overflow (Rodrigo)
- fix where skl_wm_get_hw_state was memsetting ddb, resulting
enabled_slices to become zero
- Fix the logic of calculating ddb_size
Changes since V2:
- If no CRTC is part of the commit, required_slices will have the value "0";
don't try to disable the DBuf slice in that case.
Changes since V3:
- Create a generic helper to enable/disable a slice
- don't return early if total_data_rate is 0; it may be a cursor-only
commit, or an atomic modeset without any plane.
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
---
drivers/gpu/drm/i915/intel_display.c | 10 +++++
drivers/gpu/drm/i915/intel_drv.h | 6 +++
drivers/gpu/drm/i915/intel_pm.c | 57 +++++++++++++++++++++++------
drivers/gpu/drm/i915/intel_runtime_pm.c | 65 ++++++++++++++++++++++++++-------
4 files changed, 113 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 96a1e6a7f69e..bc795307657a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12196,6 +12196,8 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
bool progress;
enum pipe pipe;
int i;
+ uint8_t hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+ uint8_t required_slices = intel_state->wm_results.ddb.enabled_slices;
const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
@@ -12204,6 +12206,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
if (new_crtc_state->active)
entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+ /* If 2nd DBuf slice required, enable it here */
+ if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
+ icl_dbuf_slices_update(dev_priv, required_slices);
+
/*
* Whenever the number of active pipes changes, we need to make sure we
* update the pipes in the right order so that their ddb allocations
@@ -12254,6 +12260,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
progress = true;
}
} while (progress);
+
+ /* If 2nd DBuf slice is no more required disable it */
+ if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
+ icl_dbuf_slices_update(dev_priv, required_slices);
}
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d1452fd2a58d..473a1ad7f32b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -140,6 +140,10 @@
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
+#define KBps(x) (1000 * (x))
+#define MBps(x) KBps(1000 * (x))
+#define GBps(x) ((uint64_t) 1000 * MBps((x)))
+
/*
* Display related stuff
*/
@@ -1914,6 +1918,8 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ uint8_t req_slices);
static inline void
assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3ff37d5ba353..caa29f949335 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3771,9 +3771,42 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
return true;
}
+static unsigned int intel_get_ddb_size(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *cstate,
+ const unsigned int total_data_rate,
+ const int num_active,
+ struct skl_ddb_allocation *ddb)
+{
+ const struct drm_display_mode *adjusted_mode;
+ u64 total_data_bw;
+ u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
+
+ WARN_ON(ddb_size == 0);
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return ddb_size - 4; /* 4 blocks for bypass path allocation */
+
+ adjusted_mode = &cstate->base.adjusted_mode;
+ total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);
+
+ /*
+ * 12GB/s is maximum BW supported by single DBuf slice.
+ */
+ if (total_data_bw >= GBps(12) || num_active > 1)
+ ddb->enabled_slices = 2;
+ else {
+ ddb->enabled_slices = 1;
+ ddb_size /= 2;
+ }
+
+ return ddb_size;
+}
+
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
const struct intel_crtc_state *cstate,
+ const unsigned int total_data_rate,
+ struct skl_ddb_allocation *ddb,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
@@ -3796,11 +3829,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
else
*num_active = hweight32(dev_priv->active_crtcs);
- ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- WARN_ON(ddb_size == 0);
-
- if (INTEL_GEN(dev_priv) < 11)
- ddb_size -= 4; /* 4 blocks for bypass path allocation */
+ ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
+ *num_active, ddb);
/*
* If the state doesn't change the active CRTC's, then there's
@@ -4239,7 +4269,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
- skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
+ total_data_rate = skl_get_total_relative_data_rate(cstate,
+ plane_data_rate,
+ plane_y_data_rate);
+ skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
+ alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
@@ -4274,9 +4308,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*
* FIXME: we may not allocate every single block here.
*/
- total_data_rate = skl_get_total_relative_data_rate(cstate,
- plane_data_rate,
- plane_y_data_rate);
if (total_data_rate == 0)
return 0;
@@ -5382,8 +5413,12 @@ void skl_wm_get_hw_state(struct drm_device *dev)
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
} else {
- /* Easy/common case; just sanitize DDB now if everything off */
- memset(ddb, 0, sizeof(*ddb));
+ /*
+ * Easy/common case; just sanitize DDB now if everything off
+ * Keep dbuf slice info intact
+ */
+ memset(ddb->plane, 0, sizeof(ddb->plane));
+ memset(ddb->y_plane, 0, sizeof(ddb->y_plane));
}
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 58be542d660b..56cdec06e6b2 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2627,32 +2627,69 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
-static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+static inline
+bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, bool enable)
{
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ u32 val, status;
+ val = I915_READ(reg);
+ val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
udelay(10);
- if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout\n");
+ status = I915_READ(reg) & DBUF_POWER_STATE;
+ if ((enable && !status) || (!enable && status)) {
+ DRM_ERROR("DBus power %s timeout!\n",
+ enable ? "enable" : "disable");
+ return false;
+ }
+ return true;
+}
+
+static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
+{
+ intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
}
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL);
+ intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
+}
- udelay(10);
+static uint8_t intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) < 11)
+ return 1;
+ return 2;
+}
- if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
- DRM_ERROR("DBuf power disable timeout!\n");
+void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
+ uint8_t req_slices)
+{
+ uint8_t hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
+ u32 val;
+ bool ret;
+
+ if (req_slices > intel_dbuf_max_slices(dev_priv)) {
+ DRM_ERROR("Invalid number of dbuf slices requested\n");
+ return;
+ }
+
+ if (req_slices == hw_enabled_slices || req_slices == 0)
+ return;
+
+ val = I915_READ(DBUF_CTL_S2);
+ if (req_slices > hw_enabled_slices)
+ ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
+ else
+ ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+
+ if (ret)
+ dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
}
-/*
- * TODO: we shouldn't always enable DBUF_CTL_S2, we should only enable it when
- * needed and keep it disabled as much as possible.
- */
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
--
2.16.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 3/3] drm/i915/icl: update ddb entry start/end mask during hw ddb readout
2018-04-05 6:00 [PATCH 0/3] Optimize use of DBuf slices Mahesh Kumar
2018-04-05 6:00 ` [PATCH 1/3] drm/i915/icl: track dbuf slice-2 status Mahesh Kumar
2018-04-05 6:00 ` [PATCH 2/3] drm/i915/icl: Enable 2nd DBuf slice only when needed Mahesh Kumar
@ 2018-04-05 6:00 ` Mahesh Kumar
2018-04-25 21:10 ` Rodrigo Vivi
2018-04-05 6:14 ` ✗ Fi.CI.CHECKPATCH: warning for Optimize use of DBuf slices Patchwork
2018-04-05 6:30 ` ✗ Fi.CI.BAT: " Patchwork
4 siblings, 1 reply; 13+ messages in thread
From: Mahesh Kumar @ 2018-04-05 6:00 UTC (permalink / raw)
To: intel-gfx; +Cc: lucas.demarchi, paulo.r.zanoni, rodrigo.vivi
From Gen11/ICL onward, the ddb entry start/end mask is increased from 10 bits
to 11 bits. This patch makes changes to use the proper mask for ICL+ during
hardware ddb value readout.
Changes since V1:
- Use _MASK & _SHIFT macro (James)
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
---
drivers/gpu/drm/i915/i915_reg.h | 3 +++
drivers/gpu/drm/i915/intel_pm.c | 18 ++++++++++++++----
2 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 176dca6554f4..e3a6c535617d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6459,6 +6459,9 @@ enum {
#define _PLANE_BUF_CFG_1_B 0x7127c
#define _PLANE_BUF_CFG_2_B 0x7137c
+#define SKL_DDB_ENTRY_MASK 0x3FF
+#define ICL_DDB_ENTRY_MASK 0x7FF
+#define DDB_ENTRY_END_SHIFT 16
#define _PLANE_BUF_CFG_1(pipe) \
_PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
#define _PLANE_BUF_CFG_2(pipe) \
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index caa29f949335..98e91f4a5ab4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3864,10 +3864,18 @@ static unsigned int skl_cursor_allocation(int num_active)
return 8;
}
-static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
+static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
+ struct skl_ddb_entry *entry, u32 reg)
{
- entry->start = reg & 0x3ff;
- entry->end = (reg >> 16) & 0x3ff;
+ uint16_t mask;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ mask = ICL_DDB_ENTRY_MASK;
+ else
+ mask = SKL_DDB_ENTRY_MASK;
+ entry->start = reg & mask;
+ entry->end = (reg >> DDB_ENTRY_END_SHIFT) & mask;
+
if (entry->end)
entry->end += 1;
}
@@ -3898,7 +3906,9 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
else
val = I915_READ(CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
+ skl_ddb_entry_init_from_hw(dev_priv,
+ &ddb->plane[pipe][plane_id],
+ val);
}
intel_display_power_put(dev_priv, power_domain);
--
2.16.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
* ✗ Fi.CI.CHECKPATCH: warning for Optimize use of DBuf slices
2018-04-05 6:00 [PATCH 0/3] Optimize use of DBuf slices Mahesh Kumar
` (2 preceding siblings ...)
2018-04-05 6:00 ` [PATCH 3/3] drm/i915/icl: update ddb entry start/end mask during hw ddb readout Mahesh Kumar
@ 2018-04-05 6:14 ` Patchwork
2018-04-05 6:30 ` ✗ Fi.CI.BAT: " Patchwork
4 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2018-04-05 6:14 UTC (permalink / raw)
To: Mahesh Kumar; +Cc: intel-gfx
== Series Details ==
Series: Optimize use of DBuf slices
URL : https://patchwork.freedesktop.org/series/41180/
State : warning
== Summary ==
$ dim checkpatch origin/drm-tip
e2b3a175155d drm/i915/icl: track dbuf slice-2 status
5cc032e8d384 drm/i915/icl: Enable 2nd DBuf slice only when needed
-:71: CHECK:SPACING: No space is necessary after a cast
#71: FILE: drivers/gpu/drm/i915/intel_drv.h:145:
+#define GBps(x) ((uint64_t) 1000 * MBps((x)))
-:114: CHECK:BRACES: braces {} should be used on all arms of this statement
#114: FILE: drivers/gpu/drm/i915/intel_pm.c:3795:
+ if (total_data_bw >= GBps(12) || num_active > 1)
[...]
+ else {
[...]
-:116: CHECK:BRACES: Unbalanced braces around else statement
#116: FILE: drivers/gpu/drm/i915/intel_pm.c:3797:
+ else {
-:212: CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis
#212: FILE: drivers/gpu/drm/i915/intel_runtime_pm.c:2645:
+ DRM_ERROR("DBus power %s timeout!\n",
+ enable ? "enable" : "disable");
total: 0 errors, 0 warnings, 4 checks, 219 lines checked
587921dcf1da drm/i915/icl: update ddb entry start/end mask during hw ddb readout
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* ✗ Fi.CI.BAT: warning for Optimize use of DBuf slices
2018-04-05 6:00 [PATCH 0/3] Optimize use of DBuf slices Mahesh Kumar
` (3 preceding siblings ...)
2018-04-05 6:14 ` ✗ Fi.CI.CHECKPATCH: warning for Optimize use of DBuf slices Patchwork
@ 2018-04-05 6:30 ` Patchwork
4 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2018-04-05 6:30 UTC (permalink / raw)
To: Mahesh Kumar; +Cc: intel-gfx
== Series Details ==
Series: Optimize use of DBuf slices
URL : https://patchwork.freedesktop.org/series/41180/
State : warning
== Summary ==
Series 41180v1 Optimize use of DBuf slices
https://patchwork.freedesktop.org/api/1.0/series/41180/revisions/1/mbox/
---- Possible new issues:
Test kms_force_connector_basic:
Subgroup prune-stale-modes:
pass -> SKIP (fi-snb-2600)
---- Known issues:
Test gem_exec_suspend:
Subgroup basic-s3:
dmesg-warn -> PASS (fi-glk-j4005) fdo#103359
fdo#103359 https://bugs.freedesktop.org/show_bug.cgi?id=103359
fi-bdw-5557u total:285 pass:264 dwarn:0 dfail:0 fail:0 skip:21 time:431s
fi-bdw-gvtdvm total:285 pass:261 dwarn:0 dfail:0 fail:0 skip:24 time:437s
fi-blb-e6850 total:285 pass:220 dwarn:1 dfail:0 fail:0 skip:64 time:381s
fi-bsw-n3050 total:285 pass:239 dwarn:0 dfail:0 fail:0 skip:46 time:538s
fi-bwr-2160 total:285 pass:180 dwarn:0 dfail:0 fail:0 skip:105 time:296s
fi-bxt-dsi total:285 pass:255 dwarn:0 dfail:0 fail:0 skip:30 time:515s
fi-bxt-j4205 total:285 pass:256 dwarn:0 dfail:0 fail:0 skip:29 time:522s
fi-byt-j1900 total:285 pass:250 dwarn:0 dfail:0 fail:0 skip:35 time:521s
fi-byt-n2820 total:285 pass:246 dwarn:0 dfail:0 fail:0 skip:39 time:508s
fi-cfl-8700k total:285 pass:257 dwarn:0 dfail:0 fail:0 skip:28 time:415s
fi-cfl-s3 total:285 pass:259 dwarn:0 dfail:0 fail:0 skip:26 time:560s
fi-cfl-u total:285 pass:259 dwarn:0 dfail:0 fail:0 skip:26 time:508s
fi-elk-e7500 total:285 pass:226 dwarn:0 dfail:0 fail:0 skip:59 time:424s
fi-gdg-551 total:285 pass:176 dwarn:0 dfail:0 fail:1 skip:108 time:316s
fi-glk-1 total:285 pass:257 dwarn:0 dfail:0 fail:0 skip:28 time:535s
fi-glk-j4005 total:285 pass:256 dwarn:0 dfail:0 fail:0 skip:29 time:483s
fi-hsw-4770 total:285 pass:258 dwarn:0 dfail:0 fail:0 skip:27 time:405s
fi-ilk-650 total:285 pass:225 dwarn:0 dfail:0 fail:0 skip:60 time:421s
fi-ivb-3520m total:285 pass:256 dwarn:0 dfail:0 fail:0 skip:29 time:462s
fi-ivb-3770 total:285 pass:252 dwarn:0 dfail:0 fail:0 skip:33 time:433s
fi-kbl-7500u total:285 pass:260 dwarn:1 dfail:0 fail:0 skip:24 time:472s
fi-kbl-7567u total:285 pass:265 dwarn:0 dfail:0 fail:0 skip:20 time:460s
fi-kbl-r total:285 pass:258 dwarn:0 dfail:0 fail:0 skip:27 time:511s
fi-skl-6260u total:285 pass:265 dwarn:0 dfail:0 fail:0 skip:20 time:443s
fi-skl-6600u total:285 pass:258 dwarn:0 dfail:0 fail:0 skip:27 time:534s
fi-skl-6700k2 total:285 pass:261 dwarn:0 dfail:0 fail:0 skip:24 time:499s
fi-skl-6770hq total:285 pass:265 dwarn:0 dfail:0 fail:0 skip:20 time:510s
fi-skl-guc total:285 pass:257 dwarn:0 dfail:0 fail:0 skip:28 time:428s
fi-skl-gvtdvm total:285 pass:262 dwarn:0 dfail:0 fail:0 skip:23 time:446s
fi-snb-2520m total:285 pass:245 dwarn:0 dfail:0 fail:0 skip:40 time:591s
fi-snb-2600 total:285 pass:244 dwarn:0 dfail:0 fail:0 skip:41 time:398s
Blacklisted hosts:
fi-cnl-psr total:285 pass:256 dwarn:3 dfail:0 fail:0 skip:26 time:516s
e29a10513429cca404e9847a399efbdbb4bdd4bf drm-tip: 2018y-04m-04d-20h-47m-24s UTC integration manifest
587921dcf1da drm/i915/icl: update ddb entry start/end mask during hw ddb readout
5cc032e8d384 drm/i915/icl: Enable 2nd DBuf slice only when needed
e2b3a175155d drm/i915/icl: track dbuf slice-2 status
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_8586/issues.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/3] drm/i915/icl: track dbuf slice-2 status
2018-04-05 6:00 ` [PATCH 1/3] drm/i915/icl: track dbuf slice-2 status Mahesh Kumar
@ 2018-04-05 8:25 ` Jani Nikula
2018-04-05 9:22 ` Kumar, Mahesh
0 siblings, 1 reply; 13+ messages in thread
From: Jani Nikula @ 2018-04-05 8:25 UTC (permalink / raw)
To: Mahesh Kumar, intel-gfx; +Cc: lucas.demarchi, paulo.r.zanoni, rodrigo.vivi
On Thu, 05 Apr 2018, Mahesh Kumar <mahesh1.kumar@intel.com> wrote:
> This patch adds support to start tracking status of DBUF slices.
> This is foundation to introduce support for enabling/disabling second
> DBUF slice dynamically for ICL.
>
> Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
> Reviewed-by: James Ausmus <james.ausmus@intel.com>
> ---
> drivers/gpu/drm/i915/i915_drv.h | 1 +
> drivers/gpu/drm/i915/intel_display.c | 5 +++++
> drivers/gpu/drm/i915/intel_pm.c | 20 ++++++++++++++++++++
> drivers/gpu/drm/i915/intel_runtime_pm.c | 4 ++++
> 4 files changed, 30 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 5373b171bb96..e51da42297d8 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1184,6 +1184,7 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
> struct skl_ddb_allocation {
> struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
> struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
> + uint8_t enabled_slices; /* GEN11 has configurable 2 slices */
Please prefer kernel types u8/u16/u32/u64 over uint8_t and friends. It's
fine to use the stdint types when the context uses them, but please
let's not add new ones.
BR,
Jani.
> };
>
> struct skl_wm_values {
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 415fb8cf2cf4..96a1e6a7f69e 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -11385,6 +11385,11 @@ static void verify_wm_state(struct drm_crtc *crtc,
> skl_ddb_get_hw_state(dev_priv, &hw_ddb);
> sw_ddb = &dev_priv->wm.skl_hw.ddb;
>
> + if (INTEL_GEN(dev_priv) >= 11)
> + if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
> + DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
> + sw_ddb->enabled_slices,
> + hw_ddb.enabled_slices);
> /* planes */
> for_each_universal_plane(dev_priv, pipe, plane) {
> hw_plane_wm = &hw_wm.planes[plane];
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 19e82aaa9863..3ff37d5ba353 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -3567,6 +3567,23 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
> return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
> }
>
> +static uint8_t intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
> +{
> + uint8_t enabled_slices;
> +
> + /* Slice 1 will always be enabled */
> + enabled_slices = 1;
> +
> + /* Gen prior to GEN11 have only one DBuf slice */
> + if (INTEL_GEN(dev_priv) < 11)
> + return enabled_slices;
> +
> + if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
> + enabled_slices++;
> +
> + return enabled_slices;
> +}
> +
> /*
> * FIXME: We still don't have the proper code detect if we need to apply the WA,
> * so assume we'll always need it in order to avoid underruns.
> @@ -3832,6 +3849,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
>
> memset(ddb, 0, sizeof(*ddb));
>
> + ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
> +
> for_each_intel_crtc(&dev_priv->drm, crtc) {
> enum intel_display_power_domain power_domain;
> enum plane_id plane_id;
> @@ -5050,6 +5069,7 @@ skl_copy_wm_for_pipe(struct skl_wm_values *dst,
> sizeof(dst->ddb.y_plane[pipe]));
> memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
> sizeof(dst->ddb.plane[pipe]));
> + dst->ddb.enabled_slices = src->ddb.enabled_slices;
> }
>
> static void
> diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
> index 53ea564f971e..58be542d660b 100644
> --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
> +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
> @@ -2664,6 +2664,8 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
> if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
> !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
> DRM_ERROR("DBuf power enable timeout\n");
> + else
> + dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
> }
>
> static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
> @@ -2677,6 +2679,8 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
> if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
> (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
> DRM_ERROR("DBuf power disable timeout!\n");
> + else
> + dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
> }
>
> static void icl_mbus_init(struct drm_i915_private *dev_priv)
--
Jani Nikula, Intel Open Source Technology Center
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 1/3] drm/i915/icl: track dbuf slice-2 status
2018-04-05 8:25 ` Jani Nikula
@ 2018-04-05 9:22 ` Kumar, Mahesh
0 siblings, 0 replies; 13+ messages in thread
From: Kumar, Mahesh @ 2018-04-05 9:22 UTC (permalink / raw)
To: Jani Nikula, intel-gfx; +Cc: lucas.demarchi, paulo.r.zanoni, rodrigo.vivi
Hi,
On 4/5/2018 1:55 PM, Jani Nikula wrote:
> On Thu, 05 Apr 2018, Mahesh Kumar <mahesh1.kumar@intel.com> wrote:
>> This patch adds support to start tracking status of DBUF slices.
>> This is foundation to introduce support for enabling/disabling second
>> DBUF slice dynamically for ICL.
>>
>> Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
>> Reviewed-by: James Ausmus <james.ausmus@intel.com>
>> ---
>> drivers/gpu/drm/i915/i915_drv.h | 1 +
>> drivers/gpu/drm/i915/intel_display.c | 5 +++++
>> drivers/gpu/drm/i915/intel_pm.c | 20 ++++++++++++++++++++
>> drivers/gpu/drm/i915/intel_runtime_pm.c | 4 ++++
>> 4 files changed, 30 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
>> index 5373b171bb96..e51da42297d8 100644
>> --- a/drivers/gpu/drm/i915/i915_drv.h
>> +++ b/drivers/gpu/drm/i915/i915_drv.h
>> @@ -1184,6 +1184,7 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
>> struct skl_ddb_allocation {
>> struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
>> struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
>> + uint8_t enabled_slices; /* GEN11 has configurable 2 slices */
> Please prefer kernel types u8/u16/u32/u64 over uint8_t and friends. It's
> fine to use the stdint types when the context uses them, but please
> let's not add new ones.
ok, sure will update in new version
-Mahesh
>
> BR,
> Jani.
>
>
>> };
>>
>> struct skl_wm_values {
>> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
>> index 415fb8cf2cf4..96a1e6a7f69e 100644
>> --- a/drivers/gpu/drm/i915/intel_display.c
>> +++ b/drivers/gpu/drm/i915/intel_display.c
>> @@ -11385,6 +11385,11 @@ static void verify_wm_state(struct drm_crtc *crtc,
>> skl_ddb_get_hw_state(dev_priv, &hw_ddb);
>> sw_ddb = &dev_priv->wm.skl_hw.ddb;
>>
>> + if (INTEL_GEN(dev_priv) >= 11)
>> + if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
>> + DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
>> + sw_ddb->enabled_slices,
>> + hw_ddb.enabled_slices);
>> /* planes */
>> for_each_universal_plane(dev_priv, pipe, plane) {
>> hw_plane_wm = &hw_wm.planes[plane];
>> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
>> index 19e82aaa9863..3ff37d5ba353 100644
>> --- a/drivers/gpu/drm/i915/intel_pm.c
>> +++ b/drivers/gpu/drm/i915/intel_pm.c
>> @@ -3567,6 +3567,23 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
>> return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
>> }
>>
>> +static uint8_t intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
>> +{
>> + uint8_t enabled_slices;
>> +
>> + /* Slice 1 will always be enabled */
>> + enabled_slices = 1;
>> +
>> + /* Gen prior to GEN11 have only one DBuf slice */
>> + if (INTEL_GEN(dev_priv) < 11)
>> + return enabled_slices;
>> +
>> + if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
>> + enabled_slices++;
>> +
>> + return enabled_slices;
>> +}
>> +
>> /*
>> * FIXME: We still don't have the proper code detect if we need to apply the WA,
>> * so assume we'll always need it in order to avoid underruns.
>> @@ -3832,6 +3849,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
>>
>> memset(ddb, 0, sizeof(*ddb));
>>
>> + ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
>> +
>> for_each_intel_crtc(&dev_priv->drm, crtc) {
>> enum intel_display_power_domain power_domain;
>> enum plane_id plane_id;
>> @@ -5050,6 +5069,7 @@ skl_copy_wm_for_pipe(struct skl_wm_values *dst,
>> sizeof(dst->ddb.y_plane[pipe]));
>> memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
>> sizeof(dst->ddb.plane[pipe]));
>> + dst->ddb.enabled_slices = src->ddb.enabled_slices;
>> }
>>
>> static void
>> diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
>> index 53ea564f971e..58be542d660b 100644
>> --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
>> +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
>> @@ -2664,6 +2664,8 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
>> if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
>> !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
>> DRM_ERROR("DBuf power enable timeout\n");
>> + else
>> + dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
>> }
>>
>> static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
>> @@ -2677,6 +2679,8 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
>> if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
>> (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
>> DRM_ERROR("DBuf power disable timeout!\n");
>> + else
>> + dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
>> }
>>
>> static void icl_mbus_init(struct drm_i915_private *dev_priv)
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 2/3] drm/i915/icl: Enable 2nd DBuf slice only when needed
2018-04-05 6:00 ` [PATCH 2/3] drm/i915/icl: Enable 2nd DBuf slice only when needed Mahesh Kumar
@ 2018-04-25 21:01 ` Rodrigo Vivi
0 siblings, 0 replies; 13+ messages in thread
From: Rodrigo Vivi @ 2018-04-25 21:01 UTC (permalink / raw)
To: Mahesh Kumar; +Cc: intel-gfx, lucas.demarchi, paulo.r.zanoni
On Thu, Apr 05, 2018 at 11:30:18AM +0530, Mahesh Kumar wrote:
> ICL has two slices of DBuf, each slice of size 1024 blocks.
> We should not always enable slice-2. It should be enabled only if
> display total required BW is > 12GBps OR more than 1 pipes are enabled.
>
> Changes since V1:
> - typecast total_data_rate to u64 before multiplication to solve any
> possible overflow (Rodrigo)
> - fix where skl_wm_get_hw_state was memsetting ddb, resulting
> enabled_slices to become zero
> - Fix the logic of calculating ddb_size
> Changes since V2:
> - If no-crtc is part of commit required_slices will have value "0",
> don't try to disable DBuf slice.
> Changes since V3:
> - Create a generic helper to enable/disable slice
> - don't return early if total_data_rate is 0, it may be cursor only
> commit, or atomic modeset without any plane.
>
> Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
> ---
> drivers/gpu/drm/i915/intel_display.c | 10 +++++
> drivers/gpu/drm/i915/intel_drv.h | 6 +++
> drivers/gpu/drm/i915/intel_pm.c | 57 +++++++++++++++++++++++------
> drivers/gpu/drm/i915/intel_runtime_pm.c | 65 ++++++++++++++++++++++++++-------
> 4 files changed, 113 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 96a1e6a7f69e..bc795307657a 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -12196,6 +12196,8 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
> bool progress;
> enum pipe pipe;
> int i;
> + uint8_t hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
> + uint8_t required_slices = intel_state->wm_results.ddb.enabled_slices;
in many parts, whether enabled_slices means "required" or "calculated" confused me a bit,
but I don't have a better suggestion and the code seems right:
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
>
> const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
>
> @@ -12204,6 +12206,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
> if (new_crtc_state->active)
> entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
>
> + /* If 2nd DBuf slice required, enable it here */
> + if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
> + icl_dbuf_slices_update(dev_priv, required_slices);
> +
> /*
> * Whenever the number of active pipes changes, we need to make sure we
> * update the pipes in the right order so that their ddb allocations
> @@ -12254,6 +12260,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
> progress = true;
> }
> } while (progress);
> +
> + /* If 2nd DBuf slice is no more required disable it */
> + if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
> + icl_dbuf_slices_update(dev_priv, required_slices);
> }
>
> static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index d1452fd2a58d..473a1ad7f32b 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -140,6 +140,10 @@
> #define KHz(x) (1000 * (x))
> #define MHz(x) KHz(1000 * (x))
>
> +#define KBps(x) (1000 * (x))
> +#define MBps(x) KBps(1000 * (x))
> +#define GBps(x) ((uint64_t) 1000 * MBps((x)))
> +
> /*
> * Display related stuff
> */
> @@ -1914,6 +1918,8 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
> enum intel_display_power_domain domain);
> void intel_display_power_put(struct drm_i915_private *dev_priv,
> enum intel_display_power_domain domain);
> +void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
> + uint8_t req_slices);
>
> static inline void
> assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 3ff37d5ba353..caa29f949335 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -3771,9 +3771,42 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
> return true;
> }
>
> +static unsigned int intel_get_ddb_size(struct drm_i915_private *dev_priv,
> + const struct intel_crtc_state *cstate,
> + const unsigned int total_data_rate,
> + const int num_active,
> + struct skl_ddb_allocation *ddb)
> +{
> + const struct drm_display_mode *adjusted_mode;
> + u64 total_data_bw;
> + u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
> +
> + WARN_ON(ddb_size == 0);
> +
> + if (INTEL_GEN(dev_priv) < 11)
> + return ddb_size - 4; /* 4 blocks for bypass path allocation */
> +
> + adjusted_mode = &cstate->base.adjusted_mode;
> + total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);
> +
> + /*
> + * 12GB/s is maximum BW supported by single DBuf slice.
> + */
> + if (total_data_bw >= GBps(12) || num_active > 1)
> + ddb->enabled_slices = 2;
> + else {
> + ddb->enabled_slices = 1;
> + ddb_size /= 2;
> + }
> +
> + return ddb_size;
> +}
> +
> static void
> skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
> const struct intel_crtc_state *cstate,
> + const unsigned int total_data_rate,
> + struct skl_ddb_allocation *ddb,
> struct skl_ddb_entry *alloc, /* out */
> int *num_active /* out */)
> {
> @@ -3796,11 +3829,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
> else
> *num_active = hweight32(dev_priv->active_crtcs);
>
> - ddb_size = INTEL_INFO(dev_priv)->ddb_size;
> - WARN_ON(ddb_size == 0);
> -
> - if (INTEL_GEN(dev_priv) < 11)
> - ddb_size -= 4; /* 4 blocks for bypass path allocation */
> + ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
> + *num_active, ddb);
>
> /*
> * If the state doesn't change the active CRTC's, then there's
> @@ -4239,7 +4269,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
> return 0;
> }
>
> - skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
> + total_data_rate = skl_get_total_relative_data_rate(cstate,
> + plane_data_rate,
> + plane_y_data_rate);
> + skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
> + alloc, &num_active);
> alloc_size = skl_ddb_entry_size(alloc);
> if (alloc_size == 0)
> return 0;
> @@ -4274,9 +4308,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
> *
> * FIXME: we may not allocate every single block here.
> */
> - total_data_rate = skl_get_total_relative_data_rate(cstate,
> - plane_data_rate,
> - plane_y_data_rate);
> if (total_data_rate == 0)
> return 0;
>
> @@ -5382,8 +5413,12 @@ void skl_wm_get_hw_state(struct drm_device *dev)
> /* Fully recompute DDB on first atomic commit */
> dev_priv->wm.distrust_bios_wm = true;
> } else {
> - /* Easy/common case; just sanitize DDB now if everything off */
> - memset(ddb, 0, sizeof(*ddb));
> + /*
> + * Easy/common case; just sanitize DDB now if everything off
> + * Keep dbuf slice info intact
> + */
> + memset(ddb->plane, 0, sizeof(ddb->plane));
> + memset(ddb->y_plane, 0, sizeof(ddb->y_plane));
> }
> }
>
> diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
> index 58be542d660b..56cdec06e6b2 100644
> --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
> +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
> @@ -2627,32 +2627,69 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
> mutex_unlock(&power_domains->lock);
> }
>
> -static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
> +static inline
> +bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
> + i915_reg_t reg, bool enable)
> {
> - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
> - POSTING_READ(DBUF_CTL);
> + u32 val, status;
>
> + val = I915_READ(reg);
> + val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
> + I915_WRITE(reg, val);
> + POSTING_READ(reg);
> udelay(10);
>
> - if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
> - DRM_ERROR("DBuf power enable timeout\n");
> + status = I915_READ(reg) & DBUF_POWER_STATE;
> + if ((enable && !status) || (!enable && status)) {
> + DRM_ERROR("DBus power %s timeout!\n",
> + enable ? "enable" : "disable");
> + return false;
> + }
> + return true;
> +}
> +
> +static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
> +{
> + intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
> }
>
> static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
> {
> - I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
> - POSTING_READ(DBUF_CTL);
> + intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
> +}
>
> - udelay(10);
> +static uint8_t intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
> +{
> + if (INTEL_GEN(dev_priv) < 11)
> + return 1;
> + return 2;
> +}
>
> - if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
> - DRM_ERROR("DBuf power disable timeout!\n");
> +void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
> + uint8_t req_slices)
> +{
> + uint8_t hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
> + u32 val;
> + bool ret;
> +
> + if (req_slices > intel_dbuf_max_slices(dev_priv)) {
> + DRM_ERROR("Invalid number of dbuf slices requested\n");
> + return;
> + }
> +
> + if (req_slices == hw_enabled_slices || req_slices == 0)
> + return;
> +
> + val = I915_READ(DBUF_CTL_S2);
> + if (req_slices > hw_enabled_slices)
> + ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
> + else
> + ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
> +
> + if (ret)
> + dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
> }
>
> -/*
> - * TODO: we shouldn't always enable DBUF_CTL_S2, we should only enable it when
> - * needed and keep it disabled as much as possible.
> - */
> static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
> {
> I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
> --
> 2.16.2
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 3/3] drm/i915/icl: update ddb entry start/end mask during hw ddb readout
2018-04-05 6:00 ` [PATCH 3/3] drm/i915/icl: update ddb entry start/end mask during hw ddb readout Mahesh Kumar
@ 2018-04-25 21:10 ` Rodrigo Vivi
0 siblings, 0 replies; 13+ messages in thread
From: Rodrigo Vivi @ 2018-04-25 21:10 UTC (permalink / raw)
To: Mahesh Kumar; +Cc: intel-gfx, lucas.demarchi, paulo.r.zanoni
On Thu, Apr 05, 2018 at 11:30:19AM +0530, Mahesh Kumar wrote:
> Gen11/ICL onward ddb entry start/end mask is increased from 10 bits to
> 11 bits. This patch make changes to use proper mask for ICL+ during
> hardware ddb value readout.
>
> Changes since V1:
> - Use _MASK & _SHIFT macro (James)
>
> Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> ---
> drivers/gpu/drm/i915/i915_reg.h | 3 +++
> drivers/gpu/drm/i915/intel_pm.c | 18 ++++++++++++++----
> 2 files changed, 17 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> index 176dca6554f4..e3a6c535617d 100644
> --- a/drivers/gpu/drm/i915/i915_reg.h
> +++ b/drivers/gpu/drm/i915/i915_reg.h
> @@ -6459,6 +6459,9 @@ enum {
>
> #define _PLANE_BUF_CFG_1_B 0x7127c
> #define _PLANE_BUF_CFG_2_B 0x7137c
> +#define SKL_DDB_ENTRY_MASK 0x3FF
> +#define ICL_DDB_ENTRY_MASK 0x7FF
> +#define DDB_ENTRY_END_SHIFT 16
> #define _PLANE_BUF_CFG_1(pipe) \
> _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
> #define _PLANE_BUF_CFG_2(pipe) \
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index caa29f949335..98e91f4a5ab4 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -3864,10 +3864,18 @@ static unsigned int skl_cursor_allocation(int num_active)
> return 8;
> }
>
> -static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
> +static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
> + struct skl_ddb_entry *entry, u32 reg)
> {
> - entry->start = reg & 0x3ff;
> - entry->end = (reg >> 16) & 0x3ff;
> + uint16_t mask;
> +
> + if (INTEL_GEN(dev_priv) >= 11)
> + mask = ICL_DDB_ENTRY_MASK;
> + else
> + mask = SKL_DDB_ENTRY_MASK;
> + entry->start = reg & mask;
> + entry->end = (reg >> DDB_ENTRY_END_SHIFT) & mask;
> +
> if (entry->end)
> entry->end += 1;
> }
> @@ -3898,7 +3906,9 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
> else
> val = I915_READ(CUR_BUF_CFG(pipe));
>
> - skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
> + skl_ddb_entry_init_from_hw(dev_priv,
> + &ddb->plane[pipe][plane_id],
> + val);
> }
>
> intel_display_power_put(dev_priv, power_domain);
> --
> 2.16.2
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH 0/3] Optimize use of DBuf slices
2018-04-26 14:25 Mahesh Kumar
@ 2018-04-28 0:15 ` Rodrigo Vivi
0 siblings, 0 replies; 13+ messages in thread
From: Rodrigo Vivi @ 2018-04-28 0:15 UTC (permalink / raw)
To: Mahesh Kumar; +Cc: intel-gfx, lucas.demarchi, paulo.r.zanoni
On Thu, Apr 26, 2018 at 07:55:14PM +0530, Mahesh Kumar wrote:
> Patches in this series were originally part of series:
> https://patchwork.freedesktop.org/series/36993/
>
> Reposting it here after rebase
> use kernel types u8/u16 etc instead of uint8_t
>
> Changes:
> - Rebase the series
pushed to dinq, thanks.
>
> Mahesh Kumar (3):
> drm/i915/icl: track dbuf slice-2 status
> drm/i915/icl: Enable 2nd DBuf slice only when needed
> drm/i915/icl: update ddb entry start/end mask during hw ddb readout
>
> drivers/gpu/drm/i915/i915_drv.h | 1 +
> drivers/gpu/drm/i915/i915_reg.h | 3 +
> drivers/gpu/drm/i915/intel_display.c | 15 +++++
> drivers/gpu/drm/i915/intel_drv.h | 6 ++
> drivers/gpu/drm/i915/intel_pm.c | 103 ++++++++++++++++++++++++++------
> drivers/gpu/drm/i915/intel_runtime_pm.c | 69 ++++++++++++++++-----
> 6 files changed, 165 insertions(+), 32 deletions(-)
>
> --
> 2.16.2
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 0/3] Optimize use of DBuf slices
@ 2018-04-26 14:25 Mahesh Kumar
2018-04-28 0:15 ` Rodrigo Vivi
0 siblings, 1 reply; 13+ messages in thread
From: Mahesh Kumar @ 2018-04-26 14:25 UTC (permalink / raw)
To: intel-gfx; +Cc: paulo.r.zanoni, lucas.demarchi, rodrigo.vivi
Patches in this series were originally part of series:
https://patchwork.freedesktop.org/series/36993/
Reposting it here after rebase
use kernel types u8/u16 etc instead of uint8_t
Changes:
- Rebase the series
Mahesh Kumar (3):
drm/i915/icl: track dbuf slice-2 status
drm/i915/icl: Enable 2nd DBuf slice only when needed
drm/i915/icl: update ddb entry start/end mask during hw ddb readout
drivers/gpu/drm/i915/i915_drv.h | 1 +
drivers/gpu/drm/i915/i915_reg.h | 3 +
drivers/gpu/drm/i915/intel_display.c | 15 +++++
drivers/gpu/drm/i915/intel_drv.h | 6 ++
drivers/gpu/drm/i915/intel_pm.c | 103 ++++++++++++++++++++++++++------
drivers/gpu/drm/i915/intel_runtime_pm.c | 69 ++++++++++++++++-----
6 files changed, 165 insertions(+), 32 deletions(-)
--
2.16.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 0/3] Optimize use of DBuf slices
@ 2018-04-05 9:17 Mahesh Kumar
0 siblings, 0 replies; 13+ messages in thread
From: Mahesh Kumar @ 2018-04-05 9:17 UTC (permalink / raw)
To: intel-gfx; +Cc: paulo.r.zanoni, lucas.demarchi, rodrigo.vivi
Patches in this series were originally part of series:
https://patchwork.freedesktop.org/series/36993/
Reposting it here after rebase
use kernel types u8/u16 etc instead of uint8_t
Mahesh Kumar (3):
drm/i915/icl: track dbuf slice-2 status
drm/i915/icl: Enable 2nd DBuf slice only when needed
drm/i915/icl: update ddb entry start/end mask during hw ddb readout
drivers/gpu/drm/i915/i915_drv.h | 1 +
drivers/gpu/drm/i915/i915_reg.h | 3 ++
drivers/gpu/drm/i915/intel_display.c | 15 ++++++
drivers/gpu/drm/i915/intel_drv.h | 6 +++
drivers/gpu/drm/i915/intel_pm.c | 95 +++++++++++++++++++++++++++------
drivers/gpu/drm/i915/intel_runtime_pm.c | 69 +++++++++++++++++++-----
6 files changed, 160 insertions(+), 29 deletions(-)
--
2.16.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2018-04-28 0:15 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-04-05 6:00 [PATCH 0/3] Optimize use of DBuf slices Mahesh Kumar
2018-04-05 6:00 ` [PATCH 1/3] drm/i915/icl: track dbuf slice-2 status Mahesh Kumar
2018-04-05 8:25 ` Jani Nikula
2018-04-05 9:22 ` Kumar, Mahesh
2018-04-05 6:00 ` [PATCH 2/3] drm/i915/icl: Enable 2nd DBuf slice only when needed Mahesh Kumar
2018-04-25 21:01 ` Rodrigo Vivi
2018-04-05 6:00 ` [PATCH 3/3] drm/i915/icl: update ddb entry start/end mask during hw ddb readout Mahesh Kumar
2018-04-25 21:10 ` Rodrigo Vivi
2018-04-05 6:14 ` ✗ Fi.CI.CHECKPATCH: warning for Optimize use of DBuf slices Patchwork
2018-04-05 6:30 ` ✗ Fi.CI.BAT: " Patchwork
2018-04-05 9:17 [PATCH 0/3] " Mahesh Kumar
2018-04-26 14:25 Mahesh Kumar
2018-04-28 0:15 ` Rodrigo Vivi
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.