* [PATCH v3 0/1] Add sysfs interface to control class-of-service
@ 2019-10-07 16:52 Prathap Kumar Valsan
  2019-10-07 16:52 ` [PATCH v3 1/1] drm/i915/ehl: " Prathap Kumar Valsan
                   ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Prathap Kumar Valsan @ 2019-10-07 16:52 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris P Wilson

For Gen11, the MOCS registers are part of the context register state. This
means updating the CLOS also requires updating the context state of active
contexts.

v3: Rebase
v2: Updated the interface to use two sysfs files (Joonas)
    - The Gen12 PCode interface is not yet ready to read the way mask,
      so TGL support was removed and support for Gen11 added instead.
    - Updating the MOCS on Gen11 also requires changing the context image of
      existing contexts.
      Referred to gen8_configure_all_contexts() as suggested by Chris.

Prathap Kumar Valsan (1):
  drm/i915/ehl: Add sysfs interface to control class-of-service

 drivers/gpu/drm/i915/gt/intel_lrc.c     |   7 +
 drivers/gpu/drm/i915/gt/intel_lrc_reg.h |   1 +
 drivers/gpu/drm/i915/gt/intel_mocs.c    | 216 +++++++++++++++++++++++-
 drivers/gpu/drm/i915/gt/intel_mocs.h    |   6 +-
 drivers/gpu/drm/i915/i915_drv.h         |   8 +
 drivers/gpu/drm/i915/i915_reg.h         |   1 +
 drivers/gpu/drm/i915/i915_sysfs.c       | 100 +++++++++++
 7 files changed, 337 insertions(+), 2 deletions(-)

-- 
2.20.1

* [PATCH v3 1/1] drm/i915/ehl: Add sysfs interface to control class-of-service
  2019-10-07 16:52 [PATCH v3 0/1] Add sysfs interface to control class-of-service Prathap Kumar Valsan
@ 2019-10-07 16:52 ` Prathap Kumar Valsan
  2019-10-07 20:37   ` Chris Wilson
  2019-10-07 16:52 ` ✗ Fi.CI.SPARSE: warning for " Patchwork
  2019-10-07 17:15 ` ✗ Fi.CI.BAT: failure " Patchwork
  2 siblings, 1 reply; 6+ messages in thread
From: Prathap Kumar Valsan @ 2019-10-07 16:52 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris P Wilson

To provide shared last-level-cache isolation to CPU workloads running
concurrently with GPU workloads, the GPU allocation of cache lines needs
to be restricted to certain ways. The GPU hardware currently supports four
class-of-service (CLOS) levels, each with an associated way mask. Each LLC
MOCS register has a field to select the CLOS level, so in order to globally
set the GPU to a given CLOS level, the driver needs to program the entire
MOCS table.
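
As a rough sketch (the helper name below is hypothetical; LE_COS_MASK and
FIELD_PREP are the field helpers this patch adds and uses, with the CLOS
selector in bits 16:15 of each control value), selecting a CLOS level boils
down to rewriting the same two-bit field in every MOCS control value:

	/* Illustrative only -- mirrors the per-entry rewrite done in this patch. */
	static u32 mocs_set_clos(u32 mocs_control, unsigned int active_clos)
	{
		u32 value = mocs_control;

		value &= ~LE_COS_MASK;				/* clear bits 16:15 */
		value |= FIELD_PREP(LE_COS_MASK, active_clos);	/* CLOS level 0..3 */
		return value;
	}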

The hardware supports reading the way-mask configuration available to the
GPU via a BIOS PCode interface. The sysfs interface consists of two
files--llc_clos_modes and llc_clos. The file llc_clos_modes is read-only
and lists the available way masks. The file llc_clos is read/write: reading
it shows the currently active way mask, and writing a supported way mask
makes it the active one for the GPU.

Note of Caution: Restricting cache ways using this mechanism presents a
larger attack surface for side-channel attacks.

Example usage:
> cat /sys/class/drm/card0/llc_clos_modes
0xfff 0xfc0 0xc00 0x800

> cat /sys/class/drm/card0/llc_clos
0xfff

Update to a new CLOS:
echo "0x800" > /sys/class/drm/card0/llc_clos
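
Reading the file back (assuming the write was accepted) should then report
the newly selected way mask:

> cat /sys/class/drm/card0/llc_clos
0x800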

Signed-off-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c     |   7 +
 drivers/gpu/drm/i915/gt/intel_lrc_reg.h |   1 +
 drivers/gpu/drm/i915/gt/intel_mocs.c    | 216 +++++++++++++++++++++++-
 drivers/gpu/drm/i915/gt/intel_mocs.h    |   6 +-
 drivers/gpu/drm/i915/i915_drv.h         |   8 +
 drivers/gpu/drm/i915/i915_reg.h         |   1 +
 drivers/gpu/drm/i915/i915_sysfs.c       | 100 +++++++++++
 7 files changed, 337 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 468438fb47af..054051969d00 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2137,6 +2137,13 @@ __execlists_update_reg_state(const struct intel_context *ce,
 			intel_sseu_make_rpcs(engine->i915, &ce->sseu);
 
 		i915_oa_init_reg_state(ce, engine);
+		/*
+		 * Gen11+ wants to support update of LLC class-of-service via
+		 * sysfs interface. CLOS is defined in MOCS registers and for
+		 * Gen11, MOCS is part of context register state.
+		 */
+		if (IS_GEN(engine->i915, 11))
+			intel_mocs_init_reg_state(ce);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 06ab0276e10e..f07a6262217c 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -28,6 +28,7 @@
 #define CTX_R_PWR_CLK_STATE		(0x42 + 1)
 
 #define GEN9_CTX_RING_MI_MODE		0x54
+#define GEN11_CTX_GFX_MOCS_BASE		0x4F2
 
 /* GEN12+ Reg State Context */
 #define GEN12_CTX_BB_PER_CTX_PTR		(0x12 + 1)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 728704bbbe18..a2669bee9b1b 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -26,6 +26,9 @@
 #include "intel_gt.h"
 #include "intel_mocs.h"
 #include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_sideband.h"
+#include "gem/i915_gem_context.h"
 
 /* structures required */
 struct drm_i915_mocs_entry {
@@ -40,6 +43,7 @@ struct drm_i915_mocs_table {
 	const struct drm_i915_mocs_entry *table;
 };
 
+#define ctx_mocsN(N) (GEN11_CTX_GFX_MOCS_BASE + 2 * (N) + 1)
 /* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
 #define _LE_CACHEABILITY(value)	((value) << 0)
 #define _LE_TGT_CACHE(value)	((value) << 2)
@@ -51,6 +55,7 @@ struct drm_i915_mocs_table {
 #define LE_SCF(value)		((value) << 14)
 #define LE_COS(value)		((value) << 15)
 #define LE_SSE(value)		((value) << 17)
+#define LE_COS_MASK		GENMASK(16, 15)
 
 /* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
 #define L3_ESC(value)		((value) << 0)
@@ -377,6 +382,7 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
 	struct intel_gt *gt = engine->gt;
 	struct intel_uncore *uncore = gt->uncore;
 	struct drm_i915_mocs_table table;
+	unsigned int active_clos;
 	unsigned int index;
 	u32 unused_value;
 
@@ -390,11 +396,16 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
 	if (!get_mocs_settings(gt, &table))
 		return;
 
+	active_clos = engine->i915->clos.active_clos;
 	/* Set unused values to PTE */
 	unused_value = table.table[I915_MOCS_PTE].control_value;
+	unused_value &= ~LE_COS_MASK;
+	unused_value |= FIELD_PREP(LE_COS_MASK, active_clos);
 
 	for (index = 0; index < table.size; index++) {
 		u32 value = get_entry_control(&table, index);
+		value &= ~LE_COS_MASK;
+		value |= FIELD_PREP(LE_COS_MASK, active_clos);
 
 		intel_uncore_write_fw(uncore,
 				      mocs_register(engine->id, index),
@@ -408,7 +419,7 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
 				      unused_value);
 }
 
-static void intel_mocs_init_global(struct intel_gt *gt)
+void intel_mocs_init_global(struct intel_gt *gt)
 {
 	struct intel_uncore *uncore = gt->uncore;
 	struct drm_i915_mocs_table table;
@@ -442,6 +453,7 @@ static int emit_mocs_control_table(struct i915_request *rq,
 				   const struct drm_i915_mocs_table *table)
 {
 	enum intel_engine_id engine = rq->engine->id;
+	unsigned int active_clos;
 	unsigned int index;
 	u32 unused_value;
 	u32 *cs;
@@ -449,8 +461,11 @@ static int emit_mocs_control_table(struct i915_request *rq,
 	if (GEM_WARN_ON(table->size > table->n_entries))
 		return -ENODEV;
 
+	active_clos = rq->i915->clos.active_clos;
 	/* Set unused values to PTE */
 	unused_value = table->table[I915_MOCS_PTE].control_value;
+	unused_value &= ~LE_COS_MASK;
+	unused_value |= FIELD_PREP(LE_COS_MASK, active_clos);
 
 	cs = intel_ring_begin(rq, 2 + 2 * table->n_entries);
 	if (IS_ERR(cs))
@@ -460,6 +475,8 @@ static int emit_mocs_control_table(struct i915_request *rq,
 
 	for (index = 0; index < table->size; index++) {
 		u32 value = get_entry_control(table, index);
+		value &= ~LE_COS_MASK;
+		value |= FIELD_PREP(LE_COS_MASK, active_clos);
 
 		*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
 		*cs++ = value;
@@ -625,10 +642,207 @@ int intel_mocs_emit(struct i915_request *rq)
 	return 0;
 }
 
+void intel_mocs_init_reg_state(const struct intel_context *ce)
+{
+	struct drm_i915_private *i915 = ce->engine->i915;
+	u32 *reg_state = ce->lrc_reg_state;
+	struct drm_i915_mocs_table t;
+	unsigned int active_clos;
+	u32 value;
+	int i;
+
+	get_mocs_settings(ce->engine->gt, &t);
+
+	active_clos = i915->clos.active_clos;
+
+	if (active_clos == FIELD_GET(LE_COS_MASK, get_entry_control(&t, 0)))
+		return;
+
+	for (i = 0; i < t.n_entries; i++) {
+		value = reg_state[ctx_mocsN(i)];
+		value &= ~LE_COS_MASK;
+		value |= FIELD_PREP(LE_COS_MASK, active_clos);
+		reg_state[ctx_mocsN(i)] = value;
+	}
+}
+
+static int
+mocs_store_clos(struct i915_request *rq,
+		struct intel_context *ce)
+{
+	struct drm_i915_mocs_table t;
+	unsigned int count, active_clos, index;
+	u32 offset;
+	u32 value;
+	u32 *cs;
+
+	if (!get_mocs_settings(rq->engine->gt, &t))
+		return -ENODEV;
+
+	count = t.n_entries;
+	active_clos = rq->i915->clos.active_clos;
+	cs = intel_ring_begin(rq, 4 * count);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+
+	for (index = 0; index < count; index++) {
+		value = ce->lrc_reg_state[ctx_mocsN(index)];
+		value &= ~LE_COS_MASK;
+		value |= FIELD_PREP(LE_COS_MASK, active_clos);
+
+		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+		*cs++ = offset + ctx_mocsN(index) * sizeof(uint32_t);
+		*cs++ = 0;
+		*cs++ = value;
+	}
+
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
+
+static int modify_context_mocs(struct intel_context *ce)
+{
+	struct i915_request *rq;
+	int err;
+
+	lockdep_assert_held(&ce->pin_mutex);
+
+	rq = i915_request_create(ce->engine->kernel_context);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	/* Serialise with the remote context */
+	err = intel_context_prepare_remote_request(ce, rq);
+	if (err == 0)
+		err = mocs_store_clos(rq, ce);
+
+	i915_request_add(rq);
+	return err;
+}
+
+static int intel_mocs_configure_context(struct i915_gem_context *ctx)
+{
+	struct i915_gem_engines_iter it;
+	struct intel_context *ce;
+	int err = 0;
+
+	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+		GEM_BUG_ON(ce == ce->engine->kernel_context);
+
+		if (ce->engine->class != RENDER_CLASS)
+			continue;
+
+		err = intel_context_lock_pinned(ce);
+		if (err)
+			break;
+
+		if (intel_context_is_pinned(ce))
+			err = modify_context_mocs(ce);
+
+		intel_context_unlock_pinned(ce);
+		if (err)
+			break;
+	}
+	i915_gem_context_unlock_engines(ctx);
+
+	return err;
+}
+
+static int intel_mocs_configure_all_contexts(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_engine_cs *engine;
+	struct i915_gem_context *ctx;
+	enum intel_engine_id id;
+	int err;
+
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	/*
+	 * The MOCS registers of the render engine are saved to and restored
+	 * from the context image.
+	 * So any MOCS update that should be reflected in existing contexts
+	 * requires updating their context images.
+	 */
+	list_for_each_entry(ctx, &i915->gem.contexts.list, link) {
+		if (ctx == i915->kernel_context)
+			continue;
+
+		err = intel_mocs_configure_context(ctx);
+		if (err)
+			return err;
+	}
+
+	/*
+	 * After updating all other contexts, update the render context image
+	 * of the kernel context. Also update the MOCS of non-render engines.
+	 */
+
+	for_each_engine(engine, i915, id) {
+		struct i915_request *rq;
+		struct drm_i915_mocs_table t;
+
+		rq = i915_request_create(engine->kernel_context);
+		if (IS_ERR(rq))
+			return PTR_ERR(rq);
+
+		get_mocs_settings(rq->engine->gt, &t);
+		err = emit_mocs_control_table(rq, &t);
+		if (err) {
+			i915_request_skip(rq, err);
+			i915_request_add(rq);
+			return err;
+		}
+
+		i915_request_add(rq);
+	}
+
+	return 0;
+}
+
+int intel_mocs_update_clos(struct intel_gt *gt)
+{
+	return intel_mocs_configure_all_contexts(gt);
+}
+
+static void intel_read_clos_way_mask(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct drm_i915_mocs_table table;
+	int ret, i;
+	u32 val;
+
+	if (!get_mocs_settings(gt, &table))
+		return;
+
+	/* CLOS is the same for all entries, so it's enough to read one */
+	i915->clos.active_clos = FIELD_GET(LE_COS_MASK,
+					   get_entry_control(&table, 0));
+	for (i = 0; i < NUM_OF_CLOS; i++) {
+		val = i;
+		ret = sandybridge_pcode_read(i915,
+					     ICL_PCODE_LLC_COS_WAY_MASK_INFO,
+					     &val, NULL);
+		if (ret) {
+			DRM_ERROR("Mailbox read error = %d\n", ret);
+			return;
+		}
+
+		i915->clos.way_mask[i] = val;
+	}
+
+	i915->clos.support_way_mask_read = true;
+}
+
 void intel_mocs_init(struct intel_gt *gt)
 {
 	intel_mocs_init_l3cc_table(gt);
 
+	if (IS_GEN(gt->i915, 11))
+		intel_read_clos_way_mask(gt);
+
 	if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
 		intel_mocs_init_global(gt);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 2ae816b7ca19..65566457408e 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -49,13 +49,17 @@
  * context handling keep the MOCS in step.
  */
 
-struct i915_request;
 struct intel_engine_cs;
+struct intel_context;
+struct i915_request;
 struct intel_gt;
 
 void intel_mocs_init(struct intel_gt *gt);
 void intel_mocs_init_engine(struct intel_engine_cs *engine);
 
 int intel_mocs_emit(struct i915_request *rq);
+int intel_mocs_update_clos(struct intel_gt *gt);
+
+void intel_mocs_init_reg_state(const struct intel_context *ce);
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cde4c7fb5570..01c5f7fec634 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1604,6 +1604,14 @@ struct drm_i915_private {
 		bool distrust_bios_wm;
 	} wm;
 
+	/* Last Level Cache  Class of Service */
+	struct {
+		bool support_way_mask_read;
+		u8 active_clos;
+#define NUM_OF_CLOS 4
+		u16 way_mask[NUM_OF_CLOS];
+	} clos;
+
 	struct dram_info {
 		bool valid;
 		bool is_16gb_dimm;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6d67bd238cfe..0a7937234975 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8859,6 +8859,7 @@ enum {
 #define   ICL_PCODE_MEM_SUBSYSYSTEM_INFO	0xd
 #define     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO	(0x0 << 8)
 #define     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point)	(((point) << 16) | (0x1 << 8))
+#define   ICL_PCODE_LLC_COS_WAY_MASK_INFO	0x1d
 #define   GEN6_PCODE_READ_D_COMP		0x10
 #define   GEN6_PCODE_WRITE_D_COMP		0x11
 #define   HSW_PCODE_DE_WRITE_FREQ_REQ		0x17
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index bf039b8ba593..8344ad2380cc 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -36,6 +36,7 @@
 #include "i915_sysfs.h"
 #include "intel_pm.h"
 #include "intel_sideband.h"
+#include "gt/intel_mocs.h"
 
 static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
 {
@@ -255,6 +256,88 @@ static const struct bin_attribute dpf_attrs_1 = {
 	.private = (void *)1
 };
 
+static ssize_t llc_clos_show(struct device *kdev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	ssize_t len = 0;
+	int active_clos;
+
+	active_clos = dev_priv->clos.active_clos;
+	len += snprintf(buf + len, PAGE_SIZE, "0x%x\n",
+			dev_priv->clos.way_mask[active_clos]);
+
+	return len;
+}
+
+static ssize_t llc_clos_store(struct device *kdev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct drm_device *dev = &dev_priv->drm;
+	u8 active_clos, new_clos, clos_index;
+	bool valid_mask = false;
+	ssize_t ret;
+	u16 way_mask;
+
+	ret = kstrtou16(buf, 0, &way_mask);
+	if (ret)
+		return ret;
+
+	active_clos = dev_priv->clos.active_clos;
+
+	if (dev_priv->clos.way_mask[active_clos] == way_mask)
+		return count;
+
+	for (clos_index = 0; clos_index < NUM_OF_CLOS; clos_index++) {
+		if (dev_priv->clos.way_mask[clos_index] == way_mask) {
+			new_clos = clos_index;
+			valid_mask = true;
+			break;
+		}
+	}
+
+	if (!valid_mask)
+		return -EINVAL;
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		return ret;
+
+	dev_priv->clos.active_clos = new_clos;
+	ret = intel_mocs_update_clos(&dev_priv->gt);
+	if (ret) {
+		DRM_ERROR("Failed to update Class of service\n");
+		dev_priv->clos.active_clos = active_clos;
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return count;
+}
+
+static ssize_t llc_clos_modes_show(struct device *kdev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	ssize_t len = 0;
+	int i;
+
+	for (i = 0; i < NUM_OF_CLOS; i++)
+		len += snprintf(buf + len, PAGE_SIZE - len, "0x%x ",
+				dev_priv->clos.way_mask[i]);
+
+	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+
+	return len;
+}
+
+static DEVICE_ATTR_RW(llc_clos);
+static DEVICE_ATTR_RO(llc_clos_modes);
+
 static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
@@ -574,6 +657,18 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 	struct device *kdev = dev_priv->drm.primary->kdev;
 	int ret;
 
+	if (dev_priv->clos.support_way_mask_read) {
+		ret = sysfs_create_file(&kdev->kobj,
+					&dev_attr_llc_clos.attr);
+		if (ret)
+			DRM_ERROR("LLC COS sysfs setup failed\n");
+
+		ret = sysfs_create_file(&kdev->kobj,
+					&dev_attr_llc_clos_modes.attr);
+		if (ret)
+			DRM_ERROR("LLC COS sysfs setup failed\n");
+	}
+
 #ifdef CONFIG_PM
 	if (HAS_RC6(dev_priv)) {
 		ret = sysfs_merge_group(&kdev->kobj,
@@ -624,6 +719,11 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
 
 	i915_teardown_error_capture(kdev);
 
+	if (dev_priv->clos.support_way_mask_read) {
+		sysfs_remove_file(&kdev->kobj, &dev_attr_llc_clos.attr);
+		sysfs_remove_file(&kdev->kobj, &dev_attr_llc_clos_modes.attr);
+	}
+
 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		sysfs_remove_files(&kdev->kobj, vlv_attrs);
 	else
-- 
2.20.1


* ✗ Fi.CI.SPARSE: warning for Add sysfs interface to control class-of-service
  2019-10-07 16:52 [PATCH v3 0/1] Add sysfs interface to control class-of-service Prathap Kumar Valsan
  2019-10-07 16:52 ` [PATCH v3 1/1] drm/i915/ehl: " Prathap Kumar Valsan
@ 2019-10-07 16:52 ` Patchwork
  2019-10-07 17:15 ` ✗ Fi.CI.BAT: failure " Patchwork
  2 siblings, 0 replies; 6+ messages in thread
From: Patchwork @ 2019-10-07 16:52 UTC (permalink / raw)
  To: Prathap Kumar Valsan; +Cc: intel-gfx

== Series Details ==

Series: Add sysfs interface to control class-of-service
URL   : https://patchwork.freedesktop.org/series/67700/
State : warning

== Summary ==

$ dim sparse origin/drm-tip
Sparse version: v0.6.0
Commit: drm/i915/ehl: Add sysfs interface to control class-of-service
+drivers/gpu/drm/i915/gt/intel_mocs.c:422:6: warning: symbol 'intel_mocs_init_global' was not declared. Should it be static?


* ✗ Fi.CI.BAT: failure for Add sysfs interface to control class-of-service
  2019-10-07 16:52 [PATCH v3 0/1] Add sysfs interface to control class-of-service Prathap Kumar Valsan
  2019-10-07 16:52 ` [PATCH v3 1/1] drm/i915/ehl: " Prathap Kumar Valsan
  2019-10-07 16:52 ` ✗ Fi.CI.SPARSE: warning for " Patchwork
@ 2019-10-07 17:15 ` Patchwork
  2 siblings, 0 replies; 6+ messages in thread
From: Patchwork @ 2019-10-07 17:15 UTC (permalink / raw)
  To: Prathap Kumar Valsan; +Cc: intel-gfx

== Series Details ==

Series: Add sysfs interface to control class-of-service
URL   : https://patchwork.freedesktop.org/series/67700/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_7024 -> Patchwork_14694
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_14694 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_14694, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14694/index.html

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_14694:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_selftest@live_execlists:
    - fi-skl-6260u:       [PASS][1] -> [DMESG-WARN][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7024/fi-skl-6260u/igt@i915_selftest@live_execlists.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14694/fi-skl-6260u/igt@i915_selftest@live_execlists.html

  
Known issues
------------

  Here are the changes found in Patchwork_14694 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_selftest@live_execlists:
    - fi-icl-u3:          [PASS][3] -> [DMESG-FAIL][4] ([fdo#111872])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7024/fi-icl-u3/igt@i915_selftest@live_execlists.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14694/fi-icl-u3/igt@i915_selftest@live_execlists.html

  * igt@prime_vgem@basic-read:
    - fi-icl-u3:          [PASS][5] -> [DMESG-WARN][6] ([fdo#107724])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7024/fi-icl-u3/igt@prime_vgem@basic-read.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14694/fi-icl-u3/igt@prime_vgem@basic-read.html

  
#### Possible fixes ####

  * igt@gem_mmap_gtt@basic-write-gtt:
    - fi-icl-u3:          [DMESG-WARN][7] ([fdo#107724]) -> [PASS][8] +1 similar issue
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7024/fi-icl-u3/igt@gem_mmap_gtt@basic-write-gtt.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14694/fi-icl-u3/igt@gem_mmap_gtt@basic-write-gtt.html

  * igt@kms_chamelium@dp-edid-read:
    - fi-kbl-7500u:       [WARN][9] ([fdo#109483]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7024/fi-kbl-7500u/igt@kms_chamelium@dp-edid-read.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14694/fi-kbl-7500u/igt@kms_chamelium@dp-edid-read.html

  
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#109483]: https://bugs.freedesktop.org/show_bug.cgi?id=109483
  [fdo#111872]: https://bugs.freedesktop.org/show_bug.cgi?id=111872


Participating hosts (52 -> 44)
------------------------------

  Missing    (8): fi-ilk-m540 fi-tgl-u fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7024 -> Patchwork_14694

  CI-20190529: 20190529
  CI_DRM_7024: b149aba92ace27b28e068c2541270653c23bca75 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5215: 880c8d3c9831349a269ac6822c8d44e80807089f @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14694: c0fc7c3ea79f2cfb9c05e4e26e3cbd76d74652a2 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

c0fc7c3ea79f drm/i915/ehl: Add sysfs interface to control class-of-service

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14694/index.html

* Re: [PATCH v3 1/1] drm/i915/ehl: Add sysfs interface to control class-of-service
  2019-10-07 16:52 ` [PATCH v3 1/1] drm/i915/ehl: " Prathap Kumar Valsan
@ 2019-10-07 20:37   ` Chris Wilson
  2019-10-15  7:38     ` Kumar Valsan, Prathap
  0 siblings, 1 reply; 6+ messages in thread
From: Chris Wilson @ 2019-10-07 20:37 UTC (permalink / raw)
  To: Prathap Kumar Valsan, intel-gfx

Quoting Prathap Kumar Valsan (2019-10-07 17:52:09)
> To provide shared last-level-cache isolation to cpu workloads running
> concurrently with gpu workloads, the gpu allocation of cache lines needs
> to be restricted to certain ways. Currently GPU hardware supports four
> class-of-service(CLOS) levels and there is an associated way-mask for
> each CLOS. Each LLC MOCS register has a field to select the clos level.
> So in-order to globally set the gpu to a clos level, driver needs
> to program entire MOCS table.
> 
> Hardware supports reading supported way-mask configuration for GPU using
> a bios pcode interface. This interface has two files--llc_clos_modes and
> llc_clos. The file llc_clos_modes is read only file and will list the
> available way masks. The file llc_clos is read/write and will show the
> currently active way mask and writing a new way mask will update the
> active way mask of the gpu.
> 
> Note of Caution: Restricting cache ways using this mechanism presents a
> larger attack surface for side-channel attacks.
> 
> Example usage:
> > cat /sys/class/drm/card0/llc_clos_modes
> 0xfff 0xfc0 0xc00 0x800
> 
> >cat /sys/class/drm/card0/llc_clos
> 0xfff
> 
> Update to new clos
> echo "0x800" > /sys/class/drm/card0/llc_clos

So the first question is whether this is global on the device or local
to the GT.
 
> Signed-off-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
> ---
>  drivers/gpu/drm/i915/gt/intel_lrc.c     |   7 +
>  drivers/gpu/drm/i915/gt/intel_lrc_reg.h |   1 +
>  drivers/gpu/drm/i915/gt/intel_mocs.c    | 216 +++++++++++++++++++++++-
>  drivers/gpu/drm/i915/gt/intel_mocs.h    |   6 +-
>  drivers/gpu/drm/i915/i915_drv.h         |   8 +
>  drivers/gpu/drm/i915/i915_reg.h         |   1 +
>  drivers/gpu/drm/i915/i915_sysfs.c       | 100 +++++++++++
>  7 files changed, 337 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 468438fb47af..054051969d00 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -2137,6 +2137,13 @@ __execlists_update_reg_state(const struct intel_context *ce,
>                         intel_sseu_make_rpcs(engine->i915, &ce->sseu);
>  
>                 i915_oa_init_reg_state(ce, engine);
> +               /*
> +                * Gen11+ wants to support update of LLC class-of-service via
> +                * sysfs interface. CLOS is defined in MOCS registers and for
> +                * Gen11, MOCS is part of context resgister state.
> +                */
> +               if (IS_GEN(engine->i915, 11))
> +                       intel_mocs_init_reg_state(ce);

Do the filtering in intel_mocs_init_reg_state(). The intel_ prefix says
it should be common to all.
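
A minimal sketch of that suggestion (hypothetical, not part of the patch; it
reuses the helper signature and gen check the patch already defines):

	void intel_mocs_init_reg_state(const struct intel_context *ce)
	{
		/* keep the platform filter inside the intel_* helper */
		if (!IS_GEN(ce->engine->i915, 11))
			return;

		/* ... per-entry CLOS field rewrite as in the patch ... */
	}

with the caller in __execlists_update_reg_state() then invoking it
unconditionally.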

Not IS_ELKHARTLAKE(), which is implied by the subject?

> +static int
> +mocs_store_clos(struct i915_request *rq,
> +               struct intel_context *ce)
> +{
> +       struct drm_i915_mocs_table t;
> +       unsigned int count, active_clos, index;
> +       u32 offset;
> +       u32 value;
> +       u32 *cs;
> +
> +       if (!get_mocs_settings(rq->engine->gt, &t))
> +               return -ENODEV;
> +
> +       count = t.n_entries;
> +       active_clos = rq->i915->clos.active_clos;
> +       cs = intel_ring_begin(rq, 4 * count);
> +       if (IS_ERR(cs))
> +               return PTR_ERR(cs);
> +
> +       offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
> +
> +       for (index = 0; index < count; index++) {
> +               value = ce->lrc_reg_state[ctx_mocsN(index)];

Their context image is volatile at this point. They are pinned and
active. If you must do a rmw, do it on the GPU. But do we not know the
full value (as I don't expect it to be nonpriv)? [If doing rmw on the
GPU, we'll probably have to use a secure batch here to avoid running out
of ringspace.]

> +               value &= ~LE_COS_MASK;
> +               value |= FIELD_PREP(LE_COS_MASK, active_clos);
> +
> +               *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
> +               *cs++ = offset + ctx_mocsN(index) * sizeof(uint32_t);
> +               *cs++ = 0;
> +               *cs++ = value;
> +       }
> +
> +       intel_ring_advance(rq, cs);
> +
> +       return 0;
> +}
> +
> +static int modify_context_mocs(struct intel_context *ce)
> +{
> +       struct i915_request *rq;
> +       int err;
> +
> +       lockdep_assert_held(&ce->pin_mutex);
> +
> +       rq = i915_request_create(ce->engine->kernel_context);
> +       if (IS_ERR(rq))
> +               return PTR_ERR(rq);
> +
> +       /* Serialise with the remote context */
> +       err = intel_context_prepare_remote_request(ce, rq);
> +       if (err == 0)
> +               err = mocs_store_clos(rq, ce);
> +
> +       i915_request_add(rq);
> +       return err;
> +}
> +
> +static int intel_mocs_configure_context(struct i915_gem_context *ctx)
> +{
> +       struct i915_gem_engines_iter it;
> +       struct intel_context *ce;
> +       int err = 0;
> +
> +       for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
> +               GEM_BUG_ON(ce == ce->engine->kernel_context);
> +
> +               if (ce->engine->class != RENDER_CLASS)
> +                       continue;
> +
> +               err = intel_context_lock_pinned(ce);
> +               if (err)
> +                       break;
> +
> +               if (intel_context_is_pinned(ce))
> +                       err = modify_context_mocs(ce);
> +
> +               intel_context_unlock_pinned(ce);
> +               if (err)
> +                       break;
> +       }
> +       i915_gem_context_unlock_engines(ctx);
> +
> +       return err;
> +}
> +
> +static int intel_mocs_configure_all_contexts(struct intel_gt *gt)

So this is on the GT,

> +{
> +       struct drm_i915_private *i915 = gt->i915;
> +       struct intel_engine_cs *engine;
> +       struct i915_gem_context *ctx;
> +       enum intel_engine_id id;
> +       int err;
> +
> +       lockdep_assert_held(&i915->drm.struct_mutex);

Wrong lock entirely, you need the context lock for walking the
i915->gem.contexts.list.

> +       /*
> +        * MOCS registers of render engine are context saved and restored to and
> +        * from a context image.
> +        * So for any MOCS update to reflect on the existing contexts requires
> +        * updating the context image.
> +        */
> +       list_for_each_entry(ctx, &i915->gem.contexts.list, link) {
> +               if (ctx == i915->kernel_context)
> +                       continue;
> +
> +               err = intel_mocs_configure_context(ctx);

but this is global,

> +               if (err)
> +                       return err;
> +       }
> +
> +       /*
> +        * After updating all other contexts, update render context image of
> +        * kernel context. Also update the MOCS of non-render engines.
> +        */
> +
> +       for_each_engine(engine, i915, id) {

and here again you are on the gt.

> +               struct i915_request *rq;
> +               struct drm_i915_mocs_table t;
> +
> +               rq = i915_request_create(engine->kernel_context);
> +               if (IS_ERR(rq))
> +                       return PTR_ERR(rq);
> +
> +               get_mocs_settings(rq->engine->gt, &t);
> +               err = emit_mocs_control_table(rq, &t);
> +               if (err) {
> +                       i915_request_skip(rq, err);
> +                       i915_request_add(rq);
> +                       return err;
> +               }
> +
> +               i915_request_add(rq);
> +       }
> +
> +       return 0;
> +}

> diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
> index 2ae816b7ca19..65566457408e 100644
> --- a/drivers/gpu/drm/i915/gt/intel_mocs.h
> +++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
> @@ -49,13 +49,17 @@
>   * context handling keep the MOCS in step.
>   */
>  
> -struct i915_request;
>  struct intel_engine_cs;
> +struct intel_context;
> +struct i915_request;
>  struct intel_gt;

Our OCD is to keep these in alphabetical order.
-Chris

* Re: [PATCH v3 1/1] drm/i915/ehl: Add sysfs interface to control class-of-service
  2019-10-07 20:37   ` Chris Wilson
@ 2019-10-15  7:38     ` Kumar Valsan, Prathap
  0 siblings, 0 replies; 6+ messages in thread
From: Kumar Valsan, Prathap @ 2019-10-15  7:38 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On Mon, Oct 07, 2019 at 09:37:20PM +0100, Chris Wilson wrote:
> Quoting Prathap Kumar Valsan (2019-10-07 17:52:09)
> > To provide shared last-level-cache isolation to cpu workloads running
> > concurrently with gpu workloads, the gpu allocation of cache lines needs
> > to be restricted to certain ways. Currently GPU hardware supports four
> > class-of-service(CLOS) levels and there is an associated way-mask for
> > each CLOS. Each LLC MOCS register has a field to select the clos level.
> > So in-order to globally set the gpu to a clos level, driver needs
> > to program entire MOCS table.
> > 
> > Hardware supports reading supported way-mask configuration for GPU using
> > a bios pcode interface. This interface has two files--llc_clos_modes and
> > llc_clos. The file llc_clos_modes is read only file and will list the
> > available way masks. The file llc_clos is read/write and will show the
> > currently active way mask and writing a new way mask will update the
> > active way mask of the gpu.
> > 
> > Note of Caution: Restricting cache ways using this mechanism presents a
> > larger attack surface for side-channel attacks.
> > 
> > Example usage:
> > > cat /sys/class/drm/card0/llc_clos_modes
> > 0xfff 0xfc0 0xc00 0x800
> > 
> > >cat /sys/class/drm/card0/llc_clos
> > 0xfff
> > 
> > Update to new clos
> > echo "0x800" > /sys/class/drm/card0/llc_clos
> 
> So the first question is whether this is global on the device or local
> to the GT.

It is global to the device. I have sent an updated patch, which explicitly
states this in the commit message.
>  
> > Signed-off-by: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
> > ---
> >  drivers/gpu/drm/i915/gt/intel_lrc.c     |   7 +
> >  drivers/gpu/drm/i915/gt/intel_lrc_reg.h |   1 +
> >  drivers/gpu/drm/i915/gt/intel_mocs.c    | 216 +++++++++++++++++++++++-
> >  drivers/gpu/drm/i915/gt/intel_mocs.h    |   6 +-
> >  drivers/gpu/drm/i915/i915_drv.h         |   8 +
> >  drivers/gpu/drm/i915/i915_reg.h         |   1 +
> >  drivers/gpu/drm/i915/i915_sysfs.c       | 100 +++++++++++
> >  7 files changed, 337 insertions(+), 2 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> > index 468438fb47af..054051969d00 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> > @@ -2137,6 +2137,13 @@ __execlists_update_reg_state(const struct intel_context *ce,
> >                         intel_sseu_make_rpcs(engine->i915, &ce->sseu);
> >  
> >                 i915_oa_init_reg_state(ce, engine);
> > +               /*
> > +                * Gen11+ wants to support update of LLC class-of-service via
> > +                * sysfs interface. CLOS is defined in MOCS registers and for
> > +                * Gen11, MOCS is part of context resgister state.
> > +                */
> > +               if (IS_GEN(engine->i915, 11))
> > +                       intel_mocs_init_reg_state(ce);
> 
> Do the filtering in intel_mocs_init_reg_state(). The intel_ prefix says
> it should be common to all.
> 
> Not IS_ELKHARTLAKE(), that is implies by subject?
> 
The Gen11 PCode implementation has support for reading the way mask, so I
updated the check to Gen11.
> > +static int
> > +mocs_store_clos(struct i915_request *rq,
> > +               struct intel_context *ce)
> > +{
> > +       struct drm_i915_mocs_table t;
> > +       unsigned int count, active_clos, index;
> > +       u32 offset;
> > +       u32 value;
> > +       u32 *cs;
> > +
> > +       if (!get_mocs_settings(rq->engine->gt, &t))
> > +               return -ENODEV;
> > +
> > +       count = t.n_entries;
> > +       active_clos = rq->i915->clos.active_clos;
> > +       cs = intel_ring_begin(rq, 4 * count);
> > +       if (IS_ERR(cs))
> > +               return PTR_ERR(cs);
> > +
> > +       offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
> > +
> > +       for (index = 0; index < count; index++) {
> > +               value = ce->lrc_reg_state[ctx_mocsN(index)];
> 
> Their context image is volatile at this point. They are pinned and
> active. If you must do a rmw, do it on the GPU. But do we not know the
> full value (as I don't expect it to be nonpriv)? [If doing rmw on the
> GPU, we're probably have to use a secure batch here to avoid running out
> of ringspace.]
Right. Fixed.
> 
> > +               value &= ~LE_COS_MASK;
> > +               value |= FIELD_PREP(LE_COS_MASK, active_clos);
> > +
> > +               *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
> > +               *cs++ = offset + ctx_mocsN(index) * sizeof(uint32_t);
> > +               *cs++ = 0;
> > +               *cs++ = value;
> > +       }
> > +
> > +       intel_ring_advance(rq, cs);
> > +
> > +       return 0;
> > +}
> > +
> > +static int modify_context_mocs(struct intel_context *ce)
> > +{
> > +       struct i915_request *rq;
> > +       int err;
> > +
> > +       lockdep_assert_held(&ce->pin_mutex);
> > +
> > +       rq = i915_request_create(ce->engine->kernel_context);
> > +       if (IS_ERR(rq))
> > +               return PTR_ERR(rq);
> > +
> > +       /* Serialise with the remote context */
> > +       err = intel_context_prepare_remote_request(ce, rq);
> > +       if (err == 0)
> > +               err = mocs_store_clos(rq, ce);
> > +
> > +       i915_request_add(rq);
> > +       return err;
> > +}
> > +
> > +static int intel_mocs_configure_context(struct i915_gem_context *ctx)
> > +{
> > +       struct i915_gem_engines_iter it;
> > +       struct intel_context *ce;
> > +       int err = 0;
> > +
> > +       for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
> > +               GEM_BUG_ON(ce == ce->engine->kernel_context);
> > +
> > +               if (ce->engine->class != RENDER_CLASS)
> > +                       continue;
> > +
> > +               err = intel_context_lock_pinned(ce);
> > +               if (err)
> > +                       break;
> > +
> > +               if (intel_context_is_pinned(ce))
> > +                       err = modify_context_mocs(ce);
> > +
> > +               intel_context_unlock_pinned(ce);
> > +               if (err)
> > +                       break;
> > +       }
> > +       i915_gem_context_unlock_engines(ctx);
> > +
> > +       return err;
> > +}
> > +
> > +static int intel_mocs_configure_all_contexts(struct intel_gt *gt)
> 
> So this on the GT,
> 
> > +{
> > +       struct drm_i915_private *i915 = gt->i915;
> > +       struct intel_engine_cs *engine;
> > +       struct i915_gem_context *ctx;
> > +       enum intel_engine_id id;
> > +       int err;
> > +
> > +       lockdep_assert_held(&i915->drm.struct_mutex);
> 
> Wrong lock entirely, you need the context lock for walking the
> i915->gem.contexts.list.
> 
> > +       /*
> > +        * MOCS registers of render engine are context saved and restored to and
> > +        * from a context image.
> > +        * So for any MOCS update to reflect on the existing contexts requires
> > +        * updating the context image.
> > +        */
> > +       list_for_each_entry(ctx, &i915->gem.contexts.list, link) {
> > +               if (ctx == i915->kernel_context)
> > +                       continue;
> > +
> > +               err = intel_mocs_configure_context(ctx);
> 
> but this is global,
> 

Even though on Gen11 global and per-GT may not make much difference, I made
an attempt to move the global handling out of the GT.
> > +               if (err)
> > +                       return err;
> > +       }
> > +
> > +       /*
> > +        * After updating all other contexts, update render context image of
> > +        * kernel context. Also update the MOCS of non-render engines.
> > +        */
> > +
> > +       for_each_engine(engine, i915, id) {
> 
> and here again you are on the gt.
> 
> > +               struct i915_request *rq;
> > +               struct drm_i915_mocs_table t;
> > +
> > +               rq = i915_request_create(engine->kernel_context);
> > +               if (IS_ERR(rq))
> > +                       return PTR_ERR(rq);
> > +
> > +               get_mocs_settings(rq->engine->gt, &t);
> > +               err = emit_mocs_control_table(rq, &t);
> > +               if (err) {
> > +                       i915_request_skip(rq, err);
> > +                       i915_request_add(rq);
> > +                       return err;
> > +               }
> > +
> > +               i915_request_add(rq);
> > +       }
> > +
> > +       return 0;
> > +}
> 
> > diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
> > index 2ae816b7ca19..65566457408e 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_mocs.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
> > @@ -49,13 +49,17 @@
> >   * context handling keep the MOCS in step.
> >   */
> >  
> > -struct i915_request;
> >  struct intel_engine_cs;
> > +struct intel_context;
> > +struct i915_request;
> >  struct intel_gt;
> 
> Our OCD is to keep these in alphabetical order.
Fixed.

Thank You,
Prathap
> -Chris