* [PATCH 1/4] drm/i915: Verify workarounds immediately after application
@ 2019-04-15 16:00 Chris Wilson
2019-04-15 16:00 ` [PATCH 2/4] drm/i915: Verify the engine workarounds stick on application Chris Wilson
` (4 more replies)
0 siblings, 5 replies; 13+ messages in thread
From: Chris Wilson @ 2019-04-15 16:00 UTC (permalink / raw)
To: intel-gfx
Immediately after writing the workaround, verify that it stuck in the
register.
References: https://bugs.freedesktop.org/show_bug.cgi?id=108954
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
drivers/gpu/drm/i915/intel_workarounds.c | 32 +++++++++++++-----------
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index ccaf63679435..1c54b5030807 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -913,6 +913,20 @@ wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
return fw;
}
+static bool
+wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
+{
+ if ((cur ^ wa->val) & wa->mask) {
+ DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+ name, from, i915_mmio_reg_offset(wa->reg), cur,
+ cur & wa->mask, wa->val, wa->mask);
+
+ return false;
+ }
+
+ return true;
+}
+
static void
wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
@@ -931,6 +945,10 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ wa_verify(wa,
+ intel_uncore_read_fw(uncore, wa->reg),
+ wal->name, "application");
}
intel_uncore_forcewake_put__locked(uncore, fw);
@@ -942,20 +960,6 @@ void intel_gt_apply_workarounds(struct drm_i915_private *i915)
wa_list_apply(&i915->uncore, &i915->gt_wa_list);
}
-static bool
-wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
-{
- if ((cur ^ wa->val) & wa->mask) {
- DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
- name, from, i915_mmio_reg_offset(wa->reg), cur,
- cur & wa->mask, wa->val, wa->mask);
-
- return false;
- }
-
- return true;
-}
-
static bool wa_list_verify(struct intel_uncore *uncore,
const struct i915_wa_list *wal,
const char *from)
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 2/4] drm/i915: Verify the engine workarounds stick on application
2019-04-15 16:00 [PATCH 1/4] drm/i915: Verify workarounds immediately after application Chris Wilson
@ 2019-04-15 16:00 ` Chris Wilson
2019-04-15 16:00 ` [PATCH 3/4] drm/i915: Make workaround verification *optional* Chris Wilson
` (3 subsequent siblings)
4 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2019-04-15 16:00 UTC (permalink / raw)
To: intel-gfx
Read the engine workarounds back using the GPU after loading the initial
context state to verify that we are setting them correctly, and bail if
it fails.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
drivers/gpu/drm/i915/i915_gem.c | 6 +
drivers/gpu/drm/i915/intel_workarounds.c | 120 ++++++++++++++++++
drivers/gpu/drm/i915/intel_workarounds.h | 2 +
.../drm/i915/selftests/intel_workarounds.c | 53 +-------
4 files changed, 134 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0a818a60ad31..95ae69753e91 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4717,6 +4717,12 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
i915_request_add(rq);
if (err)
goto err_active;
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) &&
+ intel_engine_verify_workarounds(engine, "load")) {
+ err = -EIO;
+ goto err_active;
+ }
}
/* Flush the default context image to memory, and enable powersaving. */
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 1c54b5030807..db99f2e676bb 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -1259,6 +1259,126 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(engine->uncore, &engine->wa_list);
}
+static struct i915_vma *
+create_scratch(struct i915_address_space *vm, int count)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned int size;
+ int err;
+
+ size = round_up(count * 4, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0,
+ i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int
+wa_list_srm(struct i915_request *rq,
+ const struct i915_wa_list *wal,
+ struct i915_vma *vma)
+{
+ const struct i915_wa *wa;
+ u32 srm, *cs;
+ int i;
+
+ srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ if (INTEL_GEN(rq->i915) >= 8)
+ srm++;
+
+ cs = intel_ring_begin(rq, 4 * wal->count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ *cs++ = srm;
+ *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
+ *cs++ = 0;
+ }
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int engine_wa_list_verify(struct intel_engine_cs *engine,
+ const struct i915_wa_list * const wal,
+ const char *from)
+{
+ const struct i915_wa *wa;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ unsigned int i;
+ u32 *results;
+ int err;
+
+ if (!wal->count)
+ return 0;
+
+ vma = create_scratch(&engine->i915->ggtt.vm, wal->count);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ rq = i915_request_alloc(engine, engine->kernel_context->gem_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_vma;
+ }
+
+ err = wa_list_srm(rq, wal, vma);
+ if (err)
+ goto err_vma;
+
+ i915_request_add(rq);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ err = -ETIME;
+ goto err_vma;
+ }
+
+ results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto err_vma;
+ }
+
+ err = 0;
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ if (!wa_verify(wa, results[i], wal->name, from))
+ err = -ENXIO;
+
+ i915_gem_object_unpin_map(vma->obj);
+
+err_vma:
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+ return err;
+}
+
+int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from)
+{
+ return engine_wa_list_verify(engine, &engine->wa_list, from);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_workarounds.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index 34eee5ec511e..fdf7ebb90f28 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -30,5 +30,7 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
void intel_engine_init_workarounds(struct intel_engine_cs *engine);
void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
+int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from);
#endif
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 567b6f8dae86..a363748a7a4f 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -340,49 +340,6 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
return err;
}
-static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- void *ptr;
- int err;
-
- obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
-
- ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(ptr)) {
- err = PTR_ERR(ptr);
- goto err_obj;
- }
- memset(ptr, 0xc5, PAGE_SIZE);
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err_obj;
-
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- if (err)
- goto err_obj;
-
- return vma;
-
-err_obj:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
static struct i915_vma *create_batch(struct i915_gem_context *ctx)
{
struct drm_i915_gem_object *obj;
@@ -475,7 +432,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
int err = 0, i, v;
u32 *cs, *results;
- scratch = create_scratch(ctx);
+ scratch = create_scratch(&ctx->ppgtt->vm, 2 * ARRAY_SIZE(values) + 1);
if (IS_ERR(scratch))
return PTR_ERR(scratch);
@@ -752,9 +709,11 @@ static bool verify_gt_engine_wa(struct drm_i915_private *i915,
ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
- for_each_engine(engine, i915, id)
- ok &= wa_list_verify(engine->uncore,
- &lists->engine[id].wa_list, str);
+ for_each_engine(engine, i915, id) {
+ ok &= engine_wa_list_verify(engine,
+ &lists->engine[id].wa_list,
+ str) == 0;
+ }
return ok;
}
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 3/4] drm/i915: Make workaround verification *optional*
2019-04-15 16:00 [PATCH 1/4] drm/i915: Verify workarounds immediately after application Chris Wilson
2019-04-15 16:00 ` [PATCH 2/4] drm/i915: Verify the engine workarounds stick on application Chris Wilson
@ 2019-04-15 16:00 ` Chris Wilson
2019-04-15 16:00 ` [PATCH 4/4] drm/i915/selftests: Verify whitelist of context registers Chris Wilson
` (2 subsequent siblings)
4 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2019-04-15 16:00 UTC (permalink / raw)
To: intel-gfx
Sometimes the HW doesn't even play fair, and completely forgets about
register writes. Skip verifying known troublemakers.
References: https://bugs.freedesktop.org/show_bug.cgi?id=108954
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
drivers/gpu/drm/i915/intel_workarounds.c | 40 ++++++++++++++-----
.../gpu/drm/i915/intel_workarounds_types.h | 7 ++--
2 files changed, 33 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index db99f2e676bb..ba58be05f58c 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -122,6 +122,7 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
wal->wa_count++;
wa_->val |= wa->val;
wa_->mask |= wa->mask;
+ wa_->read |= wa->read;
return;
}
}
@@ -146,9 +147,10 @@ wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
u32 val)
{
struct i915_wa wa = {
- .reg = reg,
+ .reg = reg,
.mask = mask,
- .val = val
+ .val = val,
+ .read = mask,
};
_wa_add(wal, &wa);
@@ -172,6 +174,19 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
wa_write_masked_or(wal, reg, val, val);
}
+static void
+ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val,
+ /* Bonkers HW, skip verifying */
+ };
+
+ _wa_add(wal, &wa);
+}
+
#define WA_SET_BIT_MASKED(addr, mask) \
wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
@@ -916,10 +931,11 @@ wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
- if ((cur ^ wa->val) & wa->mask) {
+ if ((cur ^ wa->val) & wa->read) {
DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
- name, from, i915_mmio_reg_offset(wa->reg), cur,
- cur & wa->mask, wa->val, wa->mask);
+ name, from, i915_mmio_reg_offset(wa->reg),
+ cur, cur & wa->read,
+ wa->val, wa->mask);
return false;
}
@@ -1122,9 +1138,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
/* WaPipelineFlushCoherentLines:icl */
- wa_write_or(wal,
- GEN8_L3SQCREG4,
- GEN8_LQSC_FLUSH_COHERENT_LINES);
+ ignore_wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
* Wa_1405543622:icl
@@ -1151,9 +1168,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_1405733216:icl
* Formerly known as WaDisableCleanEvicts
*/
- wa_write_or(wal,
- GEN8_L3SQCREG4,
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ ignore_wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
/* WaForwardProgressSoftReset:icl */
wa_write_or(wal,
diff --git a/drivers/gpu/drm/i915/intel_workarounds_types.h b/drivers/gpu/drm/i915/intel_workarounds_types.h
index 30918da180ff..42ac1fb99572 100644
--- a/drivers/gpu/drm/i915/intel_workarounds_types.h
+++ b/drivers/gpu/drm/i915/intel_workarounds_types.h
@@ -12,9 +12,10 @@
#include "i915_reg.h"
struct i915_wa {
- i915_reg_t reg;
- u32 mask;
- u32 val;
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+ u32 read;
};
struct i915_wa_list {
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH 4/4] drm/i915/selftests: Verify whitelist of context registers
2019-04-15 16:00 [PATCH 1/4] drm/i915: Verify workarounds immediately after application Chris Wilson
2019-04-15 16:00 ` [PATCH 2/4] drm/i915: Verify the engine workarounds stick on application Chris Wilson
2019-04-15 16:00 ` [PATCH 3/4] drm/i915: Make workaround verification *optional* Chris Wilson
@ 2019-04-15 16:00 ` Chris Wilson
2019-04-15 17:41 ` [PATCH] " Chris Wilson
2019-04-15 16:59 ` ✗ Fi.CI.BAT: failure for series starting with [1/4] drm/i915: Verify workarounds immediately after application Patchwork
2019-04-15 18:33 ` ✗ Fi.CI.BAT: failure for series starting with [1/4] drm/i915: Verify workarounds immediately after application (rev2) Patchwork
4 siblings, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2019-04-15 16:00 UTC (permalink / raw)
To: intel-gfx
The RING_NONPRIV allows us to add registers to a whitelist that allows
userspace to modify them. Ideally such registers should be safe and
saved within the context such that they do not impact system behaviour
for other users. This selftest verifies that those registers we do add
are (a) then writable by userspace and (b) only affect a single client.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
.../drm/i915/selftests/intel_workarounds.c | 290 ++++++++++++++++++
1 file changed, 290 insertions(+)
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index a363748a7a4f..e9005bbbb6c0 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -700,6 +700,295 @@ static int live_reset_whitelist(void *arg)
return err;
}
+static int read_whitelisted_registers(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct i915_vma *results)
+{
+ intel_wakeref_t wakeref;
+ struct i915_request *rq;
+ u32 srm, *cs;
+ int err, i;
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_req;
+
+ srm = MI_STORE_REGISTER_MEM;
+ if (INTEL_GEN(ctx->i915) >= 8)
+ srm++;
+
+ cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_req;
+ }
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u64 offset = results->node.start + sizeof(u32) * i;
+
+ *cs++ = srm;
+ *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ }
+ intel_ring_advance(rq, cs);
+
+err_req:
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+ err = -EIO;
+
+ return err;
+}
+
+static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ intel_wakeref_t wakeref;
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ int i, err;
+ u32 *cs;
+
+ batch = create_batch(ctx);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
+ for (i = 0; i < engine->whitelist.count; i++) {
+ *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ *cs++ = STACK_MAGIC;
+ }
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_chipset_flush(ctx->i915);
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto err_request;
+ }
+
+ err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+
+err_request:
+ i915_request_add(rq);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+ err = -EIO;
+
+err_unpin:
+ i915_gem_object_unpin_map(batch->obj);
+err_batch:
+ i915_vma_unpin_and_release(&batch, 0);
+ return err;
+}
+
+static bool ignore_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ /* Alas, we must pardon some whitelists */
+ static const struct {
+ i915_reg_t reg;
+ unsigned long gen_mask;
+ } ignore[] = {
+ { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
+ };
+ u32 offset = i915_mmio_reg_offset(reg);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ignore); i++) {
+ if (INTEL_INFO(i915)->gen_mask & ignore[i].gen_mask &&
+ i915_mmio_reg_offset(ignore[i].reg) == offset)
+ return true;
+ }
+
+ return false;
+}
+
+static int eq_whitelisted_registers(struct i915_vma *A,
+ struct i915_vma *B,
+ struct intel_engine_cs *engine)
+{
+ u32 *a, *b;
+ int i, err;
+
+ a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto err_a;
+ }
+
+ err = 0;
+ for (i = 0; i < engine->whitelist.count; i++) {
+ if (a[i] != b[i] &&
+ !ignore_reg(engine->i915, engine->whitelist.list[i].reg)) {
+ pr_err("[%d] Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
+ i, i915_mmio_reg_offset(engine->whitelist.list[i].reg),
+ a[i], b[i]);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(B->obj);
+err_a:
+ i915_gem_object_unpin_map(A->obj);
+ return err;
+}
+
+static int neq_whitelisted_registers(struct i915_vma *A,
+ struct i915_vma *B,
+ struct intel_engine_cs *engine)
+{
+ u32 *a, *b;
+ int i, err;
+
+ a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto err_a;
+ }
+
+ err = 0;
+ for (i = 0; i < engine->whitelist.count; i++) {
+ if (a[i] == b[i]) {
+ pr_err("[%d] Whitelist register 0x%4x:%08x was unwritable\n",
+ i, i915_mmio_reg_offset(engine->whitelist.list[i].reg), a[i]);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(B->obj);
+err_a:
+ i915_gem_object_unpin_map(A->obj);
+ return err;
+}
+
+static int live_isolated_whitelist(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine = i915->engine[RCS0];
+ struct {
+ struct i915_gem_context *ctx;
+ struct i915_vma *scratch[2];
+ } client[2] = {};
+ enum intel_engine_id id;
+ int i, err = 0;
+
+ /*
+ * Check that a write into a whitelist register works, but
+ * is invisible to a second context.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_gem_context *c;
+
+ c = kernel_context(i915);
+ if (IS_ERR(c))
+ goto err;
+
+ client[i].scratch[0] =
+ create_scratch(&c->ppgtt->vm, engine->whitelist.count);
+ if (IS_ERR(client[i].scratch[0])) {
+ kernel_context_close(c);
+ goto err;
+ }
+
+ client[i].scratch[1] =
+ create_scratch(&c->ppgtt->vm, engine->whitelist.count);
+ if (IS_ERR(client[i].scratch[1])) {
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ kernel_context_close(c);
+ goto err;
+ }
+
+ client[i].ctx = c;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (!engine->whitelist.count)
+ continue;
+
+ /* Read default values */
+ err = read_whitelisted_registers(client[0].ctx, engine,
+ client[0].scratch[0]);
+ if (err)
+ goto err;
+
+ /* Try to overwrite registers (should only affect ctx0) */
+ err = scrub_whitelisted_registers(client[0].ctx, engine);
+ if (err)
+ goto err;
+
+ /* Read values from ctx1, we expect these to be defaults */
+ err = read_whitelisted_registers(client[1].ctx, engine,
+ client[1].scratch[0]);
+ if (err)
+ goto err;
+
+ /* Verify that both reads return the same default values */
+ err = eq_whitelisted_registers(client[0].scratch[0],
+ client[1].scratch[0],
+ engine);
+ if (err)
+ goto err;
+
+ /* Read back the updated values in ctx0 */
+ err = read_whitelisted_registers(client[0].ctx, engine,
+ client[0].scratch[1]);
+ if (err)
+ goto err;
+
+ /* User should be granted privilege to overwrite regs */
+ err = neq_whitelisted_registers(client[0].scratch[0],
+ client[0].scratch[1],
+ engine);
+ if (err)
+ goto err;
+ }
+
+err:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i].ctx)
+ break;
+
+ i915_vma_unpin_and_release(&client[i].scratch[1], 0);
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ kernel_context_close(client[i].ctx);
+ }
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ return err;
+}
+
static bool verify_gt_engine_wa(struct drm_i915_private *i915,
struct wa_lists *lists, const char *str)
{
@@ -844,6 +1133,7 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(live_dirty_whitelist),
SUBTEST(live_reset_whitelist),
+ SUBTEST(live_isolated_whitelist),
SUBTEST(live_gpu_reset_gt_engine_workarounds),
SUBTEST(live_engine_reset_gt_engine_workarounds),
};
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH] drm/i915/selftests: Verify whitelist of context registers
2019-04-15 16:00 ` [PATCH 4/4] drm/i915/selftests: Verify whitelist of context registers Chris Wilson
@ 2019-04-15 17:41 ` Chris Wilson
2019-04-16 14:49 ` [Intel-gfx] " Dan Carpenter
0 siblings, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2019-04-15 17:41 UTC (permalink / raw)
To: intel-gfx
The RING_NONPRIV allows us to add registers to a whitelist that allows
userspace to modify them. Ideally such registers should be safe and
saved within the context such that they do not impact system behaviour
for other users. This selftest verifies that those registers we do add
are (a) then writable by userspace and (b) only affect a single client.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
.../drm/i915/selftests/intel_workarounds.c | 289 ++++++++++++++++++
1 file changed, 289 insertions(+)
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index a363748a7a4f..d71b5b062d6f 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -700,6 +700,294 @@ static int live_reset_whitelist(void *arg)
return err;
}
+static int read_whitelisted_registers(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct i915_vma *results)
+{
+ intel_wakeref_t wakeref;
+ struct i915_request *rq;
+ u32 srm, *cs;
+ int err, i;
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_req;
+
+ srm = MI_STORE_REGISTER_MEM;
+ if (INTEL_GEN(ctx->i915) >= 8)
+ srm++;
+
+ cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_req;
+ }
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u64 offset = results->node.start + sizeof(u32) * i;
+
+ *cs++ = srm;
+ *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ }
+ intel_ring_advance(rq, cs);
+
+err_req:
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+ err = -EIO;
+
+ return err;
+}
+
+static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ intel_wakeref_t wakeref;
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ int i, err;
+ u32 *cs;
+
+ batch = create_batch(ctx);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
+ for (i = 0; i < engine->whitelist.count; i++) {
+ *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ *cs++ = STACK_MAGIC;
+ }
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_chipset_flush(ctx->i915);
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto err_request;
+ }
+
+ err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+
+err_request:
+ i915_request_add(rq);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+ err = -EIO;
+
+err_unpin:
+ i915_gem_object_unpin_map(batch->obj);
+err_batch:
+ i915_vma_unpin_and_release(&batch, 0);
+ return err;
+}
+
+static bool ignore_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ /* Alas, we must pardon some whitelists */
+ static const struct {
+ i915_reg_t reg;
+ unsigned long gen_mask;
+ } ignore[] = {
+ { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
+ { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
+ };
+ u32 offset = i915_mmio_reg_offset(reg);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ignore); i++) {
+ if (INTEL_INFO(i915)->gen_mask & ignore[i].gen_mask &&
+ i915_mmio_reg_offset(ignore[i].reg) == offset)
+ return true;
+ }
+
+ return false;
+}
+
+static int eq_whitelisted_registers(struct i915_vma *A,
+ struct i915_vma *B,
+ struct intel_engine_cs *engine)
+{
+ u32 *a, *b;
+ int i, err;
+
+ a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto err_a;
+ }
+
+ err = 0;
+ for (i = 0; i < engine->whitelist.count; i++) {
+ if (a[i] != b[i] &&
+ !ignore_reg(engine->i915, engine->whitelist.list[i].reg)) {
+ pr_err("[%d] Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
+ i, i915_mmio_reg_offset(engine->whitelist.list[i].reg),
+ a[i], b[i]);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(B->obj);
+err_a:
+ i915_gem_object_unpin_map(A->obj);
+ return err;
+}
+
+static int neq_whitelisted_registers(struct i915_vma *A,
+ struct i915_vma *B,
+ struct intel_engine_cs *engine)
+{
+ u32 *a, *b;
+ int i, err;
+
+ a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto err_a;
+ }
+
+ err = 0;
+ for (i = 0; i < engine->whitelist.count; i++) {
+ if (a[i] == b[i]) {
+ pr_err("[%d] Whitelist register 0x%4x:%08x was unwritable\n",
+ i, i915_mmio_reg_offset(engine->whitelist.list[i].reg), a[i]);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(B->obj);
+err_a:
+ i915_gem_object_unpin_map(A->obj);
+ return err;
+}
+
+static int live_isolated_whitelist(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct {
+ struct i915_gem_context *ctx;
+ struct i915_vma *scratch[2];
+ } client[2] = {};
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int i, err = 0;
+
+ /*
+ * Check that a write into a whitelist register works, but
+ * is invisible to a second context.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_gem_context *c;
+
+ c = kernel_context(i915);
+ if (IS_ERR(c))
+ goto err;
+
+ client[i].scratch[0] = create_scratch(&c->ppgtt->vm, 1024);
+ if (IS_ERR(client[i].scratch[0])) {
+ kernel_context_close(c);
+ goto err;
+ }
+
+ client[i].scratch[1] = create_scratch(&c->ppgtt->vm, 1024);
+ if (IS_ERR(client[i].scratch[1])) {
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ kernel_context_close(c);
+ goto err;
+ }
+
+ client[i].ctx = c;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (!engine->whitelist.count)
+ continue;
+
+ /* Read default values */
+ err = read_whitelisted_registers(client[0].ctx, engine,
+ client[0].scratch[0]);
+ if (err)
+ goto err;
+
+ /* Try to overwrite registers (should only affect ctx0) */
+ err = scrub_whitelisted_registers(client[0].ctx, engine);
+ if (err)
+ goto err;
+
+ /* Read values from ctx1, we expect these to be defaults */
+ err = read_whitelisted_registers(client[1].ctx, engine,
+ client[1].scratch[0]);
+ if (err)
+ goto err;
+
+ /* Verify that both reads return the same default values */
+ err = eq_whitelisted_registers(client[0].scratch[0],
+ client[1].scratch[0],
+ engine);
+ if (err)
+ goto err;
+
+ /* Read back the updated values in ctx0 */
+ err = read_whitelisted_registers(client[0].ctx, engine,
+ client[0].scratch[1]);
+ if (err)
+ goto err;
+
+ /* User should be granted privilege to overwrite regs */
+ err = neq_whitelisted_registers(client[0].scratch[0],
+ client[0].scratch[1],
+ engine);
+ if (err)
+ goto err;
+ }
+
+err:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i].ctx)
+ break;
+
+ i915_vma_unpin_and_release(&client[i].scratch[1], 0);
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ kernel_context_close(client[i].ctx);
+ }
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ return err;
+}
+
static bool verify_gt_engine_wa(struct drm_i915_private *i915,
struct wa_lists *lists, const char *str)
{
@@ -844,6 +1132,7 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(live_dirty_whitelist),
SUBTEST(live_reset_whitelist),
+ SUBTEST(live_isolated_whitelist),
SUBTEST(live_gpu_reset_gt_engine_workarounds),
SUBTEST(live_engine_reset_gt_engine_workarounds),
};
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915/selftests: Verify whitelist of context registers
2019-04-15 17:41 ` [PATCH] " Chris Wilson
@ 2019-04-16 14:49 ` Dan Carpenter
0 siblings, 0 replies; 13+ messages in thread
From: Dan Carpenter @ 2019-04-16 14:49 UTC (permalink / raw)
To: kbuild, Chris Wilson; +Cc: intel-gfx, kbuild-all
Hi Chris,
Thank you for the patch! Perhaps something to improve:
url: https://github.com/0day-ci/linux/commits/Chris-Wilson/drm-i915-selftests-Verify-whitelist-of-context-registers/20190416-105231
base: git://anongit.freedesktop.org/drm-intel for-linux-next
New smatch warnings:
drivers/gpu/drm/i915/selftests/intel_workarounds.c:846 scrub_whitelisted_registers() error: uninitialized symbol 'err'.
# https://github.com/0day-ci/linux/commit/a9ce77a003ecaa54f530f1e9ff81cbae11380380
git remote add linux-review https://github.com/0day-ci/linux
git remote update linux-review
git checkout a9ce77a003ecaa54f530f1e9ff81cbae11380380
vim +/PTR_ERR +759 drivers/gpu/drm/i915/selftests/intel_workarounds.c
a9ce77a0 Chris Wilson 2019-04-15 794 static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
a9ce77a0 Chris Wilson 2019-04-15 795 struct intel_engine_cs *engine)
a9ce77a0 Chris Wilson 2019-04-15 796 {
a9ce77a0 Chris Wilson 2019-04-15 797 intel_wakeref_t wakeref;
a9ce77a0 Chris Wilson 2019-04-15 798 struct i915_request *rq;
a9ce77a0 Chris Wilson 2019-04-15 799 struct i915_vma *batch;
a9ce77a0 Chris Wilson 2019-04-15 800 int i, err;
a9ce77a0 Chris Wilson 2019-04-15 801 u32 *cs;
a9ce77a0 Chris Wilson 2019-04-15 802
a9ce77a0 Chris Wilson 2019-04-15 803 batch = create_batch(ctx);
a9ce77a0 Chris Wilson 2019-04-15 804 if (IS_ERR(batch))
a9ce77a0 Chris Wilson 2019-04-15 805 return PTR_ERR(batch);
a9ce77a0 Chris Wilson 2019-04-15 806
a9ce77a0 Chris Wilson 2019-04-15 807 cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
a9ce77a0 Chris Wilson 2019-04-15 808 if (IS_ERR(cs)) {
a9ce77a0 Chris Wilson 2019-04-15 809 err = PTR_ERR(cs);
a9ce77a0 Chris Wilson 2019-04-15 810 goto err_batch;
a9ce77a0 Chris Wilson 2019-04-15 811 }
a9ce77a0 Chris Wilson 2019-04-15 812
a9ce77a0 Chris Wilson 2019-04-15 813 *cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
a9ce77a0 Chris Wilson 2019-04-15 814 for (i = 0; i < engine->whitelist.count; i++) {
a9ce77a0 Chris Wilson 2019-04-15 815 *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
a9ce77a0 Chris Wilson 2019-04-15 816 *cs++ = STACK_MAGIC;
a9ce77a0 Chris Wilson 2019-04-15 817 }
a9ce77a0 Chris Wilson 2019-04-15 818 *cs++ = MI_BATCH_BUFFER_END;
a9ce77a0 Chris Wilson 2019-04-15 819
a9ce77a0 Chris Wilson 2019-04-15 820 i915_gem_object_flush_map(batch->obj);
a9ce77a0 Chris Wilson 2019-04-15 821 i915_gem_chipset_flush(ctx->i915);
a9ce77a0 Chris Wilson 2019-04-15 822
a9ce77a0 Chris Wilson 2019-04-15 823 rq = ERR_PTR(-ENODEV);
a9ce77a0 Chris Wilson 2019-04-15 824 with_intel_runtime_pm(engine->i915, wakeref)
a9ce77a0 Chris Wilson 2019-04-15 825 rq = i915_request_alloc(engine, ctx);
a9ce77a0 Chris Wilson 2019-04-15 826 if (IS_ERR(rq))
a9ce77a0 Chris Wilson 2019-04-15 827 goto err_unpin;
^^^^^^^^^^^^^^
"err" not set.
a9ce77a0 Chris Wilson 2019-04-15 828
a9ce77a0 Chris Wilson 2019-04-15 829 if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
a9ce77a0 Chris Wilson 2019-04-15 830 err = engine->emit_init_breadcrumb(rq);
a9ce77a0 Chris Wilson 2019-04-15 831 if (err)
a9ce77a0 Chris Wilson 2019-04-15 832 goto err_request;
a9ce77a0 Chris Wilson 2019-04-15 833 }
a9ce77a0 Chris Wilson 2019-04-15 834
a9ce77a0 Chris Wilson 2019-04-15 835 err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
a9ce77a0 Chris Wilson 2019-04-15 836
a9ce77a0 Chris Wilson 2019-04-15 837 err_request:
a9ce77a0 Chris Wilson 2019-04-15 838 i915_request_add(rq);
a9ce77a0 Chris Wilson 2019-04-15 839 if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
a9ce77a0 Chris Wilson 2019-04-15 840 err = -EIO;
a9ce77a0 Chris Wilson 2019-04-15 841
a9ce77a0 Chris Wilson 2019-04-15 842 err_unpin:
a9ce77a0 Chris Wilson 2019-04-15 843 i915_gem_object_unpin_map(batch->obj);
a9ce77a0 Chris Wilson 2019-04-15 844 err_batch:
a9ce77a0 Chris Wilson 2019-04-15 845 i915_vma_unpin_and_release(&batch, 0);
a9ce77a0 Chris Wilson 2019-04-15 @846 return err;
a9ce77a0 Chris Wilson 2019-04-15 847 }
a9ce77a0 Chris Wilson 2019-04-15 848
^ permalink raw reply [flat|nested] 13+ messages in thread
* ✗ Fi.CI.BAT: failure for series starting with [1/4] drm/i915: Verify workarounds immediately after application
2019-04-15 16:00 [PATCH 1/4] drm/i915: Verify workarounds immediately after application Chris Wilson
` (2 preceding siblings ...)
2019-04-15 16:00 ` [PATCH 4/4] drm/i915/selftests: Verify whitelist of context registers Chris Wilson
@ 2019-04-15 16:59 ` Patchwork
2019-04-15 18:33 ` ✗ Fi.CI.BAT: failure for series starting with [1/4] drm/i915: Verify workarounds immediately after application (rev2) Patchwork
4 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2019-04-15 16:59 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [1/4] drm/i915: Verify workarounds immediately after application
URL : https://patchwork.freedesktop.org/series/59513/
State : failure
== Summary ==
CI Bug Log - changes from CI_DRM_5934 -> Patchwork_12803
====================================================
Summary
-------
**FAILURE**
Serious unknown changes coming with Patchwork_12803 absolutely need to be
verified manually.
If you think the reported changes have nothing to do with the changes
introduced in Patchwork_12803, please notify your bug team to allow them
to document this new failure mode, which will reduce false positives in CI.
External URL: https://patchwork.freedesktop.org/api/1.0/series/59513/revisions/1/mbox/
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in Patchwork_12803:
### IGT changes ###
#### Possible regressions ####
* igt@i915_selftest@live_workarounds:
- fi-ivb-3770: PASS -> INCOMPLETE
- fi-ilk-650: PASS -> INCOMPLETE
- fi-bsw-n3050: NOTRUN -> INCOMPLETE
- fi-glk-dsi: PASS -> DMESG-FAIL
- fi-bsw-kefka: PASS -> INCOMPLETE
- fi-blb-e6850: PASS -> INCOMPLETE
- fi-hsw-4770r: PASS -> INCOMPLETE
- fi-cfl-8109u: PASS -> DMESG-FAIL
- fi-kbl-guc: PASS -> DMESG-FAIL
- fi-icl-y: PASS -> DMESG-FAIL
- fi-skl-6600u: PASS -> DMESG-FAIL
- fi-pnv-d510: PASS -> INCOMPLETE
- fi-bdw-5557u: PASS -> INCOMPLETE
- fi-whl-u: PASS -> DMESG-FAIL
- fi-apl-guc: PASS -> DMESG-FAIL
- fi-skl-6700k2: PASS -> DMESG-FAIL
- fi-skl-iommu: PASS -> DMESG-FAIL
- fi-skl-lmem: PASS -> DMESG-FAIL
- fi-skl-6770hq: PASS -> DMESG-FAIL
- fi-kbl-x1275: PASS -> DMESG-FAIL
- fi-icl-u3: PASS -> DMESG-FAIL
- fi-bxt-j4205: PASS -> DMESG-FAIL
- fi-skl-6260u: PASS -> DMESG-FAIL
- fi-kbl-r: PASS -> DMESG-FAIL
- fi-skl-gvtdvm: PASS -> DMESG-FAIL
- fi-kbl-8809g: PASS -> DMESG-FAIL
- fi-hsw-peppy: PASS -> INCOMPLETE
- fi-gdg-551: PASS -> INCOMPLETE
- fi-bwr-2160: PASS -> INCOMPLETE
- fi-skl-guc: PASS -> DMESG-FAIL
- fi-snb-2520m: PASS -> INCOMPLETE
- fi-hsw-4770: PASS -> INCOMPLETE
- fi-bxt-dsi: PASS -> DMESG-FAIL
- fi-cfl-8700k: PASS -> DMESG-FAIL
* igt@runner@aborted:
- fi-ilk-650: NOTRUN -> FAIL
- fi-pnv-d510: NOTRUN -> FAIL
- fi-bdw-gvtdvm: NOTRUN -> FAIL
- fi-hsw-peppy: NOTRUN -> FAIL
- fi-gdg-551: NOTRUN -> FAIL
- fi-snb-2520m: NOTRUN -> FAIL
- fi-hsw-4770: NOTRUN -> FAIL
- fi-ivb-3770: NOTRUN -> FAIL
- fi-byt-j1900: NOTRUN -> FAIL
- fi-bsw-n3050: NOTRUN -> FAIL
- fi-blb-e6850: NOTRUN -> FAIL
- fi-bsw-kefka: NOTRUN -> FAIL
- fi-hsw-4770r: NOTRUN -> FAIL
- fi-byt-clapper: NOTRUN -> FAIL
- fi-bdw-5557u: NOTRUN -> FAIL
- fi-bwr-2160: NOTRUN -> FAIL
- fi-byt-n2820: NOTRUN -> FAIL
- fi-elk-e7500: NOTRUN -> FAIL
#### Suppressed ####
The following results come from untrusted machines, tests, or statuses.
They do not affect the overall result.
* igt@i915_selftest@live_workarounds:
- {fi-kbl-7567u}: PASS -> DMESG-FAIL
Known issues
------------
Here are the changes found in Patchwork_12803 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@i915_selftest@live_execlists:
- fi-apl-guc: PASS -> INCOMPLETE [fdo#103927] / [fdo#109720]
* igt@i915_selftest@live_workarounds:
- fi-byt-clapper: PASS -> INCOMPLETE [fdo#102657]
- fi-byt-n2820: PASS -> INCOMPLETE [fdo#102657]
- fi-elk-e7500: PASS -> INCOMPLETE [fdo#103989]
- fi-byt-j1900: PASS -> INCOMPLETE [fdo#102657]
- fi-bdw-gvtdvm: PASS -> INCOMPLETE [fdo#105600]
* igt@kms_busy@basic-flip-a:
- fi-bsw-n3050: NOTRUN -> SKIP [fdo#109271] / [fdo#109278] +1
* igt@kms_chamelium@hdmi-crc-fast:
- fi-bsw-n3050: NOTRUN -> SKIP [fdo#109271] +57
#### Possible fixes ####
* igt@i915_selftest@live_hangcheck:
- fi-skl-iommu: INCOMPLETE [fdo#108602] / [fdo#108744] -> PASS
* igt@kms_pipe_crc_basic@nonblocking-crc-pipe-a:
- fi-byt-clapper: FAIL [fdo#103191] -> PASS
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[fdo#102657]: https://bugs.freedesktop.org/show_bug.cgi?id=102657
[fdo#103191]: https://bugs.freedesktop.org/show_bug.cgi?id=103191
[fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
[fdo#103989]: https://bugs.freedesktop.org/show_bug.cgi?id=103989
[fdo#105600]: https://bugs.freedesktop.org/show_bug.cgi?id=105600
[fdo#108602]: https://bugs.freedesktop.org/show_bug.cgi?id=108602
[fdo#108744]: https://bugs.freedesktop.org/show_bug.cgi?id=108744
[fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
[fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278
[fdo#109720]: https://bugs.freedesktop.org/show_bug.cgi?id=109720
Participating hosts (50 -> 43)
------------------------------
Additional (1): fi-bsw-n3050
Missing (8): fi-kbl-soraka fi-ilk-m540 fi-byt-squawks fi-icl-u2 fi-bsw-cyan fi-ctg-p8600 fi-bdw-samus fi-snb-2600
Build changes
-------------
* Linux: CI_DRM_5934 -> Patchwork_12803
CI_DRM_5934: cc5334c0e706ec423c5f1a139cf3da7bd3287db6 @ git://anongit.freedesktop.org/gfx-ci/linux
IGT_4946: 56bdc68638cec64c6b02cd6b220b52b76059b51a @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
Patchwork_12803: 5a6538141f6ac227310bdc7b40cf5c86917d1e01 @ git://anongit.freedesktop.org/gfx-ci/linux
== Linux commits ==
5a6538141f6a drm/i915/selftests: Verify whitelist of context registers
6fcd1d1a81c3 drm/i915: Make workaround verification *optional*
87d8f277fb3a drm/i915: Verify the engine workarounds stick on application
b793a165c3a7 drm/i915: Verify workarounds immediately after application
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_12803/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* ✗ Fi.CI.BAT: failure for series starting with [1/4] drm/i915: Verify workarounds immediately after application (rev2)
2019-04-15 16:00 [PATCH 1/4] drm/i915: Verify workarounds immediately after application Chris Wilson
` (3 preceding siblings ...)
2019-04-15 16:59 ` ✗ Fi.CI.BAT: failure for series starting with [1/4] drm/i915: Verify workarounds immediately after application Patchwork
@ 2019-04-15 18:33 ` Patchwork
4 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2019-04-15 18:33 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [1/4] drm/i915: Verify workarounds immediately after application (rev2)
URL : https://patchwork.freedesktop.org/series/59513/
State : failure
== Summary ==
CI Bug Log - changes from CI_DRM_5934 -> Patchwork_12805
====================================================
Summary
-------
**FAILURE**
Serious unknown changes coming with Patchwork_12805 absolutely need to be
verified manually.
If you think the reported changes have nothing to do with the changes
introduced in Patchwork_12805, please notify your bug team to allow them
to document this new failure mode, which will reduce false positives in CI.
External URL: https://patchwork.freedesktop.org/api/1.0/series/59513/revisions/2/mbox/
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in Patchwork_12805:
### IGT changes ###
#### Possible regressions ####
* igt@i915_selftest@live_workarounds:
- fi-ilk-650: PASS -> INCOMPLETE
- fi-cfl-guc: PASS -> DMESG-FAIL
- fi-kbl-7500u: NOTRUN -> DMESG-FAIL
- fi-cfl-8109u: PASS -> DMESG-FAIL
- fi-kbl-guc: PASS -> DMESG-FAIL
- fi-icl-y: PASS -> DMESG-FAIL
- fi-skl-6600u: PASS -> DMESG-FAIL
- fi-pnv-d510: PASS -> INCOMPLETE
- fi-whl-u: PASS -> DMESG-FAIL
- fi-apl-guc: PASS -> DMESG-FAIL
- fi-skl-6700k2: PASS -> DMESG-FAIL
- fi-skl-6770hq: PASS -> DMESG-FAIL
- fi-icl-u3: PASS -> DMESG-FAIL
- fi-bxt-j4205: PASS -> DMESG-FAIL
- fi-skl-6260u: PASS -> DMESG-FAIL
- fi-kbl-r: PASS -> DMESG-FAIL
- fi-kbl-8809g: PASS -> DMESG-FAIL
- fi-gdg-551: PASS -> INCOMPLETE
- fi-bwr-2160: PASS -> INCOMPLETE
- fi-skl-guc: PASS -> DMESG-FAIL
- fi-snb-2520m: PASS -> INCOMPLETE
- fi-cfl-8700k: PASS -> DMESG-FAIL
* igt@runner@aborted:
- fi-ilk-650: NOTRUN -> FAIL
- fi-pnv-d510: NOTRUN -> FAIL
- fi-gdg-551: NOTRUN -> FAIL
- fi-snb-2520m: NOTRUN -> FAIL
- fi-bwr-2160: NOTRUN -> FAIL
- fi-elk-e7500: NOTRUN -> FAIL
#### Suppressed ####
The following results come from untrusted machines, tests, or statuses.
They do not affect the overall result.
* igt@i915_selftest@live_workarounds:
- {fi-kbl-7567u}: PASS -> DMESG-FAIL
Known issues
------------
Here are the changes found in Patchwork_12805 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@amdgpu/amd_basic@semaphore:
- fi-kbl-7500u: NOTRUN -> SKIP [fdo#109271] +28
* igt@gem_exec_suspend@basic-s3:
- fi-blb-e6850: PASS -> INCOMPLETE [fdo#107718]
* igt@i915_selftest@live_execlists:
- fi-apl-guc: PASS -> INCOMPLETE [fdo#103927] / [fdo#109720]
* igt@i915_selftest@live_workarounds:
- fi-elk-e7500: PASS -> INCOMPLETE [fdo#103989]
* igt@kms_busy@basic-flip-a:
- fi-bsw-n3050: NOTRUN -> SKIP [fdo#109271] / [fdo#109278] +1
* igt@kms_chamelium@common-hpd-after-suspend:
- fi-kbl-7500u: NOTRUN -> DMESG-WARN [fdo#102505] / [fdo#103558] / [fdo#105079] / [fdo#105602]
* igt@kms_chamelium@hdmi-crc-fast:
- fi-bsw-n3050: NOTRUN -> SKIP [fdo#109271] +57
* igt@kms_cursor_legacy@basic-flip-after-cursor-varying-size:
- fi-glk-dsi: PASS -> INCOMPLETE [fdo#103359] / [k.org#198133]
* igt@runner@aborted:
- fi-apl-guc: NOTRUN -> FAIL [fdo#108622] / [fdo#109720]
#### Possible fixes ####
* igt@kms_chamelium@dp-crc-fast:
- fi-kbl-7500u: DMESG-WARN [fdo#103841] -> PASS
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[fdo#102505]: https://bugs.freedesktop.org/show_bug.cgi?id=102505
[fdo#103359]: https://bugs.freedesktop.org/show_bug.cgi?id=103359
[fdo#103558]: https://bugs.freedesktop.org/show_bug.cgi?id=103558
[fdo#103841]: https://bugs.freedesktop.org/show_bug.cgi?id=103841
[fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
[fdo#103989]: https://bugs.freedesktop.org/show_bug.cgi?id=103989
[fdo#105079]: https://bugs.freedesktop.org/show_bug.cgi?id=105079
[fdo#105602]: https://bugs.freedesktop.org/show_bug.cgi?id=105602
[fdo#107718]: https://bugs.freedesktop.org/show_bug.cgi?id=107718
[fdo#108622]: https://bugs.freedesktop.org/show_bug.cgi?id=108622
[fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
[fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278
[fdo#109720]: https://bugs.freedesktop.org/show_bug.cgi?id=109720
[k.org#198133]: https://bugzilla.kernel.org/show_bug.cgi?id=198133
Participating hosts (50 -> 41)
------------------------------
Additional (1): fi-bsw-n3050
Missing (10): fi-kbl-soraka fi-ilk-m540 fi-hsw-peppy fi-byt-squawks fi-icl-u2 fi-bsw-cyan fi-ctg-p8600 fi-byt-clapper fi-bdw-samus fi-snb-2600
Build changes
-------------
* Linux: CI_DRM_5934 -> Patchwork_12805
CI_DRM_5934: cc5334c0e706ec423c5f1a139cf3da7bd3287db6 @ git://anongit.freedesktop.org/gfx-ci/linux
IGT_4946: 56bdc68638cec64c6b02cd6b220b52b76059b51a @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
Patchwork_12805: 5eb3a2fcb9309f2e39beed58c9ab54ecbbef51bf @ git://anongit.freedesktop.org/gfx-ci/linux
== Linux commits ==
5eb3a2fcb930 drm/i915/selftests: Verify whitelist of context registers
59347f637f28 drm/i915: Make workaround verification *optional*
2e134a724252 drm/i915: Verify the engine workarounds stick on application
44b8ff3b57c7 drm/i915: Verify workarounds immediately after application
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_12805/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH v2 4/4] drm/i915/selftests: Verify whitelist of context registers
@ 2019-04-16 8:10 Chris Wilson
2019-04-16 9:12 ` [PATCH] " Chris Wilson
0 siblings, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2019-04-16 8:10 UTC (permalink / raw)
To: intel-gfx
The RING_NONPRIV allows us to add registers to a whitelist that allows
userspace to modify them. Ideally such registers should be safe and
saved within the context such that they do not impact system behaviour
for other users. This selftest verifies that those registers we do add
are (a) then writable by userspace and (b) only affect a single client.
Opens:
- Is GEN9_SLICE_COMMON_ECO_CHICKEN1 really write-only?
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
.../drm/i915/selftests/intel_workarounds.c | 320 ++++++++++++++++++
1 file changed, 320 insertions(+)
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index a363748a7a4f..56799f3538f9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -700,6 +700,325 @@ static int live_reset_whitelist(void *arg)
return err;
}
+static int read_whitelisted_registers(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct i915_vma *results)
+{
+ intel_wakeref_t wakeref;
+ struct i915_request *rq;
+ u32 srm, *cs;
+ int err, i;
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_req;
+
+ srm = MI_STORE_REGISTER_MEM;
+ if (INTEL_GEN(ctx->i915) >= 8)
+ srm++;
+
+ cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_req;
+ }
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u64 offset = results->node.start + sizeof(u32) * i;
+
+ *cs++ = srm;
+ *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ }
+ intel_ring_advance(rq, cs);
+
+err_req:
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+ err = -EIO;
+
+ return err;
+}
+
+static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ intel_wakeref_t wakeref;
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ int i, err;
+ u32 *cs;
+
+ batch = create_batch(ctx);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
+ for (i = 0; i < engine->whitelist.count; i++) {
+ *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ *cs++ = 0xffffffff;
+ }
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_chipset_flush(ctx->i915);
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ goto err_unpin;
+
+ if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto err_request;
+ }
+
+ err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+
+err_request:
+ i915_request_add(rq);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+ err = -EIO;
+
+err_unpin:
+ i915_gem_object_unpin_map(batch->obj);
+err_batch:
+ i915_vma_unpin_and_release(&batch, 0);
+ return err;
+}
+
+static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ /* Alas, we must pardon some whitelists */
+ static const struct {
+ i915_reg_t reg;
+ unsigned long gen_mask;
+ } pardon[] = {
+ { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
+ { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
+ };
+ u32 offset = i915_mmio_reg_offset(reg);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pardon); i++) {
+ if (INTEL_INFO(i915)->gen_mask & pardon[i].gen_mask &&
+ i915_mmio_reg_offset(pardon[i].reg) == offset)
+ return true;
+ }
+
+ return false;
+}
+
+static int eq_whitelisted_registers(struct i915_vma *A,
+ struct i915_vma *B,
+ struct intel_engine_cs *engine)
+{
+ u32 *a, *b;
+ int i, err;
+
+ a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto err_a;
+ }
+
+ err = 0;
+ for (i = 0; i < engine->whitelist.count; i++) {
+ if (a[i] != b[i] &&
+ !pardon_reg(engine->i915, engine->whitelist.list[i].reg)) {
+ pr_err("[%d] Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
+ i, i915_mmio_reg_offset(engine->whitelist.list[i].reg),
+ a[i], b[i]);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(B->obj);
+err_a:
+ i915_gem_object_unpin_map(A->obj);
+ return err;
+}
+
+static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ static const struct {
+ i915_reg_t reg;
+ unsigned long gen_mask;
+ } wo[] = {
+ { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
+ };
+ u32 offset = i915_mmio_reg_offset(reg);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wo); i++) {
+ if (INTEL_INFO(i915)->gen_mask & wo[i].gen_mask &&
+ i915_mmio_reg_offset(wo[i].reg) == offset)
+ return true;
+ }
+
+ return false;
+}
+
+static int neq_whitelisted_registers(struct i915_vma *A,
+ struct i915_vma *B,
+ struct intel_engine_cs *engine)
+{
+ u32 *a, *b;
+ int i, err;
+
+ a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto err_a;
+ }
+
+ err = 0;
+ for (i = 0; i < engine->whitelist.count; i++) {
+ if (a[i] == b[i] &&
+ !writeonly_reg(engine->i915, engine->whitelist.list[i])) {
+ pr_err("[%d] Whitelist register 0x%4x:%08x was unwritable\n",
+ i, i915_mmio_reg_offset(engine->whitelist.list[i].reg), a[i]);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(B->obj);
+err_a:
+ i915_gem_object_unpin_map(A->obj);
+ return err;
+}
+
+static int live_isolated_whitelist(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct {
+ struct i915_gem_context *ctx;
+ struct i915_vma *scratch[2];
+ } client[2] = {};
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int i, err = 0;
+
+ /*
+ * Check that a write into a whitelist register works, but
+	 * is invisible to a second context.
+ */
+
+ if (!intel_engines_has_context_isolation(i915))
+ return 0;
+
+ if (!i915->kernel_context->ppgtt)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_gem_context *c;
+
+ c = kernel_context(i915);
+ if (IS_ERR(c)) {
+ err = PTR_ERR(c);
+ goto err;
+ }
+
+ client[i].scratch[0] = create_scratch(&c->ppgtt->vm, 1024);
+ if (IS_ERR(client[i].scratch[0])) {
+ err = PTR_ERR(client[i].scratch[0]);
+ kernel_context_close(c);
+ goto err;
+ }
+
+ client[i].scratch[1] = create_scratch(&c->ppgtt->vm, 1024);
+ if (IS_ERR(client[i].scratch[1])) {
+ err = PTR_ERR(client[i].scratch[1]);
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ kernel_context_close(c);
+ goto err;
+ }
+
+ client[i].ctx = c;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (!engine->whitelist.count)
+ continue;
+
+ /* Read default values */
+ err = read_whitelisted_registers(client[0].ctx, engine,
+ client[0].scratch[0]);
+ if (err)
+ goto err;
+
+ /* Try to overwrite registers (should only affect ctx0) */
+ err = scrub_whitelisted_registers(client[0].ctx, engine);
+ if (err)
+ goto err;
+
+ /* Read values from ctx1, we expect these to be defaults */
+ err = read_whitelisted_registers(client[1].ctx, engine,
+ client[1].scratch[0]);
+ if (err)
+ goto err;
+
+ /* Verify that both reads return the same default values */
+ err = eq_whitelisted_registers(client[0].scratch[0],
+ client[1].scratch[0],
+ engine);
+ if (err)
+ goto err;
+
+ /* Read back the updated values in ctx0 */
+ err = read_whitelisted_registers(client[0].ctx, engine,
+ client[0].scratch[1]);
+ if (err)
+ goto err;
+
+	/* User should be granted privilege to overwrite regs */
+ err = neq_whitelisted_registers(client[0].scratch[0],
+ client[0].scratch[1],
+ engine);
+ if (err)
+ goto err;
+ }
+
+err:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i].ctx)
+ break;
+
+ i915_vma_unpin_and_release(&client[i].scratch[1], 0);
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ kernel_context_close(client[i].ctx);
+ }
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ return err;
+}
+
static bool verify_gt_engine_wa(struct drm_i915_private *i915,
struct wa_lists *lists, const char *str)
{
@@ -844,6 +1163,7 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(live_dirty_whitelist),
SUBTEST(live_reset_whitelist),
+ SUBTEST(live_isolated_whitelist),
SUBTEST(live_gpu_reset_gt_engine_workarounds),
SUBTEST(live_engine_reset_gt_engine_workarounds),
};
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH] drm/i915/selftests: Verify whitelist of context registers
2019-04-16 8:10 [PATCH v2 4/4] drm/i915/selftests: Verify whitelist of context registers Chris Wilson
@ 2019-04-16 9:12 ` Chris Wilson
2019-04-16 14:50 ` Tvrtko Ursulin
0 siblings, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2019-04-16 9:12 UTC (permalink / raw)
To: intel-gfx
The RING_NONPRIV allows us to add registers to a whitelist that allows
userspace to modify them. Ideally such registers should be safe and
saved within the context such that they do not impact system behaviour
for other users. This selftest verifies that those registers we do add
are (a) then writable by userspace and (b) only affect a single client.
Opens:
- Is GEN9_SLICE_COMMON_ECO_CHICKEN1 really write-only?
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
Compile fix for writeonly_reg()
---
.../drm/i915/selftests/intel_workarounds.c | 320 ++++++++++++++++++
1 file changed, 320 insertions(+)
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index a363748a7a4f..f40e0937ec96 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -700,6 +700,325 @@ static int live_reset_whitelist(void *arg)
return err;
}
+static int read_whitelisted_registers(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct i915_vma *results)
+{
+ intel_wakeref_t wakeref;
+ struct i915_request *rq;
+ u32 srm, *cs;
+ int err, i;
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_req;
+
+ srm = MI_STORE_REGISTER_MEM;
+ if (INTEL_GEN(ctx->i915) >= 8)
+ srm++;
+
+ cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_req;
+ }
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u64 offset = results->node.start + sizeof(u32) * i;
+
+ *cs++ = srm;
+ *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ }
+ intel_ring_advance(rq, cs);
+
+err_req:
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+ err = -EIO;
+
+ return err;
+}
+
+static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ intel_wakeref_t wakeref;
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ int i, err;
+ u32 *cs;
+
+ batch = create_batch(ctx);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+
+ *cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
+ for (i = 0; i < engine->whitelist.count; i++) {
+ *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ *cs++ = 0xffffffff;
+ }
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_chipset_flush(ctx->i915);
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ goto err_unpin;
+
+ if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto err_request;
+ }
+
+ err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+
+err_request:
+ i915_request_add(rq);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
+ err = -EIO;
+
+err_unpin:
+ i915_gem_object_unpin_map(batch->obj);
+err_batch:
+ i915_vma_unpin_and_release(&batch, 0);
+ return err;
+}
+
+static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ /* Alas, we must pardon some whitelists */
+ static const struct {
+ i915_reg_t reg;
+ unsigned long gen_mask;
+ } pardon[] = {
+ { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
+ { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
+ };
+ u32 offset = i915_mmio_reg_offset(reg);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pardon); i++) {
+ if (INTEL_INFO(i915)->gen_mask & pardon[i].gen_mask &&
+ i915_mmio_reg_offset(pardon[i].reg) == offset)
+ return true;
+ }
+
+ return false;
+}
+
+static int eq_whitelisted_registers(struct i915_vma *A,
+ struct i915_vma *B,
+ struct intel_engine_cs *engine)
+{
+ u32 *a, *b;
+ int i, err;
+
+ a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto err_a;
+ }
+
+ err = 0;
+ for (i = 0; i < engine->whitelist.count; i++) {
+ if (a[i] != b[i] &&
+ !pardon_reg(engine->i915, engine->whitelist.list[i].reg)) {
+ pr_err("[%d] Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
+ i, i915_mmio_reg_offset(engine->whitelist.list[i].reg),
+ a[i], b[i]);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(B->obj);
+err_a:
+ i915_gem_object_unpin_map(A->obj);
+ return err;
+}
+
+static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ static const struct {
+ i915_reg_t reg;
+ unsigned long gen_mask;
+ } wo[] = {
+ { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
+ };
+ u32 offset = i915_mmio_reg_offset(reg);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wo); i++) {
+ if (INTEL_INFO(i915)->gen_mask & wo[i].gen_mask &&
+ i915_mmio_reg_offset(wo[i].reg) == offset)
+ return true;
+ }
+
+ return false;
+}
+
+static int neq_whitelisted_registers(struct i915_vma *A,
+ struct i915_vma *B,
+ struct intel_engine_cs *engine)
+{
+ u32 *a, *b;
+ int i, err;
+
+ a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto err_a;
+ }
+
+ err = 0;
+ for (i = 0; i < engine->whitelist.count; i++) {
+ if (a[i] == b[i] &&
+ !writeonly_reg(engine->i915, engine->whitelist.list[i].reg)) {
+ pr_err("[%d] Whitelist register 0x%4x:%08x was unwritable\n",
+ i, i915_mmio_reg_offset(engine->whitelist.list[i].reg), a[i]);
+ err = -EINVAL;
+ }
+ }
+
+ i915_gem_object_unpin_map(B->obj);
+err_a:
+ i915_gem_object_unpin_map(A->obj);
+ return err;
+}
+
+static int live_isolated_whitelist(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct {
+ struct i915_gem_context *ctx;
+ struct i915_vma *scratch[2];
+ } client[2] = {};
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int i, err = 0;
+
+ /*
+ * Check that a write into a whitelist register works, but
+	 * is invisible to a second context.
+ */
+
+ if (!intel_engines_has_context_isolation(i915))
+ return 0;
+
+ if (!i915->kernel_context->ppgtt)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct i915_gem_context *c;
+
+ c = kernel_context(i915);
+ if (IS_ERR(c)) {
+ err = PTR_ERR(c);
+ goto err;
+ }
+
+ client[i].scratch[0] = create_scratch(&c->ppgtt->vm, 1024);
+ if (IS_ERR(client[i].scratch[0])) {
+ err = PTR_ERR(client[i].scratch[0]);
+ kernel_context_close(c);
+ goto err;
+ }
+
+ client[i].scratch[1] = create_scratch(&c->ppgtt->vm, 1024);
+ if (IS_ERR(client[i].scratch[1])) {
+ err = PTR_ERR(client[i].scratch[1]);
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ kernel_context_close(c);
+ goto err;
+ }
+
+ client[i].ctx = c;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (!engine->whitelist.count)
+ continue;
+
+ /* Read default values */
+ err = read_whitelisted_registers(client[0].ctx, engine,
+ client[0].scratch[0]);
+ if (err)
+ goto err;
+
+ /* Try to overwrite registers (should only affect ctx0) */
+ err = scrub_whitelisted_registers(client[0].ctx, engine);
+ if (err)
+ goto err;
+
+ /* Read values from ctx1, we expect these to be defaults */
+ err = read_whitelisted_registers(client[1].ctx, engine,
+ client[1].scratch[0]);
+ if (err)
+ goto err;
+
+ /* Verify that both reads return the same default values */
+ err = eq_whitelisted_registers(client[0].scratch[0],
+ client[1].scratch[0],
+ engine);
+ if (err)
+ goto err;
+
+ /* Read back the updated values in ctx0 */
+ err = read_whitelisted_registers(client[0].ctx, engine,
+ client[0].scratch[1]);
+ if (err)
+ goto err;
+
+	/* User should be granted privilege to overwrite regs */
+ err = neq_whitelisted_registers(client[0].scratch[0],
+ client[0].scratch[1],
+ engine);
+ if (err)
+ goto err;
+ }
+
+err:
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (!client[i].ctx)
+ break;
+
+ i915_vma_unpin_and_release(&client[i].scratch[1], 0);
+ i915_vma_unpin_and_release(&client[i].scratch[0], 0);
+ kernel_context_close(client[i].ctx);
+ }
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ return err;
+}
+
static bool verify_gt_engine_wa(struct drm_i915_private *i915,
struct wa_lists *lists, const char *str)
{
@@ -844,6 +1163,7 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(live_dirty_whitelist),
SUBTEST(live_reset_whitelist),
+ SUBTEST(live_isolated_whitelist),
SUBTEST(live_gpu_reset_gt_engine_workarounds),
SUBTEST(live_engine_reset_gt_engine_workarounds),
};
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH] drm/i915/selftests: Verify whitelist of context registers
2019-04-16 9:12 ` [PATCH] " Chris Wilson
@ 2019-04-16 14:50 ` Tvrtko Ursulin
2019-04-24 10:00 ` Chris Wilson
0 siblings, 1 reply; 13+ messages in thread
From: Tvrtko Ursulin @ 2019-04-16 14:50 UTC (permalink / raw)
To: Chris Wilson, intel-gfx
On 16/04/2019 10:12, Chris Wilson wrote:
> The RING_NONPRIV allows us to add registers to a whitelist that allows
> userspace to modify them. Ideally such registers should be safe and
> saved within the context such that they do not impact system behaviour
> for other users. This selftest verifies that those registers we do add
> are (a) then writable by userspace and (b) only affect a single client.
>
> Opens:
> - Is GEN9_SLICE_COMMON_ECO_CHICKEN1 really write-only?
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
> Compile fix for writeonly_reg()
> ---
> .../drm/i915/selftests/intel_workarounds.c | 320 ++++++++++++++++++
> 1 file changed, 320 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
> index a363748a7a4f..f40e0937ec96 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
> @@ -700,6 +700,325 @@ static int live_reset_whitelist(void *arg)
> return err;
> }
>
> +static int read_whitelisted_registers(struct i915_gem_context *ctx,
> + struct intel_engine_cs *engine,
> + struct i915_vma *results)
> +{
> + intel_wakeref_t wakeref;
> + struct i915_request *rq;
> + u32 srm, *cs;
> + int err, i;
> +
> + rq = ERR_PTR(-ENODEV);
> + with_intel_runtime_pm(engine->i915, wakeref)
> + rq = i915_request_alloc(engine, ctx);
> + if (IS_ERR(rq))
> + return PTR_ERR(rq);
> +
> + err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
> + if (err)
> + goto err_req;
Why do you track the vma? There's a wait below and in scrub you even
flush manually.
> +
> + srm = MI_STORE_REGISTER_MEM;
> + if (INTEL_GEN(ctx->i915) >= 8)
> + srm++;
> +
> + cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
> + if (IS_ERR(cs)) {
> + err = PTR_ERR(cs);
> + goto err_req;
> + }
> +
> + for (i = 0; i < engine->whitelist.count; i++) {
> + u64 offset = results->node.start + sizeof(u32) * i;
> +
> + *cs++ = srm;
> + *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
> + *cs++ = lower_32_bits(offset);
> + *cs++ = upper_32_bits(offset);
> + }
> + intel_ring_advance(rq, cs);
> +
> +err_req:
> + i915_request_add(rq);
> +
> + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
> + err = -EIO;
> +
> + return err;
> +}
> +
> +static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
> + struct intel_engine_cs *engine)
> +{
> + intel_wakeref_t wakeref;
> + struct i915_request *rq;
> + struct i915_vma *batch;
> + int i, err;
> + u32 *cs;
> +
> + batch = create_batch(ctx);
> + if (IS_ERR(batch))
> + return PTR_ERR(batch);
> +
> + cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
> + if (IS_ERR(cs)) {
> + err = PTR_ERR(cs);
> + goto err_batch;
> + }
> +
> + *cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
> + for (i = 0; i < engine->whitelist.count; i++) {
> + *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
> + *cs++ = 0xffffffff;
> + }
> + *cs++ = MI_BATCH_BUFFER_END;
> +
> + i915_gem_object_flush_map(batch->obj);
> + i915_gem_chipset_flush(ctx->i915);
> +
> + rq = ERR_PTR(-ENODEV);
> + with_intel_runtime_pm(engine->i915, wakeref)
> + rq = i915_request_alloc(engine, ctx);
> + if (IS_ERR(rq))
> + goto err_unpin;
> +
> + if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
> + err = engine->emit_init_breadcrumb(rq);
> + if (err)
> + goto err_request;
> + }
> +
> + err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
Why is batch needed here when everything else is happy to run from the ring?
> +
> +err_request:
> + i915_request_add(rq);
> + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
> + err = -EIO;
> +
> +err_unpin:
> + i915_gem_object_unpin_map(batch->obj);
> +err_batch:
> + i915_vma_unpin_and_release(&batch, 0);
> + return err;
> +}
> +
> +static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
> +{
> + /* Alas, we must pardon some whitelists */
> + static const struct {
> + i915_reg_t reg;
> + unsigned long gen_mask;
> + } pardon[] = {
> + { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
> + { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
> + };
> + u32 offset = i915_mmio_reg_offset(reg);
> + int i;
> +
> + for (i = 0; i < ARRAY_SIZE(pardon); i++) {
> + if (INTEL_INFO(i915)->gen_mask & pardon[i].gen_mask &&
> + i915_mmio_reg_offset(pardon[i].reg) == offset)
> + return true;
> + }
> +
> + return false;
> +}
> +
> +static int eq_whitelisted_registers(struct i915_vma *A,
> + struct i915_vma *B,
> + struct intel_engine_cs *engine)
> +{
> + u32 *a, *b;
> + int i, err;
> +
> + a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
> + if (IS_ERR(a))
> + return PTR_ERR(a);
> +
> + b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
> + if (IS_ERR(b)) {
> + err = PTR_ERR(b);
> + goto err_a;
> + }
> +
> + err = 0;
> + for (i = 0; i < engine->whitelist.count; i++) {
> + if (a[i] != b[i] &&
> + !pardon_reg(engine->i915, engine->whitelist.list[i].reg)) {
> + pr_err("[%d] Whitelisted register 0x%4x not context saved: A=%08x, B=%08x\n",
> + i, i915_mmio_reg_offset(engine->whitelist.list[i].reg),
> + a[i], b[i]);
> + err = -EINVAL;
> + }
> + }
> +
> + i915_gem_object_unpin_map(B->obj);
> +err_a:
> + i915_gem_object_unpin_map(A->obj);
> + return err;
> +}
> +
> +static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
> +{
> + static const struct {
> + i915_reg_t reg;
> + unsigned long gen_mask;
> + } wo[] = {
> + { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
> + };
What is the difference between pardoned and whitelisted?
> + u32 offset = i915_mmio_reg_offset(reg);
> + int i;
> +
> + for (i = 0; i < ARRAY_SIZE(wo); i++) {
> + if (INTEL_INFO(i915)->gen_mask & wo[i].gen_mask &&
> + i915_mmio_reg_offset(wo[i].reg) == offset)
> + return true;
> + }
> +
> + return false;
You could consolidate into one find_reg which takes the array.
> +}
> +
> +static int neq_whitelisted_registers(struct i915_vma *A,
> + struct i915_vma *B,
> + struct intel_engine_cs *engine)
> +{
> + u32 *a, *b;
> + int i, err;
> +
> + a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
> + if (IS_ERR(a))
> + return PTR_ERR(a);
> +
> + b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
> + if (IS_ERR(b)) {
> + err = PTR_ERR(b);
> + goto err_a;
> + }
> +
> + err = 0;
> + for (i = 0; i < engine->whitelist.count; i++) {
> + if (a[i] == b[i] &&
> + !writeonly_reg(engine->i915, engine->whitelist.list[i].reg)) {
> + pr_err("[%d] Whitelist register 0x%4x:%08x was unwritable\n",
> + i, i915_mmio_reg_offset(engine->whitelist.list[i].reg), a[i]);
> + err = -EINVAL;
> + }
> + }
> +
> + i915_gem_object_unpin_map(B->obj);
> +err_a:
> + i915_gem_object_unpin_map(A->obj);
> + return err;
> +}
eq and neq could also be cheaply consolidated into one taking table and
error message.
> +
> +static int live_isolated_whitelist(void *arg)
> +{
> + struct drm_i915_private *i915 = arg;
> + struct {
> + struct i915_gem_context *ctx;
> + struct i915_vma *scratch[2];
> + } client[2] = {};
> + struct intel_engine_cs *engine;
> + enum intel_engine_id id;
> + int i, err = 0;
> +
> + /*
> + * Check that a write into a whitelist register works, but
> + * is invisible to a second context.
> + */
> +
> + if (!intel_engines_has_context_isolation(i915))
> + return 0;
> +
> + if (!i915->kernel_context->ppgtt)
> + return 0;
> +
> + for (i = 0; i < ARRAY_SIZE(client); i++) {
> + struct i915_gem_context *c;
> +
> + c = kernel_context(i915);
> + if (IS_ERR(c)) {
> + err = PTR_ERR(c);
> + goto err;
> + }
> +
> + client[i].scratch[0] = create_scratch(&c->ppgtt->vm, 1024);
> + if (IS_ERR(client[i].scratch[0])) {
> + err = PTR_ERR(client[i].scratch[0]);
> + kernel_context_close(c);
> + goto err;
> + }
> +
> + client[i].scratch[1] = create_scratch(&c->ppgtt->vm, 1024);
> + if (IS_ERR(client[i].scratch[1])) {
> + err = PTR_ERR(client[i].scratch[1]);
> + i915_vma_unpin_and_release(&client[i].scratch[0], 0);
> + kernel_context_close(c);
> + goto err;
> + }
> +
> + client[i].ctx = c;
> + }
> +
> + for_each_engine(engine, i915, id) {
> + if (!engine->whitelist.count)
> + continue;
> +
> + /* Read default values */
> + err = read_whitelisted_registers(client[0].ctx, engine,
> + client[0].scratch[0]);
> + if (err)
> + goto err;
> +
> + /* Try to overwrite registers (should only affect ctx0) */
> + err = scrub_whitelisted_registers(client[0].ctx, engine);
> + if (err)
> + goto err;
> +
> + /* Read values from ctx1, we expect these to be defaults */
> + err = read_whitelisted_registers(client[1].ctx, engine,
> + client[1].scratch[0]);
> + if (err)
> + goto err;
> +
> + /* Verify that both reads return the same default values */
> + err = eq_whitelisted_registers(client[0].scratch[0],
> + client[1].scratch[0],
> + engine);
> + if (err)
> + goto err;
> +
> + /* Read back the updated values in ctx0 */
> + err = read_whitelisted_registers(client[0].ctx, engine,
> + client[0].scratch[1]);
> + if (err)
> + goto err;
> +
> + /* User should be granted privilege to overwrite regs */
> + err = neq_whitelisted_registers(client[0].scratch[0],
> + client[0].scratch[1],
> + engine);
> + if (err)
> + goto err;
> + }
> +
> +err:
> + for (i = 0; i < ARRAY_SIZE(client); i++) {
> + if (!client[i].ctx)
> + break;
> +
> + i915_vma_unpin_and_release(&client[i].scratch[1], 0);
> + i915_vma_unpin_and_release(&client[i].scratch[0], 0);
> + kernel_context_close(client[i].ctx);
> + }
> +
> + if (igt_flush_test(i915, I915_WAIT_LOCKED))
> + err = -EIO;
> +
> + return err;
> +}
> +
> static bool verify_gt_engine_wa(struct drm_i915_private *i915,
> struct wa_lists *lists, const char *str)
> {
> @@ -844,6 +1163,7 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
> static const struct i915_subtest tests[] = {
> SUBTEST(live_dirty_whitelist),
> SUBTEST(live_reset_whitelist),
> + SUBTEST(live_isolated_whitelist),
> SUBTEST(live_gpu_reset_gt_engine_workarounds),
> SUBTEST(live_engine_reset_gt_engine_workarounds),
> };
>
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH] drm/i915/selftests: Verify whitelist of context registers
2019-04-16 14:50 ` Tvrtko Ursulin
@ 2019-04-24 10:00 ` Chris Wilson
2019-04-24 11:03 ` Chris Wilson
0 siblings, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2019-04-24 10:00 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx
Quoting Tvrtko Ursulin (2019-04-16 15:50:27)
>
> On 16/04/2019 10:12, Chris Wilson wrote:
> > The RING_NONPRIV allows us to add registers to a whitelist that allows
> > userspace to modify them. Ideally such registers should be safe and
> > saved within the context such that they do not impact system behaviour
> > for other users. This selftest verifies that those registers we do add
> > are (a) then writable by userspace and (b) only affect a single client.
> >
> > Opens:
> > - Is GEN9_SLICE_COMMON_ECO_CHICKEN1 really write-only?
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > ---
> > Compile fix for writeonly_reg()
> > ---
> > .../drm/i915/selftests/intel_workarounds.c | 320 ++++++++++++++++++
> > 1 file changed, 320 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
> > index a363748a7a4f..f40e0937ec96 100644
> > --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
> > +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
> > @@ -700,6 +700,325 @@ static int live_reset_whitelist(void *arg)
> > return err;
> > }
> >
> > +static int read_whitelisted_registers(struct i915_gem_context *ctx,
> > + struct intel_engine_cs *engine,
> > + struct i915_vma *results)
> > +{
> > + intel_wakeref_t wakeref;
> > + struct i915_request *rq;
> > + u32 srm, *cs;
> > + int err, i;
> > +
> > + rq = ERR_PTR(-ENODEV);
> > + with_intel_runtime_pm(engine->i915, wakeref)
> > + rq = i915_request_alloc(engine, ctx);
> > + if (IS_ERR(rq))
> > + return PTR_ERR(rq);
> > +
> > + err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
> > + if (err)
> > + goto err_req;
>
> Why do you track the vma? There's a wait below and in scrub you even
> flush manually.
copy-and-paste.
> > + srm = MI_STORE_REGISTER_MEM;
> > + if (INTEL_GEN(ctx->i915) >= 8)
> > + srm++;
> > +
> > + cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
> > + if (IS_ERR(cs)) {
> > + err = PTR_ERR(cs);
> > + goto err_req;
> > + }
> > +
> > + for (i = 0; i < engine->whitelist.count; i++) {
> > + u64 offset = results->node.start + sizeof(u32) * i;
> > +
> > + *cs++ = srm;
> > + *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
> > + *cs++ = lower_32_bits(offset);
> > + *cs++ = upper_32_bits(offset);
> > + }
> > + intel_ring_advance(rq, cs);
> > +
> > +err_req:
> > + i915_request_add(rq);
> > +
> > + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
> > + err = -EIO;
> > +
> > + return err;
> > +}
> > +
> > +static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
> > + struct intel_engine_cs *engine)
> > +{
> > + intel_wakeref_t wakeref;
> > + struct i915_request *rq;
> > + struct i915_vma *batch;
> > + int i, err;
> > + u32 *cs;
> > +
> > + batch = create_batch(ctx);
> > + if (IS_ERR(batch))
> > + return PTR_ERR(batch);
> > +
> > + cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
> > + if (IS_ERR(cs)) {
> > + err = PTR_ERR(cs);
> > + goto err_batch;
> > + }
> > +
> > + *cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
> > + for (i = 0; i < engine->whitelist.count; i++) {
> > + *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
> > + *cs++ = 0xffffffff;
> > + }
> > + *cs++ = MI_BATCH_BUFFER_END;
> > +
> > + i915_gem_object_flush_map(batch->obj);
> > + i915_gem_chipset_flush(ctx->i915);
> > +
> > + rq = ERR_PTR(-ENODEV);
> > + with_intel_runtime_pm(engine->i915, wakeref)
> > + rq = i915_request_alloc(engine, ctx);
> > + if (IS_ERR(rq))
> > + goto err_unpin;
> > +
> > + if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
> > + err = engine->emit_init_breadcrumb(rq);
> > + if (err)
> > + goto err_request;
> > + }
> > +
> > + err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
>
> Why is batch needed here when everything else is happy to run from the ring?
Because we are playing with HW registers, I want reset to work safely.
So long as we have init_breadcrumb, the payload scrubbing should work.
Ok.
> > +static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
> > +{
> > + static const struct {
> > + i915_reg_t reg;
> > + unsigned long gen_mask;
> > + } wo[] = {
> > + { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
> > + };
>
> What is the difference between pardoned and whitelisted?
Pardoned is for non-context registers that are already exposed as
RING_NONPRIV ABI. Mistakes already made.
writeonly is for a register that doesn't seem to stick but everyone
believes is a crucial w/a for glk.
> > + u32 offset = i915_mmio_reg_offset(reg);
> > + int i;
> > +
> > + for (i = 0; i < ARRAY_SIZE(wo); i++) {
> > + if (INTEL_INFO(i915)->gen_mask & wo[i].gen_mask &&
> > + i915_mmio_reg_offset(wo[i].reg) == offset)
> > + return true;
> > + }
> > +
> > + return false;
>
> You could consolidate into one find_reg which takes the array.
Next up, you'll be saying we could bsearch by addr! :)
> > +static int neq_whitelisted_registers(struct i915_vma *A,
> > + struct i915_vma *B,
> > + struct intel_engine_cs *engine)
> > +{
> > + u32 *a, *b;
> > + int i, err;
> > +
> > + a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
> > + if (IS_ERR(a))
> > + return PTR_ERR(a);
> > +
> > + b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
> > + if (IS_ERR(b)) {
> > + err = PTR_ERR(b);
> > + goto err_a;
> > + }
> > +
> > + err = 0;
> > + for (i = 0; i < engine->whitelist.count; i++) {
> > + if (a[i] == b[i] &&
> > + !writeonly_reg(engine->i915, engine->whitelist.list[i].reg)) {
> > + pr_err("[%d] Whitelist register 0x%4x:%08x was unwritable\n",
> > + i, i915_mmio_reg_offset(engine->whitelist.list[i].reg), a[i]);
> > + err = -EINVAL;
> > + }
> > + }
> > +
> > + i915_gem_object_unpin_map(B->obj);
> > +err_a:
> > + i915_gem_object_unpin_map(A->obj);
> > + return err;
> > +}
>
> eq and neq could also be cheaply consolidated into one taking table and
> error message.
And op. The preamble is the same, the loop innards are not, but we can
just make a callback.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH] drm/i915/selftests: Verify whitelist of context registers
2019-04-24 10:00 ` Chris Wilson
@ 2019-04-24 11:03 ` Chris Wilson
2019-04-24 11:10 ` Tvrtko Ursulin
0 siblings, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2019-04-24 11:03 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx
Quoting Chris Wilson (2019-04-24 11:00:03)
> Quoting Tvrtko Ursulin (2019-04-16 15:50:27)
> >
> > On 16/04/2019 10:12, Chris Wilson wrote:
> > > + rq = ERR_PTR(-ENODEV);
> > > + with_intel_runtime_pm(engine->i915, wakeref)
> > > + rq = i915_request_alloc(engine, ctx);
> > > + if (IS_ERR(rq))
> > > + goto err_unpin;
> > > +
> > > + if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
> > > + err = engine->emit_init_breadcrumb(rq);
> > > + if (err)
> > > + goto err_request;
> > > + }
> > > +
> > > + err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
> >
> > Why is batch needed here when everything else is happy to run from the ring?
>
> Because we are playing with HW registers, I want reset to work safely.
> So long as we have init_breadcrumb, the payload scrubbing should work.
> Ok.
No, I am an idiot. The batch is so that we emulate userspace with
unprivileged execution
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH] drm/i915/selftests: Verify whitelist of context registers
2019-04-24 11:03 ` Chris Wilson
@ 2019-04-24 11:10 ` Tvrtko Ursulin
0 siblings, 0 replies; 13+ messages in thread
From: Tvrtko Ursulin @ 2019-04-24 11:10 UTC (permalink / raw)
To: Chris Wilson, intel-gfx
On 24/04/2019 12:03, Chris Wilson wrote:
> Quoting Chris Wilson (2019-04-24 11:00:03)
>> Quoting Tvrtko Ursulin (2019-04-16 15:50:27)
>>>
>>> On 16/04/2019 10:12, Chris Wilson wrote:
>>>> + rq = ERR_PTR(-ENODEV);
>>>> + with_intel_runtime_pm(engine->i915, wakeref)
>>>> + rq = i915_request_alloc(engine, ctx);
>>>> + if (IS_ERR(rq))
>>>> + goto err_unpin;
>>>> +
>>>> + if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
>>>> + err = engine->emit_init_breadcrumb(rq);
>>>> + if (err)
>>>> + goto err_request;
>>>> + }
>>>> +
>>>> + err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
>>>
>>> Why is batch needed here when everything else is happy to run from the ring?
>>
>> Because we are playing with HW registers, I want reset to work safely.
>> So long as we have init_breadcrumb, the payload scrubbing should work.
>> Ok.
>
> No, I am an idiot. The batch is so that we emulate userspace with
> unprivileged execution
Okay, keep the r-b. :)
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2019-04-24 11:10 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-04-15 16:00 [PATCH 1/4] drm/i915: Verify workarounds immediately after application Chris Wilson
2019-04-15 16:00 ` [PATCH 2/4] drm/i915: Verify the engine workarounds stick on application Chris Wilson
2019-04-15 16:00 ` [PATCH 3/4] drm/i915: Make workaround verification *optional* Chris Wilson
2019-04-15 16:00 ` [PATCH 4/4] drm/i915/selftests: Verify whitelist of context registers Chris Wilson
2019-04-15 17:41 ` [PATCH] " Chris Wilson
2019-04-16 14:49 ` [Intel-gfx] " Dan Carpenter
2019-04-15 16:59 ` ✗ Fi.CI.BAT: failure for series starting with [1/4] drm/i915: Verify workarounds immediately after application Patchwork
2019-04-15 18:33 ` ✗ Fi.CI.BAT: failure for series starting with [1/4] drm/i915: Verify workarounds immediately after application (rev2) Patchwork
2019-04-16 8:10 [PATCH v2 4/4] drm/i915/selftests: Verify whitelist of context registers Chris Wilson
2019-04-16 9:12 ` [PATCH] " Chris Wilson
2019-04-16 14:50 ` Tvrtko Ursulin
2019-04-24 10:00 ` Chris Wilson
2019-04-24 11:03 ` Chris Wilson
2019-04-24 11:10 ` Tvrtko Ursulin
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.