* [PATCH 2/4] drm/i915/selftests: Include arbitration points in preemption smoketest
2018-09-28 13:58 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
@ 2018-09-28 13:58 ` Chris Wilson
2018-09-28 14:04 ` [PATCH] " Chris Wilson
2018-09-28 13:58 ` [PATCH 3/4] drm/i915: Reserve some priority bits for internal use Chris Wilson
` (4 subsequent siblings)
5 siblings, 1 reply; 12+ messages in thread
From: Chris Wilson @ 2018-09-28 13:58 UTC (permalink / raw)
To: intel-gfx
Include a batch full of a page of arbitration points in order to provide
a window for inject_preempt_context() in the preemption smoketests.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/selftests/intel_lrc.c | 130 +++++++++++++++------
1 file changed, 97 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index d68a924c530e..44b3e97948f9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -588,6 +588,7 @@ struct preempt_smoke {
struct drm_i915_private *i915;
struct i915_gem_context **contexts;
struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *batch;
unsigned int ncontext;
struct rnd_state prng;
unsigned long count;
@@ -599,6 +600,49 @@ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
&smoke->prng)];
}
+static int smoke_submit(struct preempt_smoke *smoke,
+ struct i915_gem_context *ctx, int prio,
+ struct drm_i915_gem_object *batch)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma = NULL;
+ int err = 0;
+
+ if (batch) {
+ vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+ }
+
+ ctx->sched.priority = prio;
+
+ rq = i915_request_alloc(smoke->engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ if (vma) {
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ }
+
+ i915_request_add(rq);
+
+unpin:
+ if (vma)
+ i915_vma_unpin(vma);
+
+ return err;
+}
+
static int smoke_crescendo_thread(void *arg)
{
struct preempt_smoke *smoke = arg;
@@ -608,21 +652,15 @@ static int smoke_crescendo_thread(void *arg)
count = 0;
do {
struct i915_gem_context *ctx = smoke_context(smoke);
- struct i915_request *rq;
+ int err;
mutex_lock(&smoke->i915->drm.struct_mutex);
-
- ctx->sched.priority = count % I915_PRIORITY_MAX;
-
- rq = i915_request_alloc(smoke->engine, ctx);
- if (IS_ERR(rq)) {
- mutex_unlock(&smoke->i915->drm.struct_mutex);
- return PTR_ERR(rq);
- }
-
- i915_request_add(rq);
-
+ err = smoke_submit(smoke,
+ ctx, count % I915_PRIORITY_MAX,
+ smoke->batch);
mutex_unlock(&smoke->i915->drm.struct_mutex);
+ if (err)
+ return err;
count++;
} while (!__igt_timeout(end_time, NULL));
@@ -631,7 +669,8 @@ static int smoke_crescendo_thread(void *arg)
return 0;
}
-static int smoke_crescendo(struct preempt_smoke *smoke)
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
{
struct task_struct *tsk[I915_NUM_ENGINES] = {};
struct preempt_smoke arg[I915_NUM_ENGINES];
@@ -645,6 +684,8 @@ static int smoke_crescendo(struct preempt_smoke *smoke)
for_each_engine(engine, smoke->i915, id) {
arg[id] = *smoke;
arg[id].engine = engine;
+ if (!(flags & BATCH))
+ arg[id].batch = NULL;
arg[id].count = 0;
tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
@@ -671,37 +712,37 @@ static int smoke_crescendo(struct preempt_smoke *smoke)
mutex_lock(&smoke->i915->drm.struct_mutex);
- pr_info("Submitted %lu crescendo requests across %d engines and %d contexts\n",
- count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+ count, flags,
+ INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
return 0;
}
-static int smoke_random(struct preempt_smoke *smoke)
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
- struct intel_engine_cs *engine;
enum intel_engine_id id;
IGT_TIMEOUT(end_time);
unsigned long count;
count = 0;
do {
- for_each_engine(engine, smoke->i915, id) {
+ for_each_engine(smoke->engine, smoke->i915, id) {
struct i915_gem_context *ctx = smoke_context(smoke);
- struct i915_request *rq;
-
- ctx->sched.priority = random_priority(&smoke->prng);
+ int err;
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ err = smoke_submit(smoke,
+ ctx, random_priority(&smoke->prng),
+ flags & BATCH ? smoke->batch : NULL);
+ if (err)
+ return err;
- i915_request_add(rq);
count++;
}
} while (!__igt_timeout(end_time, NULL));
- pr_info("Submitted %lu random requests across %d engines and %d contexts\n",
- count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+ count, flags,
+ INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
return 0;
}
@@ -712,7 +753,9 @@ static int live_preempt_smoke(void *arg)
.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
.ncontext = 1024,
};
+ const unsigned int phase[] = { 0, BATCH };
int err = -ENOMEM;
+ u32 *cs;
int n;
if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
@@ -727,19 +770,37 @@ static int live_preempt_smoke(void *arg)
mutex_lock(&smoke.i915->drm.struct_mutex);
intel_runtime_pm_get(smoke.i915);
+ smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+ if (IS_ERR(smoke.batch)) {
+ err = PTR_ERR(smoke.batch);
+ goto err_unlock;
+ }
+
+ cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+ for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+ cs[n] = MI_ARB_CHECK;
+ cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(smoke.batch);
+
for (n = 0; n < smoke.ncontext; n++) {
smoke.contexts[n] = kernel_context(smoke.i915);
if (!smoke.contexts[n])
goto err_ctx;
}
- err = smoke_crescendo(&smoke);
- if (err)
- goto err_ctx;
+ for (n = 0; n < ARRAY_SIZE(phase); n++) {
+ err = smoke_crescendo(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
- err = smoke_random(&smoke);
- if (err)
- goto err_ctx;
+ err = smoke_random(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+ }
err_ctx:
if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
@@ -751,6 +812,9 @@ static int live_preempt_smoke(void *arg)
kernel_context_close(smoke.contexts[n]);
}
+err_batch:
+ i915_gem_object_put(smoke.batch);
+err_unlock:
intel_runtime_pm_put(smoke.i915);
mutex_unlock(&smoke.i915->drm.struct_mutex);
kfree(smoke.contexts);
--
2.19.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH] drm/i915/selftests: Include arbitration points in preemption smoketest
2018-09-28 13:58 ` [PATCH 2/4] drm/i915/selftests: Include arbitration points in preemption smoketest Chris Wilson
@ 2018-09-28 14:04 ` Chris Wilson
0 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2018-09-28 14:04 UTC (permalink / raw)
To: intel-gfx
Include a batch full of a page of arbitration points in order to provide
a window for inject_preempt_context() in the preemption smoketests.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/selftests/intel_lrc.c | 134 ++++++++++++++++-----
1 file changed, 101 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index d68a924c530e..d67fe8335ceb 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -588,6 +588,7 @@ struct preempt_smoke {
struct drm_i915_private *i915;
struct i915_gem_context **contexts;
struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *batch;
unsigned int ncontext;
struct rnd_state prng;
unsigned long count;
@@ -599,6 +600,49 @@ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
&smoke->prng)];
}
+static int smoke_submit(struct preempt_smoke *smoke,
+ struct i915_gem_context *ctx, int prio,
+ struct drm_i915_gem_object *batch)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma = NULL;
+ int err = 0;
+
+ if (batch) {
+ vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+ }
+
+ ctx->sched.priority = prio;
+
+ rq = i915_request_alloc(smoke->engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ if (vma) {
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ }
+
+ i915_request_add(rq);
+
+unpin:
+ if (vma)
+ i915_vma_unpin(vma);
+
+ return err;
+}
+
static int smoke_crescendo_thread(void *arg)
{
struct preempt_smoke *smoke = arg;
@@ -608,21 +652,15 @@ static int smoke_crescendo_thread(void *arg)
count = 0;
do {
struct i915_gem_context *ctx = smoke_context(smoke);
- struct i915_request *rq;
+ int err;
mutex_lock(&smoke->i915->drm.struct_mutex);
-
- ctx->sched.priority = count % I915_PRIORITY_MAX;
-
- rq = i915_request_alloc(smoke->engine, ctx);
- if (IS_ERR(rq)) {
- mutex_unlock(&smoke->i915->drm.struct_mutex);
- return PTR_ERR(rq);
- }
-
- i915_request_add(rq);
-
+ err = smoke_submit(smoke,
+ ctx, count % I915_PRIORITY_MAX,
+ smoke->batch);
mutex_unlock(&smoke->i915->drm.struct_mutex);
+ if (err)
+ return err;
count++;
} while (!__igt_timeout(end_time, NULL));
@@ -631,7 +669,8 @@ static int smoke_crescendo_thread(void *arg)
return 0;
}
-static int smoke_crescendo(struct preempt_smoke *smoke)
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
{
struct task_struct *tsk[I915_NUM_ENGINES] = {};
struct preempt_smoke arg[I915_NUM_ENGINES];
@@ -645,6 +684,8 @@ static int smoke_crescendo(struct preempt_smoke *smoke)
for_each_engine(engine, smoke->i915, id) {
arg[id] = *smoke;
arg[id].engine = engine;
+ if (!(flags & BATCH))
+ arg[id].batch = NULL;
arg[id].count = 0;
tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
@@ -671,37 +712,37 @@ static int smoke_crescendo(struct preempt_smoke *smoke)
mutex_lock(&smoke->i915->drm.struct_mutex);
- pr_info("Submitted %lu crescendo requests across %d engines and %d contexts\n",
- count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+ count, flags,
+ INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
return 0;
}
-static int smoke_random(struct preempt_smoke *smoke)
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
- struct intel_engine_cs *engine;
enum intel_engine_id id;
IGT_TIMEOUT(end_time);
unsigned long count;
count = 0;
do {
- for_each_engine(engine, smoke->i915, id) {
+ for_each_engine(smoke->engine, smoke->i915, id) {
struct i915_gem_context *ctx = smoke_context(smoke);
- struct i915_request *rq;
-
- ctx->sched.priority = random_priority(&smoke->prng);
+ int err;
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ err = smoke_submit(smoke,
+ ctx, random_priority(&smoke->prng),
+ flags & BATCH ? smoke->batch : NULL);
+ if (err)
+ return err;
- i915_request_add(rq);
count++;
}
} while (!__igt_timeout(end_time, NULL));
- pr_info("Submitted %lu random requests across %d engines and %d contexts\n",
- count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+ count, flags,
+ INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
return 0;
}
@@ -712,7 +753,9 @@ static int live_preempt_smoke(void *arg)
.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
.ncontext = 1024,
};
+ const unsigned int phase[] = { 0, BATCH };
int err = -ENOMEM;
+ u32 *cs;
int n;
if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
@@ -727,19 +770,41 @@ static int live_preempt_smoke(void *arg)
mutex_lock(&smoke.i915->drm.struct_mutex);
intel_runtime_pm_get(smoke.i915);
+ smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+ if (IS_ERR(smoke.batch)) {
+ err = PTR_ERR(smoke.batch);
+ goto err_unlock;
+ }
+
+ cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+ for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+ cs[n] = MI_ARB_CHECK;
+ cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(smoke.batch);
+
+ err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
+ if (err)
+ goto err_batch;
+
for (n = 0; n < smoke.ncontext; n++) {
smoke.contexts[n] = kernel_context(smoke.i915);
if (!smoke.contexts[n])
goto err_ctx;
}
- err = smoke_crescendo(&smoke);
- if (err)
- goto err_ctx;
+ for (n = 0; n < ARRAY_SIZE(phase); n++) {
+ err = smoke_crescendo(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
- err = smoke_random(&smoke);
- if (err)
- goto err_ctx;
+ err = smoke_random(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+ }
err_ctx:
if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
@@ -751,6 +816,9 @@ static int live_preempt_smoke(void *arg)
kernel_context_close(smoke.contexts[n]);
}
+err_batch:
+ i915_gem_object_put(smoke.batch);
+err_unlock:
intel_runtime_pm_put(smoke.i915);
mutex_unlock(&smoke.i915->drm.struct_mutex);
kfree(smoke.contexts);
--
2.19.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH 3/4] drm/i915: Reserve some priority bits for internal use
2018-09-28 13:58 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
2018-09-28 13:58 ` [PATCH 2/4] drm/i915/selftests: Include arbitration points in preemption smoketest Chris Wilson
@ 2018-09-28 13:58 ` Chris Wilson
2018-09-28 13:58 ` [PATCH 4/4] drm/i915: Combine multiple internal plists into the same i915_priolist bucket Chris Wilson
` (3 subsequent siblings)
5 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2018-09-28 13:58 UTC (permalink / raw)
To: intel-gfx
In the next few patches, we will want to give a small priority boost to
some requests/queues but not so much that we perturb the user controlled
order. As such we will shift the user priority bits higher leaving
ourselves a few low priority bits for our internal bumping.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
drivers/gpu/drm/i915/i915_drv.h | 2 +-
drivers/gpu/drm/i915/i915_gem_context.c | 9 +++++----
drivers/gpu/drm/i915/i915_scheduler.h | 6 ++++++
drivers/gpu/drm/i915/selftests/intel_lrc.c | 8 +++++---
4 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 30191523c309..b672ed0cac24 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3236,7 +3236,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 15c92f75b1b8..8cbe58070561 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
- ctx->sched.priority = I915_PRIORITY_NORMAL;
+ ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
@@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
}
i915_gem_context_clear_bannable(ctx);
- ctx->sched.priority = prio;
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
ctx->ring_size = PAGE_SIZE;
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -879,7 +879,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_bannable(ctx);
break;
case I915_CONTEXT_PARAM_PRIORITY:
- args->value = ctx->sched.priority;
+ args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
break;
default:
ret = -EINVAL;
@@ -948,7 +948,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
!capable(CAP_SYS_NICE))
ret = -EPERM;
else
- ctx->sched.priority = priority;
+ ctx->sched.priority =
+ I915_USER_PRIORITY(priority);
}
break;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 70a42220358d..89d456312557 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -19,6 +19,12 @@ enum {
I915_PRIORITY_INVALID = INT_MIN
};
+#define I915_USER_PRIORITY_SHIFT 0
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
+
struct i915_sched_attr {
/**
* @priority: execution and service priority
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 44b3e97948f9..0dce9f2ab0db 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -292,12 +292,14 @@ static int live_preempt(void *arg)
ctx_hi = kernel_context(i915);
if (!ctx_hi)
goto err_spin_lo;
- ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
ctx_lo = kernel_context(i915);
if (!ctx_lo)
goto err_ctx_hi;
- ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
for_each_engine(engine, i915, id) {
struct i915_request *rq;
@@ -418,7 +420,7 @@ static int live_late_preempt(void *arg)
goto err_wedged;
}
- attr.priority = I915_PRIORITY_MAX;
+ attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
engine->schedule(rq, &attr);
if (!wait_for_spinner(&spin_hi, rq)) {
--
2.19.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 12+ messages in thread
* [PATCH 4/4] drm/i915: Combine multiple internal plists into the same i915_priolist bucket
2018-09-28 13:58 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
2018-09-28 13:58 ` [PATCH 2/4] drm/i915/selftests: Include arbitration points in preemption smoketest Chris Wilson
2018-09-28 13:58 ` [PATCH 3/4] drm/i915: Reserve some priority bits for internal use Chris Wilson
@ 2018-09-28 13:58 ` Chris Wilson
2018-10-01 10:38 ` [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Tvrtko Ursulin
` (2 subsequent siblings)
5 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2018-09-28 13:58 UTC (permalink / raw)
To: intel-gfx
As we are about to allow ourselves to slightly bump the user priority
into a few different sublevels, packthose internal priority lists
into the same i915_priolist to keep the rbtree compact and avoid having
to allocate the default user priority even after the internal bumping.
The downside to having a requests[] array rather than a node per active list,
is that we then have to walk over the empty higher priority lists. To
compensate, we track the active buckets and use a small bitmap to skip
over any inactive ones.
v2: Use MASK of internal levels to simplify our usage.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
drivers/gpu/drm/i915/intel_engine_cs.c | 6 +-
drivers/gpu/drm/i915/intel_guc_submission.c | 12 ++-
drivers/gpu/drm/i915/intel_lrc.c | 87 ++++++++++++++-------
drivers/gpu/drm/i915/intel_ringbuffer.h | 13 ++-
4 files changed, 80 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 6726d57f018f..1c6143bdf5a4 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1534,10 +1534,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
count = 0;
drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p =
- rb_entry(rb, typeof(*p), node);
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ int i;
- list_for_each_entry(rq, &p->requests, sched.link) {
+ priolist_for_each_request(rq, p, i) {
if (count++ < MAX_REQUESTS_TO_SHOW - 1)
print_request(m, rq, "\t\tQ ");
else
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 4874a212754c..ac862b42f6a1 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -746,30 +746,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
+ int i;
- list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+ priolist_for_each_request_consume(rq, rn, p, i) {
if (last && rq->hw_context != last->hw_context) {
- if (port == last_port) {
- __list_del_many(&p->requests,
- &rq->sched.link);
+ if (port == last_port)
goto done;
- }
if (submit)
port_assign(port, last);
port++;
}
- INIT_LIST_HEAD(&rq->sched.link);
+ list_del_init(&rq->sched.link);
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
+
last = rq;
submit = true;
}
rb_erase_cached(&p->node, &execlists->queue);
- INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 15345e74d8ce..0c37a3c9b719 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -259,14 +259,49 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
ce->lrc_desc = desc;
}
-static struct i915_priolist *
+static void assert_priolists(struct intel_engine_execlists * const execlists,
+ int queue_priority)
+{
+ struct rb_node *rb;
+ int last_prio, i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
+ rb_first(&execlists->queue.rb_root));
+
+ last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+ struct i915_priolist *p = to_priolist(rb);
+
+ GEM_BUG_ON(p->priority >= last_prio);
+ last_prio = p->priority;
+
+ GEM_BUG_ON(!p->used);
+ for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
+ if (list_empty(&p->requests[i]))
+ continue;
+
+ GEM_BUG_ON(!(p->used & BIT(i)));
+ }
+ }
+}
+
+static struct list_head *
lookup_priolist(struct intel_engine_cs *engine, int prio)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_priolist *p;
struct rb_node **parent, *rb;
bool first = true;
+ int idx, i;
+
+ assert_priolists(execlists, INT_MAX);
+ /* buckets sorted from highest [in slot 0] to lowest priority */
+ idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
+ prio >>= I915_USER_PRIORITY_SHIFT;
if (unlikely(execlists->no_priolist))
prio = I915_PRIORITY_NORMAL;
@@ -283,7 +318,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
parent = &rb->rb_right;
first = false;
} else {
- return p;
+ goto out;
}
}
@@ -309,11 +344,15 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
}
p->priority = prio;
- INIT_LIST_HEAD(&p->requests);
+ for (i = 0; i < ARRAY_SIZE(p->requests); i++)
+ INIT_LIST_HEAD(&p->requests[i]);
rb_link_node(&p->node, rb, parent);
rb_insert_color_cached(&p->node, &execlists->queue, first);
+ p->used = 0;
- return p;
+out:
+ p->used |= BIT(idx);
+ return &p->requests[idx];
}
static void unwind_wa_tail(struct i915_request *rq)
@@ -325,7 +364,7 @@ static void unwind_wa_tail(struct i915_request *rq)
static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct i915_request *rq, *rn;
- struct i915_priolist *uninitialized_var(p);
+ struct list_head *uninitialized_var(pl);
int last_prio = I915_PRIORITY_INVALID;
lockdep_assert_held(&engine->timeline.lock);
@@ -342,12 +381,11 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
if (rq_prio(rq) != last_prio) {
last_prio = rq_prio(rq);
- p = lookup_priolist(engine, last_prio);
+ pl = lookup_priolist(engine, last_prio);
}
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
- GEM_BUG_ON(p->priority != rq_prio(rq));
- list_add(&rq->sched.link, &p->requests);
+ list_add(&rq->sched.link, pl);
}
}
@@ -664,8 +702,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
+ int i;
- list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+ priolist_for_each_request_consume(rq, rn, p, i) {
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@@ -684,11 +723,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* combine this request with the last, then we
* are done.
*/
- if (port == last_port) {
- __list_del_many(&p->requests,
- &rq->sched.link);
+ if (port == last_port)
goto done;
- }
/*
* If GVT overrides us we only ever submit
@@ -698,11 +734,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* request) to the second port.
*/
if (ctx_single_port_submission(last->hw_context) ||
- ctx_single_port_submission(rq->hw_context)) {
- __list_del_many(&p->requests,
- &rq->sched.link);
+ ctx_single_port_submission(rq->hw_context))
goto done;
- }
GEM_BUG_ON(last->hw_context == rq->hw_context);
@@ -713,15 +746,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(port_isset(port));
}
- INIT_LIST_HEAD(&rq->sched.link);
+ list_del_init(&rq->sched.link);
+
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
+
last = rq;
submit = true;
}
rb_erase_cached(&p->node, &execlists->queue);
- INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
@@ -745,6 +779,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
*/
execlists->queue_priority =
port != execlists->port ? rq_prio(last) : INT_MIN;
+ assert_priolists(execlists, execlists->queue_priority);
if (submit) {
port_assign(port, last);
@@ -856,16 +891,16 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
+ int i;
- list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
- INIT_LIST_HEAD(&rq->sched.link);
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ list_del_init(&rq->sched.link);
dma_fence_set_error(&rq->fence, -EIO);
__i915_request_submit(rq);
}
rb_erase_cached(&p->node, &execlists->queue);
- INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
@@ -1071,8 +1106,7 @@ static void queue_request(struct intel_engine_cs *engine,
struct i915_sched_node *node,
int prio)
{
- list_add_tail(&node->link,
- &lookup_priolist(engine, prio)->requests);
+ list_add_tail(&node->link, lookup_priolist(engine, prio));
}
static void __update_queue(struct intel_engine_cs *engine, int prio)
@@ -1142,7 +1176,7 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
static void execlists_schedule(struct i915_request *request,
const struct i915_sched_attr *attr)
{
- struct i915_priolist *uninitialized_var(pl);
+ struct list_head *uninitialized_var(pl);
struct intel_engine_cs *engine, *last;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
@@ -1241,8 +1275,7 @@ static void execlists_schedule(struct i915_request *request,
pl = lookup_priolist(engine, prio);
last = engine;
}
- GEM_BUG_ON(pl->priority != prio);
- list_move_tail(&node->link, &pl->requests);
+ list_move_tail(&node->link, pl);
} else {
/*
* If the request is not in the priolist queue because
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfa585712c2..1534de5bb852 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -190,11 +190,22 @@ enum intel_engine_id {
};
struct i915_priolist {
+ struct list_head requests[I915_PRIORITY_COUNT];
struct rb_node node;
- struct list_head requests;
+ unsigned long used;
int priority;
};
+#define priolist_for_each_request(it, plist, idx) \
+ for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+ list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+ for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+ list_for_each_entry_safe(it, n, \
+ &(plist)->requests[idx - 1], \
+ sched.link)
+
struct st_preempt_hang {
struct completion completion;
bool inject_hang;
--
2.19.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 12+ messages in thread
* Re: [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads
2018-09-28 13:58 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
` (2 preceding siblings ...)
2018-09-28 13:58 ` [PATCH 4/4] drm/i915: Combine multiple internal plists into the same i915_priolist bucket Chris Wilson
@ 2018-10-01 10:38 ` Tvrtko Ursulin
2018-10-01 10:53 ` Chris Wilson
2018-10-01 11:01 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads (rev2) Patchwork
2018-10-01 11:25 ` ✗ Fi.CI.BAT: failure " Patchwork
5 siblings, 1 reply; 12+ messages in thread
From: Tvrtko Ursulin @ 2018-10-01 10:38 UTC (permalink / raw)
To: Chris Wilson, intel-gfx
On 28/09/2018 14:58, Chris Wilson wrote:
> When submitting chains to each engine, we can do so (mostly) in
> parallel, so delegate submission to threads on a per-engine basis.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/selftests/intel_lrc.c | 73 ++++++++++++++++++----
> 1 file changed, 61 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> index 3a474bb64c05..d68a924c530e 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> @@ -587,8 +587,10 @@ static int random_priority(struct rnd_state *rnd)
> struct preempt_smoke {
> struct drm_i915_private *i915;
> struct i915_gem_context **contexts;
> + struct intel_engine_cs *engine;
> unsigned int ncontext;
> struct rnd_state prng;
> + unsigned long count;
> };
>
> static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
> @@ -597,31 +599,78 @@ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
> &smoke->prng)];
> }
>
> +static int smoke_crescendo_thread(void *arg)
> +{
> + struct preempt_smoke *smoke = arg;
> + IGT_TIMEOUT(end_time);
> + unsigned long count;
> +
> + count = 0;
> + do {
> + struct i915_gem_context *ctx = smoke_context(smoke);
> + struct i915_request *rq;
> +
> + mutex_lock(&smoke->i915->drm.struct_mutex);
> +
> + ctx->sched.priority = count % I915_PRIORITY_MAX;
> +
> + rq = i915_request_alloc(smoke->engine, ctx);
> + if (IS_ERR(rq)) {
> + mutex_unlock(&smoke->i915->drm.struct_mutex);
> + return PTR_ERR(rq);
> + }
> +
> + i915_request_add(rq);
> +
> + mutex_unlock(&smoke->i915->drm.struct_mutex);
> +
> + count++;
Very little outside the mutex so I am not sure if parallelization will
work that well. Every thread could probably fill the ring in its
timeslice? And then it blocks the others until there is space. It will
heavily rely on scheduler behaviour and mutex fairness I think.
Regards,
Tvrtko
> + } while (!__igt_timeout(end_time, NULL));
> +
> + smoke->count = count;
> + return 0;
> +}
> +
> static int smoke_crescendo(struct preempt_smoke *smoke)
> {
> + struct task_struct *tsk[I915_NUM_ENGINES] = {};
> + struct preempt_smoke arg[I915_NUM_ENGINES];
> struct intel_engine_cs *engine;
> enum intel_engine_id id;
> unsigned long count;
> + int err = 0;
> +
> + mutex_unlock(&smoke->i915->drm.struct_mutex);
>
> - count = 0;
> for_each_engine(engine, smoke->i915, id) {
> - IGT_TIMEOUT(end_time);
> + arg[id] = *smoke;
> + arg[id].engine = engine;
> + arg[id].count = 0;
> +
> + tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
> + "igt/smoke:%d", id);
> + if (IS_ERR(tsk[id])) {
> + err = PTR_ERR(tsk[id]);
> + break;
> + }
> + }
>
> - do {
> - struct i915_gem_context *ctx = smoke_context(smoke);
> - struct i915_request *rq;
> + count = 0;
> + for_each_engine(engine, smoke->i915, id) {
> + int status;
>
> - ctx->sched.priority = count % I915_PRIORITY_MAX;
> + if (IS_ERR_OR_NULL(tsk[id]))
> + continue;
>
> - rq = i915_request_alloc(engine, ctx);
> - if (IS_ERR(rq))
> - return PTR_ERR(rq);
> + status = kthread_stop(tsk[id]);
> + if (status && !err)
> + err = status;
>
> - i915_request_add(rq);
> - count++;
> - } while (!__igt_timeout(end_time, NULL));
> + count += arg[id].count;
> }
>
> + mutex_lock(&smoke->i915->drm.struct_mutex);
> +
> pr_info("Submitted %lu crescendo requests across %d engines and %d contexts\n",
> count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
> return 0;
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads
2018-10-01 10:38 ` [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Tvrtko Ursulin
@ 2018-10-01 10:53 ` Chris Wilson
0 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2018-10-01 10:53 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx
Quoting Tvrtko Ursulin (2018-10-01 11:38:37)
>
> On 28/09/2018 14:58, Chris Wilson wrote:
> > When submitting chains to each engine, we can do so (mostly) in
> > parallel, so delegate submission to threads on a per-engine basis.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> > drivers/gpu/drm/i915/selftests/intel_lrc.c | 73 ++++++++++++++++++----
> > 1 file changed, 61 insertions(+), 12 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> > index 3a474bb64c05..d68a924c530e 100644
> > --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> > @@ -587,8 +587,10 @@ static int random_priority(struct rnd_state *rnd)
> > struct preempt_smoke {
> > struct drm_i915_private *i915;
> > struct i915_gem_context **contexts;
> > + struct intel_engine_cs *engine;
> > unsigned int ncontext;
> > struct rnd_state prng;
> > + unsigned long count;
> > };
> >
> > static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
> > @@ -597,31 +599,78 @@ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
> > &smoke->prng)];
> > }
> >
> > +static int smoke_crescendo_thread(void *arg)
> > +{
> > + struct preempt_smoke *smoke = arg;
> > + IGT_TIMEOUT(end_time);
> > + unsigned long count;
> > +
> > + count = 0;
> > + do {
> > + struct i915_gem_context *ctx = smoke_context(smoke);
> > + struct i915_request *rq;
> > +
> > + mutex_lock(&smoke->i915->drm.struct_mutex);
> > +
> > + ctx->sched.priority = count % I915_PRIORITY_MAX;
> > +
> > + rq = i915_request_alloc(smoke->engine, ctx);
> > + if (IS_ERR(rq)) {
> > + mutex_unlock(&smoke->i915->drm.struct_mutex);
> > + return PTR_ERR(rq);
> > + }
> > +
> > + i915_request_add(rq);
> > +
> > + mutex_unlock(&smoke->i915->drm.struct_mutex);
> > +
> > + count++;
>
> Very little outside the mutex so I am not sure if parallelization will
> work that well. Every thread could probably fill the ring in its
> timeslice?
Very unlikely due to the randomised ring. And we are working on that,
right? :)
> And then it blocks the others until there is space. It will
> heavily rely on scheduler behaviour and mutex fairness I think.
But it does bring the overall subtest time from num_engines to 1s, for
the same pattern.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads (rev2)
2018-09-28 13:58 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
` (3 preceding siblings ...)
2018-10-01 10:38 ` [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Tvrtko Ursulin
@ 2018-10-01 11:01 ` Patchwork
2018-10-01 11:25 ` ✗ Fi.CI.BAT: failure " Patchwork
5 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2018-10-01 11:01 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads (rev2)
URL : https://patchwork.freedesktop.org/series/50329/
State : warning
== Summary ==
$ dim checkpatch origin/drm-tip
949446d4ae2f drm/i915/selftests: Split preemption smoke test into threads
-:33: WARNING:LINE_SPACING: Missing a blank line after declarations
#33: FILE: drivers/gpu/drm/i915/selftests/intel_lrc.c:605:
+ struct preempt_smoke *smoke = arg;
+ IGT_TIMEOUT(end_time);
total: 0 errors, 1 warnings, 0 checks, 100 lines checked
27ea74d97340 drm/i915/selftests: Include arbitration points in preemption smoketest
8a4361322e2b drm/i915: Reserve some priority bits for internal use
05453e086003 drm/i915: Combine multiple internal plists into the same i915_priolist bucket
-:167: WARNING:FUNCTION_ARGUMENTS: function definition argument 'pl' should also have an identifier name
#167: FILE: drivers/gpu/drm/i915/intel_lrc.c:367:
+ struct list_head *uninitialized_var(pl);
-:285: WARNING:FUNCTION_ARGUMENTS: function definition argument 'pl' should also have an identifier name
#285: FILE: drivers/gpu/drm/i915/intel_lrc.c:1179:
+ struct list_head *uninitialized_var(pl);
-:314: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'plist' - possible side-effects?
#314: FILE: drivers/gpu/drm/i915/intel_ringbuffer.h:199:
+#define priolist_for_each_request(it, plist, idx) \
+ for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+ list_for_each_entry(it, &(plist)->requests[idx], sched.link)
-:314: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'idx' - possible side-effects?
#314: FILE: drivers/gpu/drm/i915/intel_ringbuffer.h:199:
+#define priolist_for_each_request(it, plist, idx) \
+ for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+ list_for_each_entry(it, &(plist)->requests[idx], sched.link)
-:318: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'plist' - possible side-effects?
#318: FILE: drivers/gpu/drm/i915/intel_ringbuffer.h:203:
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+ for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+ list_for_each_entry_safe(it, n, \
+ &(plist)->requests[idx - 1], \
+ sched.link)
-:318: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'idx' - possible side-effects?
#318: FILE: drivers/gpu/drm/i915/intel_ringbuffer.h:203:
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+ for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+ list_for_each_entry_safe(it, n, \
+ &(plist)->requests[idx - 1], \
+ sched.link)
total: 0 errors, 2 warnings, 4 checks, 272 lines checked
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* ✗ Fi.CI.BAT: failure for series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads (rev2)
2018-09-28 13:58 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
` (4 preceding siblings ...)
2018-10-01 11:01 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads (rev2) Patchwork
@ 2018-10-01 11:25 ` Patchwork
2018-10-01 12:09 ` Chris Wilson
5 siblings, 1 reply; 12+ messages in thread
From: Patchwork @ 2018-10-01 11:25 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads (rev2)
URL : https://patchwork.freedesktop.org/series/50329/
State : failure
== Summary ==
= CI Bug Log - changes from CI_DRM_4906 -> Patchwork_10302 =
== Summary - FAILURE ==
Serious unknown changes coming with Patchwork_10302 absolutely need to be
verified manually.
If you think the reported changes have nothing to do with the changes
introduced in Patchwork_10302, please notify your bug team to allow them
to document this new failure mode, which will reduce false positives in CI.
External URL: https://patchwork.freedesktop.org/api/1.0/series/50329/revisions/2/mbox/
== Possible new issues ==
Here are the unknown changes that may have been introduced in Patchwork_10302:
=== IGT changes ===
==== Possible regressions ====
igt@gem_close_race@basic-process:
fi-skl-6700k2: PASS -> INCOMPLETE
fi-skl-6700hq: PASS -> INCOMPLETE
fi-kbl-7560u: PASS -> INCOMPLETE
fi-kbl-8809g: PASS -> INCOMPLETE
fi-bsw-kefka: PASS -> INCOMPLETE
fi-kbl-7567u: PASS -> INCOMPLETE
fi-icl-u: PASS -> INCOMPLETE
igt@gem_close_race@basic-threads:
fi-skl-6600u: PASS -> INCOMPLETE
fi-whl-u: PASS -> INCOMPLETE
fi-cnl-u: PASS -> INCOMPLETE
fi-kbl-7500u: PASS -> INCOMPLETE
fi-cfl-8109u: PASS -> INCOMPLETE
fi-kbl-r: PASS -> INCOMPLETE
fi-skl-6260u: PASS -> INCOMPLETE
fi-skl-6770hq: PASS -> INCOMPLETE
fi-kbl-x1275: PASS -> INCOMPLETE
fi-cfl-s3: PASS -> INCOMPLETE
fi-skl-iommu: PASS -> INCOMPLETE
fi-cfl-8700k: PASS -> INCOMPLETE
igt@gem_ctx_switch@basic-default:
fi-bdw-samus: PASS -> INCOMPLETE
fi-bsw-n3050: PASS -> INCOMPLETE
fi-bdw-5557u: PASS -> INCOMPLETE
igt@gem_ctx_switch@basic-default-heavy:
fi-kbl-soraka: PASS -> INCOMPLETE
fi-skl-caroline: NOTRUN -> INCOMPLETE
== Known issues ==
Here are the changes found in Patchwork_10302 that come from known issues:
=== IGT changes ===
==== Issues hit ====
igt@gem_close_race@basic-process:
fi-glk-j4005: PASS -> INCOMPLETE (fdo#103359, k.org#198133)
fi-bxt-dsi: PASS -> INCOMPLETE (fdo#103927)
fi-bxt-j4205: PASS -> INCOMPLETE (fdo#103927)
fi-kbl-guc: PASS -> INCOMPLETE (fdo#106693)
fi-glk-dsi: PASS -> INCOMPLETE (fdo#103359, k.org#198133)
fi-cfl-guc: PASS -> INCOMPLETE (fdo#106693)
igt@gem_close_race@basic-threads:
fi-skl-guc: PASS -> INCOMPLETE (fdo#106693)
fi-skl-gvtdvm: PASS -> INCOMPLETE (fdo#105600)
igt@gem_ctx_switch@basic-default-heavy:
fi-bdw-gvtdvm: PASS -> INCOMPLETE (fdo#105600)
igt@kms_frontbuffer_tracking@basic:
fi-hsw-peppy: PASS -> DMESG-WARN (fdo#102614)
igt@kms_pipe_crc_basic@nonblocking-crc-pipe-a:
fi-byt-clapper: PASS -> FAIL (fdo#107362)
==== Possible fixes ====
igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
fi-byt-clapper: FAIL (fdo#103191, fdo#107362) -> PASS
fdo#102614 https://bugs.freedesktop.org/show_bug.cgi?id=102614
fdo#103191 https://bugs.freedesktop.org/show_bug.cgi?id=103191
fdo#103359 https://bugs.freedesktop.org/show_bug.cgi?id=103359
fdo#103927 https://bugs.freedesktop.org/show_bug.cgi?id=103927
fdo#105600 https://bugs.freedesktop.org/show_bug.cgi?id=105600
fdo#106693 https://bugs.freedesktop.org/show_bug.cgi?id=106693
fdo#107362 https://bugs.freedesktop.org/show_bug.cgi?id=107362
k.org#198133 https://bugzilla.kernel.org/show_bug.cgi?id=198133
== Participating hosts (50 -> 46) ==
Additional (1): fi-skl-caroline
Missing (5): fi-ctg-p8600 fi-bsw-cyan fi-ilk-m540 fi-byt-squawks fi-icl-u2
== Build changes ==
* Linux: CI_DRM_4906 -> Patchwork_10302
CI_DRM_4906: 187637a6495f71dd240d02badbf2fecc1e3c1bb2 @ git://anongit.freedesktop.org/gfx-ci/linux
IGT_4658: cab89ce2c5da684d01deff402d4e8e11441beadb @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
Patchwork_10302: 05453e08600332f053e0a1cf63250f70cc70f1a9 @ git://anongit.freedesktop.org/gfx-ci/linux
== Linux commits ==
05453e086003 drm/i915: Combine multiple internal plists into the same i915_priolist bucket
8a4361322e2b drm/i915: Reserve some priority bits for internal use
27ea74d97340 drm/i915/selftests: Include arbitration points in preemption smoketest
949446d4ae2f drm/i915/selftests: Split preemption smoke test into threads
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_10302/issues.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread
* Re: ✗ Fi.CI.BAT: failure for series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads (rev2)
2018-10-01 11:25 ` ✗ Fi.CI.BAT: failure " Patchwork
@ 2018-10-01 12:09 ` Chris Wilson
0 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2018-10-01 12:09 UTC (permalink / raw)
To: Patchwork; +Cc: intel-gfx
Quoting Patchwork (2018-10-01 12:25:25)
> == Series Details ==
>
> Series: series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads (rev2)
> URL : https://patchwork.freedesktop.org/series/50329/
> State : failure
>
> == Summary ==
>
> = CI Bug Log - changes from CI_DRM_4906 -> Patchwork_10302 =
>
> == Summary - FAILURE ==
>
> Serious unknown changes coming with Patchwork_10302 absolutely need to be
> verified manually.
>
> If you think the reported changes have nothing to do with the changes
> introduced in Patchwork_10302, please notify your bug team to allow them
> to document this new failure mode, which will reduce false positives in CI.
>
> External URL: https://patchwork.freedesktop.org/api/1.0/series/50329/revisions/2/mbox/
>
> == Possible new issues ==
>
> Here are the unknown changes that may have been introduced in Patchwork_10302:
>
> === IGT changes ===
>
> ==== Possible regressions ====
>
> igt@gem_close_race@basic-process:
> fi-skl-6700k2: PASS -> INCOMPLETE
Ah, my assert is broken with I915_USER_PRIORITY_SHIFT == 0.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 12+ messages in thread