From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
To: Chris Wilson <chris@chris-wilson.co.uk>, intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: Re: [Intel-gfx] [PATCH 03/57] drm/i915/selftests: Exercise cross-process context isolation
Date: Mon, 01 Feb 2021 18:37:06 +0200 [thread overview]
Message-ID: <87czxj218t.fsf@gaia.fi.intel.com> (raw)
In-Reply-To: <20210201085715.27435-3-chris@chris-wilson.co.uk>
Chris Wilson <chris@chris-wilson.co.uk> writes:
> Verify that one context running on engine A cannot manipulate another
> client's context concurrently running on engine B using unprivileged
> access.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/gt/selftest_lrc.c | 275 +++++++++++++++++++++----
> 1 file changed, 238 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> index 0524232378e4..e97adf1b7729 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> @@ -911,6 +911,7 @@ create_user_vma(struct i915_address_space *vm, unsigned long size)
>
> static struct i915_vma *
> store_context(struct intel_context *ce,
> + struct intel_engine_cs *engine,
> struct i915_vma *scratch,
> bool relative)
> {
> @@ -928,7 +929,7 @@ store_context(struct intel_context *ce,
> return ERR_CAST(cs);
> }
>
> - defaults = shmem_pin_map(ce->engine->default_state);
> + defaults = shmem_pin_map(engine->default_state);
> if (!defaults) {
> i915_gem_object_unpin_map(batch->obj);
> i915_vma_put(batch);
> @@ -960,7 +961,7 @@ store_context(struct intel_context *ce,
> if (relative)
> cmd |= MI_LRI_LRM_CS_MMIO;
> else
> - offset = ce->engine->mmio_base;
> + offset = engine->mmio_base;
> }
>
> dw++;
> @@ -979,7 +980,7 @@ store_context(struct intel_context *ce,
>
> *cs++ = MI_BATCH_BUFFER_END;
>
> - shmem_unpin_map(ce->engine->default_state, defaults);
> + shmem_unpin_map(engine->default_state, defaults);
>
> i915_gem_object_flush_map(batch->obj);
> i915_gem_object_unpin_map(batch->obj);
> @@ -1002,23 +1003,48 @@ static int move_to_active(struct i915_request *rq,
> return err;
> }
>
> +struct hwsp_semaphore {
> + u32 ggtt;
> + u32 *va;
> +};
This might grow to be the basis of a minilib at some point, with
associated inits, emits and signals...
> +
> +static struct hwsp_semaphore hwsp_semaphore(struct intel_engine_cs *engine)
> +{
> + struct hwsp_semaphore s;
> +
> + s.va = memset32(engine->status_page.addr + 1000, 0, 1);
> + s.ggtt = (i915_ggtt_offset(engine->status_page.vma) +
> + offset_in_page(s.va));
> +
> + return s;
> +}
> +
> +static u32 *emit_noops(u32 *cs, int count)
> +{
build_bug_on(!count) but meh, single user.
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> + while (count--)
> + *cs++ = MI_NOOP;
> +
> + return cs;
> +}
> +
> static struct i915_request *
> record_registers(struct intel_context *ce,
> + struct intel_engine_cs *engine,
> struct i915_vma *before,
> struct i915_vma *after,
> bool relative,
> - u32 *sema)
> + const struct hwsp_semaphore *sema)
> {
> struct i915_vma *b_before, *b_after;
> struct i915_request *rq;
> u32 *cs;
> int err;
>
> - b_before = store_context(ce, before, relative);
> + b_before = store_context(ce, engine, before, relative);
> if (IS_ERR(b_before))
> return ERR_CAST(b_before);
>
> - b_after = store_context(ce, after, relative);
> + b_after = store_context(ce, engine, after, relative);
> if (IS_ERR(b_after)) {
> rq = ERR_CAST(b_after);
> goto err_before;
> @@ -1044,7 +1070,7 @@ record_registers(struct intel_context *ce,
> if (err)
> goto err_rq;
>
> - cs = intel_ring_begin(rq, 14);
> + cs = intel_ring_begin(rq, 18);
> if (IS_ERR(cs)) {
> err = PTR_ERR(cs);
> goto err_rq;
> @@ -1055,16 +1081,28 @@ record_registers(struct intel_context *ce,
> *cs++ = lower_32_bits(b_before->node.start);
> *cs++ = upper_32_bits(b_before->node.start);
>
> - *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
> - *cs++ = MI_SEMAPHORE_WAIT |
> - MI_SEMAPHORE_GLOBAL_GTT |
> - MI_SEMAPHORE_POLL |
> - MI_SEMAPHORE_SAD_NEQ_SDD;
> - *cs++ = 0;
> - *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
> - offset_in_page(sema);
> - *cs++ = 0;
> - *cs++ = MI_NOOP;
> + if (sema) {
> + WRITE_ONCE(*sema->va, -1);
> +
> + /* Signal the poisoner */
> + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
> + *cs++ = sema->ggtt;
> + *cs++ = 0;
> + *cs++ = 0;
> +
> + /* Then wait for the poison to settle */
> + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
> + *cs++ = MI_SEMAPHORE_WAIT |
> + MI_SEMAPHORE_GLOBAL_GTT |
> + MI_SEMAPHORE_POLL |
> + MI_SEMAPHORE_SAD_NEQ_SDD;
> + *cs++ = 0;
> + *cs++ = sema->ggtt;
> + *cs++ = 0;
> + *cs++ = MI_NOOP;
> + } else {
> + cs = emit_noops(cs, 10);
> + }
>
> *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
> *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
> @@ -1073,7 +1111,6 @@ record_registers(struct intel_context *ce,
>
> intel_ring_advance(rq, cs);
>
> - WRITE_ONCE(*sema, 0);
> i915_request_get(rq);
> i915_request_add(rq);
> err_after:
> @@ -1089,7 +1126,9 @@ record_registers(struct intel_context *ce,
> }
>
> static struct i915_vma *
> -load_context(struct intel_context *ce, u32 poison, bool relative)
> +load_context(struct intel_context *ce,
> + struct intel_engine_cs *engine,
> + u32 poison, bool relative)
> {
> struct i915_vma *batch;
> u32 dw, *cs, *hw;
> @@ -1105,7 +1144,7 @@ load_context(struct intel_context *ce, u32 poison, bool relative)
> return ERR_CAST(cs);
> }
>
> - defaults = shmem_pin_map(ce->engine->default_state);
> + defaults = shmem_pin_map(engine->default_state);
> if (!defaults) {
> i915_gem_object_unpin_map(batch->obj);
> i915_vma_put(batch);
> @@ -1136,7 +1175,7 @@ load_context(struct intel_context *ce, u32 poison, bool relative)
> if (relative)
> cmd |= MI_LRI_LRM_CS_MMIO;
> else
> - offset = ce->engine->mmio_base;
> + offset = engine->mmio_base;
> }
>
> dw++;
> @@ -1152,7 +1191,7 @@ load_context(struct intel_context *ce, u32 poison, bool relative)
>
> *cs++ = MI_BATCH_BUFFER_END;
>
> - shmem_unpin_map(ce->engine->default_state, defaults);
> + shmem_unpin_map(engine->default_state, defaults);
>
> i915_gem_object_flush_map(batch->obj);
> i915_gem_object_unpin_map(batch->obj);
> @@ -1162,16 +1201,17 @@ load_context(struct intel_context *ce, u32 poison, bool relative)
>
> static int
> poison_registers(struct intel_context *ce,
> + struct intel_engine_cs *engine,
> u32 poison,
> bool relative,
> - u32 *sema)
> + const struct hwsp_semaphore *sema)
> {
> struct i915_request *rq;
> struct i915_vma *batch;
> u32 *cs;
> int err;
>
> - batch = load_context(ce, poison, relative);
> + batch = load_context(ce, engine, poison, relative);
> if (IS_ERR(batch))
> return PTR_ERR(batch);
>
> @@ -1185,20 +1225,29 @@ poison_registers(struct intel_context *ce,
> if (err)
> goto err_rq;
>
> - cs = intel_ring_begin(rq, 8);
> + cs = intel_ring_begin(rq, 14);
> if (IS_ERR(cs)) {
> err = PTR_ERR(cs);
> goto err_rq;
> }
>
> + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
> + *cs++ = MI_SEMAPHORE_WAIT |
> + MI_SEMAPHORE_GLOBAL_GTT |
> + MI_SEMAPHORE_POLL |
> + MI_SEMAPHORE_SAD_EQ_SDD;
> + *cs++ = 0;
> + *cs++ = sema->ggtt;
> + *cs++ = 0;
> + *cs++ = MI_NOOP;
> +
> *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
> *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
> *cs++ = lower_32_bits(batch->node.start);
> *cs++ = upper_32_bits(batch->node.start);
>
> *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
> - *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
> - offset_in_page(sema);
> + *cs++ = sema->ggtt;
> *cs++ = 0;
> *cs++ = 1;
>
> @@ -1258,7 +1307,7 @@ static int compare_isolation(struct intel_engine_cs *engine,
> }
> lrc += LRC_STATE_OFFSET / sizeof(*hw);
>
> - defaults = shmem_pin_map(ce->engine->default_state);
> + defaults = shmem_pin_map(engine->default_state);
> if (!defaults) {
> err = -ENOMEM;
> goto err_lrc;
> @@ -1311,7 +1360,7 @@ static int compare_isolation(struct intel_engine_cs *engine,
> } while (dw < PAGE_SIZE / sizeof(u32) &&
> (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
>
> - shmem_unpin_map(ce->engine->default_state, defaults);
> + shmem_unpin_map(engine->default_state, defaults);
> err_lrc:
> i915_gem_object_unpin_map(ce->state->obj);
> err_B1:
> @@ -1328,7 +1377,7 @@ static int compare_isolation(struct intel_engine_cs *engine,
> static int
> __lrc_isolation(struct intel_engine_cs *engine, u32 poison, bool relative)
> {
> - u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
> + struct hwsp_semaphore sema = hwsp_semaphore(engine);
> struct i915_vma *ref[2], *result[2];
> struct intel_context *A, *B;
> struct i915_request *rq;
> @@ -1356,15 +1405,12 @@ __lrc_isolation(struct intel_engine_cs *engine, u32 poison, bool relative)
> goto err_ref0;
> }
>
> - rq = record_registers(A, ref[0], ref[1], relative, sema);
> + rq = record_registers(A, engine, ref[0], ref[1], relative, NULL);
> if (IS_ERR(rq)) {
> err = PTR_ERR(rq);
> goto err_ref1;
> }
>
> - WRITE_ONCE(*sema, 1);
> - wmb();
> -
> if (i915_request_wait(rq, 0, HZ / 2) < 0) {
> i915_request_put(rq);
> err = -ETIME;
> @@ -1384,15 +1430,15 @@ __lrc_isolation(struct intel_engine_cs *engine, u32 poison, bool relative)
> goto err_result0;
> }
>
> - rq = record_registers(A, result[0], result[1], relative, sema);
> + rq = record_registers(A, engine, result[0], result[1], relative, &sema);
> if (IS_ERR(rq)) {
> err = PTR_ERR(rq);
> goto err_result1;
> }
>
> - err = poison_registers(B, poison, relative, sema);
> + err = poison_registers(B, engine, poison, relative, &sema);
> if (err) {
> - WRITE_ONCE(*sema, -1);
> + WRITE_ONCE(*sema.va, -1);
> i915_request_put(rq);
> goto err_result1;
> }
> @@ -1494,6 +1540,160 @@ static int live_lrc_isolation(void *arg)
> return err;
> }
>
> +static int __lrc_cross(struct intel_engine_cs *a,
> + struct intel_engine_cs *b,
> + u32 poison)
> +{
> + struct hwsp_semaphore sema = hwsp_semaphore(a);
> + struct i915_vma *ref[2], *result[2];
> + struct intel_context *A, *B;
> + struct i915_request *rq;
> + int err;
> +
> + GEM_BUG_ON(a->gt->ggtt != b->gt->ggtt);
> +
> + pr_debug("Context on %s, poisoning from %s with %08x\n",
> + a->name, b->name, poison);
> +
> + A = intel_context_create(a);
> + if (IS_ERR(A))
> + return PTR_ERR(A);
> +
> + B = intel_context_create(b);
> + if (IS_ERR(B)) {
> + err = PTR_ERR(B);
> + goto err_A;
> + }
> +
> + ref[0] = create_user_vma(A->vm, SZ_64K);
> + if (IS_ERR(ref[0])) {
> + err = PTR_ERR(ref[0]);
> + goto err_B;
> + }
> +
> + ref[1] = create_user_vma(A->vm, SZ_64K);
> + if (IS_ERR(ref[1])) {
> + err = PTR_ERR(ref[1]);
> + goto err_ref0;
> + }
> +
> + rq = record_registers(A, a, ref[0], ref[1], false, NULL);
> + if (IS_ERR(rq)) {
> + err = PTR_ERR(rq);
> + goto err_ref1;
> + }
> +
> + if (i915_request_wait(rq, 0, HZ / 2) < 0) {
> + i915_request_put(rq);
> + err = -ETIME;
> + goto err_ref1;
> + }
> + i915_request_put(rq);
> +
> + result[0] = create_user_vma(A->vm, SZ_64K);
> + if (IS_ERR(result[0])) {
> + err = PTR_ERR(result[0]);
> + goto err_ref1;
> + }
> +
> + result[1] = create_user_vma(A->vm, SZ_64K);
> + if (IS_ERR(result[1])) {
> + err = PTR_ERR(result[1]);
> + goto err_result0;
> + }
> +
> + rq = record_registers(A, a, result[0], result[1], false, &sema);
> + if (IS_ERR(rq)) {
> + err = PTR_ERR(rq);
> + goto err_result1;
> + }
> +
> + err = poison_registers(B, a, poison, false, &sema);
> + if (err) {
> + WRITE_ONCE(*sema.va, -1);
> + i915_request_put(rq);
> + goto err_result1;
> + }
> +
> + if (i915_request_wait(rq, 0, HZ / 2) < 0) {
> + i915_request_put(rq);
> + err = -ETIME;
> + goto err_result1;
> + }
> + i915_request_put(rq);
> +
> + err = compare_isolation(a, ref, result, A, poison, false);
> +
> +err_result1:
> + i915_vma_put(result[1]);
> +err_result0:
> + i915_vma_put(result[0]);
> +err_ref1:
> + i915_vma_put(ref[1]);
> +err_ref0:
> + i915_vma_put(ref[0]);
> +err_B:
> + intel_context_put(B);
> +err_A:
> + intel_context_put(A);
> + return err;
> +}
> +
> +static int live_lrc_cross(void *arg)
> +{
> + struct intel_gt *gt = arg;
> + struct intel_engine_cs *a, *b;
> + enum intel_engine_id a_id, b_id;
> + const u32 poison[] = {
> + STACK_MAGIC,
> + 0x3a3a3a3a,
> + 0x5c5c5c5c,
> + 0xffffffff,
> + 0xffff0000,
> + };
> + int err = 0;
> + int i;
> +
> + /*
> + * Our goal is to try and tamper with another client's context
> + * running concurrently. The HW's goal is to stop us.
> + */
> +
> + for_each_engine(a, gt, a_id) {
> + if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) &&
> + skip_isolation(a))
> + continue;
> +
> + intel_engine_pm_get(a);
> + for_each_engine(b, gt, b_id) {
> + if (a == b)
> + continue;
> +
> + intel_engine_pm_get(b);
> + for (i = 0; i < ARRAY_SIZE(poison); i++) {
> + int result;
> +
> + result = __lrc_cross(a, b, poison[i]);
> + if (result && !err)
> + err = result;
> +
> + result = __lrc_cross(a, b, ~poison[i]);
> + if (result && !err)
> + err = result;
> + }
> + intel_engine_pm_put(b);
> + }
> + intel_engine_pm_put(a);
> +
> + if (igt_flush_test(gt->i915)) {
> + err = -EIO;
> + break;
> + }
> + }
> +
> + return err;
> +}
> +
> static int indirect_ctx_submit_req(struct intel_context *ce)
> {
> struct i915_request *rq;
> @@ -1884,6 +2084,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
> SUBTEST(live_lrc_isolation),
> SUBTEST(live_lrc_timestamp),
> SUBTEST(live_lrc_garbage),
> + SUBTEST(live_lrc_cross),
> SUBTEST(live_pphwsp_runtime),
> SUBTEST(live_lrc_indirect_ctx_bb),
> };
> --
> 2.20.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2021-02-01 16:37 UTC|newest]
Thread overview: 103+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-02-01 8:56 [Intel-gfx] [PATCH 01/57] drm/i915/gt: Restrict the GT clock override to just Icelake Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 02/57] drm/i915/selftests: Exercise relative mmio paths to non-privileged registers Chris Wilson
2021-02-01 14:34 ` Mika Kuoppala
2021-02-01 8:56 ` [Intel-gfx] [PATCH 03/57] drm/i915/selftests: Exercise cross-process context isolation Chris Wilson
2021-02-01 16:37 ` Mika Kuoppala [this message]
2021-02-01 8:56 ` [Intel-gfx] [PATCH 04/57] drm/i915: Protect against request freeing during cancellation on wedging Chris Wilson
2021-02-02 9:55 ` Mika Kuoppala
2021-02-01 8:56 ` [Intel-gfx] [PATCH 05/57] drm/i915: Take rcu_read_lock for querying fence's driver/timeline names Chris Wilson
2021-02-02 18:33 ` Mika Kuoppala
2021-02-01 8:56 ` [Intel-gfx] [PATCH 06/57] drm/i915/gt: Always flush the submission queue on checking for idle Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 07/57] drm/i915/gt: Move engine setup out of set_default_submission Chris Wilson
2021-02-02 11:57 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 08/57] drm/i915/gt: Move submission_method into intel_gt Chris Wilson
2021-02-02 12:03 ` Tvrtko Ursulin
2021-02-02 12:18 ` Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 09/57] drm/i915: Replace engine->schedule() with a known request operation Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 10/57] drm/i915: Restructure priority inheritance Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 11/57] drm/i915/selftests: Measure set-priority duration Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 12/57] drm/i915/selftests: Exercise priority inheritance around an engine loop Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 13/57] drm/i915/selftests: Force a rewind if at first we don't succeed Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 14/57] drm/i915: Improve DFS for priority inheritance Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 15/57] drm/i915: Extract request submission from execlists Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 16/57] drm/i915: Extract request rewinding " Chris Wilson
2021-02-02 13:08 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 17/57] drm/i915: Extract request suspension from the execlists Chris Wilson
2021-02-02 13:15 ` Tvrtko Ursulin
2021-02-02 13:26 ` Chris Wilson
2021-02-02 13:32 ` Tvrtko Ursulin
2021-02-02 13:27 ` Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 18/57] drm/i915: Extract the ability to defer and rerun a request later Chris Wilson
2021-02-02 13:18 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 19/57] drm/i915: Fix the iterative dfs for defering requests Chris Wilson
2021-02-02 14:10 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 20/57] drm/i915: Wrap access to intel_engine.active Chris Wilson
2021-02-04 11:07 ` Tvrtko Ursulin
2021-02-04 11:18 ` Chris Wilson
2021-02-04 11:56 ` Chris Wilson
2021-02-04 12:08 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 21/57] drm/i915: Move common active lists from engine to i915_scheduler Chris Wilson
2021-02-04 11:12 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 22/57] drm/i915: Move scheduler queue Chris Wilson
2021-02-04 11:19 ` Tvrtko Ursulin
2021-02-04 11:32 ` Chris Wilson
2021-02-04 11:40 ` Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 23/57] drm/i915: Move tasklet from execlists to sched Chris Wilson
2021-02-04 14:06 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 24/57] drm/i915/gt: Only kick the scheduler on timeslice/preemption change Chris Wilson
2021-02-04 14:09 ` Tvrtko Ursulin
2021-02-04 14:43 ` Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 25/57] drm/i915: Move submit_request to i915_sched_engine Chris Wilson
2021-02-04 14:13 ` Tvrtko Ursulin
2021-02-04 14:45 ` Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 26/57] drm/i915: Move finding the current active request to the scheduler Chris Wilson
2021-02-04 14:30 ` Tvrtko Ursulin
2021-02-04 14:59 ` Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 27/57] drm/i915: Show execlists queues when dumping state Chris Wilson
2021-02-04 15:04 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 28/57] drm/i915: Wrap i915_request_use_semaphores() Chris Wilson
2021-02-04 15:05 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 29/57] drm/i915: Move scheduler flags Chris Wilson
2021-02-04 15:14 ` Tvrtko Ursulin
2021-02-04 16:05 ` Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 30/57] drm/i915: Move timeslicing flag to scheduler Chris Wilson
2021-02-04 15:18 ` Tvrtko Ursulin
2021-02-04 16:11 ` Chris Wilson
2021-02-05 9:48 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 31/57] drm/i915/gt: Declare when we enabled timeslicing Chris Wilson
2021-02-04 15:26 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 32/57] drm/i915: Move needs-breadcrumb flags to scheduler Chris Wilson
2021-02-04 15:28 ` Tvrtko Ursulin
2021-02-04 16:12 ` Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 33/57] drm/i915: Move busywaiting control to the scheduler Chris Wilson
2021-02-04 15:32 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 34/57] drm/i915: Move preempt-reset flag " Chris Wilson
2021-02-04 15:34 ` Tvrtko Ursulin
2021-02-01 8:56 ` [Intel-gfx] [PATCH 35/57] drm/i915: Replace priolist rbtree with a skiplist Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 36/57] drm/i915: Wrap cmpxchg64 with try_cmpxchg64() helper Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 37/57] drm/i915: Fair low-latency scheduling Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 38/57] drm/i915/gt: Specify a deadline for the heartbeat Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 39/57] drm/i915: Extend the priority boosting for the display with a deadline Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 40/57] drm/i915/gt: Support virtual engine queues Chris Wilson
2021-02-01 8:56 ` [Intel-gfx] [PATCH 41/57] drm/i915: Move saturated workload detection back to the context Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 42/57] drm/i915: Bump default timeslicing quantum to 5ms Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 43/57] drm/i915/gt: Delay taking irqoff for execlists submission Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 44/57] drm/i915/gt: Wrap intel_timeline.has_initial_breadcrumb Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 45/57] drm/i915/gt: Track timeline GGTT offset separately from subpage offset Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 46/57] drm/i915/gt: Add timeline "mode" Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 47/57] drm/i915/gt: Use indices for writing into relative timelines Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 48/57] drm/i915/selftests: Exercise relative timeline modes Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 49/57] drm/i915/gt: Use ppHWSP for unshared non-semaphore related timelines Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 50/57] Restore "drm/i915: drop engine_pin/unpin_breadcrumbs_irq" Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 51/57] drm/i915/gt: Couple tasklet scheduling for all CS interrupts Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 52/57] drm/i915/gt: Support creation of 'internal' rings Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 53/57] drm/i915/gt: Use client timeline address for seqno writes Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 54/57] drm/i915/gt: Infrastructure for ring scheduling Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 55/57] drm/i915/gt: Implement ring scheduler for gen4-7 Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 56/57] drm/i915/gt: Enable ring scheduling for gen5-7 Chris Wilson
2021-02-01 8:57 ` [Intel-gfx] [PATCH 57/57] drm/i915: Support secure dispatch on gen6/gen7 Chris Wilson
2021-02-01 14:13 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/57] drm/i915/gt: Restrict the GT clock override to just Icelake Patchwork
2021-02-01 14:15 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2021-02-01 14:15 ` [Intel-gfx] [PATCH 01/57] " Mika Kuoppala
2021-02-01 14:41 ` [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [01/57] " Patchwork
2021-02-01 19:33 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=87czxj218t.fsf@gaia.fi.intel.com \
--to=mika.kuoppala@linux.intel.com \
--cc=chris@chris-wilson.co.uk \
--cc=intel-gfx@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).