* [Intel-gfx] [PATCH v2] drm/i915: Copy across scheduler behaviour flags across submit fences
From: Chris Wilson @ 2019-12-04 11:26 UTC
  To: intel-gfx

We want the bonded request to have the same scheduler properties as its
master so that it is placed at the same depth in the queue. For example,
consider we have requests A, B and B', where B & B' are a bonded pair to
run in parallel on two engines.

	A -> B
     	     \- B'

B will run after A and so may be scheduled on an idle engine and wait on
A using a semaphore. B' sees B being executed and so enters the queue on
the same engine as A. As B' did not inherit the semaphore-chain from B,
it may have higher precedence than A and so preempts execution. However,
B' then sits on a semaphore waiting for B, who is waiting for A, who is
blocked by B'.

Ergo B' needs to inherit the scheduler properties from B (i.e. the
semaphore chain) so that it is scheduled with the same priority as B and
will not be executed ahead of B's dependencies.

Furthermore, to prevent the priorities from changing via the exposed
fence on B', we need to couple in the dependencies for PI (priority
inheritance). This requires us to relax our sanity-checks that
dependencies are strictly in order.

Fixes: ee1136908e9b ("drm/i915/execlists: Virtual engine bonding")
Testcase: igt/gem_exec_balancer/bonded-chain
Testcase: igt/gem_exec_balancer/bonded-semaphore
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
Transfer any semaphore dependencies as well.
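
For context, a minimal userspace sketch of the bonded submission this
targets (my illustration, not part of the patch; the bonded-engine setup
via I915_CONTEXT_PARAM_ENGINES, error handling and the exact header path
are omitted or assumed, but the execbuf2 fields follow the real uAPI):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void submit_bonded_pair(int fd,
			       struct drm_i915_gem_execbuffer2 *master, /* B  */
			       struct drm_i915_gem_execbuffer2 *bond)   /* B' */
{
	/* B: request an out-fence so the bond can be coupled to it */
	master->flags |= I915_EXEC_FENCE_OUT;
	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, master);

	/*
	 * B': the submit fence asks that B' is submitted for execution
	 * only once B itself is submitted -- the path that previously
	 * failed to inherit B's semaphore-chain flags and dependencies.
	 */
	bond->flags |= I915_EXEC_FENCE_SUBMIT;
	bond->rsvd2 = master->rsvd2 >> 32; /* out-fence fd returned for B */
	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, bond);
}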
---
 drivers/gpu/drm/i915/i915_request.c   | 115 ++++++++++++++++++++------
 drivers/gpu/drm/i915/i915_scheduler.c |   1 -
 2 files changed, 90 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 3109b8a79b9f..e0d24977aa9b 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -300,11 +300,11 @@ void i915_request_retire_upto(struct i915_request *rq)
 }
 
 static int
-__i915_request_await_execution(struct i915_request *rq,
-			       struct i915_request *signal,
-			       void (*hook)(struct i915_request *rq,
-					    struct dma_fence *signal),
-			       gfp_t gfp)
+__await_execution(struct i915_request *rq,
+		  struct i915_request *signal,
+		  void (*hook)(struct i915_request *rq,
+			       struct dma_fence *signal),
+		  gfp_t gfp)
 {
 	struct execute_cb *cb;
 
@@ -341,6 +341,8 @@ __i915_request_await_execution(struct i915_request *rq,
 	}
 	spin_unlock_irq(&signal->lock);
 
+	/* Copy across semaphore status as we need the same behaviour */
+	rq->sched.flags |= signal->sched.flags;
 	return 0;
 }
 
@@ -824,31 +826,21 @@ already_busywaiting(struct i915_request *rq)
 }
 
 static int
-emit_semaphore_wait(struct i915_request *to,
-		    struct i915_request *from,
-		    gfp_t gfp)
+__emit_semaphore_wait(struct i915_request *to,
+		      struct i915_request *from,
+		      u32 seqno)
 {
 	const int has_token = INTEL_GEN(to->i915) >= 12;
 	u32 hwsp_offset;
-	int len;
+	int len, err;
 	u32 *cs;
 
 	GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
 
-	/* Just emit the first semaphore we see as request space is limited. */
-	if (already_busywaiting(to) & from->engine->mask)
-		goto await_fence;
-
-	if (i915_request_await_start(to, from) < 0)
-		goto await_fence;
-
-	/* Only submit our spinner after the signaler is running! */
-	if (__i915_request_await_execution(to, from, NULL, gfp))
-		goto await_fence;
-
 	/* We need to pin the signaler's HWSP until we are finished reading. */
-	if (intel_timeline_read_hwsp(from, to, &hwsp_offset))
-		goto await_fence;
+	err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
+	if (err)
+		return err;
 
 	len = 4;
 	if (has_token)
@@ -871,7 +863,7 @@ emit_semaphore_wait(struct i915_request *to,
 		 MI_SEMAPHORE_POLL |
 		 MI_SEMAPHORE_SAD_GTE_SDD) +
 		has_token;
-	*cs++ = from->fence.seqno;
+	*cs++ = seqno;
 	*cs++ = hwsp_offset;
 	*cs++ = 0;
 	if (has_token) {
@@ -880,6 +872,28 @@ emit_semaphore_wait(struct i915_request *to,
 	}
 
 	intel_ring_advance(to, cs);
+	return 0;
+}
+
+static int
+emit_semaphore_wait(struct i915_request *to,
+		    struct i915_request *from,
+		    gfp_t gfp)
+{
+	/* Just emit the first semaphore we see as request space is limited. */
+	if (already_busywaiting(to) & from->engine->mask)
+		goto await_fence;
+
+	if (i915_request_await_start(to, from) < 0)
+		goto await_fence;
+
+	/* Only submit our spinner after the signaler is running! */
+	if (__await_execution(to, from, NULL, gfp))
+		goto await_fence;
+
+	if (__emit_semaphore_wait(to, from, from->fence.seqno))
+		goto await_fence;
+
 	to->sched.semaphores |= from->engine->mask;
 	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
 	return 0;
@@ -993,6 +1007,58 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 	return 0;
 }
 
+static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
+					  struct dma_fence *fence)
+{
+	return __intel_timeline_sync_is_later(tl,
+					      fence->context,
+					      fence->seqno - 1);
+}
+
+static int intel_timeline_sync_set_start(struct intel_timeline *tl,
+					 const struct dma_fence *fence)
+{
+	return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
+}
+
+static int
+__i915_request_await_execution(struct i915_request *to,
+			       struct i915_request *from,
+			       void (*hook)(struct i915_request *rq,
+					    struct dma_fence *signal))
+{
+	bool has_sync;
+	int err;
+
+	/* Submit both requests at the same time */
+	err = __await_execution(to, from, hook, I915_FENCE_GFP);
+	if (err)
+		return err;
+
+	if (!to->engine->schedule)
+		return 0;
+
+	/* Squash repeated dependencies to the same timelines */
+	if (intel_timeline_sync_has_start(i915_request_timeline(to),
+					  &from->fence))
+		return 0;
+
+	/* Ensure both start together after all semaphores in signal */
+	if (from->sched.semaphores && !has_sync) {
+		err =__emit_semaphore_wait(to, from, from->fence.seqno - 1);
+		if (err)
+			return err;
+	}
+
+	/* Couple the dependency tree for PI on this exposed to->fence */
+	err = i915_sched_node_add_dependency(&to->sched, &from->sched);
+	if (err < 0)
+		return err;
+
+	return intel_timeline_sync_set_start(i915_request_timeline(to),
+					     &from->fence);
+}
+
 int
 i915_request_await_execution(struct i915_request *rq,
 			     struct dma_fence *fence,
@@ -1026,8 +1092,7 @@ i915_request_await_execution(struct i915_request *rq,
 		if (dma_fence_is_i915(fence))
 			ret = __i915_request_await_execution(rq,
 							     to_request(fence),
-							     hook,
-							     I915_FENCE_GFP);
+							     hook);
 		else
 			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
 							    I915_FENCE_TIMEOUT,
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index ad6ff52bc316..00f5d107f2b7 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -492,7 +492,6 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 	 * so we may be called out-of-order.
 	 */
 	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
-		GEM_BUG_ON(!node_signaled(dep->signaler));
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
 		list_del(&dep->wait_link);
-- 
2.24.0


* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Copy across scheduler behaviour flags across submit fences (rev2)
From: Patchwork @ 2019-12-04 12:18 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Copy across scheduler behaviour flags across submit fences (rev2)
URL   : https://patchwork.freedesktop.org/series/70107/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
d65868d85183 drm/i915: Copy across scheduler behaviour flags across submit fences
-:185: ERROR:SPACING: spaces required around that '=' (ctx:WxV)
#185: FILE: drivers/gpu/drm/i915/i915_request.c:1048:
+		err =__emit_semaphore_wait(to, from, from->fence.seqno - 1);
 		    ^

total: 1 errors, 0 warnings, 0 checks, 172 lines checked


* [Intel-gfx] ✓ Fi.CI.BAT: success for drm/i915: Copy across scheduler behaviour flags across submit fences (rev2)
From: Patchwork @ 2019-12-04 13:33 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Copy across scheduler behaviour flags across submit fences (rev2)
URL   : https://patchwork.freedesktop.org/series/70107/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7480 -> Patchwork_15579
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15579/index.html

Known issues
------------

  Here are the changes found in Patchwork_15579 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_selftest@live_blt:
    - fi-ivb-3770:        [PASS][1] -> [DMESG-FAIL][2] ([i915#563])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7480/fi-ivb-3770/igt@i915_selftest@live_blt.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15579/fi-ivb-3770/igt@i915_selftest@live_blt.html
    - fi-bsw-nick:        [PASS][3] -> [DMESG-FAIL][4] ([i915#563])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7480/fi-bsw-nick/igt@i915_selftest@live_blt.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15579/fi-bsw-nick/igt@i915_selftest@live_blt.html

  * igt@i915_selftest@live_gem_contexts:
    - fi-byt-n2820:       [PASS][5] -> [INCOMPLETE][6] ([i915#45])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7480/fi-byt-n2820/igt@i915_selftest@live_gem_contexts.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15579/fi-byt-n2820/igt@i915_selftest@live_gem_contexts.html

  
#### Possible fixes ####

  * igt@i915_selftest@live_gem_contexts:
    - fi-skl-lmem:        [INCOMPLETE][7] ([i915#424]) -> [PASS][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7480/fi-skl-lmem/igt@i915_selftest@live_gem_contexts.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15579/fi-skl-lmem/igt@i915_selftest@live_gem_contexts.html
    - fi-hsw-peppy:       [INCOMPLETE][9] ([i915#694]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7480/fi-hsw-peppy/igt@i915_selftest@live_gem_contexts.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15579/fi-hsw-peppy/igt@i915_selftest@live_gem_contexts.html

  
#### Warnings ####

  * igt@i915_pm_rpm@module-reload:
    - fi-skl-6770hq:      [DMESG-WARN][11] ([i915#592]) -> [FAIL][12] ([i915#178])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7480/fi-skl-6770hq/igt@i915_pm_rpm@module-reload.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15579/fi-skl-6770hq/igt@i915_pm_rpm@module-reload.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [FAIL][13] ([fdo#111096] / [i915#323]) -> [FAIL][14] ([fdo#111407])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7480/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15579/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  * igt@kms_flip@basic-flip-vs-modeset:
    - fi-kbl-x1275:       [DMESG-WARN][15] ([i915#62] / [i915#92]) -> [DMESG-WARN][16] ([i915#62] / [i915#92] / [i915#95]) +7 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7480/fi-kbl-x1275/igt@kms_flip@basic-flip-vs-modeset.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15579/fi-kbl-x1275/igt@kms_flip@basic-flip-vs-modeset.html

  * igt@kms_pipe_crc_basic@read-crc-pipe-b:
    - fi-kbl-x1275:       [DMESG-WARN][17] ([i915#62] / [i915#92] / [i915#95]) -> [DMESG-WARN][18] ([i915#62] / [i915#92]) +1 similar issue
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7480/fi-kbl-x1275/igt@kms_pipe_crc_basic@read-crc-pipe-b.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15579/fi-kbl-x1275/igt@kms_pipe_crc_basic@read-crc-pipe-b.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#111096]: https://bugs.freedesktop.org/show_bug.cgi?id=111096
  [fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407
  [fdo#111593]: https://bugs.freedesktop.org/show_bug.cgi?id=111593
  [i915#178]: https://gitlab.freedesktop.org/drm/intel/issues/178
  [i915#323]: https://gitlab.freedesktop.org/drm/intel/issues/323
  [i915#424]: https://gitlab.freedesktop.org/drm/intel/issues/424
  [i915#45]: https://gitlab.freedesktop.org/drm/intel/issues/45
  [i915#563]: https://gitlab.freedesktop.org/drm/intel/issues/563
  [i915#592]: https://gitlab.freedesktop.org/drm/intel/issues/592
  [i915#62]: https://gitlab.freedesktop.org/drm/intel/issues/62
  [i915#694]: https://gitlab.freedesktop.org/drm/intel/issues/694
  [i915#710]: https://gitlab.freedesktop.org/drm/intel/issues/710
  [i915#92]: https://gitlab.freedesktop.org/drm/intel/issues/92
  [i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95


Participating hosts (50 -> 47)
------------------------------

  Additional (2): fi-hsw-4770r fi-tgl-guc 
  Missing    (5): fi-ilk-m540 fi-byt-squawks fi-bsw-cyan fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7480 -> Patchwork_15579

  CI-20190529: 20190529
  CI_DRM_7480: acd4664d865fde46dcd32c48ccac1b048b355bea @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5323: b0f877d06a78b9c38ed246be2537a0453b6c214f @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_15579: d65868d851833a8bb238c9d5bc85e7acdb1a7030 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

d65868d85183 drm/i915: Copy across scheduler behaviour flags across submit fences

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15579/index.html

* Re: [Intel-gfx] [PATCH v2] drm/i915: Copy across scheduler behaviour flags across submit fences
From: Tvrtko Ursulin @ 2019-12-04 14:12 UTC
  To: Chris Wilson, intel-gfx


On 04/12/2019 11:26, Chris Wilson wrote:
> We want the bonded request to have the same scheduler properties as its
> master so that it is placed at the same depth in the queue. For example,
> consider we have requests A, B and B', where B & B' are a bonded pair to
> run in parallel on two engines.
> 
> 	A -> B
>       	     \- B'
> 
> B will run after A and so may be scheduled on an idle engine and wait on
> A using a semaphore. B' sees B being executed and so enters the queue on
> the same engine as A. As B' did not inherit the semaphore-chain from B,
> it may have higher precedence than A and so preempts execution. However,
> B' then sits on a semaphore waiting for B, who is waiting for A, who is
> blocked by B.
> 
> Ergo B' needs to inherit the scheduler properties from B (i.e. the
> semaphore chain) so that it is scheduled with the same priority as B and
> will not be executed ahead of Bs dependencies.
> 
> Furthermore, to prevent the priorities changing via the expose fence on
> B', we need to couple in the dependencies for PI. This requires us to
> relax our sanity-checks that dependencies are strictly in order.
> 
> Fixes: ee1136908e9b ("drm/i915/execlists: Virtual engine bonding")
> Testcase: igt/gem_exec_balancer/bonded-chain
> Testcase: igt/gem_exec_balancer/bonded-semaphore
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
> Transfer any semaphore dependencies as well.
> ---
>   drivers/gpu/drm/i915/i915_request.c   | 115 ++++++++++++++++++++------
>   drivers/gpu/drm/i915/i915_scheduler.c |   1 -
>   2 files changed, 90 insertions(+), 26 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 3109b8a79b9f..e0d24977aa9b 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -300,11 +300,11 @@ void i915_request_retire_upto(struct i915_request *rq)
>   }
>   
>   static int
> -__i915_request_await_execution(struct i915_request *rq,
> -			       struct i915_request *signal,
> -			       void (*hook)(struct i915_request *rq,
> -					    struct dma_fence *signal),
> -			       gfp_t gfp)
> +__await_execution(struct i915_request *rq,
> +		  struct i915_request *signal,
> +		  void (*hook)(struct i915_request *rq,
> +			       struct dma_fence *signal),
> +		  gfp_t gfp)
>   {
>   	struct execute_cb *cb;
>   
> @@ -341,6 +341,8 @@ __i915_request_await_execution(struct i915_request *rq,
>   	}
>   	spin_unlock_irq(&signal->lock);
>   
> +	/* Copy across semaphore status as we need the same behaviour */
> +	rq->sched.flags |= signal->sched.flags;
>   	return 0;
>   }
>   
> @@ -824,31 +826,21 @@ already_busywaiting(struct i915_request *rq)
>   }
>   
>   static int
> -emit_semaphore_wait(struct i915_request *to,
> -		    struct i915_request *from,
> -		    gfp_t gfp)
> +__emit_semaphore_wait(struct i915_request *to,
> +		      struct i915_request *from,
> +		      u32 seqno)
>   {
>   	const int has_token = INTEL_GEN(to->i915) >= 12;
>   	u32 hwsp_offset;
> -	int len;
> +	int len, err;
>   	u32 *cs;
>   
>   	GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
>   
> -	/* Just emit the first semaphore we see as request space is limited. */
> -	if (already_busywaiting(to) & from->engine->mask)
> -		goto await_fence;
> -
> -	if (i915_request_await_start(to, from) < 0)
> -		goto await_fence;
> -
> -	/* Only submit our spinner after the signaler is running! */
> -	if (__i915_request_await_execution(to, from, NULL, gfp))
> -		goto await_fence;
> -
>   	/* We need to pin the signaler's HWSP until we are finished reading. */
> -	if (intel_timeline_read_hwsp(from, to, &hwsp_offset))
> -		goto await_fence;
> +	err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
> +	if (err)
> +		return err;
>   
>   	len = 4;
>   	if (has_token)
> @@ -871,7 +863,7 @@ emit_semaphore_wait(struct i915_request *to,
>   		 MI_SEMAPHORE_POLL |
>   		 MI_SEMAPHORE_SAD_GTE_SDD) +
>   		has_token;
> -	*cs++ = from->fence.seqno;
> +	*cs++ = seqno;
>   	*cs++ = hwsp_offset;
>   	*cs++ = 0;
>   	if (has_token) {
> @@ -880,6 +872,28 @@ emit_semaphore_wait(struct i915_request *to,
>   	}
>   
>   	intel_ring_advance(to, cs);
> +	return 0;
> +}
> +
> +static int
> +emit_semaphore_wait(struct i915_request *to,
> +		    struct i915_request *from,
> +		    gfp_t gfp)
> +{
> +	/* Just emit the first semaphore we see as request space is limited. */
> +	if (already_busywaiting(to) & from->engine->mask)
> +		goto await_fence;
> +
> +	if (i915_request_await_start(to, from) < 0)
> +		goto await_fence;
> +
> +	/* Only submit our spinner after the signaler is running! */
> +	if (__await_execution(to, from, NULL, gfp))
> +		goto await_fence;
> +
> +	if (__emit_semaphore_wait(to, from, from->fence.seqno))
> +		goto await_fence;
> +
>   	to->sched.semaphores |= from->engine->mask;
>   	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
>   	return 0;
> @@ -993,6 +1007,58 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
>   	return 0;
>   }
>   
> +static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
> +					  struct dma_fence *fence)
> +{
> +	return __intel_timeline_sync_is_later(tl,
> +					      fence->context,
> +					      fence->seqno - 1);
> +}
> +
> +static int intel_timeline_sync_set_start(struct intel_timeline *tl,
> +					 const struct dma_fence *fence)
> +{
> +	return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
> +}
> +
> +static int
> +__i915_request_await_execution(struct i915_request *to,
> +			       struct i915_request *from,
> +			       void (*hook)(struct i915_request *rq,
> +					    struct dma_fence *signal))
> +{
> +	bool has_sync;
> +	int err;
> +
> +	/* Submit both requests at the same time */
> +	err = __await_execution(to, from, hook, I915_FENCE_GFP);
> +	if (err)
> +		return err;
> +
> +	if (!to->engine->schedule)
> +		return 0;
> +
> +	/* Squash repeated depenendices to the same timelines */
> +	if (intel_timeline_sync_has_start(i915_request_timeline(to),
> +					  &from->fence))
> +		return 0;
> +
> +	/* Ensure both start together after all semaphores in signal */
> +	if (from->sched.semaphores && !has_sync) {
> +		err =__emit_semaphore_wait(to, from, from->fence.seqno - 1);

Forgot to git add something? has_sync is uninitialized.

Regards,

Tvrtko

> +		if (err)
> +			return err;
> +	}
> +
> +	/* Couple the dependency tree for PI on this exposed to->fence */
> +	err = i915_sched_node_add_dependency(&to->sched, &from->sched);
> +	if (err < 0)
> +		return err;
> +
> +	return intel_timeline_sync_set_start(i915_request_timeline(to),
> +					     &from->fence);
> +}
> +
>   int
>   i915_request_await_execution(struct i915_request *rq,
>   			     struct dma_fence *fence,
> @@ -1026,8 +1092,7 @@ i915_request_await_execution(struct i915_request *rq,
>   		if (dma_fence_is_i915(fence))
>   			ret = __i915_request_await_execution(rq,
>   							     to_request(fence),
> -							     hook,
> -							     I915_FENCE_GFP);
> +							     hook);
>   		else
>   			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
>   							    I915_FENCE_TIMEOUT,
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index ad6ff52bc316..00f5d107f2b7 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -492,7 +492,6 @@ void i915_sched_node_fini(struct i915_sched_node *node)
>   	 * so we may be called out-of-order.
>   	 */
>   	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
> -		GEM_BUG_ON(!node_signaled(dep->signaler));
>   		GEM_BUG_ON(!list_empty(&dep->dfs_link));
>   
>   		list_del(&dep->wait_link);
> 

* Re: [Intel-gfx] [PATCH v2] drm/i915: Copy across scheduler behaviour flags across submit fences
From: Chris Wilson @ 2019-12-04 14:15 UTC
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-12-04 14:12:42)
> 
> On 04/12/2019 11:26, Chris Wilson wrote:
> > We want the bonded request to have the same scheduler properties as its
> > master so that it is placed at the same depth in the queue. For example,
> > consider we have requests A, B and B', where B & B' are a bonded pair to
> > run in parallel on two engines.
> > 
> >       A -> B
> >                    \- B'
> > 
> > B will run after A and so may be scheduled on an idle engine and wait on
> > A using a semaphore. B' sees B being executed and so enters the queue on
> > the same engine as A. As B' did not inherit the semaphore-chain from B,
> > it may have higher precedence than A and so preempts execution. However,
> > B' then sits on a semaphore waiting for B, who is waiting for A, who is
> > blocked by B.
> > 
> > Ergo B' needs to inherit the scheduler properties from B (i.e. the
> > semaphore chain) so that it is scheduled with the same priority as B and
> > will not be executed ahead of Bs dependencies.
> > 
> > Furthermore, to prevent the priorities changing via the expose fence on
> > B', we need to couple in the dependencies for PI. This requires us to
> > relax our sanity-checks that dependencies are strictly in order.
> > 
> > Fixes: ee1136908e9b ("drm/i915/execlists: Virtual engine bonding")
> > Testcase: igt/gem_exec_balancer/bonded-chain
> > Testcase: igt/gem_exec_balancer/bonded-semaphore
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > ---
> > Transfer any semaphore dependencies as well.
> > ---
> >   drivers/gpu/drm/i915/i915_request.c   | 115 ++++++++++++++++++++------
> >   drivers/gpu/drm/i915/i915_scheduler.c |   1 -
> >   2 files changed, 90 insertions(+), 26 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> > index 3109b8a79b9f..e0d24977aa9b 100644
> > --- a/drivers/gpu/drm/i915/i915_request.c
> > +++ b/drivers/gpu/drm/i915/i915_request.c
> > @@ -300,11 +300,11 @@ void i915_request_retire_upto(struct i915_request *rq)
> >   }
> >   
> >   static int
> > -__i915_request_await_execution(struct i915_request *rq,
> > -                            struct i915_request *signal,
> > -                            void (*hook)(struct i915_request *rq,
> > -                                         struct dma_fence *signal),
> > -                            gfp_t gfp)
> > +__await_execution(struct i915_request *rq,
> > +               struct i915_request *signal,
> > +               void (*hook)(struct i915_request *rq,
> > +                            struct dma_fence *signal),
> > +               gfp_t gfp)
> >   {
> >       struct execute_cb *cb;
> >   
> > @@ -341,6 +341,8 @@ __i915_request_await_execution(struct i915_request *rq,
> >       }
> >       spin_unlock_irq(&signal->lock);
> >   
> > +     /* Copy across semaphore status as we need the same behaviour */
> > +     rq->sched.flags |= signal->sched.flags;
> >       return 0;
> >   }
> >   
> > @@ -824,31 +826,21 @@ already_busywaiting(struct i915_request *rq)
> >   }
> >   
> >   static int
> > -emit_semaphore_wait(struct i915_request *to,
> > -                 struct i915_request *from,
> > -                 gfp_t gfp)
> > +__emit_semaphore_wait(struct i915_request *to,
> > +                   struct i915_request *from,
> > +                   u32 seqno)
> >   {
> >       const int has_token = INTEL_GEN(to->i915) >= 12;
> >       u32 hwsp_offset;
> > -     int len;
> > +     int len, err;
> >       u32 *cs;
> >   
> >       GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
> >   
> > -     /* Just emit the first semaphore we see as request space is limited. */
> > -     if (already_busywaiting(to) & from->engine->mask)
> > -             goto await_fence;
> > -
> > -     if (i915_request_await_start(to, from) < 0)
> > -             goto await_fence;
> > -
> > -     /* Only submit our spinner after the signaler is running! */
> > -     if (__i915_request_await_execution(to, from, NULL, gfp))
> > -             goto await_fence;
> > -
> >       /* We need to pin the signaler's HWSP until we are finished reading. */
> > -     if (intel_timeline_read_hwsp(from, to, &hwsp_offset))
> > -             goto await_fence;
> > +     err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
> > +     if (err)
> > +             return err;
> >   
> >       len = 4;
> >       if (has_token)
> > @@ -871,7 +863,7 @@ emit_semaphore_wait(struct i915_request *to,
> >                MI_SEMAPHORE_POLL |
> >                MI_SEMAPHORE_SAD_GTE_SDD) +
> >               has_token;
> > -     *cs++ = from->fence.seqno;
> > +     *cs++ = seqno;
> >       *cs++ = hwsp_offset;
> >       *cs++ = 0;
> >       if (has_token) {
> > @@ -880,6 +872,28 @@ emit_semaphore_wait(struct i915_request *to,
> >       }
> >   
> >       intel_ring_advance(to, cs);
> > +     return 0;
> > +}
> > +
> > +static int
> > +emit_semaphore_wait(struct i915_request *to,
> > +                 struct i915_request *from,
> > +                 gfp_t gfp)
> > +{
> > +     /* Just emit the first semaphore we see as request space is limited. */
> > +     if (already_busywaiting(to) & from->engine->mask)
> > +             goto await_fence;
> > +
> > +     if (i915_request_await_start(to, from) < 0)
> > +             goto await_fence;
> > +
> > +     /* Only submit our spinner after the signaler is running! */
> > +     if (__await_execution(to, from, NULL, gfp))
> > +             goto await_fence;
> > +
> > +     if (__emit_semaphore_wait(to, from, from->fence.seqno))
> > +             goto await_fence;
> > +
> >       to->sched.semaphores |= from->engine->mask;
> >       to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
> >       return 0;
> > @@ -993,6 +1007,58 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
> >       return 0;
> >   }
> >   
> > +static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
> > +                                       struct dma_fence *fence)
> > +{
> > +     return __intel_timeline_sync_is_later(tl,
> > +                                           fence->context,
> > +                                           fence->seqno - 1);
> > +}
> > +
> > +static int intel_timeline_sync_set_start(struct intel_timeline *tl,
> > +                                      const struct dma_fence *fence)
> > +{
> > +     return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
> > +}
> > +
> > +static int
> > +__i915_request_await_execution(struct i915_request *to,
> > +                            struct i915_request *from,
> > +                            void (*hook)(struct i915_request *rq,
> > +                                         struct dma_fence *signal))
> > +{
> > +     bool has_sync;
> > +     int err;
> > +
> > +     /* Submit both requests at the same time */
> > +     err = __await_execution(to, from, hook, I915_FENCE_GFP);
> > +     if (err)
> > +             return err;
> > +
> > +     if (!to->engine->schedule)
> > +             return 0;
> > +
> > +     /* Squash repeated depenendices to the same timelines */
> > +     if (intel_timeline_sync_has_start(i915_request_timeline(to),
> > +                                       &from->fence))
> > +             return 0;
> > +
> > +     /* Ensure both start together after all semaphores in signal */
> > +     if (from->sched.semaphores && !has_sync) {
> > +             err =__emit_semaphore_wait(to, from, from->fence.seqno - 1);
> 
> Forgot to git add something? has_sync is uninitialized.

has_sync was intel_timeline_sync_has_start, after rearranging the code
it died.
-Chris

* [Intel-gfx] [PATCH v2] drm/i915: Copy across scheduler behaviour flags across submit fences
From: Chris Wilson @ 2019-12-04 14:19 UTC
  To: intel-gfx

We want the bonded request to have the same scheduler properties as its
master so that it is placed at the same depth in the queue. For example,
consider we have requests A, B and B', where B & B' are a bonded pair to
run in parallel on two engines.

	A -> B
     	     \- B'

B will run after A and so may be scheduled on an idle engine and wait on
A using a semaphore. B' sees B being executed and so enters the queue on
the same engine as A. As B' did not inherit the semaphore-chain from B,
it may have higher precedence than A and so preempts execution. However,
B' then sits on a semaphore waiting for B, who is waiting for A, who is
blocked by B'.

Ergo B' needs to inherit the scheduler properties from B (i.e. the
semaphore chain) so that it is scheduled with the same priority as B and
will not be executed ahead of B's dependencies.

Furthermore, to prevent the priorities from changing via the exposed
fence on B', we need to couple in the dependencies for PI (priority
inheritance). This requires us to relax our sanity-checks that
dependencies are strictly in order.

Fixes: ee1136908e9b ("drm/i915/execlists: Virtual engine bonding")
Testcase: igt/gem_exec_balancer/bonded-chain
Testcase: igt/gem_exec_balancer/bonded-semaphore
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
Take two with less has_sync
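
As an aside (my annotation, not part of the diff): the recurring
"seqno - 1" below relies on seqnos on a single timeline completing in
order, so the predecessor's completion doubles as "the request may now
start". A hypothetical helper to make the intent explicit:

static u32 start_seqno(const struct i915_request *rq)
{
	/*
	 * Once the HWSP reaches rq->fence.seqno - 1, everything queued
	 * ahead of rq on its timeline has finished and rq is free to
	 * begin execution; waiting on seqno - 1 is therefore a "wait
	 * for start", whereas waiting on seqno is a "wait for finish".
	 */
	return rq->fence.seqno - 1;
}

The same seqno - 1 value keys the timeline sync map, which is how
intel_timeline_sync_has_start() recognises a repeated submit-fence await
on the same master and squashes it rather than emitting another
semaphore and dependency.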
---
 drivers/gpu/drm/i915/i915_request.c   | 114 ++++++++++++++++++++------
 drivers/gpu/drm/i915/i915_scheduler.c |   1 -
 2 files changed, 89 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 3109b8a79b9f..298fd1b44d7a 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -300,11 +300,11 @@ void i915_request_retire_upto(struct i915_request *rq)
 }
 
 static int
-__i915_request_await_execution(struct i915_request *rq,
-			       struct i915_request *signal,
-			       void (*hook)(struct i915_request *rq,
-					    struct dma_fence *signal),
-			       gfp_t gfp)
+__await_execution(struct i915_request *rq,
+		  struct i915_request *signal,
+		  void (*hook)(struct i915_request *rq,
+			       struct dma_fence *signal),
+		  gfp_t gfp)
 {
 	struct execute_cb *cb;
 
@@ -341,6 +341,8 @@ __i915_request_await_execution(struct i915_request *rq,
 	}
 	spin_unlock_irq(&signal->lock);
 
+	/* Copy across semaphore status as we need the same behaviour */
+	rq->sched.flags |= signal->sched.flags;
 	return 0;
 }
 
@@ -824,31 +826,21 @@ already_busywaiting(struct i915_request *rq)
 }
 
 static int
-emit_semaphore_wait(struct i915_request *to,
-		    struct i915_request *from,
-		    gfp_t gfp)
+__emit_semaphore_wait(struct i915_request *to,
+		      struct i915_request *from,
+		      u32 seqno)
 {
 	const int has_token = INTEL_GEN(to->i915) >= 12;
 	u32 hwsp_offset;
-	int len;
+	int len, err;
 	u32 *cs;
 
 	GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
 
-	/* Just emit the first semaphore we see as request space is limited. */
-	if (already_busywaiting(to) & from->engine->mask)
-		goto await_fence;
-
-	if (i915_request_await_start(to, from) < 0)
-		goto await_fence;
-
-	/* Only submit our spinner after the signaler is running! */
-	if (__i915_request_await_execution(to, from, NULL, gfp))
-		goto await_fence;
-
 	/* We need to pin the signaler's HWSP until we are finished reading. */
-	if (intel_timeline_read_hwsp(from, to, &hwsp_offset))
-		goto await_fence;
+	err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
+	if (err)
+		return err;
 
 	len = 4;
 	if (has_token)
@@ -871,7 +863,7 @@ emit_semaphore_wait(struct i915_request *to,
 		 MI_SEMAPHORE_POLL |
 		 MI_SEMAPHORE_SAD_GTE_SDD) +
 		has_token;
-	*cs++ = from->fence.seqno;
+	*cs++ = seqno;
 	*cs++ = hwsp_offset;
 	*cs++ = 0;
 	if (has_token) {
@@ -880,6 +872,28 @@ emit_semaphore_wait(struct i915_request *to,
 	}
 
 	intel_ring_advance(to, cs);
+	return 0;
+}
+
+static int
+emit_semaphore_wait(struct i915_request *to,
+		    struct i915_request *from,
+		    gfp_t gfp)
+{
+	/* Just emit the first semaphore we see as request space is limited. */
+	if (already_busywaiting(to) & from->engine->mask)
+		goto await_fence;
+
+	if (i915_request_await_start(to, from) < 0)
+		goto await_fence;
+
+	/* Only submit our spinner after the signaler is running! */
+	if (__await_execution(to, from, NULL, gfp))
+		goto await_fence;
+
+	if (__emit_semaphore_wait(to, from, from->fence.seqno))
+		goto await_fence;
+
 	to->sched.semaphores |= from->engine->mask;
 	to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
 	return 0;
@@ -993,6 +1007,57 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 	return 0;
 }
 
+static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
+					  struct dma_fence *fence)
+{
+	return __intel_timeline_sync_is_later(tl,
+					      fence->context,
+					      fence->seqno - 1);
+}
+
+static int intel_timeline_sync_set_start(struct intel_timeline *tl,
+					 const struct dma_fence *fence)
+{
+	return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
+}
+
+static int
+__i915_request_await_execution(struct i915_request *to,
+			       struct i915_request *from,
+			       void (*hook)(struct i915_request *rq,
+					    struct dma_fence *signal))
+{
+	int err;
+
+	/* Submit both requests at the same time */
+	err = __await_execution(to, from, hook, I915_FENCE_GFP);
+	if (err)
+		return err;
+
+	if (!to->engine->schedule)
+		return 0;
+
+	/* Squash repeated dependencies to the same timelines */
+	if (intel_timeline_sync_has_start(i915_request_timeline(to),
+					  &from->fence))
+		return 0;
+
+	/* Ensure both start together after all semaphores in signal */
+	if (from->sched.semaphores) {
+		err =__emit_semaphore_wait(to, from, from->fence.seqno - 1);
+		if (err)
+			return err;
+	}
+
+	/* Couple the dependency tree for PI on this exposed to->fence */
+	err = i915_sched_node_add_dependency(&to->sched, &from->sched);
+	if (err < 0)
+		return err;
+
+	return intel_timeline_sync_set_start(i915_request_timeline(to),
+					     &from->fence);
+}
+
 int
 i915_request_await_execution(struct i915_request *rq,
 			     struct dma_fence *fence,
@@ -1026,8 +1091,7 @@ i915_request_await_execution(struct i915_request *rq,
 		if (dma_fence_is_i915(fence))
 			ret = __i915_request_await_execution(rq,
 							     to_request(fence),
-							     hook,
-							     I915_FENCE_GFP);
+							     hook);
 		else
 			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
 							    I915_FENCE_TIMEOUT,
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index ad6ff52bc316..00f5d107f2b7 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -492,7 +492,6 @@ void i915_sched_node_fini(struct i915_sched_node *node)
 	 * so we may be called out-of-order.
 	 */
 	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
-		GEM_BUG_ON(!node_signaled(dep->signaler));
 		GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
 		list_del(&dep->wait_link);
-- 
2.24.0


* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Copy across scheduler behaviour flags across submit fences (rev3)
From: Patchwork @ 2019-12-04 20:11 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Copy across scheduler behaviour flags across submit fences (rev3)
URL   : https://patchwork.freedesktop.org/series/70107/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
dd51b5894071 drm/i915: Copy across scheduler behaviour flags across submit fences
-:184: ERROR:SPACING: spaces required around that '=' (ctx:WxV)
#184: FILE: drivers/gpu/drm/i915/i915_request.c:1047:
+		err =__emit_semaphore_wait(to, from, from->fence.seqno - 1);
 		    ^

total: 1 errors, 0 warnings, 0 checks, 171 lines checked


* [Intel-gfx] ✗ Fi.CI.BAT: failure for drm/i915: Copy across scheduler behaviour flags across submit fences (rev3)
From: Patchwork @ 2019-12-04 20:34 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Copy across scheduler behaviour flags across submit fences (rev3)
URL   : https://patchwork.freedesktop.org/series/70107/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_7484 -> Patchwork_15582
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_15582 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_15582, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15582/index.html

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_15582:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_selftest@live_gem_contexts:
    - fi-cfl-8700k:       [PASS][1] -> [DMESG-FAIL][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7484/fi-cfl-8700k/igt@i915_selftest@live_gem_contexts.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15582/fi-cfl-8700k/igt@i915_selftest@live_gem_contexts.html

  
Known issues
------------

  Here are the changes found in Patchwork_15582 that come from known issues:

### IGT changes ###

#### Possible fixes ####

  * igt@gem_ctx_create@basic-files:
    - {fi-tgl-u}:         [INCOMPLETE][3] ([fdo#111735]) -> [PASS][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7484/fi-tgl-u/igt@gem_ctx_create@basic-files.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15582/fi-tgl-u/igt@gem_ctx_create@basic-files.html

  * igt@i915_selftest@live_blt:
    - fi-hsw-4770r:       [DMESG-FAIL][5] ([i915#683]) -> [PASS][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7484/fi-hsw-4770r/igt@i915_selftest@live_blt.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15582/fi-hsw-4770r/igt@i915_selftest@live_blt.html

  * igt@i915_selftest@live_gem_contexts:
    - fi-byt-n2820:       [INCOMPLETE][7] ([i915#45]) -> [PASS][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7484/fi-byt-n2820/igt@i915_selftest@live_gem_contexts.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15582/fi-byt-n2820/igt@i915_selftest@live_gem_contexts.html

  
#### Warnings ####

  * igt@i915_selftest@live_blt:
    - fi-hsw-4770:        [DMESG-FAIL][9] ([i915#683]) -> [DMESG-FAIL][10] ([i915#563])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7484/fi-hsw-4770/igt@i915_selftest@live_blt.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15582/fi-hsw-4770/igt@i915_selftest@live_blt.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [FAIL][11] ([fdo#111407]) -> [FAIL][12] ([fdo#111096] / [i915#323])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7484/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15582/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  * igt@kms_flip@basic-flip-vs-modeset:
    - fi-kbl-x1275:       [DMESG-WARN][13] ([i915#62] / [i915#92]) -> [DMESG-WARN][14] ([i915#62] / [i915#92] / [i915#95]) +6 similar issues
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7484/fi-kbl-x1275/igt@kms_flip@basic-flip-vs-modeset.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15582/fi-kbl-x1275/igt@kms_flip@basic-flip-vs-modeset.html

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
    - fi-kbl-x1275:       [DMESG-WARN][15] ([i915#62] / [i915#92] / [i915#95]) -> [DMESG-WARN][16] ([i915#62] / [i915#92]) +8 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7484/fi-kbl-x1275/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15582/fi-kbl-x1275/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#111096]: https://bugs.freedesktop.org/show_bug.cgi?id=111096
  [fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407
  [fdo#111735]: https://bugs.freedesktop.org/show_bug.cgi?id=111735
  [i915#323]: https://gitlab.freedesktop.org/drm/intel/issues/323
  [i915#45]: https://gitlab.freedesktop.org/drm/intel/issues/45
  [i915#563]: https://gitlab.freedesktop.org/drm/intel/issues/563
  [i915#62]: https://gitlab.freedesktop.org/drm/intel/issues/62
  [i915#683]: https://gitlab.freedesktop.org/drm/intel/issues/683
  [i915#92]: https://gitlab.freedesktop.org/drm/intel/issues/92
  [i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95


Participating hosts (54 -> 47)
------------------------------

  Missing    (7): fi-icl-1065g7 fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-ctg-p8600 fi-byt-clapper 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7484 -> Patchwork_15582

  CI-20190529: 20190529
  CI_DRM_7484: 1b2e7ab84231764361a41bbe00a4cd501d467f04 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5327: c2f7d1f79b865e5afca2a063c478b272a7362d6c @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_15582: dd51b5894071b6637da8e270d1dec61c6f9facd7 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

dd51b5894071 drm/i915: Copy across scheduler behaviour flags across submit fences

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15582/index.html
