* [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
@ 2020-05-12 13:22 Chris Wilson
  2020-05-12 14:23 ` [Intel-gfx] ✓ Fi.CI.BAT: success for " Patchwork
                   ` (6 more replies)
  0 siblings, 7 replies; 13+ messages in thread
From: Chris Wilson @ 2020-05-12 13:22 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris Wilson

The second try at staging the transfer of the breadcrumb. In part one,
we realised we could not simply move to the second engine as we were
only holding the breadcrumb lock on the first. So in commit 6c81e21a4742
("drm/i915/gt: Stage the transfer of the virtual breadcrumb"), we
removed it from the first engine and marked up this request to reattach
the signaling on the new engine. However, this failed to take into
account that we only attach the breadcrumb if the new request is added
at the start of the queue; but if we are transferring, it is because
we know there is already a request to be signaled (and hence we would
not be attached).

In this attempt, we try to transfer the completed requests to the
irq_worker on its rq->engine->breadcrumbs. This preserves the coupling
between the rq and its breadcrumbs, so that
i915_request_cancel_breadcrumb() does not attempt to manipulate the list
under the wrong lock.

Fixes: 6c81e21a4742 ("drm/i915/gt: Stage the transfer of the virtual breadcrumb")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
v2: rewrite from scratch with a new idea
---
 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c  | 33 ++++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_engine.h       |  3 ++
 drivers/gpu/drm/i915/gt/intel_engine_types.h |  2 ++
 drivers/gpu/drm/i915/gt/intel_lrc.c          | 26 ++-------------
 4 files changed, 41 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index cbedba857d43..e09dc162b508 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -155,6 +155,8 @@ static void signal_irq_work(struct irq_work *work)
 	if (b->irq_armed && list_empty(&b->signalers))
 		__intel_breadcrumbs_disarm_irq(b);
 
+	list_splice_init(&b->signaled_requests, &signal);
+
 	list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
 		GEM_BUG_ON(list_empty(&ce->signals));
 
@@ -255,6 +257,7 @@ void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
 
 	spin_lock_init(&b->irq_lock);
 	INIT_LIST_HEAD(&b->signalers);
+	INIT_LIST_HEAD(&b->signaled_requests);
 
 	init_irq_work(&b->irq_work, signal_irq_work);
 }
@@ -274,6 +277,36 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 	spin_unlock_irqrestore(&b->irq_lock, flags);
 }
 
+void intel_engine_transfer_breadcrumbs(struct intel_engine_cs *engine,
+				       struct intel_context *ce)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+	unsigned long flags;
+
+	spin_lock_irqsave(&b->irq_lock, flags);
+	if (!list_empty(&ce->signals)) {
+		struct i915_request *rq, *next;
+
+		list_for_each_entry_safe(rq, next, &ce->signals, signal_link) {
+			GEM_BUG_ON(rq->engine != engine);
+			GEM_BUG_ON(!i915_request_completed(rq));
+
+			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+			if (!__dma_fence_signal(&rq->fence))
+				continue;
+
+			i915_request_get(rq);
+			list_add_tail(&rq->signal_link, &b->signaled_requests);
+		}
+
+		INIT_LIST_HEAD(&ce->signals);
+		list_del_init(&ce->signal_link);
+
+		irq_work_queue(&b->irq_work);
+	}
+	spin_unlock_irqrestore(&b->irq_lock, flags);
+}
+
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 {
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index cb789c8bf06b..45418f887953 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -238,6 +238,9 @@ intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 
+void intel_engine_transfer_breadcrumbs(struct intel_engine_cs *engine,
+				       struct intel_context *ce);
+
 void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
 				    struct drm_printer *p);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index c113b7805e65..e20b39eefd79 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -377,6 +377,8 @@ struct intel_engine_cs {
 		spinlock_t irq_lock;
 		struct list_head signalers;
 
+		struct list_head signaled_requests;
+
 		struct irq_work irq_work; /* for use from inside irq_lock */
 
 		unsigned int irq_enabled;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 15716e4d6b76..ac32d494b07d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1821,30 +1821,10 @@ static bool virtual_matches(const struct virtual_engine *ve,
 	return true;
 }
 
-static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
-				     struct i915_request *rq)
+static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
 {
-	struct intel_engine_cs *old = ve->siblings[0];
-
 	/* All unattached (rq->engine == old) must already be completed */
-
-	spin_lock(&old->breadcrumbs.irq_lock);
-	if (!list_empty(&ve->context.signal_link)) {
-		list_del_init(&ve->context.signal_link);
-
-		/*
-		 * We cannot acquire the new engine->breadcrumbs.irq_lock
-		 * (as we are holding a breadcrumbs.irq_lock already),
-		 * so attach this request to the signaler on submission.
-		 * The queued irq_work will occur when we finally drop
-		 * the engine->active.lock after dequeue.
-		 */
-		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
-
-		/* Also transfer the pending irq_work for the old breadcrumb. */
-		intel_engine_signal_breadcrumbs(rq->engine);
-	}
-	spin_unlock(&old->breadcrumbs.irq_lock);
+	intel_engine_transfer_breadcrumbs(ve->siblings[0], &ve->context);
 }
 
 #define for_each_waiter(p__, rq__) \
@@ -2279,7 +2259,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 									engine);
 
 				if (!list_empty(&ve->context.signals))
-					virtual_xfer_breadcrumbs(ve, rq);
+					virtual_xfer_breadcrumbs(ve);
 
 				/*
 				 * Move the bound engine to the top of the list
-- 
2.20.1


* [Intel-gfx] ✓ Fi.CI.BAT: success for drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
  2020-05-12 13:22 [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker Chris Wilson
@ 2020-05-12 14:23 ` Patchwork
  2020-05-12 15:17 ` [Intel-gfx] [PATCH v2] " Tvrtko Ursulin
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2020-05-12 14:23 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
URL   : https://patchwork.freedesktop.org/series/77191/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_8470 -> Patchwork_17633
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/index.html

Known issues
------------

  Here are the changes found in Patchwork_17633 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_selftest@live@gt_engines:
    - fi-bwr-2160:        [PASS][1] -> [INCOMPLETE][2] ([i915#489])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-bwr-2160/igt@i915_selftest@live@gt_engines.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/fi-bwr-2160/igt@i915_selftest@live@gt_engines.html

  
#### Possible fixes ####

  * igt@i915_selftest@live@active:
    - fi-apl-guc:         [DMESG-FAIL][3] ([i915#666]) -> [PASS][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-apl-guc/igt@i915_selftest@live@active.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/fi-apl-guc/igt@i915_selftest@live@active.html

  
#### Warnings ####

  * igt@i915_pm_rpm@module-reload:
    - fi-kbl-x1275:       [FAIL][5] ([i915#62]) -> [SKIP][6] ([fdo#109271])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-kbl-x1275/igt@i915_pm_rpm@module-reload.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/fi-kbl-x1275/igt@i915_pm_rpm@module-reload.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [i915#1803]: https://gitlab.freedesktop.org/drm/intel/issues/1803
  [i915#489]: https://gitlab.freedesktop.org/drm/intel/issues/489
  [i915#62]: https://gitlab.freedesktop.org/drm/intel/issues/62
  [i915#666]: https://gitlab.freedesktop.org/drm/intel/issues/666


Participating hosts (49 -> 43)
------------------------------

  Missing    (6): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-kbl-7560u fi-byt-clapper 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_8470 -> Patchwork_17633

  CI-20190529: 20190529
  CI_DRM_8470: d2c5ae86eac811c49f2fe22c4fa02b6e6d31cd67 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5651: e54e2642f1967ca3c488db32264607df670d1dfb @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_17633: d0a8dd622ee47a4a2010d9166902c5638e362c14 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

d0a8dd622ee4 drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/index.html

* Re: [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
  2020-05-12 13:22 [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker Chris Wilson
  2020-05-12 14:23 ` [Intel-gfx] ✓ Fi.CI.BAT: success for " Patchwork
@ 2020-05-12 15:17 ` Tvrtko Ursulin
  2020-05-12 15:52   ` Chris Wilson
  2020-05-12 16:50 ` [Intel-gfx] [PATCH v3] " Chris Wilson
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 13+ messages in thread
From: Tvrtko Ursulin @ 2020-05-12 15:17 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 12/05/2020 14:22, Chris Wilson wrote:
> The second try at staging the transfer of the breadcrumb. In part one,
> we realised we could not simply move to the second engine as we were
> only holding the breadcrumb lock on the first. So in commit 6c81e21a4742
> ("drm/i915/gt: Stage the transfer of the virtual breadcrumb"), we
> removed it from the first engine and marked up this request to reattach
> the signaling on the new engine. However, this failed to take into
> account that we only attach the breadcrumb if the new request is added
> at the start of the queue; but if we are transferring, it is because
> we know there is already a request to be signaled (and hence we would
> not be attached).
> 
> In this attempt, we try to transfer the completed requests to the
> irq_worker on its rq->engine->breadcrumbs. This preserves the coupling
> between the rq and its breadcrumbs, so that
> i915_request_cancel_breadcrumb() does not attempt to manipulate the list
> under the wrong lock.
> 
> Fixes: 6c81e21a4742 ("drm/i915/gt: Stage the transfer of the virtual breadcrumb")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
> v2: rewrite from scratch with a new idea
> ---
>   drivers/gpu/drm/i915/gt/intel_breadcrumbs.c  | 33 ++++++++++++++++++++
>   drivers/gpu/drm/i915/gt/intel_engine.h       |  3 ++
>   drivers/gpu/drm/i915/gt/intel_engine_types.h |  2 ++
>   drivers/gpu/drm/i915/gt/intel_lrc.c          | 26 ++-------------
>   4 files changed, 41 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
> index cbedba857d43..e09dc162b508 100644
> --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
> +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
> @@ -155,6 +155,8 @@ static void signal_irq_work(struct irq_work *work)
>   	if (b->irq_armed && list_empty(&b->signalers))
>   		__intel_breadcrumbs_disarm_irq(b);
>   
> +	list_splice_init(&b->signaled_requests, &signal);
> +
>   	list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
>   		GEM_BUG_ON(list_empty(&ce->signals));
>   
> @@ -255,6 +257,7 @@ void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
>   
>   	spin_lock_init(&b->irq_lock);
>   	INIT_LIST_HEAD(&b->signalers);
> +	INIT_LIST_HEAD(&b->signaled_requests);
>   
>   	init_irq_work(&b->irq_work, signal_irq_work);
>   }
> @@ -274,6 +277,36 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
>   	spin_unlock_irqrestore(&b->irq_lock, flags);
>   }
>   
> +void intel_engine_transfer_breadcrumbs(struct intel_engine_cs *engine,
> +				       struct intel_context *ce)
> +{
> +	struct intel_breadcrumbs *b = &engine->breadcrumbs;
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&b->irq_lock, flags);
> +	if (!list_empty(&ce->signals)) {
> +		struct i915_request *rq, *next;
> +
> +		list_for_each_entry_safe(rq, next, &ce->signals, signal_link) {
> +			GEM_BUG_ON(rq->engine != engine);
> +			GEM_BUG_ON(!i915_request_completed(rq));

Do you remember why the breadcrumbs code uses local __request_completed 
helper?

 From here vvv

> +
> +			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
> +			if (!__dma_fence_signal(&rq->fence))
> +				continue;
> +
> +			i915_request_get(rq);
> +			list_add_tail(&rq->signal_link, &b->signaled_requests);

^^^ to here looks like a block which could be shared with signal_irq_work.

> +		}
> +
> +		INIT_LIST_HEAD(&ce->signals);

Hm because list_add and not list_move you can't assert all have been 
unlinked.

> +		list_del_init(&ce->signal_link);
> +
> +		irq_work_queue(&b->irq_work);
> +	}
> +	spin_unlock_irqrestore(&b->irq_lock, flags);
> +}
> +
>   void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
>   {
>   }
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
> index cb789c8bf06b..45418f887953 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine.h
> @@ -238,6 +238,9 @@ intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
>   void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
>   void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
>   
> +void intel_engine_transfer_breadcrumbs(struct intel_engine_cs *engine,
> +				       struct intel_context *ce);
> +
>   void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
>   				    struct drm_printer *p);
>   
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> index c113b7805e65..e20b39eefd79 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> @@ -377,6 +377,8 @@ struct intel_engine_cs {
>   		spinlock_t irq_lock;
>   		struct list_head signalers;
>   
> +		struct list_head signaled_requests;
> +
>   		struct irq_work irq_work; /* for use from inside irq_lock */
>   
>   		unsigned int irq_enabled;
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 15716e4d6b76..ac32d494b07d 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -1821,30 +1821,10 @@ static bool virtual_matches(const struct virtual_engine *ve,
>   	return true;
>   }
>   
> -static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
> -				     struct i915_request *rq)
> +static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
>   {
> -	struct intel_engine_cs *old = ve->siblings[0];
> -
>   	/* All unattached (rq->engine == old) must already be completed */

This comment feels a bit out of place now.

> -
> -	spin_lock(&old->breadcrumbs.irq_lock);
> -	if (!list_empty(&ve->context.signal_link)) {
> -		list_del_init(&ve->context.signal_link);
> -
> -		/*
> -		 * We cannot acquire the new engine->breadcrumbs.irq_lock
> -		 * (as we are holding a breadcrumbs.irq_lock already),
> -		 * so attach this request to the signaler on submission.
> -		 * The queued irq_work will occur when we finally drop
> -		 * the engine->active.lock after dequeue.
> -		 */
> -		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
> -
> -		/* Also transfer the pending irq_work for the old breadcrumb. */
> -		intel_engine_signal_breadcrumbs(rq->engine);
> -	}
> -	spin_unlock(&old->breadcrumbs.irq_lock);
> +	intel_engine_transfer_breadcrumbs(ve->siblings[0], &ve->context);

But isn't ve->siblings[0] the old engine at this point so new target 
engine would have to be explicitly passed in?

>   }
>   
>   #define for_each_waiter(p__, rq__) \
> @@ -2279,7 +2259,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   									engine);
>   
>   				if (!list_empty(&ve->context.signals))
> -					virtual_xfer_breadcrumbs(ve, rq);
> +					virtual_xfer_breadcrumbs(ve);
>   
>   				/*
>   				 * Move the bound engine to the top of the list
> 

Regards,

Tvrtko

* Re: [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
  2020-05-12 15:17 ` [Intel-gfx] [PATCH v2] " Tvrtko Ursulin
@ 2020-05-12 15:52   ` Chris Wilson
  2020-05-12 16:07     ` Tvrtko Ursulin
  0 siblings, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2020-05-12 15:52 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2020-05-12 16:17:30)
> 
> On 12/05/2020 14:22, Chris Wilson wrote:
> > The second try at staging the transfer of the breadcrumb. In part one,
> > we realised we could not simply move to the second engine as we were
> > only holding the breadcrumb lock on the first. So in commit 6c81e21a4742
> > ("drm/i915/gt: Stage the transfer of the virtual breadcrumb"), we
> > removed it from the first engine and marked up this request to reattach
> > the signaling on the new engine. However, this failed to take into
> > account that we only attach the breadcrumb if the new request is added
> > at the start of the queue; but if we are transferring, it is because
> > we know there is already a request to be signaled (and hence we would
> > not be attached).
> > 
> > In this attempt, we try to transfer the completed requests to the
> > irq_worker on its rq->engine->breadcrumbs. This preserves the coupling
> > between the rq and its breadcrumbs, so that
> > i915_request_cancel_breadcrumb() does not attempt to manipulate the list
> > under the wrong lock.
> > 
> > Fixes: 6c81e21a4742 ("drm/i915/gt: Stage the transfer of the virtual breadcrumb")
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > ---
> > v2: rewrite from scratch with a new idea
> > ---
> >   drivers/gpu/drm/i915/gt/intel_breadcrumbs.c  | 33 ++++++++++++++++++++
> >   drivers/gpu/drm/i915/gt/intel_engine.h       |  3 ++
> >   drivers/gpu/drm/i915/gt/intel_engine_types.h |  2 ++
> >   drivers/gpu/drm/i915/gt/intel_lrc.c          | 26 ++-------------
> >   4 files changed, 41 insertions(+), 23 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
> > index cbedba857d43..e09dc162b508 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
> > @@ -155,6 +155,8 @@ static void signal_irq_work(struct irq_work *work)
> >       if (b->irq_armed && list_empty(&b->signalers))
> >               __intel_breadcrumbs_disarm_irq(b);
> >   
> > +     list_splice_init(&b->signaled_requests, &signal);
> > +
> >       list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
> >               GEM_BUG_ON(list_empty(&ce->signals));
> >   
> > @@ -255,6 +257,7 @@ void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
> >   
> >       spin_lock_init(&b->irq_lock);
> >       INIT_LIST_HEAD(&b->signalers);
> > +     INIT_LIST_HEAD(&b->signaled_requests);
> >   
> >       init_irq_work(&b->irq_work, signal_irq_work);
> >   }
> > @@ -274,6 +277,36 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
> >       spin_unlock_irqrestore(&b->irq_lock, flags);
> >   }
> >   
> > +void intel_engine_transfer_breadcrumbs(struct intel_engine_cs *engine,
> > +                                    struct intel_context *ce)
> > +{
> > +     struct intel_breadcrumbs *b = &engine->breadcrumbs;
> > +     unsigned long flags;
> > +
> > +     spin_lock_irqsave(&b->irq_lock, flags);
> > +     if (!list_empty(&ce->signals)) {
> > +             struct i915_request *rq, *next;
> > +
> > +             list_for_each_entry_safe(rq, next, &ce->signals, signal_link) {
> > +                     GEM_BUG_ON(rq->engine != engine);
> > +                     GEM_BUG_ON(!i915_request_completed(rq));
> 
> Do you remember why the breadcrumbs code uses local __request_completed 
> helper?

It knows the hwsp isn't going to disappear; we know the same here, it's
just that this code was first written in intel_lrc.c.

Fwiw, the rcu_read_lock() we have in i915_request_completed() is one of
our worst lockdep hotspots
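
Roughly (paraphrasing from memory rather than quoting the exact tree),
the local helper is just the raw seqno check against the pinned HWSP:

static inline bool __request_completed(const struct i915_request *rq)
{
	/* the breadcrumb lists keep the timeline pinned, so the HWSP is stable */
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}

whereas i915_request_completed() has to wrap the same check inside
rcu_read_lock() for callers that cannot otherwise guarantee the HWSP
stays alive.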

>  From here vvv
> 
> > +
> > +                     clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
> > +                     if (!__dma_fence_signal(&rq->fence))
> > +                             continue;
> > +
> > +                     i915_request_get(rq);
> > +                     list_add_tail(&rq->signal_link, &b->signaled_requests);
> 
> ^^^ to here looks like a block which could be shared with signal_irq_work.

And not even a suggestion of a function name.

> > +             }
> > +
> > +             INIT_LIST_HEAD(&ce->signals);
> 
> Hm because list_add and not list_move you can't assert all have been 
> unlinked.

Which is the point: we don't want to have to repeatedly do the same
unlinks when we can do them en masse.
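
For comparison, the per-request alternative would look something like
this sketch (ignoring the __dma_fence_signal() skip for brevity):

	list_for_each_entry_safe(rq, next, &ce->signals, signal_link)
		list_move_tail(&rq->signal_link, &b->signaled_requests);
	GEM_BUG_ON(!list_empty(&ce->signals));

i.e. one list_del per request just to earn the assert, versus leaving
the entries linked during the walk and wiping the whole list once with
INIT_LIST_HEAD(&ce->signals).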

> > +             list_del_init(&ce->signal_link);
> > +
> > +             irq_work_queue(&b->irq_work);
> > +     }
> > +     spin_unlock_irqrestore(&b->irq_lock, flags);
> > +}
> > +
> >   void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
> >   {
> >   }
> > diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
> > index cb789c8bf06b..45418f887953 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_engine.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_engine.h
> > @@ -238,6 +238,9 @@ intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
> >   void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
> >   void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
> >   
> > +void intel_engine_transfer_breadcrumbs(struct intel_engine_cs *engine,
> > +                                    struct intel_context *ce);
> > +
> >   void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
> >                                   struct drm_printer *p);
> >   
> > diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> > index c113b7805e65..e20b39eefd79 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> > @@ -377,6 +377,8 @@ struct intel_engine_cs {
> >               spinlock_t irq_lock;
> >               struct list_head signalers;
> >   
> > +             struct list_head signaled_requests;
> > +
> >               struct irq_work irq_work; /* for use from inside irq_lock */
> >   
> >               unsigned int irq_enabled;
> > diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> > index 15716e4d6b76..ac32d494b07d 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> > @@ -1821,30 +1821,10 @@ static bool virtual_matches(const struct virtual_engine *ve,
> >       return true;
> >   }
> >   
> > -static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
> > -                                  struct i915_request *rq)
> > +static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
> >   {
> > -     struct intel_engine_cs *old = ve->siblings[0];
> > -
> >       /* All unattached (rq->engine == old) must already be completed */
> 
> This comment feels a bit out of place now.

It's still true, just phrasing is hard.

> > -
> > -     spin_lock(&old->breadcrumbs.irq_lock);
> > -     if (!list_empty(&ve->context.signal_link)) {
> > -             list_del_init(&ve->context.signal_link);
> > -
> > -             /*
> > -              * We cannot acquire the new engine->breadcrumbs.irq_lock
> > -              * (as we are holding a breadcrumbs.irq_lock already),
> > -              * so attach this request to the signaler on submission.
> > -              * The queued irq_work will occur when we finally drop
> > -              * the engine->active.lock after dequeue.
> > -              */
> > -             set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
> > -
> > -             /* Also transfer the pending irq_work for the old breadcrumb. */
> > -             intel_engine_signal_breadcrumbs(rq->engine);
> > -     }
> > -     spin_unlock(&old->breadcrumbs.irq_lock);
> > +     intel_engine_transfer_breadcrumbs(ve->siblings[0], &ve->context);
> 
> But isn't ve->siblings[0] the old engine at this point so new target 
> engine would have to be explicitly passed in?

ve->siblings[0] is the old engine, which is holding the completed
requests and their signals. Since their rq->engine == ve->siblings[0]
and we can't update rq->engine as we can't take the required locks, we
need to keep the breadcrumbs relative to ve->siblings[0] and not the new
engine (the i915_request_cancel_breadcrumb conundrum).
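
The conundrum being that the cancel path derives its lock from
rq->engine, roughly (from memory, so only a sketch):

	struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;

	spin_lock(&b->irq_lock);
	if (test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
		list_del(&rq->signal_link);
	spin_unlock(&b->irq_lock);

so whichever signal list the request sits on must always be the one
guarded by its own engine's irq_lock.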
-Chris

* Re: [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
  2020-05-12 15:52   ` Chris Wilson
@ 2020-05-12 16:07     ` Tvrtko Ursulin
  2020-05-12 16:20       ` Chris Wilson
  0 siblings, 1 reply; 13+ messages in thread
From: Tvrtko Ursulin @ 2020-05-12 16:07 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 12/05/2020 16:52, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2020-05-12 16:17:30)
>>
>> On 12/05/2020 14:22, Chris Wilson wrote:
>>> The second try at staging the transfer of the breadcrumb. In part one,
>>> we realised we could not simply move to the second engine as we were
>>> only holding the breadcrumb lock on the first. So in commit 6c81e21a4742
>>> ("drm/i915/gt: Stage the transfer of the virtual breadcrumb"), we
>>> removed it from the first engine and marked up this request to reattach
>>> the signaling on the new engine. However, this failed to take into
>>> account that we only attach the breadcrumb if the new request is added
>>> at the start of the queue; but if we are transferring, it is because
>>> we know there is already a request to be signaled (and hence we would
>>> not be attached).
>>>
>>> In this attempt, we try to transfer the completed requests to the
>>> irq_worker on its rq->engine->breadcrumbs. This preserves the coupling
>>> between the rq and its breadcrumbs, so that
>>> i915_request_cancel_breadcrumb() does not attempt to manipulate the list
>>> under the wrong lock.
>>>
>>> Fixes: 6c81e21a4742 ("drm/i915/gt: Stage the transfer of the virtual breadcrumb")
>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>> ---
>>> v2: rewrite from scratch with a new idea
>>> ---
>>>    drivers/gpu/drm/i915/gt/intel_breadcrumbs.c  | 33 ++++++++++++++++++++
>>>    drivers/gpu/drm/i915/gt/intel_engine.h       |  3 ++
>>>    drivers/gpu/drm/i915/gt/intel_engine_types.h |  2 ++
>>>    drivers/gpu/drm/i915/gt/intel_lrc.c          | 26 ++-------------
>>>    4 files changed, 41 insertions(+), 23 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
>>> index cbedba857d43..e09dc162b508 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
>>> +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
>>> @@ -155,6 +155,8 @@ static void signal_irq_work(struct irq_work *work)
>>>        if (b->irq_armed && list_empty(&b->signalers))
>>>                __intel_breadcrumbs_disarm_irq(b);
>>>    
>>> +     list_splice_init(&b->signaled_requests, &signal);
>>> +
>>>        list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
>>>                GEM_BUG_ON(list_empty(&ce->signals));
>>>    
>>> @@ -255,6 +257,7 @@ void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
>>>    
>>>        spin_lock_init(&b->irq_lock);
>>>        INIT_LIST_HEAD(&b->signalers);
>>> +     INIT_LIST_HEAD(&b->signaled_requests);
>>>    
>>>        init_irq_work(&b->irq_work, signal_irq_work);
>>>    }
>>> @@ -274,6 +277,36 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
>>>        spin_unlock_irqrestore(&b->irq_lock, flags);
>>>    }
>>>    
>>> +void intel_engine_transfer_breadcrumbs(struct intel_engine_cs *engine,
>>> +                                    struct intel_context *ce)
>>> +{
>>> +     struct intel_breadcrumbs *b = &engine->breadcrumbs;
>>> +     unsigned long flags;
>>> +
>>> +     spin_lock_irqsave(&b->irq_lock, flags);
>>> +     if (!list_empty(&ce->signals)) {
>>> +             struct i915_request *rq, *next;
>>> +
>>> +             list_for_each_entry_safe(rq, next, &ce->signals, signal_link) {
>>> +                     GEM_BUG_ON(rq->engine != engine);
>>> +                     GEM_BUG_ON(!i915_request_completed(rq));
>>
>> Do you remember why the breadcrumbs code uses local __request_completed
>> helper?
> 
> It knows the hwsp isn't going to disappear; we know the same here, it's
> just that this code was first written in intel_lrc.c.
> 
> Fwiw, the rcu_read_lock() we have in i915_request_completed() is one of
> our worst lockdep hotspots
> 
>>   From here vvv
>>
>>> +
>>> +                     clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
>>> +                     if (!__dma_fence_signal(&rq->fence))
>>> +                             continue;
>>> +
>>> +                     i915_request_get(rq);
>>> +                     list_add_tail(&rq->signal_link, &b->signaled_requests);
>>
>> ^^^ to here looks like a block which could be shared with signal_irq_work.
> 
> And not even a suggestion of a function name.

I expected you'd say it's not worth the hassle so I did not bother. :)

> 
>>> +             }
>>> +
>>> +             INIT_LIST_HEAD(&ce->signals);
>>
>> Hm because list_add and not list_move you can't assert all have been
>> unlinked.
> 
> Which is the point: we don't want to have to repeatedly do the same
> unlinks when we can do them en masse.

I know but being sure of the state would be reassuring. There is one 
skip in the loop before list_add_tail.

> 
>>> +             list_del_init(&ce->signal_link);
>>> +
>>> +             irq_work_queue(&b->irq_work);
>>> +     }
>>> +     spin_unlock_irqrestore(&b->irq_lock, flags);
>>> +}
>>> +
>>>    void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
>>>    {
>>>    }
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
>>> index cb789c8bf06b..45418f887953 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_engine.h
>>> +++ b/drivers/gpu/drm/i915/gt/intel_engine.h
>>> @@ -238,6 +238,9 @@ intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
>>>    void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
>>>    void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
>>>    
>>> +void intel_engine_transfer_breadcrumbs(struct intel_engine_cs *engine,
>>> +                                    struct intel_context *ce);
>>> +
>>>    void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
>>>                                    struct drm_printer *p);
>>>    
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
>>> index c113b7805e65..e20b39eefd79 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
>>> +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
>>> @@ -377,6 +377,8 @@ struct intel_engine_cs {
>>>                spinlock_t irq_lock;
>>>                struct list_head signalers;
>>>    
>>> +             struct list_head signaled_requests;
>>> +
>>>                struct irq_work irq_work; /* for use from inside irq_lock */
>>>    
>>>                unsigned int irq_enabled;
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
>>> index 15716e4d6b76..ac32d494b07d 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
>>> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
>>> @@ -1821,30 +1821,10 @@ static bool virtual_matches(const struct virtual_engine *ve,
>>>        return true;
>>>    }
>>>    
>>> -static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
>>> -                                  struct i915_request *rq)
>>> +static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
>>>    {
>>> -     struct intel_engine_cs *old = ve->siblings[0];
>>> -
>>>        /* All unattached (rq->engine == old) must already be completed */
>>
>> This comment feels a bit out of place now.
> 
> It's still true, just phrasing is hard.
> 
>>> -
>>> -     spin_lock(&old->breadcrumbs.irq_lock);
>>> -     if (!list_empty(&ve->context.signal_link)) {
>>> -             list_del_init(&ve->context.signal_link);
>>> -
>>> -             /*
>>> -              * We cannot acquire the new engine->breadcrumbs.irq_lock
>>> -              * (as we are holding a breadcrumbs.irq_lock already),
>>> -              * so attach this request to the signaler on submission.
>>> -              * The queued irq_work will occur when we finally drop
>>> -              * the engine->active.lock after dequeue.
>>> -              */
>>> -             set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
>>> -
>>> -             /* Also transfer the pending irq_work for the old breadcrumb. */
>>> -             intel_engine_signal_breadcrumbs(rq->engine);
>>> -     }
>>> -     spin_unlock(&old->breadcrumbs.irq_lock);
>>> +     intel_engine_transfer_breadcrumbs(ve->siblings[0], &ve->context);
>>
>> But isn't ve->siblings[0] the old engine at this point so new target
>> engine would have to be explicitly passed in?
> 
> ve->siblings[0] is the old engine, which is holding the completed
> requests and their signals. Since their rq->engine == ve->siblings[0]
> and we can't update rq->engine as we can't take the required locks, we
> need to keep the breadcrumbs relative to ve->siblings[0] and not the new
> engine (the i915_request_cancel_breadcrumb conundrum).

Who then enables breadcrumbs on the new engine?

Regards,

Tvrtko

* Re: [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
  2020-05-12 16:07     ` Tvrtko Ursulin
@ 2020-05-12 16:20       ` Chris Wilson
  2020-05-13  8:10         ` Tvrtko Ursulin
  0 siblings, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2020-05-12 16:20 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2020-05-12 17:07:23)
> 
> On 12/05/2020 16:52, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2020-05-12 16:17:30)
> >>
> >> On 12/05/2020 14:22, Chris Wilson wrote:
> >>> -     spin_lock(&old->breadcrumbs.irq_lock);
> >>> -     if (!list_empty(&ve->context.signal_link)) {
> >>> -             list_del_init(&ve->context.signal_link);
> >>> -
> >>> -             /*
> >>> -              * We cannot acquire the new engine->breadcrumbs.irq_lock
> >>> -              * (as we are holding a breadcrumbs.irq_lock already),
> >>> -              * so attach this request to the signaler on submission.
> >>> -              * The queued irq_work will occur when we finally drop
> >>> -              * the engine->active.lock after dequeue.
> >>> -              */
> >>> -             set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
> >>> -
> >>> -             /* Also transfer the pending irq_work for the old breadcrumb. */
> >>> -             intel_engine_signal_breadcrumbs(rq->engine);
> >>> -     }
> >>> -     spin_unlock(&old->breadcrumbs.irq_lock);
> >>> +     intel_engine_transfer_breadcrumbs(ve->siblings[0], &ve->context);
> >>
> >> But isn't ve->siblings[0] the old engine at this point so new target
> >> engine would have to be explicitly passed in?
> > 
> > ve->siblings[0] is the old engine, which is holding the completed
> > requests and their signals. Since their rq->engine == ve->siblings[0]
> > and we can't update rq->engine as we can't take the required locks, we
> > need to keep the breadcrumbs relative to ve->siblings[0] and not the new
> > engine (the i915_request_cancel_breadcrumb conundrum).
> 
> Who then enables breadcrumbs on the new engine?

If we enable signaling on the request we are about to submit, that
will enable the breadcrumbs on the new engine. We've cleared out
ce->signals and removed it from the old engine, so as soon as we want to
enable a new breadcrumb, it will be attached to the [new] rq->engine.
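
That is the existing path on submission, which from memory looks
something like this in __i915_request_submit() (sketch, not verbatim):

	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
	    !i915_request_enable_breadcrumb(request))
		intel_engine_signal_breadcrumbs(engine);

and by the time it runs, rq->engine points at the sibling we are
submitting to, so the breadcrumb is attached under the new engine's
irq_lock.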
-Chris

* [Intel-gfx] [PATCH v3] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
  2020-05-12 13:22 [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker Chris Wilson
  2020-05-12 14:23 ` [Intel-gfx] ✓ Fi.CI.BAT: success for " Patchwork
  2020-05-12 15:17 ` [Intel-gfx] [PATCH v2] " Tvrtko Ursulin
@ 2020-05-12 16:50 ` Chris Wilson
  2020-05-12 17:30 ` [Intel-gfx] ✓ Fi.CI.IGT: success for " Patchwork
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2020-05-12 16:50 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris Wilson

The second try at staging the transfer of the breadcrumb. In part one,
we realised we could not simply move to the second engine as we were
only holding the breadcrumb lock on the first. So in commit 6c81e21a4742
("drm/i915/gt: Stage the transfer of the virtual breadcrumb"), we
removed it from the first engine and marked up this request to reattach
the signaling on the new engine. However, this failed to take into
account that we only attach the breadcrumb if the new request is added
at the start of the queue; but if we are transferring, it is because
we know there is already a request to be signaled (and hence we would
not be attached).

In this attempt, we try to transfer the completed requests to the
irq_worker on its rq->engine->breadcrumbs. This preserves the coupling
between the rq and its breadcrumbs, so that
i915_request_cancel_breadcrumb() does not attempt to manipulate the list
under the wrong lock.

v2: Code sharing is fun.

Fixes: 6c81e21a4742 ("drm/i915/gt: Stage the transfer of the virtual breadcrumb")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c  | 55 +++++++++++++++-----
 drivers/gpu/drm/i915/gt/intel_engine.h       |  3 ++
 drivers/gpu/drm/i915/gt/intel_engine_types.h |  2 +
 drivers/gpu/drm/i915/gt/intel_lrc.c          | 34 ++++--------
 4 files changed, 58 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index cbedba857d43..1fbffacc2eaa 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -142,6 +142,18 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
 	intel_engine_add_retire(engine, tl);
 }
 
+static void __signal_request(struct i915_request *rq, struct list_head *signals)
+{
+	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+	clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+	if (!__dma_fence_signal(&rq->fence))
+		return;
+
+	i915_request_get(rq);
+	list_add_tail(&rq->signal_link, signals);
+}
+
 static void signal_irq_work(struct irq_work *work)
 {
 	struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
@@ -155,6 +167,8 @@ static void signal_irq_work(struct irq_work *work)
 	if (b->irq_armed && list_empty(&b->signalers))
 		__intel_breadcrumbs_disarm_irq(b);
 
+	list_splice_init(&b->signaled_requests, &signal);
+
 	list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
 		GEM_BUG_ON(list_empty(&ce->signals));
 
@@ -164,23 +178,13 @@ static void signal_irq_work(struct irq_work *work)
 
 			GEM_BUG_ON(!check_signal_order(ce, rq));
 
-			if (!__request_completed(rq))
-				break;
-
-			GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
-					     &rq->fence.flags));
-			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
-			if (!__dma_fence_signal(&rq->fence))
-				continue;
-
 			/*
 			 * Queue for execution after dropping the signaling
 			 * spinlock as the callback chain may end up adding
 			 * more signalers to the same context or engine.
 			 */
-			i915_request_get(rq);
-			list_add_tail(&rq->signal_link, &signal);
+			if (__request_completed(rq))
+				__signal_request(rq, &signal);
 		}
 
 		/*
@@ -255,6 +259,7 @@ void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
 
 	spin_lock_init(&b->irq_lock);
 	INIT_LIST_HEAD(&b->signalers);
+	INIT_LIST_HEAD(&b->signaled_requests);
 
 	init_irq_work(&b->irq_work, signal_irq_work);
 }
@@ -274,6 +279,32 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 	spin_unlock_irqrestore(&b->irq_lock, flags);
 }
 
+void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
+					     struct intel_context *ce)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+	unsigned long flags;
+
+	spin_lock_irqsave(&b->irq_lock, flags);
+	if (!list_empty(&ce->signals)) {
+		struct i915_request *rq, *next;
+
+		/* Queue for executing the signal callbacks in the irq_work */
+		list_for_each_entry_safe(rq, next, &ce->signals, signal_link) {
+			GEM_BUG_ON(rq->engine != engine);
+			GEM_BUG_ON(!__request_completed(rq));
+
+			__signal_request(rq, &b->signaled_requests);
+		}
+
+		INIT_LIST_HEAD(&ce->signals);
+		list_del_init(&ce->signal_link);
+
+		irq_work_queue(&b->irq_work);
+	}
+	spin_unlock_irqrestore(&b->irq_lock, flags);
+}
+
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 {
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index cb789c8bf06b..9bf6d4989968 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -238,6 +238,9 @@ intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 
+void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
+					     struct intel_context *ce);
+
 void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
 				    struct drm_printer *p);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index c113b7805e65..e20b39eefd79 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -377,6 +377,8 @@ struct intel_engine_cs {
 		spinlock_t irq_lock;
 		struct list_head signalers;
 
+		struct list_head signaled_requests;
+
 		struct irq_work irq_work; /* for use from inside irq_lock */
 
 		unsigned int irq_enabled;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 15716e4d6b76..3d0e0894c015 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1821,30 +1821,16 @@ static bool virtual_matches(const struct virtual_engine *ve,
 	return true;
 }
 
-static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
-				     struct i915_request *rq)
+static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
 {
-	struct intel_engine_cs *old = ve->siblings[0];
-
-	/* All unattached (rq->engine == old) must already be completed */
-
-	spin_lock(&old->breadcrumbs.irq_lock);
-	if (!list_empty(&ve->context.signal_link)) {
-		list_del_init(&ve->context.signal_link);
-
-		/*
-		 * We cannot acquire the new engine->breadcrumbs.irq_lock
-		 * (as we are holding a breadcrumbs.irq_lock already),
-		 * so attach this request to the signaler on submission.
-		 * The queued irq_work will occur when we finally drop
-		 * the engine->active.lock after dequeue.
-		 */
-		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
-
-		/* Also transfer the pending irq_work for the old breadcrumb. */
-		intel_engine_signal_breadcrumbs(rq->engine);
-	}
-	spin_unlock(&old->breadcrumbs.irq_lock);
+	/*
+	 * All the outstanding signals on ve->siblings[0] must have
+	 * been completed, just pending the interrupt handler. As those
+	 * signals still refer to the old sibling (via rq->engine), we must
+	 * transfer those to the old irq_worker to keep our locking
+	 * consistent.
+	 */
+	intel_engine_transfer_stale_breadcrumbs(ve->siblings[0], &ve->context);
 }
 
 #define for_each_waiter(p__, rq__) \
@@ -2279,7 +2265,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 									engine);
 
 				if (!list_empty(&ve->context.signals))
-					virtual_xfer_breadcrumbs(ve, rq);
+					virtual_xfer_breadcrumbs(ve);
 
 				/*
 				 * Move the bound engine to the top of the list
-- 
2.20.1


* [Intel-gfx] ✓ Fi.CI.IGT: success for drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
  2020-05-12 13:22 [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker Chris Wilson
                   ` (2 preceding siblings ...)
  2020-05-12 16:50 ` [Intel-gfx] [PATCH v3] " Chris Wilson
@ 2020-05-12 17:30 ` Patchwork
  2020-05-12 17:36 ` [Intel-gfx] ✗ Fi.CI.BAT: failure for drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker (rev2) Patchwork
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2020-05-12 17:30 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
URL   : https://patchwork.freedesktop.org/series/77191/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_8470_full -> Patchwork_17633_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_17633_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_softpin@noreloc-s3:
    - shard-apl:          [PASS][1] -> [DMESG-WARN][2] ([i915#180]) +1 similar issue
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-apl3/igt@gem_softpin@noreloc-s3.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-apl6/igt@gem_softpin@noreloc-s3.html

  * igt@i915_pm_dc@dc6-psr:
    - shard-iclb:         [PASS][3] -> [FAIL][4] ([i915#454])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-iclb5/igt@i915_pm_dc@dc6-psr.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-iclb4/igt@i915_pm_dc@dc6-psr.html

  * igt@i915_suspend@forcewake:
    - shard-kbl:          [PASS][5] -> [DMESG-WARN][6] ([i915#180]) +1 similar issue
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-kbl3/igt@i915_suspend@forcewake.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-kbl6/igt@i915_suspend@forcewake.html

  * igt@kms_cursor_legacy@pipe-b-torture-bo:
    - shard-tglb:         [PASS][7] -> [DMESG-WARN][8] ([i915#128])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-tglb5/igt@kms_cursor_legacy@pipe-b-torture-bo.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-tglb1/igt@kms_cursor_legacy@pipe-b-torture-bo.html

  * igt@kms_hdr@bpc-switch-dpms:
    - shard-skl:          [PASS][9] -> [FAIL][10] ([i915#1188])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-skl3/igt@kms_hdr@bpc-switch-dpms.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-skl5/igt@kms_hdr@bpc-switch-dpms.html

  * igt@kms_plane_alpha_blend@pipe-c-coverage-7efc:
    - shard-skl:          [PASS][11] -> [FAIL][12] ([fdo#108145] / [i915#265]) +1 similar issue
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-skl3/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-skl10/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html

  * igt@kms_psr2_su@frontbuffer:
    - shard-iclb:         [PASS][13] -> [SKIP][14] ([fdo#109642] / [fdo#111068])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-iclb2/igt@kms_psr2_su@frontbuffer.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-iclb7/igt@kms_psr2_su@frontbuffer.html

  * igt@kms_psr@psr2_cursor_render:
    - shard-iclb:         [PASS][15] -> [SKIP][16] ([fdo#109441])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-iclb2/igt@kms_psr@psr2_cursor_render.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-iclb1/igt@kms_psr@psr2_cursor_render.html

  
#### Possible fixes ####

  * igt@gem_workarounds@suspend-resume-context:
    - shard-kbl:          [DMESG-WARN][17] ([i915#180]) -> [PASS][18]
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-kbl6/igt@gem_workarounds@suspend-resume-context.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-kbl7/igt@gem_workarounds@suspend-resume-context.html

  * igt@gen9_exec_parse@allowed-all:
    - shard-apl:          [DMESG-WARN][19] ([i915#1436] / [i915#716]) -> [PASS][20]
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-apl3/igt@gen9_exec_parse@allowed-all.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-apl8/igt@gen9_exec_parse@allowed-all.html

  * igt@i915_selftest@live@gt_pm:
    - shard-apl:          [INCOMPLETE][21] ([i915#1812]) -> [PASS][22]
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-apl2/igt@i915_selftest@live@gt_pm.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-apl7/igt@i915_selftest@live@gt_pm.html

  * igt@kms_cursor_legacy@pipe-c-torture-bo:
    - shard-hsw:          [DMESG-WARN][23] ([i915#128]) -> [PASS][24]
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-hsw6/igt@kms_cursor_legacy@pipe-c-torture-bo.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-hsw1/igt@kms_cursor_legacy@pipe-c-torture-bo.html

  * {igt@kms_flip@flip-vs-suspend-interruptible@c-dp1}:
    - shard-apl:          [DMESG-WARN][25] ([i915#180]) -> [PASS][26] +6 similar issues
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-apl8/igt@kms_flip@flip-vs-suspend-interruptible@c-dp1.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-apl6/igt@kms_flip@flip-vs-suspend-interruptible@c-dp1.html

  * {igt@kms_flip@plain-flip-ts-check@a-hdmi-a1}:
    - shard-glk:          [FAIL][27] ([i915#34]) -> [PASS][28]
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-glk8/igt@kms_flip@plain-flip-ts-check@a-hdmi-a1.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-glk8/igt@kms_flip@plain-flip-ts-check@a-hdmi-a1.html

  * igt@kms_hdr@bpc-switch-suspend:
    - shard-skl:          [FAIL][29] ([i915#1188]) -> [PASS][30] +1 similar issue
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-skl9/igt@kms_hdr@bpc-switch-suspend.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-skl3/igt@kms_hdr@bpc-switch-suspend.html

  * igt@kms_setmode@basic:
    - shard-hsw:          [FAIL][31] ([i915#31]) -> [PASS][32]
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-hsw1/igt@kms_setmode@basic.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-hsw1/igt@kms_setmode@basic.html

  * {igt@perf@blocking-parameterized}:
    - shard-hsw:          [FAIL][33] ([i915#1542]) -> [PASS][34]
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-hsw8/igt@perf@blocking-parameterized.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-hsw1/igt@perf@blocking-parameterized.html

  
#### Warnings ####

  * igt@i915_pm_rpm@gem-idle:
    - shard-snb:          [INCOMPLETE][35] ([i915#82]) -> [SKIP][36] ([fdo#109271])
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-snb5/igt@i915_pm_rpm@gem-idle.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-snb5/igt@i915_pm_rpm@gem-idle.html

  * igt@kms_content_protection@atomic:
    - shard-apl:          [TIMEOUT][37] ([i915#1319]) -> [FAIL][38] ([fdo#110321] / [fdo#110336])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-apl1/igt@kms_content_protection@atomic.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-apl2/igt@kms_content_protection@atomic.html

  * igt@kms_content_protection@srm:
    - shard-apl:          [TIMEOUT][39] ([i915#1319]) -> [FAIL][40] ([fdo#110321])
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/shard-apl3/igt@kms_content_protection@srm.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/shard-apl4/igt@kms_content_protection@srm.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#109642]: https://bugs.freedesktop.org/show_bug.cgi?id=109642
  [fdo#110321]: https://bugs.freedesktop.org/show_bug.cgi?id=110321
  [fdo#110336]: https://bugs.freedesktop.org/show_bug.cgi?id=110336
  [fdo#111068]: https://bugs.freedesktop.org/show_bug.cgi?id=111068
  [i915#1188]: https://gitlab.freedesktop.org/drm/intel/issues/1188
  [i915#128]: https://gitlab.freedesktop.org/drm/intel/issues/128
  [i915#1319]: https://gitlab.freedesktop.org/drm/intel/issues/1319
  [i915#1436]: https://gitlab.freedesktop.org/drm/intel/issues/1436
  [i915#1542]: https://gitlab.freedesktop.org/drm/intel/issues/1542
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#1812]: https://gitlab.freedesktop.org/drm/intel/issues/1812
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#31]: https://gitlab.freedesktop.org/drm/intel/issues/31
  [i915#34]: https://gitlab.freedesktop.org/drm/intel/issues/34
  [i915#454]: https://gitlab.freedesktop.org/drm/intel/issues/454
  [i915#716]: https://gitlab.freedesktop.org/drm/intel/issues/716
  [i915#82]: https://gitlab.freedesktop.org/drm/intel/issues/82


Participating hosts (11 -> 11)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_8470 -> Patchwork_17633

  CI-20190529: 20190529
  CI_DRM_8470: d2c5ae86eac811c49f2fe22c4fa02b6e6d31cd67 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5651: e54e2642f1967ca3c488db32264607df670d1dfb @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_17633: d0a8dd622ee47a4a2010d9166902c5638e362c14 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17633/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [Intel-gfx] ✗ Fi.CI.BAT: failure for drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker (rev2)
  2020-05-12 13:22 [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker Chris Wilson
                   ` (3 preceding siblings ...)
  2020-05-12 17:30 ` [Intel-gfx] ✓ Fi.CI.IGT: success for " Patchwork
@ 2020-05-12 17:36 ` Patchwork
  2020-05-12 18:16 ` [Intel-gfx] [PATCH v4] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker Chris Wilson
  2020-05-12 19:33 ` [Intel-gfx] ✗ Fi.CI.BAT: failure for drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker (rev3) Patchwork
  6 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2020-05-12 17:36 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker (rev2)
URL   : https://patchwork.freedesktop.org/series/77191/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_8470 -> Patchwork_17634
========================================================

Summary
-------

  **FAILURE**

  Serious unknown changes introduced in Patchwork_17634 need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_17634, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/index.html

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_17634:

### IGT changes ###

#### Possible regressions ####

  * igt@gem_ctx_create@basic-files:
    - fi-hsw-4770:        [PASS][1] -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-hsw-4770/igt@gem_ctx_create@basic-files.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-hsw-4770/igt@gem_ctx_create@basic-files.html

  * igt@gem_exec_fence@basic-busy@vcs0:
    - fi-ivb-3770:        [PASS][3] -> [DMESG-WARN][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-ivb-3770/igt@gem_exec_fence@basic-busy@vcs0.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-ivb-3770/igt@gem_exec_fence@basic-busy@vcs0.html
    - fi-elk-e7500:       [PASS][5] -> [DMESG-WARN][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-elk-e7500/igt@gem_exec_fence@basic-busy@vcs0.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-elk-e7500/igt@gem_exec_fence@basic-busy@vcs0.html
    - fi-ilk-650:         [PASS][7] -> [DMESG-WARN][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-ilk-650/igt@gem_exec_fence@basic-busy@vcs0.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-ilk-650/igt@gem_exec_fence@basic-busy@vcs0.html

  * igt@gem_exec_fence@basic-busy@vecs0:
    - fi-apl-guc:         [PASS][9] -> [DMESG-WARN][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-apl-guc/igt@gem_exec_fence@basic-busy@vecs0.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-apl-guc/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-bxt-dsi:         [PASS][11] -> [DMESG-WARN][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-bxt-dsi/igt@gem_exec_fence@basic-busy@vecs0.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-bxt-dsi/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-cml-u2:          [PASS][13] -> [DMESG-WARN][14]
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-cml-u2/igt@gem_exec_fence@basic-busy@vecs0.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-cml-u2/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-skl-6700k2:      [PASS][15] -> [DMESG-WARN][16]
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-skl-6700k2/igt@gem_exec_fence@basic-busy@vecs0.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-skl-6700k2/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-cfl-8700k:       [PASS][17] -> [DMESG-WARN][18]
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-cfl-8700k/igt@gem_exec_fence@basic-busy@vecs0.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-cfl-8700k/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-icl-guc:         [PASS][19] -> [DMESG-WARN][20]
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-icl-guc/igt@gem_exec_fence@basic-busy@vecs0.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-icl-guc/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-bsw-n3050:       [PASS][21] -> [DMESG-WARN][22]
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-bsw-n3050/igt@gem_exec_fence@basic-busy@vecs0.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-bsw-n3050/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-skl-lmem:        [PASS][23] -> [DMESG-WARN][24]
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-skl-lmem/igt@gem_exec_fence@basic-busy@vecs0.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-skl-lmem/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-kbl-soraka:      [PASS][25] -> [DMESG-WARN][26]
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-kbl-soraka/igt@gem_exec_fence@basic-busy@vecs0.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-soraka/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-cml-s:           [PASS][27] -> [DMESG-WARN][28]
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-cml-s/igt@gem_exec_fence@basic-busy@vecs0.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-cml-s/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-kbl-x1275:       [PASS][29] -> [DMESG-WARN][30]
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-kbl-x1275/igt@gem_exec_fence@basic-busy@vecs0.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-x1275/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-cfl-guc:         [PASS][31] -> [DMESG-WARN][32]
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-cfl-guc/igt@gem_exec_fence@basic-busy@vecs0.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-cfl-guc/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-bsw-kefka:       [PASS][33] -> [DMESG-WARN][34]
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-bsw-kefka/igt@gem_exec_fence@basic-busy@vecs0.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-bsw-kefka/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-glk-dsi:         [PASS][35] -> [DMESG-WARN][36]
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-glk-dsi/igt@gem_exec_fence@basic-busy@vecs0.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-glk-dsi/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-kbl-8809g:       [PASS][37] -> [DMESG-WARN][38]
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-kbl-8809g/igt@gem_exec_fence@basic-busy@vecs0.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-8809g/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-kbl-r:           [PASS][39] -> [DMESG-WARN][40]
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-kbl-r/igt@gem_exec_fence@basic-busy@vecs0.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-r/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-cfl-8109u:       [PASS][41] -> [DMESG-WARN][42]
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-cfl-8109u/igt@gem_exec_fence@basic-busy@vecs0.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-cfl-8109u/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-icl-y:           [PASS][43] -> [DMESG-WARN][44]
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-icl-y/igt@gem_exec_fence@basic-busy@vecs0.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-icl-y/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-kbl-guc:         [PASS][45] -> [DMESG-WARN][46]
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-kbl-guc/igt@gem_exec_fence@basic-busy@vecs0.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-guc/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-bsw-nick:        [PASS][47] -> [DMESG-WARN][48]
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-bsw-nick/igt@gem_exec_fence@basic-busy@vecs0.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-bsw-nick/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-bdw-5557u:       [PASS][49] -> [DMESG-WARN][50]
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-bdw-5557u/igt@gem_exec_fence@basic-busy@vecs0.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-bdw-5557u/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-kbl-7500u:       [PASS][51] -> [DMESG-WARN][52]
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-kbl-7500u/igt@gem_exec_fence@basic-busy@vecs0.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-7500u/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-whl-u:           [PASS][53] -> [DMESG-WARN][54]
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-whl-u/igt@gem_exec_fence@basic-busy@vecs0.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-whl-u/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-skl-6600u:       [PASS][55] -> [DMESG-WARN][56]
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-skl-6600u/igt@gem_exec_fence@basic-busy@vecs0.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-skl-6600u/igt@gem_exec_fence@basic-busy@vecs0.html
    - fi-skl-guc:         [PASS][57] -> [DMESG-WARN][58]
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-skl-guc/igt@gem_exec_fence@basic-busy@vecs0.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-skl-guc/igt@gem_exec_fence@basic-busy@vecs0.html

  * igt@gem_exec_fence@basic-wait@vcs0:
    - fi-byt-n2820:       [PASS][59] -> [DMESG-WARN][60]
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-byt-n2820/igt@gem_exec_fence@basic-wait@vcs0.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-byt-n2820/igt@gem_exec_fence@basic-wait@vcs0.html
    - fi-byt-j1900:       [PASS][61] -> [DMESG-WARN][62]
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-byt-j1900/igt@gem_exec_fence@basic-wait@vcs0.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-byt-j1900/igt@gem_exec_fence@basic-wait@vcs0.html

  * igt@gem_exec_fence@basic-wait@vecs0:
    - fi-tgl-y:           [PASS][63] -> [DMESG-WARN][64]
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-tgl-y/igt@gem_exec_fence@basic-wait@vecs0.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-tgl-y/igt@gem_exec_fence@basic-wait@vecs0.html

  * igt@gem_render_linear_blits@basic:
    - fi-blb-e6850:       [PASS][65] -> [INCOMPLETE][66]
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-blb-e6850/igt@gem_render_linear_blits@basic.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-blb-e6850/igt@gem_render_linear_blits@basic.html
    - fi-snb-2520m:       [PASS][67] -> [INCOMPLETE][68]
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-snb-2520m/igt@gem_render_linear_blits@basic.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-snb-2520m/igt@gem_render_linear_blits@basic.html

  * igt@runner@aborted:
    - fi-ilk-650:         NOTRUN -> [FAIL][69]
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-ilk-650/igt@runner@aborted.html
    - fi-kbl-x1275:       NOTRUN -> [FAIL][70]
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-x1275/igt@runner@aborted.html
    - fi-bsw-kefka:       NOTRUN -> [FAIL][71]
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-bsw-kefka/igt@runner@aborted.html
    - fi-cfl-8700k:       NOTRUN -> [FAIL][72]
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-cfl-8700k/igt@runner@aborted.html
    - fi-tgl-y:           NOTRUN -> [FAIL][73]
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-tgl-y/igt@runner@aborted.html
    - fi-cfl-8109u:       NOTRUN -> [FAIL][74]
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-cfl-8109u/igt@runner@aborted.html
    - fi-gdg-551:         NOTRUN -> [FAIL][75]
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-gdg-551/igt@runner@aborted.html
    - fi-bsw-nick:        NOTRUN -> [FAIL][76]
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-bsw-nick/igt@runner@aborted.html
    - fi-kbl-8809g:       NOTRUN -> [FAIL][77]
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-8809g/igt@runner@aborted.html
    - fi-snb-2520m:       NOTRUN -> [FAIL][78]
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-snb-2520m/igt@runner@aborted.html
    - fi-apl-guc:         NOTRUN -> [FAIL][79]
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-apl-guc/igt@runner@aborted.html
    - fi-kbl-r:           NOTRUN -> [FAIL][80]
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-r/igt@runner@aborted.html
    - fi-bwr-2160:        NOTRUN -> [FAIL][81]
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-bwr-2160/igt@runner@aborted.html
    - fi-byt-n2820:       NOTRUN -> [FAIL][82]
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-byt-n2820/igt@runner@aborted.html
    - fi-icl-guc:         NOTRUN -> [FAIL][83]
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-icl-guc/igt@runner@aborted.html
    - fi-kbl-soraka:      NOTRUN -> [FAIL][84]
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-soraka/igt@runner@aborted.html
    - fi-hsw-4770:        NOTRUN -> [FAIL][85]
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-hsw-4770/igt@runner@aborted.html
    - fi-kbl-7500u:       NOTRUN -> [FAIL][86]
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-7500u/igt@runner@aborted.html
    - fi-kbl-guc:         NOTRUN -> [FAIL][87]
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-guc/igt@runner@aborted.html
    - fi-snb-2600:        NOTRUN -> [FAIL][88]
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-snb-2600/igt@runner@aborted.html
    - fi-whl-u:           NOTRUN -> [FAIL][89]
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-whl-u/igt@runner@aborted.html
    - fi-cml-u2:          NOTRUN -> [FAIL][90]
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-cml-u2/igt@runner@aborted.html
    - fi-ivb-3770:        NOTRUN -> [FAIL][91]
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-ivb-3770/igt@runner@aborted.html
    - fi-bxt-dsi:         NOTRUN -> [FAIL][92]
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-bxt-dsi/igt@runner@aborted.html
    - fi-byt-j1900:       NOTRUN -> [FAIL][93]
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-byt-j1900/igt@runner@aborted.html
    - fi-elk-e7500:       NOTRUN -> [FAIL][94]
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-elk-e7500/igt@runner@aborted.html
    - fi-cml-s:           NOTRUN -> [FAIL][95]
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-cml-s/igt@runner@aborted.html
    - fi-cfl-guc:         NOTRUN -> [FAIL][96]
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-cfl-guc/igt@runner@aborted.html
    - fi-icl-y:           NOTRUN -> [FAIL][97]
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-icl-y/igt@runner@aborted.html
    - fi-bsw-n3050:       NOTRUN -> [FAIL][98]
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-bsw-n3050/igt@runner@aborted.html
    - fi-blb-e6850:       NOTRUN -> [FAIL][99]
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-blb-e6850/igt@runner@aborted.html

  
#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * igt@debugfs_test@read_all_entries:
    - fi-icl-u2:          [PASS][100] -> [{ABORT}][101]
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-icl-u2/igt@debugfs_test@read_all_entries.html
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-icl-u2/igt@debugfs_test@read_all_entries.html

  * igt@gem_exec_fence@basic-busy@vecs0:
    - {fi-tgl-u}:         [PASS][102] -> [DMESG-WARN][103]
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-tgl-u/igt@gem_exec_fence@basic-busy@vecs0.html
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-tgl-u/igt@gem_exec_fence@basic-busy@vecs0.html
    - {fi-ehl-1}:         [PASS][104] -> [DMESG-WARN][105]
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-ehl-1/igt@gem_exec_fence@basic-busy@vecs0.html
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-ehl-1/igt@gem_exec_fence@basic-busy@vecs0.html
    - {fi-tgl-dsi}:       [PASS][106] -> [DMESG-WARN][107]
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-tgl-dsi/igt@gem_exec_fence@basic-busy@vecs0.html
   [107]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-tgl-dsi/igt@gem_exec_fence@basic-busy@vecs0.html
    - {fi-kbl-7560u}:     NOTRUN -> [DMESG-WARN][108]
   [108]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-7560u/igt@gem_exec_fence@basic-busy@vecs0.html

  * igt@runner@aborted:
    - {fi-tgl-dsi}:       NOTRUN -> [FAIL][109]
   [109]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-tgl-dsi/igt@runner@aborted.html
    - {fi-ehl-1}:         NOTRUN -> [FAIL][110]
   [110]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-ehl-1/igt@runner@aborted.html
    - {fi-kbl-7560u}:     [FAIL][111] ([i915#1569] / [i915#192] / [i915#193] / [i915#194]) -> [FAIL][112]
   [111]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-kbl-7560u/igt@runner@aborted.html
   [112]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-kbl-7560u/igt@runner@aborted.html
    - {fi-tgl-u}:         NOTRUN -> [FAIL][113]
   [113]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-tgl-u/igt@runner@aborted.html

  
Known issues
------------

  Here are the changes found in Patchwork_17634 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_render_linear_blits@basic:
    - fi-gdg-551:         [PASS][114] -> [INCOMPLETE][115] ([i915#172])
   [114]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-gdg-551/igt@gem_render_linear_blits@basic.html
   [115]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-gdg-551/igt@gem_render_linear_blits@basic.html
    - fi-snb-2600:        [PASS][116] -> [INCOMPLETE][117] ([i915#82])
   [116]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-snb-2600/igt@gem_render_linear_blits@basic.html
   [117]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/fi-snb-2600/igt@gem_render_linear_blits@basic.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [i915#1569]: https://gitlab.freedesktop.org/drm/intel/issues/1569
  [i915#172]: https://gitlab.freedesktop.org/drm/intel/issues/172
  [i915#192]: https://gitlab.freedesktop.org/drm/intel/issues/192
  [i915#193]: https://gitlab.freedesktop.org/drm/intel/issues/193
  [i915#194]: https://gitlab.freedesktop.org/drm/intel/issues/194
  [i915#82]: https://gitlab.freedesktop.org/drm/intel/issues/82


Participating hosts (49 -> 44)
------------------------------

  Missing    (5): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-byt-clapper 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_8470 -> Patchwork_17634

  CI-20190529: 20190529
  CI_DRM_8470: d2c5ae86eac811c49f2fe22c4fa02b6e6d31cd67 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5651: e54e2642f1967ca3c488db32264607df670d1dfb @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_17634: 9a1d8bec880b77f7c39805ddcd1886266fdd1c0a @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

9a1d8bec880b drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17634/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [Intel-gfx] [PATCH v4] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
  2020-05-12 13:22 [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker Chris Wilson
                   ` (4 preceding siblings ...)
  2020-05-12 17:36 ` [Intel-gfx] ✗ Fi.CI.BAT: failure for drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker (rev2) Patchwork
@ 2020-05-12 18:16 ` Chris Wilson
  2020-05-12 19:33 ` [Intel-gfx] ✗ Fi.CI.BAT: failure for drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker (rev3) Patchwork
  6 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2020-05-12 18:16 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris Wilson

The second try at staging the transfer of the breadcrumb. In part one,
we realised we could not simply move to the second engine as we were
only holding the breadcrumb lock on the first. So in commit 6c81e21a4742
("drm/i915/gt: Stage the transfer of the virtual breadcrumb"), we
removed it from the first engine and marked up this request to reattach
the signaling on the new engine. However, this failed to take into
account that we only attach the breadcrumb if the new request is added
at the start of the queue, which if we are transferring, it is because
we know there to be a request to be signaled (and hence we would not be
attached).

In this attempt, we try to transfer the completed requests to the
irq_worker on its rq->engine->breadcrumbs. This preserves the coupling
between the rq and its breadcrumbs, so that
i915_request_cancel_breadcrumb() does not attempt to manipulate the list
under the wrong lock.

v2: Code sharing is fun.

Fixes: 6c81e21a4742 ("drm/i915/gt: Stage the transfer of the virtual breadcrumb")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c  | 52 ++++++++++++++++----
 drivers/gpu/drm/i915/gt/intel_engine.h       |  3 ++
 drivers/gpu/drm/i915/gt/intel_engine_types.h |  2 +
 drivers/gpu/drm/i915/gt/intel_lrc.c          | 34 ++++---------
 4 files changed, 57 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index cbedba857d43..d907d538176e 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -142,6 +142,18 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
 	intel_engine_add_retire(engine, tl);
 }
 
+static void __signal_request(struct i915_request *rq, struct list_head *signals)
+{
+	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
+	clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+
+	if (!__dma_fence_signal(&rq->fence))
+		return;
+
+	i915_request_get(rq);
+	list_add_tail(&rq->signal_link, signals);
+}
+
 static void signal_irq_work(struct irq_work *work)
 {
 	struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
@@ -155,6 +167,8 @@ static void signal_irq_work(struct irq_work *work)
 	if (b->irq_armed && list_empty(&b->signalers))
 		__intel_breadcrumbs_disarm_irq(b);
 
+	list_splice_init(&b->signaled_requests, &signal);
+
 	list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
 		GEM_BUG_ON(list_empty(&ce->signals));
 
@@ -163,24 +177,15 @@ static void signal_irq_work(struct irq_work *work)
 				list_entry(pos, typeof(*rq), signal_link);
 
 			GEM_BUG_ON(!check_signal_order(ce, rq));
-
 			if (!__request_completed(rq))
 				break;
 
-			GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
-					     &rq->fence.flags));
-			clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
-			if (!__dma_fence_signal(&rq->fence))
-				continue;
-
 			/*
 			 * Queue for execution after dropping the signaling
 			 * spinlock as the callback chain may end up adding
 			 * more signalers to the same context or engine.
 			 */
-			i915_request_get(rq);
-			list_add_tail(&rq->signal_link, &signal);
+			__signal_request(rq, &signal);
 		}
 
 		/*
@@ -255,6 +260,7 @@ void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
 
 	spin_lock_init(&b->irq_lock);
 	INIT_LIST_HEAD(&b->signalers);
+	INIT_LIST_HEAD(&b->signaled_requests);
 
 	init_irq_work(&b->irq_work, signal_irq_work);
 }
@@ -274,6 +280,32 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 	spin_unlock_irqrestore(&b->irq_lock, flags);
 }
 
+void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
+					     struct intel_context *ce)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+	unsigned long flags;
+
+	spin_lock_irqsave(&b->irq_lock, flags);
+	if (!list_empty(&ce->signals)) {
+		struct i915_request *rq, *next;
+
+		/* Queue for executing the signal callbacks in the irq_work */
+		list_for_each_entry_safe(rq, next, &ce->signals, signal_link) {
+			GEM_BUG_ON(rq->engine != engine);
+			GEM_BUG_ON(!__request_completed(rq));
+
+			__signal_request(rq, &b->signaled_requests);
+		}
+
+		INIT_LIST_HEAD(&ce->signals);
+		list_del_init(&ce->signal_link);
+
+		irq_work_queue(&b->irq_work);
+	}
+	spin_unlock_irqrestore(&b->irq_lock, flags);
+}
+
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
 {
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index cb789c8bf06b..9bf6d4989968 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -238,6 +238,9 @@ intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 
+void intel_engine_transfer_stale_breadcrumbs(struct intel_engine_cs *engine,
+					     struct intel_context *ce);
+
 void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
 				    struct drm_printer *p);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index c113b7805e65..e20b39eefd79 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -377,6 +377,8 @@ struct intel_engine_cs {
 		spinlock_t irq_lock;
 		struct list_head signalers;
 
+		struct list_head signaled_requests;
+
 		struct irq_work irq_work; /* for use from inside irq_lock */
 
 		unsigned int irq_enabled;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 15716e4d6b76..3d0e0894c015 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1821,30 +1821,16 @@ static bool virtual_matches(const struct virtual_engine *ve,
 	return true;
 }
 
-static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
-				     struct i915_request *rq)
+static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
 {
-	struct intel_engine_cs *old = ve->siblings[0];
-
-	/* All unattached (rq->engine == old) must already be completed */
-
-	spin_lock(&old->breadcrumbs.irq_lock);
-	if (!list_empty(&ve->context.signal_link)) {
-		list_del_init(&ve->context.signal_link);
-
-		/*
-		 * We cannot acquire the new engine->breadcrumbs.irq_lock
-		 * (as we are holding a breadcrumbs.irq_lock already),
-		 * so attach this request to the signaler on submission.
-		 * The queued irq_work will occur when we finally drop
-		 * the engine->active.lock after dequeue.
-		 */
-		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
-
-		/* Also transfer the pending irq_work for the old breadcrumb. */
-		intel_engine_signal_breadcrumbs(rq->engine);
-	}
-	spin_unlock(&old->breadcrumbs.irq_lock);
+	/*
+	 * All the outstanding signals on ve->siblings[0] must have
+	 * been completed, just pending the interrupt handler. As those
+	 * signals still refer to the old sibling (via rq->engine), we must
+	 * transfer those to the old irq_worker to keep our locking
+	 * consistent.
+	 */
+	intel_engine_transfer_stale_breadcrumbs(ve->siblings[0], &ve->context);
 }
 
 #define for_each_waiter(p__, rq__) \
@@ -2279,7 +2265,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 									engine);
 
 				if (!list_empty(&ve->context.signals))
-					virtual_xfer_breadcrumbs(ve, rq);
+					virtual_xfer_breadcrumbs(ve);
 
 				/*
 				 * Move the bound engine to the top of the list
-- 
2.20.1
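
As a stand-alone illustration of the list motion in the patch above, here is a
toy userspace model (not driver code): the toy_* names are invented, a plain
singly-linked push stands in for list_add_tail()/list_splice_init(), and only
the ownership hand-off from the stale context to the old engine's irq worker
is modelled.

/*
 * Toy model of the handoff in this patch (not kernel code): completed
 * requests are parked on b->signaled_requests by the transfer, and the
 * irq worker splices that parked batch into its local list before
 * walking the live signalers.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_request {
	int seqno;
	struct toy_request *next;
};

struct toy_breadcrumbs {
	struct toy_request *signaled_requests;	/* models b->signaled_requests */
};

/* stands in for __signal_request(): queue rq onto the given signal list */
static void toy_signal_request(struct toy_request *rq,
			       struct toy_request **signals)
{
	rq->next = *signals;
	*signals = rq;
}

/* stands in for intel_engine_transfer_stale_breadcrumbs(): park the
 * completed requests of the virtual context on the old engine */
static void toy_transfer_stale(struct toy_breadcrumbs *b,
			       struct toy_request **ce_signals)
{
	struct toy_request *rq, *next;

	for (rq = *ce_signals; rq; rq = next) {
		next = rq->next;
		toy_signal_request(rq, &b->signaled_requests);
	}
	*ce_signals = NULL;			/* INIT_LIST_HEAD(&ce->signals) */
}

/* stands in for signal_irq_work(): take the parked batch and signal it */
static void toy_signal_irq_work(struct toy_breadcrumbs *b)
{
	struct toy_request *signal = b->signaled_requests; /* list_splice_init */
	struct toy_request *rq, *next;

	b->signaled_requests = NULL;

	for (rq = signal; rq; rq = next) {
		next = rq->next;
		printf("signalling request %d\n", rq->seqno);
		free(rq);
	}
}

int main(void)
{
	struct toy_breadcrumbs b = { NULL };
	struct toy_request *ce_signals = NULL;
	int i;

	/* three completed requests still attached to the virtual context */
	for (i = 1; i <= 3; i++) {
		struct toy_request *rq = malloc(sizeof(*rq));

		if (!rq)
			return 1;
		rq->seqno = i;
		toy_signal_request(rq, &ce_signals);
	}

	toy_transfer_stale(&b, &ce_signals);	/* execlists_dequeue() path */
	toy_signal_irq_work(&b);		/* later, from the irq worker */
	return 0;
}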

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [Intel-gfx] ✗ Fi.CI.BAT: failure for drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker (rev3)
  2020-05-12 13:22 [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker Chris Wilson
                   ` (5 preceding siblings ...)
  2020-05-12 18:16 ` [Intel-gfx] [PATCH v4] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker Chris Wilson
@ 2020-05-12 19:33 ` Patchwork
  6 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2020-05-12 19:33 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker (rev3)
URL   : https://patchwork.freedesktop.org/series/77191/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_8470 -> Patchwork_17637
========================================================

Summary
-------

  **FAILURE**

  Serious unknown changes introduced in Patchwork_17637 need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_17637, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17637/index.html

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_17637:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_selftest@live@gt_pm:
    - fi-bdw-5557u:       [PASS][1] -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-bdw-5557u/igt@i915_selftest@live@gt_pm.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17637/fi-bdw-5557u/igt@i915_selftest@live@gt_pm.html

  
#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * igt@debugfs_test@read_all_entries:
    - fi-icl-u2:          [PASS][3] -> [{ABORT}][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-icl-u2/igt@debugfs_test@read_all_entries.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17637/fi-icl-u2/igt@debugfs_test@read_all_entries.html

  
Known issues
------------

  Here are the changes found in Patchwork_17637 that come from known issues:

### IGT changes ###

#### Possible fixes ####

  * igt@i915_selftest@live@active:
    - fi-apl-guc:         [DMESG-FAIL][5] ([i915#666]) -> [PASS][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-apl-guc/igt@i915_selftest@live@active.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17637/fi-apl-guc/igt@i915_selftest@live@active.html

  
#### Warnings ####

  * igt@i915_pm_rpm@module-reload:
    - fi-kbl-x1275:       [FAIL][7] ([i915#62]) -> [SKIP][8] ([fdo#109271])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8470/fi-kbl-x1275/igt@i915_pm_rpm@module-reload.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17637/fi-kbl-x1275/igt@i915_pm_rpm@module-reload.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [i915#62]: https://gitlab.freedesktop.org/drm/intel/issues/62
  [i915#666]: https://gitlab.freedesktop.org/drm/intel/issues/666


Participating hosts (49 -> 44)
------------------------------

  Missing    (5): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-byt-clapper 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_8470 -> Patchwork_17637

  CI-20190529: 20190529
  CI_DRM_8470: d2c5ae86eac811c49f2fe22c4fa02b6e6d31cd67 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5651: e54e2642f1967ca3c488db32264607df670d1dfb @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_17637: 35c8a48ad2d3735475ef3ad2e242964b19d6becc @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

35c8a48ad2d3 drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17637/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
  2020-05-12 16:20       ` Chris Wilson
@ 2020-05-13  8:10         ` Tvrtko Ursulin
  2020-05-13  8:19           ` Chris Wilson
  0 siblings, 1 reply; 13+ messages in thread
From: Tvrtko Ursulin @ 2020-05-13  8:10 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 12/05/2020 17:20, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2020-05-12 17:07:23)
>>
>> On 12/05/2020 16:52, Chris Wilson wrote:
>>> Quoting Tvrtko Ursulin (2020-05-12 16:17:30)
>>>>
>>>> On 12/05/2020 14:22, Chris Wilson wrote:
>>>>> -     spin_lock(&old->breadcrumbs.irq_lock);
>>>>> -     if (!list_empty(&ve->context.signal_link)) {
>>>>> -             list_del_init(&ve->context.signal_link);
>>>>> -
>>>>> -             /*
>>>>> -              * We cannot acquire the new engine->breadcrumbs.irq_lock
>>>>> -              * (as we are holding a breadcrumbs.irq_lock already),
>>>>> -              * so attach this request to the signaler on submission.
>>>>> -              * The queued irq_work will occur when we finally drop
>>>>> -              * the engine->active.lock after dequeue.
>>>>> -              */
>>>>> -             set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
>>>>> -
>>>>> -             /* Also transfer the pending irq_work for the old breadcrumb. */
>>>>> -             intel_engine_signal_breadcrumbs(rq->engine);
>>>>> -     }
>>>>> -     spin_unlock(&old->breadcrumbs.irq_lock);
>>>>> +     intel_engine_transfer_breadcrumbs(ve->siblings[0], &ve->context);
>>>>
>>>> But isn't ve->siblings[0] the old engine at this point so new target
>>>> engine would have to be explicitly passed in?
>>>
>>> ve->siblings[0] is the old engine, which is holding the completed
>>> requests and their signals. Since their rq->engine == ve->siblings[0]
>>> and we can't update rq->engine as we can't take the required locks, we
>>> need to keep the breadcrumbs relative to ve->siblings[0] and not the new
>>> engine (the i915_request_cancel_breadcrumb conundrum).
>>
>> Who then enables breadcrumbs on the new engine?
> 
> If we enable signaling on the request we are about to submit, that
> will enable the breadcrumbs on the new engine. We've cleared out
> ce->signals and removed it from the old engine, so as soon as we want to
> enable a new breadcrumb, it will be attached to the [new] rq->engine.

So current code was wrong to think it needed to set 
DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT? Should it instead assert it is set? 
Because if it is not set, signalling on the new engine won't be enabled.

Regards,

Tvrtko
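
The locking constraint in the quoted reply above reduces to one rule:
rq->signal_link is only ever touched under the irq_lock of the engine that
rq->engine names, which is why the stale breadcrumbs stay parked on
ve->siblings[0]. A toy pthread model of just that rule follows (all toy_*
names are invented for illustration; this is not i915 code):

/*
 * Toy model (not i915 code): the lock that protects rq->signal_link is
 * the irq_lock of whichever engine rq->engine currently points at, so a
 * request whose rq->engine is still the old sibling must be handled
 * under that engine's lock rather than the new engine's.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_engine {
	const char *name;
	pthread_mutex_t irq_lock;
};

struct toy_request {
	struct toy_engine *engine;	/* which irq_lock covers signal_link */
	int on_signal_list;
};

static struct toy_engine old_engine = { "old", PTHREAD_MUTEX_INITIALIZER };

/* always take the lock matching rq->engine, never the new engine's */
static void toy_cancel_breadcrumb(struct toy_request *rq)
{
	pthread_mutex_lock(&rq->engine->irq_lock);
	rq->on_signal_list = 0;
	pthread_mutex_unlock(&rq->engine->irq_lock);
}

int main(void)
{
	struct toy_request rq = { &old_engine, 1 };

	toy_cancel_breadcrumb(&rq);
	printf("removed under the %s engine's irq_lock\n", rq.engine->name);
	return 0;
}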

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker
  2020-05-13  8:10         ` Tvrtko Ursulin
@ 2020-05-13  8:19           ` Chris Wilson
  0 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2020-05-13  8:19 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2020-05-13 09:10:48)
> 
> On 12/05/2020 17:20, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2020-05-12 17:07:23)
> >>
> >> On 12/05/2020 16:52, Chris Wilson wrote:
> >>> Quoting Tvrtko Ursulin (2020-05-12 16:17:30)
> >>>>
> >>>> On 12/05/2020 14:22, Chris Wilson wrote:
> >>>>> -     spin_lock(&old->breadcrumbs.irq_lock);
> >>>>> -     if (!list_empty(&ve->context.signal_link)) {
> >>>>> -             list_del_init(&ve->context.signal_link);
> >>>>> -
> >>>>> -             /*
> >>>>> -              * We cannot acquire the new engine->breadcrumbs.irq_lock
> >>>>> -              * (as we are holding a breadcrumbs.irq_lock already),
> >>>>> -              * so attach this request to the signaler on submission.
> >>>>> -              * The queued irq_work will occur when we finally drop
> >>>>> -              * the engine->active.lock after dequeue.
> >>>>> -              */
> >>>>> -             set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags);
> >>>>> -
> >>>>> -             /* Also transfer the pending irq_work for the old breadcrumb. */
> >>>>> -             intel_engine_signal_breadcrumbs(rq->engine);
> >>>>> -     }
> >>>>> -     spin_unlock(&old->breadcrumbs.irq_lock);
> >>>>> +     intel_engine_transfer_breadcrumbs(ve->siblings[0], &ve->context);
> >>>>
> >>>> But isn't ve->siblings[0] the old engine at this point so new target
> >>>> engine would have to be explicitly passed in?
> >>>
> >>> ve->siblings[0] is the old engine, which is holding the completed
> >>> requests and their signals. Since their rq->engine == ve->siblings[0]
> >>> and we can't update rq->engine as we can't take the required locks, we
> >>> need to keep the breadcrumbs relative to ve->siblings[0] and not the new
> >>> engine (the i915_request_cancel_breadcrumb conundrum).
> >>
> >> Who then enables breadcrumbs on the new engine?
> > 
> > If we enable signaling on the request we are about to submit, that
> > will enable the breadcrumbs on the new engine. We've cleared out
> > ce->signals and removed it from the old engine, so as soon as we want to
> > enable a new breadcrumb, it will be attached to the [new] rq->engine.
> 
> So current code was wrong to think it needed to set 
> DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT?

That was a trick to try to use the next request to attach the
ve->context as a signaler on the new engine, and use that engine to
execute the irq_work. It was wrong because
i915_request_enable_breadcrumb() only attaches the context to the engine
when the list of breadcrumbs is empty. (Here we know the list is not
empty, and there may not be enough time on another cpu to drain it.)

> Should it instead assert it is set? 
> Because if it is not set, signalling on the new engine won't be enabled.

Not DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT itself, since we do allow waiters
to be attached prior to execution. !I915_FENCE_FLAG_ACTIVE is a
candidate, but it's a bit of a random spot to assert it.
-Chris
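
A toy model of the attach-on-empty behaviour described above (all toy_*
names are invented; only the rule that a context re-attaches to the engine's
signalers on the empty -> non-empty transition of its signal list is taken
from the explanation):

/*
 * Toy model (not i915 code): a context only hooks itself onto the
 * engine's signaler list when its own signal list was previously empty.
 * With stale completed breadcrumbs still on ce->signals, enabling
 * signaling on the next request does not re-attach the context - which
 * is why the old DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT trick could miss.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_context {
	int nsignals;	/* breadcrumbs already on ce->signals */
	bool attached;	/* on engine->breadcrumbs.signalers? */
};

static void toy_enable_breadcrumb(struct toy_context *ce)
{
	if (ce->nsignals++ == 0)	/* attach only when previously empty */
		ce->attached = true;
}

int main(void)
{
	struct toy_context stale = { .nsignals = 3, .attached = false };
	struct toy_context drained = { .nsignals = 0, .attached = false };

	toy_enable_breadcrumb(&stale);		/* stays detached */
	toy_enable_breadcrumb(&drained);	/* attaches */

	printf("stale attached: %s, drained attached: %s\n",
	       stale.attached ? "yes" : "no",
	       drained.attached ? "yes" : "no");
	return 0;
}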
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2020-05-13  8:19 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-05-12 13:22 [Intel-gfx] [PATCH v2] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker Chris Wilson
2020-05-12 14:23 ` [Intel-gfx] ✓ Fi.CI.BAT: success for " Patchwork
2020-05-12 15:17 ` [Intel-gfx] [PATCH v2] " Tvrtko Ursulin
2020-05-12 15:52   ` Chris Wilson
2020-05-12 16:07     ` Tvrtko Ursulin
2020-05-12 16:20       ` Chris Wilson
2020-05-13  8:10         ` Tvrtko Ursulin
2020-05-13  8:19           ` Chris Wilson
2020-05-12 16:50 ` [Intel-gfx] [PATCH v3] " Chris Wilson
2020-05-12 17:30 ` [Intel-gfx] ✓ Fi.CI.IGT: success for " Patchwork
2020-05-12 17:36 ` [Intel-gfx] ✗ Fi.CI.BAT: failure for drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker (rev2) Patchwork
2020-05-12 18:16 ` [Intel-gfx] [PATCH v4] drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker Chris Wilson
2020-05-12 19:33 ` [Intel-gfx] ✗ Fi.CI.BAT: failure for drm/i915/gt: Transfer old virtual breadcrumbs to irq_worker (rev3) Patchwork
