* [PATCH] drm/i915: Unshare the idle-barrier from other kernel requests
@ 2019-07-24 12:43 Chris Wilson
  2019-07-24 12:50 ` Chris Wilson
  2019-07-24 16:35 ` ✗ Fi.CI.BAT: failure for drm/i915: Unshare the idle-barrier from other kernel requests (rev2) Patchwork
  0 siblings, 2 replies; 8+ messages in thread
From: Chris Wilson @ 2019-07-24 12:43 UTC (permalink / raw)
  To: intel-gfx

Under some circumstances (see intel_context_prepare_remote_request), we
may use a request along a kernel context to modify the logical state of
another. To keep the target context in place while the request executes,
we take an active reference on it using the kernel timeline. This is the
same timeline as we use for the idle-barrier, and so we end up reusing
the same active node. Except that the idle barrier is special and cannot
be reused in this manner! Give the idle-barrier a reserved timeline
index (0) so that it will always be unique (give or take we may issue
multiple idle barriers across multiple engines).
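
To make the collision concrete (a rough sketch distilled from the hunks
below, with arguments abbreviated): before this patch, the idle-barrier
was filed in the active tree under the kernel timeline's fence context,
so a remote request built on the kernel context looked up the same key
and reused the barrier node:

	/* preallocation filed the barrier under the kernel timeline */
	node->timeline = kctx->ring->timeline->fence_context;

	/* a remote request on the kernel context then hits the same key */
	i915_active_ref(&ce->active, rq->timeline->fence_context, rq);

With the barrier keyed at the reserved index 0 instead, and
i915_active_ref() now asserting that callers pass a non-zero timeline,
the two can no longer alias.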

Reported-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Fixes: ce476c80b8bf ("drm/i915: Keep contexts pinned until after the next kernel context switch")
Fixes: a9877da2d629 ("drm/i915/oa: Reconfigure contexts on the fly")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_active.c | 63 ++++++++++++++++++++++--------
 1 file changed, 47 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 13f304a29fc8..4f7f698bff15 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -184,6 +184,7 @@ active_instance(struct i915_active *ref, u64 idx)
 	ref->cache = node;
 	mutex_unlock(&ref->mutex);
 
+	BUILD_BUG_ON(offsetof(typeof(*node), base));
 	return &node->base;
 }
 
@@ -212,6 +213,8 @@ int i915_active_ref(struct i915_active *ref,
 	struct i915_active_request *active;
 	int err;
 
+	GEM_BUG_ON(!timeline); /* reserved for idle-barrier */
+
 	/* Prevent reaping in case we malloc/wait while building the tree */
 	err = i915_active_acquire(ref);
 	if (err)
@@ -342,6 +345,31 @@ void i915_active_fini(struct i915_active *ref)
 }
 #endif
 
+static struct active_node *idle_barrier(struct i915_active *ref)
+{
+	struct active_node *node = NULL;
+	struct rb_node *rb;
+
+	mutex_lock(&ref->mutex);
+
+	rb = rb_first(&ref->tree);
+	if (!rb)
+		goto unlock;
+
+	node = rb_entry(rb, typeof(*node), node);
+	if (node->timeline || i915_active_request_isset(&node->base)) {
+		node = NULL;
+		goto unlock;
+	}
+
+	GEM_BUG_ON(!list_empty(&node->base.link));
+	rb_erase(rb, &ref->tree);
+
+unlock:
+	mutex_unlock(&ref->mutex);
+	return node;
+}
+
 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 					    struct intel_engine_cs *engine)
 {
@@ -352,22 +380,29 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 
 	GEM_BUG_ON(!engine->mask);
 	for_each_engine_masked(engine, i915, engine->mask, tmp) {
-		struct intel_context *kctx = engine->kernel_context;
 		struct active_node *node;
 
-		node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
-		if (unlikely(!node)) {
-			err = -ENOMEM;
-			goto unwind;
+		node = idle_barrier(ref);
+		if (!node) {
+			node = kmem_cache_alloc(global.slab_cache,
+						GFP_KERNEL |
+						__GFP_RETRY_MAYFAIL |
+						__GFP_NOWARN);
+			if (unlikely(!node)) {
+				err = -ENOMEM;
+				goto unwind;
+			}
+
+			node->ref = ref;
+			node->timeline = 0;
+			node->base.retire = node_retire;
 		}
 
-		i915_active_request_init(&node->base,
-					 (void *)engine, node_retire);
-		node->timeline = kctx->ring->timeline->fence_context;
-		node->ref = ref;
+		intel_engine_pm_get(engine);
+
+		RCU_INIT_POINTER(node->base.request, (void *)engine);
 		atomic_inc(&ref->count);
 
-		intel_engine_pm_get(engine);
 		llist_add((struct llist_node *)&node->base.link,
 			  &ref->barriers);
 	}
@@ -402,6 +437,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 
 		node = container_of((struct list_head *)pos,
 				    typeof(*node), base.link);
+		GEM_BUG_ON(node->timeline);
 
 		engine = (void *)rcu_access_pointer(node->base.request);
 		RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
@@ -410,12 +446,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 		p = &ref->tree.rb_node;
 		while (*p) {
 			parent = *p;
-			if (rb_entry(parent,
-				     struct active_node,
-				     node)->timeline < node->timeline)
-				p = &parent->rb_right;
-			else
-				p = &parent->rb_left;
+			p = &parent->rb_left;
 		}
 		rb_link_node(&node->node, parent, p);
 		rb_insert_color(&node->node, &ref->tree);
-- 
2.22.0


* [PATCH] drm/i915: Unshare the idle-barrier from other kernel requests
  2019-07-24 12:43 [PATCH] drm/i915: Unshare the idle-barrier from other kernel requests Chris Wilson
@ 2019-07-24 12:50 ` Chris Wilson
  2019-07-24 16:35 ` ✗ Fi.CI.BAT: failure for drm/i915: Unshare the idle-barrier from other kernel requests (rev2) Patchwork
  1 sibling, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2019-07-24 12:50 UTC (permalink / raw)
  To: intel-gfx

Under some circumstances (see intel_context_prepare_remote_request), we
may use a request along a kernel context to modify the logical state of
another. To keep the target context in place while the request executes,
we take an active reference on it using the kernel timeline. This is the
same timeline as we use for the idle-barrier, and so we end up reusing
the same active node. Except that the idle barrier is special and cannot
be reused in this manner! Give the idle-barrier a reserved timeline
index (0) so that it will always be unique (give or take we may issue
multiple idle barriers across multiple engines).

Reported-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Fixes: ce476c80b8bf ("drm/i915: Keep contexts pinned until after the next kernel context switch")
Fixes: a9877da2d629 ("drm/i915/oa: Reconfigure contexts on the fly")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
Replace recursive attempt to acquire the i915_active.mutex with a
lockdep annotation.
---
 drivers/gpu/drm/i915/i915_active.c | 58 +++++++++++++++++++++---------
 1 file changed, 42 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 13f304a29fc8..99bebd77f364 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -184,6 +184,7 @@ active_instance(struct i915_active *ref, u64 idx)
 	ref->cache = node;
 	mutex_unlock(&ref->mutex);
 
+	BUILD_BUG_ON(offsetof(typeof(*node), base));
 	return &node->base;
 }
 
@@ -212,6 +213,8 @@ int i915_active_ref(struct i915_active *ref,
 	struct i915_active_request *active;
 	int err;
 
+	GEM_BUG_ON(!timeline); /* reserved for idle-barrier */
+
 	/* Prevent reaping in case we malloc/wait while building the tree */
 	err = i915_active_acquire(ref);
 	if (err)
@@ -342,6 +345,26 @@ void i915_active_fini(struct i915_active *ref)
 }
 #endif
 
+static struct active_node *idle_barrier(struct i915_active *ref)
+{
+	struct active_node *node;
+	struct rb_node *rb;
+
+	lockdep_assert_held(&ref->mutex);
+	rb = rb_first(&ref->tree);
+	if (!rb)
+		return NULL;
+
+	node = rb_entry(rb, typeof(*node), node);
+	if (node->timeline || i915_active_request_isset(&node->base))
+		return NULL;
+
+	GEM_BUG_ON(!list_empty(&node->base.link));
+	rb_erase(rb, &ref->tree);
+
+	return node;
+}
+
 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 					    struct intel_engine_cs *engine)
 {
@@ -352,22 +375,29 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 
 	GEM_BUG_ON(!engine->mask);
 	for_each_engine_masked(engine, i915, engine->mask, tmp) {
-		struct intel_context *kctx = engine->kernel_context;
 		struct active_node *node;
 
-		node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
-		if (unlikely(!node)) {
-			err = -ENOMEM;
-			goto unwind;
+		node = idle_barrier(ref);
+		if (!node) {
+			node = kmem_cache_alloc(global.slab_cache,
+						GFP_KERNEL |
+						__GFP_RETRY_MAYFAIL |
+						__GFP_NOWARN);
+			if (unlikely(!node)) {
+				err = -ENOMEM;
+				goto unwind;
+			}
+
+			node->ref = ref;
+			node->timeline = 0;
+			node->base.retire = node_retire;
 		}
 
-		i915_active_request_init(&node->base,
-					 (void *)engine, node_retire);
-		node->timeline = kctx->ring->timeline->fence_context;
-		node->ref = ref;
+		intel_engine_pm_get(engine);
+
+		RCU_INIT_POINTER(node->base.request, (void *)engine);
 		atomic_inc(&ref->count);
 
-		intel_engine_pm_get(engine);
 		llist_add((struct llist_node *)&node->base.link,
 			  &ref->barriers);
 	}
@@ -402,6 +432,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 
 		node = container_of((struct list_head *)pos,
 				    typeof(*node), base.link);
+		GEM_BUG_ON(node->timeline);
 
 		engine = (void *)rcu_access_pointer(node->base.request);
 		RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
@@ -410,12 +441,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 		p = &ref->tree.rb_node;
 		while (*p) {
 			parent = *p;
-			if (rb_entry(parent,
-				     struct active_node,
-				     node)->timeline < node->timeline)
-				p = &parent->rb_right;
-			else
-				p = &parent->rb_left;
+			p = &parent->rb_left;
 		}
 		rb_link_node(&node->node, parent, p);
 		rb_insert_color(&node->node, &ref->tree);
-- 
2.22.0


* ✗ Fi.CI.BAT: failure for drm/i915: Unshare the idle-barrier from other kernel requests (rev2)
  2019-07-24 12:43 [PATCH] drm/i915: Unshare the idle-barrier from other kernel requests Chris Wilson
  2019-07-24 12:50 ` Chris Wilson
@ 2019-07-24 16:35 ` Patchwork
  1 sibling, 0 replies; 8+ messages in thread
From: Patchwork @ 2019-07-24 16:35 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Unshare the idle-barrier from other kernel requests (rev2)
URL   : https://patchwork.freedesktop.org/series/64171/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_6545 -> Patchwork_13735
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_13735 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_13735, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13735/

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_13735:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_module_load@reload-with-fault-injection:
    - fi-snb-2520m:       [PASS][1] -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6545/fi-snb-2520m/igt@i915_module_load@reload-with-fault-injection.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13735/fi-snb-2520m/igt@i915_module_load@reload-with-fault-injection.html

  
Known issues
------------

  Here are the changes found in Patchwork_13735 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_exec_suspend@basic-s4-devices:
    - fi-kbl-7500u:       [PASS][3] -> [DMESG-WARN][4] ([fdo#105128] / [fdo#107139])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6545/fi-kbl-7500u/igt@gem_exec_suspend@basic-s4-devices.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13735/fi-kbl-7500u/igt@gem_exec_suspend@basic-s4-devices.html

  * igt@kms_busy@basic-flip-a:
    - fi-kbl-7567u:       [PASS][5] -> [SKIP][6] ([fdo#109271] / [fdo#109278]) +2 similar issues
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6545/fi-kbl-7567u/igt@kms_busy@basic-flip-a.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13735/fi-kbl-7567u/igt@kms_busy@basic-flip-a.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [PASS][7] -> [FAIL][8] ([fdo#109485])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6545/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13735/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  * igt@kms_frontbuffer_tracking@basic:
    - fi-icl-guc:         [PASS][9] -> [FAIL][10] ([fdo#103167])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6545/fi-icl-guc/igt@kms_frontbuffer_tracking@basic.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13735/fi-icl-guc/igt@kms_frontbuffer_tracking@basic.html

  
#### Possible fixes ####

  * igt@gem_ctx_create@basic-files:
    - fi-icl-u2:          [INCOMPLETE][11] ([fdo#107713] / [fdo#109100]) -> [PASS][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6545/fi-icl-u2/igt@gem_ctx_create@basic-files.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13735/fi-icl-u2/igt@gem_ctx_create@basic-files.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#102505]: https://bugs.freedesktop.org/show_bug.cgi?id=102505
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#105128]: https://bugs.freedesktop.org/show_bug.cgi?id=105128
  [fdo#105602]: https://bugs.freedesktop.org/show_bug.cgi?id=105602
  [fdo#107139]: https://bugs.freedesktop.org/show_bug.cgi?id=107139
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#109100]: https://bugs.freedesktop.org/show_bug.cgi?id=109100
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278
  [fdo#109485]: https://bugs.freedesktop.org/show_bug.cgi?id=109485
  [fdo#111045]: https://bugs.freedesktop.org/show_bug.cgi?id=111045
  [fdo#111046]: https://bugs.freedesktop.org/show_bug.cgi?id=111046
  [fdo#111049]: https://bugs.freedesktop.org/show_bug.cgi?id=111049


Participating hosts (52 -> 43)
------------------------------

  Additional (1): fi-skl-gvtdvm 
  Missing    (10): fi-kbl-soraka fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-cfl-8109u fi-pnv-d510 fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_6545 -> Patchwork_13735

  CI-20190529: 20190529
  CI_DRM_6545: a6efe73f1e086c7935d56b08342f9e1c5565fcf3 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5109: e5fd509e16ec649436be31f38eaa5b85cb7f72f1 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_13735: cf9d7252c06a11879a9cce16ad324a64f7055892 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

cf9d7252c06a drm/i915: Unshare the idle-barrier from other kernel requests

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13735/

* Re: [PATCH] drm/i915: Unshare the idle-barrier from other kernel requests
  2019-07-25 13:19   ` Tvrtko Ursulin
@ 2019-07-25 13:26     ` Chris Wilson
  0 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2019-07-25 13:26 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-07-25 14:19:22)
> 
> On 25/07/2019 10:38, Chris Wilson wrote:
> > @@ -352,22 +384,29 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
> >   
> >       GEM_BUG_ON(!engine->mask);
> >       for_each_engine_masked(engine, i915, engine->mask, tmp) {
> > -             struct intel_context *kctx = engine->kernel_context;
> >               struct active_node *node;
> >   
> > -             node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
> > -             if (unlikely(!node)) {
> > -                     err = -ENOMEM;
> > -                     goto unwind;
> > +             node = idle_barrier(ref);
> > +             if (!node) {
> > +                     node = kmem_cache_alloc(global.slab_cache,
> > +                                             GFP_KERNEL |
> > +                                             __GFP_RETRY_MAYFAIL |
> > +                                             __GFP_NOWARN);
> > +                     if (unlikely(!node)) {
> > +                             err = -ENOMEM;
> > +                             goto unwind;
> > +                     }
> > +
> > +                     node->ref = ref;
> > +                     node->timeline = 0;
> > +                     node->base.retire = node_retire;
> >               }
> >   
> > -             i915_active_request_init(&node->base,
> > -                                      (void *)engine, node_retire);
> > -             node->timeline = kctx->ring->timeline->fence_context;
> > -             node->ref = ref;
> > +             intel_engine_pm_get(engine);
> 
> AFAIU this could comfortably be moved last instead of going even higher 
> with this patch.

Sure, the wakeref is pinned by the caller; this acquires one for the
barrier. I moved it out of the way as I felt grouping the linkage of the
preallocated barrier was more interesting.
 
> > +
> > +             RCU_INIT_POINTER(node->base.request, (void *)engine);
> 
> What happens with the engine in the request slot? Nothing if there is no 
> retire hook set? But who uses it then?

We're just stashing a pointer back to the engine in an unused location.
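
Taken together from the two hunks being discussed, the stash/recover
pattern is:

	/* preallocate: park the engine pointer in the idle request slot */
	RCU_INIT_POINTER(node->base.request, (void *)engine);

	/* acquire_barrier: pull it back out, leaving a poison value in
	 * the slot until the real barrier request is attached
	 */
	engine = (void *)rcu_access_pointer(node->base.request);
	RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));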

> 
> >               atomic_inc(&ref->count);
> >   
> > -             intel_engine_pm_get(engine);
> >               llist_add((struct llist_node *)&node->base.link,
> >                         &ref->barriers);
> >       }
> > @@ -402,6 +441,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
> >   
> >               node = container_of((struct list_head *)pos,
> >                                   typeof(*node), base.link);
> > +             GEM_BUG_ON(node->timeline);
> >   
> >               engine = (void *)rcu_access_pointer(node->base.request);
> >               RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
> > @@ -410,12 +450,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
> >               p = &ref->tree.rb_node;
> >               while (*p) {
> >                       parent = *p;
> > -                     if (rb_entry(parent,
> > -                                  struct active_node,
> > -                                  node)->timeline < node->timeline)
> > -                             p = &parent->rb_right;
> > -                     else
> > -                             p = &parent->rb_left;
> > +                     p = &parent->rb_left;
> 
> I don't get this hunk at all - how can it be okay to ignore the timeline 
> value. Is this the buggy bit you mentioned on IRC?

We don't ignore it; we know it's 0.
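
Spelled out, the deleted comparison with node->timeline == 0 is:

	if (rb_entry(parent, struct active_node, node)->timeline
	    < node->timeline) /* never true: no u64 is < 0 */
		p = &parent->rb_right;
	else
		p = &parent->rb_left;

so the descent always takes the left branch, and the hunk merely
collapses the dead one; the GEM_BUG_ON(node->timeline) added above
enforces that assumption.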

No, the bug is whether or not the ppGTT counts as part of the pinned
state for the legacy ringbuffer. Currently we do not and my selftest
required that we always insert an idle-barrier after use. Erring on the
side of safety says we should always utilise the idle-barriers.
-Chris

* Re: [PATCH] drm/i915: Unshare the idle-barrier from other kernel requests
  2019-07-25  9:38 ` Chris Wilson
  2019-07-25 10:26   ` Lionel Landwerlin
@ 2019-07-25 13:19   ` Tvrtko Ursulin
  2019-07-25 13:26     ` Chris Wilson
  1 sibling, 1 reply; 8+ messages in thread
From: Tvrtko Ursulin @ 2019-07-25 13:19 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 25/07/2019 10:38, Chris Wilson wrote:
> Under some circumstances (see intel_context_prepare_remote_request), we
> may use a request along a kernel context to modify the logical state of
> another. To keep the target context in place while the request executes,
> we take an active reference on it using the kernel timeline. This is the
> same timeline as we use for the idle-barrier, and so we end up reusing
> the same active node. Except that the idle barrier is special and cannot
> be reused in this manner! Give the idle-barrier a reserved timeline
> index (0) so that it will always be unique (give or take we may issue
> multiple idle barriers across multiple engines).
> 
> Reported-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Fixes: ce476c80b8bf ("drm/i915: Keep contexts pinned until after the next kernel context switch")
> Fixes: a9877da2d629 ("drm/i915/oa: Reconfigure contexts on the fly")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_context.c       |  42 ++-
>   drivers/gpu/drm/i915/gt/intel_context.h       |  13 +-
>   drivers/gpu/drm/i915/gt/selftest_context.c    | 321 ++++++++++++++++++
>   drivers/gpu/drm/i915/i915_active.c            |  67 +++-
>   .../drm/i915/selftests/i915_live_selftests.h  |   3 +-
>   5 files changed, 408 insertions(+), 38 deletions(-)
>   create mode 100644 drivers/gpu/drm/i915/gt/selftest_context.c
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> index 9292b6ca5e9c..bc20161bab9f 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.c
> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> @@ -162,18 +162,8 @@ static int __intel_context_active(struct i915_active *active)
>   	if (err)
>   		goto err_ring;
>   
> -	/* Preallocate tracking nodes */
> -	if (!i915_gem_context_is_kernel(ce->gem_context)) {
> -		err = i915_active_acquire_preallocate_barrier(&ce->active,
> -							      ce->engine);
> -		if (err)
> -			goto err_state;
> -	}
> -
>   	return 0;
>   
> -err_state:
> -	__context_unpin_state(ce->state);
>   err_ring:
>   	intel_ring_unpin(ce->ring);
>   err_put:
> @@ -181,6 +171,34 @@ static int __intel_context_active(struct i915_active *active)
>   	return err;
>   }
>   
> +int intel_context_active_acquire(struct intel_context *ce)
> +{
> +	int err;
> +
> +	err = i915_active_acquire(&ce->active);
> +	if (err)
> +		return err;
> +
> +	/* Preallocate tracking nodes */
> +	if (ce->state && !i915_gem_context_is_kernel(ce->gem_context)) {
> +		err = i915_active_acquire_preallocate_barrier(&ce->active,
> +							      ce->engine);
> +		if (err) {
> +			i915_active_release(&ce->active);
> +			return err;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +void intel_context_active_release(struct intel_context *ce)
> +{
> +	/* Nodes preallocated in intel_context_active() */
> +	i915_active_acquire_barrier(&ce->active);
> +	i915_active_release(&ce->active);
> +}
> +
>   void
>   intel_context_init(struct intel_context *ce,
>   		   struct i915_gem_context *ctx,
> @@ -284,3 +302,7 @@ struct i915_request *intel_context_create_request(struct intel_context *ce)
>   
>   	return rq;
>   }
> +
> +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> +#include "selftest_context.c"
> +#endif
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
> index 23c7e4c0ce7c..07f9924de48f 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context.h
> @@ -104,17 +104,8 @@ static inline void intel_context_exit(struct intel_context *ce)
>   		ce->ops->exit(ce);
>   }
>   
> -static inline int intel_context_active_acquire(struct intel_context *ce)
> -{
> -	return i915_active_acquire(&ce->active);
> -}
> -
> -static inline void intel_context_active_release(struct intel_context *ce)
> -{
> -	/* Nodes preallocated in intel_context_active() */
> -	i915_active_acquire_barrier(&ce->active);
> -	i915_active_release(&ce->active);
> -}
> +int intel_context_active_acquire(struct intel_context *ce);
> +void intel_context_active_release(struct intel_context *ce);
>   
>   static inline struct intel_context *intel_context_get(struct intel_context *ce)
>   {
> diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
> new file mode 100644
> index 000000000000..4d251faafcc0
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gt/selftest_context.c
> @@ -0,0 +1,321 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#include "i915_selftest.h"
> +#include "intel_gt.h"
> +
> +#include "gem/selftests/mock_context.h"
> +#include "selftests/igt_flush_test.h"
> +#include "selftests/mock_drm.h"
> +
> +static int request_sync(struct i915_request *rq)
> +{
> +	long timeout;
> +	int err = 0;
> +
> +	i915_request_get(rq);
> +
> +	i915_request_add(rq);
> +	timeout = i915_request_wait(rq, 0, HZ / 10);
> +	if (timeout < 0)
> +		err = timeout;
> +	else
> +		i915_request_retire_upto(rq);
> +
> +	i915_request_put(rq);
> +
> +	return err;
> +}
> +
> +static int context_sync(struct intel_context *ce)
> +{
> +	struct intel_timeline *tl = ce->ring->timeline;
> +	int err = 0;
> +
> +	do {
> +		struct i915_request *rq;
> +		long timeout;
> +
> +		rcu_read_lock();
> +		rq = rcu_dereference(tl->last_request.request);
> +		if (rq)
> +			rq = i915_request_get_rcu(rq);
> +		rcu_read_unlock();
> +		if (!rq)
> +			break;
> +
> +		timeout = i915_request_wait(rq, 0, HZ / 10);
> +		if (timeout < 0)
> +			err = timeout;
> +		else
> +			i915_request_retire_upto(rq);
> +
> +		i915_request_put(rq);
> +	} while (!err);
> +
> +	return err;
> +}
> +
> +static int __live_active_context(struct intel_engine_cs *engine,
> +				 struct i915_gem_context *fixme)
> +{
> +	struct intel_context *ce;
> +	int pass;
> +	int err;
> +
> +	/*
> +	 * We keep active contexts alive until after a subsequent context
> +	 * switch as the final write from the context-save will be after
> +	 * we retire the final request. We track when we unpin the context,
> +	 * under the presumption that the final pin is from the last request,
> +	 * and instead of immediately unpinning the context, we add a task
> +	 * to unpin the context from the next idle-barrier.
> +	 *
> +	 * This test makes sure that the context is kept alive until a
> +	 * subsequent idle-barrier (emitted when the engine wakeref hits 0
> +	 * with no more outstanding requests).
> +	 */
> +
> +	if (intel_engine_pm_is_awake(engine)) {
> +		pr_err("%s is awake before starting %s!\n",
> +		       engine->name, __func__);
> +		return -EINVAL;
> +	}
> +
> +	ce = intel_context_create(fixme, engine);
> +	if (!ce)
> +		return -ENOMEM;
> +
> +	for (pass = 0; pass <= 2; pass++) {
> +		struct i915_request *rq;
> +
> +		rq = intel_context_create_request(ce);
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			goto err;
> +		}
> +
> +		err = request_sync(rq);
> +		if (err)
> +			goto err;
> +
> +		/* Context will be kept active until after an idle-barrier. */
> +		if (i915_active_is_idle(&ce->active)) {
> +			pr_err("context is not active; expected idle-barrier (pass %d)\n", pass);
> +			err = -EINVAL;
> +			goto err;
> +		}
> +
> +		if (!intel_engine_pm_is_awake(engine)) {
> +			pr_err("%s is asleep before idle-barrier %s!\n",
> +			       engine->name, __func__);
> +			err = -EINVAL;
> +			goto err;
> +		}
> +	}
> +
> +	/* Now make sure our idle-barriers are flushed */
> +	err = context_sync(engine->kernel_context);
> +	if (err)
> +		goto err;
> +
> +	if (!i915_active_is_idle(&ce->active)) {
> +		pr_err("context is still active!");
> +		err = -EINVAL;
> +	}
> +
> +	if (intel_engine_pm_is_awake(engine)) {
> +		struct drm_printer p = drm_debug_printer(__func__);
> +
> +		intel_engine_dump(engine, &p,
> +				  "%s is still awake after idle-barriers\n",
> +				  engine->name);
> +		GEM_TRACE_DUMP();
> +
> +		err = -EINVAL;
> +		goto err;
> +	}
> +
> +err:
> +	intel_context_put(ce);
> +	return err;
> +}
> +
> +static int live_active_context(void *arg)
> +{
> +	struct intel_gt *gt = arg;
> +	struct intel_engine_cs *engine;
> +	struct i915_gem_context *fixme;
> +	enum intel_engine_id id;
> +	struct drm_file *file;
> +	int err = 0;
> +
> +	file = mock_file(gt->i915);
> +	if (IS_ERR(file))
> +		return PTR_ERR(file);
> +
> +	mutex_lock(&gt->i915->drm.struct_mutex);
> +
> +	fixme = live_context(gt->i915, file);
> +	if (!fixme) {
> +		err = -ENOMEM;
> +		goto unlock;
> +	}
> +
> +	for_each_engine(engine, gt->i915, id) {
> +		err = __live_active_context(engine, fixme);
> +		if (err)
> +			break;
> +
> +		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
> +		if (err)
> +			break;
> +	}
> +
> +unlock:
> +	mutex_unlock(&gt->i915->drm.struct_mutex);
> +	mock_file_free(gt->i915, file);
> +	return err;
> +}
> +
> +static int __live_remote_context(struct intel_engine_cs *engine,
> +				 struct i915_gem_context *fixme)
> +{
> +	struct intel_context *local, *remote;
> +	struct i915_request *rq;
> +	int pass;
> +	int err;
> +
> +	/*
> +	 * Check that our idle barriers do not interfere with normal
> +	 * activity tracking. In particular, check that operating
> +	 * on the context image remotely (intel_context_prepare_remote_request)
> +	 * which inserts foreign fences into intel_context.active are not
> +	 * clobbered.
> +	 */
> +
> +	remote = intel_context_create(fixme, engine);
> +	if (!remote)
> +		return -ENOMEM;
> +
> +	local = intel_context_create(fixme, engine);
> +	if (!local) {
> +		err = -ENOMEM;
> +		goto err_remote;
> +	}
> +
> +	for (pass = 0; pass <= 2; pass++) {
> +		err = intel_context_pin(remote);
> +		if (err)
> +			goto err_local;
> +
> +		rq = intel_context_create_request(local);
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			goto err_unpin;
> +		}
> +
> +		err = intel_context_prepare_remote_request(remote, rq);
> +		if (err) {
> +			i915_request_add(rq);
> +			goto err_unpin;
> +		}
> +
> +		err = request_sync(rq);
> +		if (err)
> +			goto err_unpin;
> +
> +		intel_context_unpin(remote);
> +		err = intel_context_pin(remote);
> +		if (err)
> +			goto err_local;
> +
> +		rq = i915_request_create(engine->kernel_context);
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			goto err_unpin;
> +		}
> +
> +		err = intel_context_prepare_remote_request(remote, rq);
> +		if (err) {
> +			i915_request_add(rq);
> +			goto err_unpin;
> +		}
> +
> +		err = request_sync(rq);
> +		if (err)
> +			goto err_unpin;
> +
> +		intel_context_unpin(remote);
> +
> +		if (i915_active_is_idle(&remote->active)) {
> +			pr_err("remote context is not active; expected idle-barrier (pass %d)\n", pass);
> +			err = -EINVAL;
> +			goto err_local;
> +		}
> +	}
> +
> +	goto err_local;
> +
> +err_unpin:
> +	intel_context_unpin(remote);
> +err_local:
> +	intel_context_put(local);
> +err_remote:
> +	intel_context_put(remote);
> +	return err;
> +}
> +
> +static int live_remote_context(void *arg)
> +{
> +	struct intel_gt *gt = arg;
> +	struct intel_engine_cs *engine;
> +	struct i915_gem_context *fixme;
> +	enum intel_engine_id id;
> +	struct drm_file *file;
> +	int err = 0;
> +
> +	file = mock_file(gt->i915);
> +	if (IS_ERR(file))
> +		return PTR_ERR(file);
> +
> +	mutex_lock(&gt->i915->drm.struct_mutex);
> +
> +	fixme = live_context(gt->i915, file);
> +	if (!fixme) {
> +		err = -ENOMEM;
> +		goto unlock;
> +	}
> +
> +	for_each_engine(engine, gt->i915, id) {
> +		err = __live_remote_context(engine, fixme);
> +		if (err)
> +			break;
> +
> +		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
> +		if (err)
> +			break;
> +	}
> +
> +unlock:
> +	mutex_unlock(&gt->i915->drm.struct_mutex);
> +	mock_file_free(gt->i915, file);
> +	return err;
> +}
> +
> +int intel_context_live_selftests(struct drm_i915_private *i915)
> +{
> +	static const struct i915_subtest tests[] = {
> +		SUBTEST(live_active_context),
> +		SUBTEST(live_remote_context),
> +	};
> +	struct intel_gt *gt = &i915->gt;
> +
> +	if (intel_gt_is_wedged(gt))
> +		return 0;
> +
> +	return intel_gt_live_subtests(tests, gt);
> +}
> diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
> index 13f304a29fc8..e04afb519264 100644
> --- a/drivers/gpu/drm/i915/i915_active.c
> +++ b/drivers/gpu/drm/i915/i915_active.c
> @@ -184,6 +184,7 @@ active_instance(struct i915_active *ref, u64 idx)
>   	ref->cache = node;
>   	mutex_unlock(&ref->mutex);
>   
> +	BUILD_BUG_ON(offsetof(typeof(*node), base));
>   	return &node->base;
>   }
>   
> @@ -212,6 +213,8 @@ int i915_active_ref(struct i915_active *ref,
>   	struct i915_active_request *active;
>   	int err;
>   
> +	GEM_BUG_ON(!timeline); /* reserved for idle-barrier */
> +
>   	/* Prevent reaping in case we malloc/wait while building the tree */
>   	err = i915_active_acquire(ref);
>   	if (err)
> @@ -222,6 +225,7 @@ int i915_active_ref(struct i915_active *ref,
>   		err = -ENOMEM;
>   		goto out;
>   	}
> +	GEM_BUG_ON(IS_ERR(active->request));
>   
>   	if (!i915_active_request_isset(active))
>   		atomic_inc(&ref->count);
> @@ -342,6 +346,34 @@ void i915_active_fini(struct i915_active *ref)
>   }
>   #endif
>   
> +static struct active_node *idle_barrier(struct i915_active *ref)
> +{
> +	struct active_node *idle = NULL;
> +	struct rb_node *rb;
> +
> +	if (RB_EMPTY_ROOT(&ref->tree))
> +		return NULL;
> +
> +	mutex_lock(&ref->mutex);
> +	for (rb = rb_first(&ref->tree); rb; rb = rb_next(rb)) {
> +		struct active_node *node;
> +
> +		node = rb_entry(rb, typeof(*node), node);
> +		if (node->timeline)
> +			break;

I think the code needs to be more explicit for readability. The special 
timeline value zero should at least be a #define, plus some comments.
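
For instance, a minimal version of that suggestion (the name below is
hypothetical, not from the patch):

	/* timeline index 0 is reserved for the engine's idle-barrier */
	#define I915_ACTIVE_IDLE_BARRIER 0ull

	node = rb_entry(rb, typeof(*node), node);
	if (node->timeline != I915_ACTIVE_IDLE_BARRIER)
		break; /* barriers sort first, so nothing more to find */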

> +
> +		if (!i915_active_request_isset(&node->base)) {
> +			GEM_BUG_ON(!list_empty(&node->base.link));
> +			rb_erase(rb, &ref->tree);
> +			idle = node;
> +			break;
> +		}
> +	}
> +	mutex_unlock(&ref->mutex);
> +
> +	return idle;
> +}
> +
>   int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
>   					    struct intel_engine_cs *engine)
>   {
> @@ -352,22 +384,29 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
>   
>   	GEM_BUG_ON(!engine->mask);
>   	for_each_engine_masked(engine, i915, engine->mask, tmp) {
> -		struct intel_context *kctx = engine->kernel_context;
>   		struct active_node *node;
>   
> -		node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
> -		if (unlikely(!node)) {
> -			err = -ENOMEM;
> -			goto unwind;
> +		node = idle_barrier(ref);
> +		if (!node) {
> +			node = kmem_cache_alloc(global.slab_cache,
> +						GFP_KERNEL |
> +						__GFP_RETRY_MAYFAIL |
> +						__GFP_NOWARN);
> +			if (unlikely(!node)) {
> +				err = -ENOMEM;
> +				goto unwind;
> +			}
> +
> +			node->ref = ref;
> +			node->timeline = 0;
> +			node->base.retire = node_retire;
>   		}
>   
> -		i915_active_request_init(&node->base,
> -					 (void *)engine, node_retire);
> -		node->timeline = kctx->ring->timeline->fence_context;
> -		node->ref = ref;
> +		intel_engine_pm_get(engine);

AFAIU this could comfortably be moved last instead of going even higher 
with this patch.

> +
> +		RCU_INIT_POINTER(node->base.request, (void *)engine);

What happens with the engine in the request slot? Nothing if there is no 
retire hook set? But who uses it then?

>   		atomic_inc(&ref->count);
>   
> -		intel_engine_pm_get(engine);
>   		llist_add((struct llist_node *)&node->base.link,
>   			  &ref->barriers);
>   	}
> @@ -402,6 +441,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
>   
>   		node = container_of((struct list_head *)pos,
>   				    typeof(*node), base.link);
> +		GEM_BUG_ON(node->timeline);
>   
>   		engine = (void *)rcu_access_pointer(node->base.request);
>   		RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
> @@ -410,12 +450,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
>   		p = &ref->tree.rb_node;
>   		while (*p) {
>   			parent = *p;
> -			if (rb_entry(parent,
> -				     struct active_node,
> -				     node)->timeline < node->timeline)
> -				p = &parent->rb_right;
> -			else
> -				p = &parent->rb_left;
> +			p = &parent->rb_left;

I don't get this hunk at all - how can it be okay to ignore the timeline 
value. Is this the buggy bit you mentioned on IRC?

First pass, I'll come back later.

Regards,

Tvrtko

>   		}
>   		rb_link_node(&node->node, parent, p);
>   		rb_insert_color(&node->node, &ref->tree);
> diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> index 2b31a4ee0b4c..a841d3f9bedc 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> @@ -15,6 +15,7 @@ selftest(workarounds, intel_workarounds_live_selftests)
>   selftest(timelines, intel_timeline_live_selftests)
>   selftest(requests, i915_request_live_selftests)
>   selftest(active, i915_active_live_selftests)
> +selftest(gt_contexts, intel_context_live_selftests)
>   selftest(objects, i915_gem_object_live_selftests)
>   selftest(mman, i915_gem_mman_live_selftests)
>   selftest(dmabuf, i915_gem_dmabuf_live_selftests)
> @@ -24,7 +25,7 @@ selftest(gtt, i915_gem_gtt_live_selftests)
>   selftest(gem, i915_gem_live_selftests)
>   selftest(evict, i915_gem_evict_live_selftests)
>   selftest(hugepages, i915_gem_huge_page_live_selftests)
> -selftest(contexts, i915_gem_context_live_selftests)
> +selftest(gem_contexts, i915_gem_context_live_selftests)
>   selftest(blt, i915_gem_object_blt_live_selftests)
>   selftest(client, i915_gem_client_blt_live_selftests)
>   selftest(reset, intel_reset_live_selftests)
> 

* Re: [PATCH] drm/i915: Unshare the idle-barrier from other kernel requests
  2019-07-25  9:38 ` Chris Wilson
@ 2019-07-25 10:26   ` Lionel Landwerlin
  2019-07-25 13:19   ` Tvrtko Ursulin
  1 sibling, 0 replies; 8+ messages in thread
From: Lionel Landwerlin @ 2019-07-25 10:26 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On 25/07/2019 12:38, Chris Wilson wrote:
> Under some circumstances (see intel_context_prepare_remote_request), we
> may use a request along a kernel context to modify the logical state of
> another. To keep the target context in place while the request executes,
> we take an active reference on it using the kernel timeline. This is the
> same timeline as we use for the idle-barrier, and so we end up reusing
> the same active node. Except that the idle barrier is special and cannot
> be reused in this manner! Give the idle-barrier a reserved timeline
> index (0) so that it will always be unique (give or take we may issue
> multiple idle barriers across multiple engines).
>
> Reported-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Fixes: ce476c80b8bf ("drm/i915: Keep contexts pinned until after the next kernel context switch")
> Fixes: a9877da2d629 ("drm/i915/oa: Reconfigure contexts on the fly")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>


Not hugely stress-tested, but it seems to solve the problem I'm seeing:


Tested-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>


Thanks!


> ---
>   drivers/gpu/drm/i915/gt/intel_context.c       |  42 ++-
>   drivers/gpu/drm/i915/gt/intel_context.h       |  13 +-
>   drivers/gpu/drm/i915/gt/selftest_context.c    | 321 ++++++++++++++++++
>   drivers/gpu/drm/i915/i915_active.c            |  67 +++-
>   .../drm/i915/selftests/i915_live_selftests.h  |   3 +-
>   5 files changed, 408 insertions(+), 38 deletions(-)
>   create mode 100644 drivers/gpu/drm/i915/gt/selftest_context.c
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> index 9292b6ca5e9c..bc20161bab9f 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.c
> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> @@ -162,18 +162,8 @@ static int __intel_context_active(struct i915_active *active)
>   	if (err)
>   		goto err_ring;
>   
> -	/* Preallocate tracking nodes */
> -	if (!i915_gem_context_is_kernel(ce->gem_context)) {
> -		err = i915_active_acquire_preallocate_barrier(&ce->active,
> -							      ce->engine);
> -		if (err)
> -			goto err_state;
> -	}
> -
>   	return 0;
>   
> -err_state:
> -	__context_unpin_state(ce->state);
>   err_ring:
>   	intel_ring_unpin(ce->ring);
>   err_put:
> @@ -181,6 +171,34 @@ static int __intel_context_active(struct i915_active *active)
>   	return err;
>   }
>   
> +int intel_context_active_acquire(struct intel_context *ce)
> +{
> +	int err;
> +
> +	err = i915_active_acquire(&ce->active);
> +	if (err)
> +		return err;
> +
> +	/* Preallocate tracking nodes */
> +	if (ce->state && !i915_gem_context_is_kernel(ce->gem_context)) {
> +		err = i915_active_acquire_preallocate_barrier(&ce->active,
> +							      ce->engine);
> +		if (err) {
> +			i915_active_release(&ce->active);
> +			return err;
> +		}
> +	}
> +
> +	return 0;
> +}
> +
> +void intel_context_active_release(struct intel_context *ce)
> +{
> +	/* Nodes preallocated in intel_context_active() */
> +	i915_active_acquire_barrier(&ce->active);
> +	i915_active_release(&ce->active);
> +}
> +
>   void
>   intel_context_init(struct intel_context *ce,
>   		   struct i915_gem_context *ctx,
> @@ -284,3 +302,7 @@ struct i915_request *intel_context_create_request(struct intel_context *ce)
>   
>   	return rq;
>   }
> +
> +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> +#include "selftest_context.c"
> +#endif
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
> index 23c7e4c0ce7c..07f9924de48f 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context.h
> @@ -104,17 +104,8 @@ static inline void intel_context_exit(struct intel_context *ce)
>   		ce->ops->exit(ce);
>   }
>   
> -static inline int intel_context_active_acquire(struct intel_context *ce)
> -{
> -	return i915_active_acquire(&ce->active);
> -}
> -
> -static inline void intel_context_active_release(struct intel_context *ce)
> -{
> -	/* Nodes preallocated in intel_context_active() */
> -	i915_active_acquire_barrier(&ce->active);
> -	i915_active_release(&ce->active);
> -}
> +int intel_context_active_acquire(struct intel_context *ce);
> +void intel_context_active_release(struct intel_context *ce);
>   
>   static inline struct intel_context *intel_context_get(struct intel_context *ce)
>   {
> diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
> new file mode 100644
> index 000000000000..4d251faafcc0
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gt/selftest_context.c
> @@ -0,0 +1,321 @@
> +/*
> + * SPDX-License-Identifier: GPL-2.0
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#include "i915_selftest.h"
> +#include "intel_gt.h"
> +
> +#include "gem/selftests/mock_context.h"
> +#include "selftests/igt_flush_test.h"
> +#include "selftests/mock_drm.h"
> +
> +static int request_sync(struct i915_request *rq)
> +{
> +	long timeout;
> +	int err = 0;
> +
> +	i915_request_get(rq);
> +
> +	i915_request_add(rq);
> +	timeout = i915_request_wait(rq, 0, HZ / 10);
> +	if (timeout < 0)
> +		err = timeout;
> +	else
> +		i915_request_retire_upto(rq);
> +
> +	i915_request_put(rq);
> +
> +	return err;
> +}
> +
> +static int context_sync(struct intel_context *ce)
> +{
> +	struct intel_timeline *tl = ce->ring->timeline;
> +	int err = 0;
> +
> +	do {
> +		struct i915_request *rq;
> +		long timeout;
> +
> +		rcu_read_lock();
> +		rq = rcu_dereference(tl->last_request.request);
> +		if (rq)
> +			rq = i915_request_get_rcu(rq);
> +		rcu_read_unlock();
> +		if (!rq)
> +			break;
> +
> +		timeout = i915_request_wait(rq, 0, HZ / 10);
> +		if (timeout < 0)
> +			err = timeout;
> +		else
> +			i915_request_retire_upto(rq);
> +
> +		i915_request_put(rq);
> +	} while (!err);
> +
> +	return err;
> +}
> +
> +static int __live_active_context(struct intel_engine_cs *engine,
> +				 struct i915_gem_context *fixme)
> +{
> +	struct intel_context *ce;
> +	int pass;
> +	int err;
> +
> +	/*
> +	 * We keep active contexts alive until after a subsequent context
> +	 * switch as the final write from the context-save will be after
> +	 * we retire the final request. We track when we unpin the context,
> +	 * under the presumption that the final pin is from the last request,
> +	 * and instead of immediately unpinning the context, we add a task
> +	 * to unpin the context from the next idle-barrier.
> +	 *
> +	 * This test makes sure that the context is kept alive until a
> +	 * subsequent idle-barrier (emitted when the engine wakeref hits 0
> +	 * with no more outstanding requests).
> +	 */
> +
> +	if (intel_engine_pm_is_awake(engine)) {
> +		pr_err("%s is awake before starting %s!\n",
> +		       engine->name, __func__);
> +		return -EINVAL;
> +	}
> +
> +	ce = intel_context_create(fixme, engine);
> +	if (!ce)
> +		return -ENOMEM;
> +
> +	for (pass = 0; pass <= 2; pass++) {
> +		struct i915_request *rq;
> +
> +		rq = intel_context_create_request(ce);
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			goto err;
> +		}
> +
> +		err = request_sync(rq);
> +		if (err)
> +			goto err;
> +
> +		/* Context will be kept active until after an idle-barrier. */
> +		if (i915_active_is_idle(&ce->active)) {
> +			pr_err("context is not active; expected idle-barrier (pass %d)\n", pass);
> +			err = -EINVAL;
> +			goto err;
> +		}
> +
> +		if (!intel_engine_pm_is_awake(engine)) {
> +			pr_err("%s is asleep before idle-barrier %s!\n",
> +			       engine->name, __func__);
> +			err = -EINVAL;
> +			goto err;
> +		}
> +	}
> +
> +	/* Now make sure our idle-barriers are flushed */
> +	err = context_sync(engine->kernel_context);
> +	if (err)
> +		goto err;
> +
> +	if (!i915_active_is_idle(&ce->active)) {
> +		pr_err("context is still active!");
> +		err = -EINVAL;
> +	}
> +
> +	if (intel_engine_pm_is_awake(engine)) {
> +		struct drm_printer p = drm_debug_printer(__func__);
> +
> +		intel_engine_dump(engine, &p,
> +				  "%s is still awake after idle-barriers\n",
> +				  engine->name);
> +		GEM_TRACE_DUMP();
> +
> +		err = -EINVAL;
> +		goto err;
> +	}
> +
> +err:
> +	intel_context_put(ce);
> +	return err;
> +}
> +
> +static int live_active_context(void *arg)
> +{
> +	struct intel_gt *gt = arg;
> +	struct intel_engine_cs *engine;
> +	struct i915_gem_context *fixme;
> +	enum intel_engine_id id;
> +	struct drm_file *file;
> +	int err = 0;
> +
> +	file = mock_file(gt->i915);
> +	if (IS_ERR(file))
> +		return PTR_ERR(file);
> +
> +	mutex_lock(&gt->i915->drm.struct_mutex);
> +
> +	fixme = live_context(gt->i915, file);
> +	if (!fixme) {
> +		err = -ENOMEM;
> +		goto unlock;
> +	}
> +
> +	for_each_engine(engine, gt->i915, id) {
> +		err = __live_active_context(engine, fixme);
> +		if (err)
> +			break;
> +
> +		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
> +		if (err)
> +			break;
> +	}
> +
> +unlock:
> +	mutex_unlock(&gt->i915->drm.struct_mutex);
> +	mock_file_free(gt->i915, file);
> +	return err;
> +}
> +
> +static int __live_remote_context(struct intel_engine_cs *engine,
> +				 struct i915_gem_context *fixme)
> +{
> +	struct intel_context *local, *remote;
> +	struct i915_request *rq;
> +	int pass;
> +	int err;
> +
> +	/*
> +	 * Check that our idle barriers do not interfere with normal
> +	 * activity tracking. In particular, check that operating
> +	 * on the context image remotely (intel_context_prepare_remote_request)
> +	 * which inserts foreign fences into intel_context.active are not
> +	 * clobbered.
> +	 */
> +
> +	remote = intel_context_create(fixme, engine);
> +	if (!remote)
> +		return -ENOMEM;
> +
> +	local = intel_context_create(fixme, engine);
> +	if (!local) {
> +		err = -ENOMEM;
> +		goto err_remote;
> +	}
> +
> +	for (pass = 0; pass <= 2; pass++) {
> +		err = intel_context_pin(remote);
> +		if (err)
> +			goto err_local;
> +
> +		rq = intel_context_create_request(local);
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			goto err_unpin;
> +		}
> +
> +		err = intel_context_prepare_remote_request(remote, rq);
> +		if (err) {
> +			i915_request_add(rq);
> +			goto err_unpin;
> +		}
> +
> +		err = request_sync(rq);
> +		if (err)
> +			goto err_unpin;
> +
> +		intel_context_unpin(remote);
> +		err = intel_context_pin(remote);
> +		if (err)
> +			goto err_local;
> +
> +		rq = i915_request_create(engine->kernel_context);
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			goto err_unpin;
> +		}
> +
> +		err = intel_context_prepare_remote_request(remote, rq);
> +		if (err) {
> +			i915_request_add(rq);
> +			goto err_unpin;
> +		}
> +
> +		err = request_sync(rq);
> +		if (err)
> +			goto err_unpin;
> +
> +		intel_context_unpin(remote);
> +
> +		if (i915_active_is_idle(&remote->active)) {
> +			pr_err("remote context is not active; expected idle-barrier (pass %d)\n", pass);
> +			err = -EINVAL;
> +			goto err_local;
> +		}
> +	}
> +
> +	goto err_local;
> +
> +err_unpin:
> +	intel_context_unpin(remote);
> +err_local:
> +	intel_context_put(local);
> +err_remote:
> +	intel_context_put(remote);
> +	return err;
> +}
> +
> +static int live_remote_context(void *arg)
> +{
> +	struct intel_gt *gt = arg;
> +	struct intel_engine_cs *engine;
> +	struct i915_gem_context *fixme;
> +	enum intel_engine_id id;
> +	struct drm_file *file;
> +	int err = 0;
> +
> +	file = mock_file(gt->i915);
> +	if (IS_ERR(file))
> +		return PTR_ERR(file);
> +
> +	mutex_lock(&gt->i915->drm.struct_mutex);
> +
> +	fixme = live_context(gt->i915, file);
> +	if (!fixme) {
> +		err = -ENOMEM;
> +		goto unlock;
> +	}
> +
> +	for_each_engine(engine, gt->i915, id) {
> +		err = __live_remote_context(engine, fixme);
> +		if (err)
> +			break;
> +
> +		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
> +		if (err)
> +			break;
> +	}
> +
> +unlock:
> +	mutex_unlock(&gt->i915->drm.struct_mutex);
> +	mock_file_free(gt->i915, file);
> +	return err;
> +}
> +
> +int intel_context_live_selftests(struct drm_i915_private *i915)
> +{
> +	static const struct i915_subtest tests[] = {
> +		SUBTEST(live_active_context),
> +		SUBTEST(live_remote_context),
> +	};
> +	struct intel_gt *gt = &i915->gt;
> +
> +	if (intel_gt_is_wedged(gt))
> +		return 0;
> +
> +	return intel_gt_live_subtests(tests, gt);
> +}
> diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
> index 13f304a29fc8..e04afb519264 100644
> --- a/drivers/gpu/drm/i915/i915_active.c
> +++ b/drivers/gpu/drm/i915/i915_active.c
> @@ -184,6 +184,7 @@ active_instance(struct i915_active *ref, u64 idx)
>   	ref->cache = node;
>   	mutex_unlock(&ref->mutex);
>   
> +	BUILD_BUG_ON(offsetof(typeof(*node), base));
>   	return &node->base;
>   }
>   
> @@ -212,6 +213,8 @@ int i915_active_ref(struct i915_active *ref,
>   	struct i915_active_request *active;
>   	int err;
>   
> +	GEM_BUG_ON(!timeline); /* reserved for idle-barrier */
> +
>   	/* Prevent reaping in case we malloc/wait while building the tree */
>   	err = i915_active_acquire(ref);
>   	if (err)
> @@ -222,6 +225,7 @@ int i915_active_ref(struct i915_active *ref,
>   		err = -ENOMEM;
>   		goto out;
>   	}
> +	GEM_BUG_ON(IS_ERR(active->request));
>   
>   	if (!i915_active_request_isset(active))
>   		atomic_inc(&ref->count);
> @@ -342,6 +346,34 @@ void i915_active_fini(struct i915_active *ref)
>   }
>   #endif
>   
> +static struct active_node *idle_barrier(struct i915_active *ref)
> +{
> +	struct active_node *idle = NULL;
> +	struct rb_node *rb;
> +
> +	if (RB_EMPTY_ROOT(&ref->tree))
> +		return NULL;
> +
> +	mutex_lock(&ref->mutex);
> +	for (rb = rb_first(&ref->tree); rb; rb = rb_next(rb)) {
> +		struct active_node *node;
> +
> +		node = rb_entry(rb, typeof(*node), node);
> +		if (node->timeline)
> +			break;
> +
> +		if (!i915_active_request_isset(&node->base)) {
> +			GEM_BUG_ON(!list_empty(&node->base.link));
> +			rb_erase(rb, &ref->tree);
> +			idle = node;
> +			break;
> +		}
> +	}
> +	mutex_unlock(&ref->mutex);
> +
> +	return idle;
> +}
> +
>   int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
>   					    struct intel_engine_cs *engine)
>   {
> @@ -352,22 +384,29 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
>   
>   	GEM_BUG_ON(!engine->mask);
>   	for_each_engine_masked(engine, i915, engine->mask, tmp) {
> -		struct intel_context *kctx = engine->kernel_context;
>   		struct active_node *node;
>   
> -		node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
> -		if (unlikely(!node)) {
> -			err = -ENOMEM;
> -			goto unwind;
> +		node = idle_barrier(ref);
> +		if (!node) {
> +			node = kmem_cache_alloc(global.slab_cache,
> +						GFP_KERNEL |
> +						__GFP_RETRY_MAYFAIL |
> +						__GFP_NOWARN);
> +			if (unlikely(!node)) {
> +				err = -ENOMEM;
> +				goto unwind;
> +			}
> +
> +			node->ref = ref;
> +			node->timeline = 0;
> +			node->base.retire = node_retire;
>   		}
>   
> -		i915_active_request_init(&node->base,
> -					 (void *)engine, node_retire);
> -		node->timeline = kctx->ring->timeline->fence_context;
> -		node->ref = ref;
> +		intel_engine_pm_get(engine);
> +
> +		RCU_INIT_POINTER(node->base.request, (void *)engine);
>   		atomic_inc(&ref->count);
>   
> -		intel_engine_pm_get(engine);
>   		llist_add((struct llist_node *)&node->base.link,
>   			  &ref->barriers);
>   	}
> @@ -402,6 +441,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
>   
>   		node = container_of((struct list_head *)pos,
>   				    typeof(*node), base.link);
> +		GEM_BUG_ON(node->timeline);
>   
>   		engine = (void *)rcu_access_pointer(node->base.request);
>   		RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
> @@ -410,12 +450,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
>   		p = &ref->tree.rb_node;
>   		while (*p) {
>   			parent = *p;
> -			if (rb_entry(parent,
> -				     struct active_node,
> -				     node)->timeline < node->timeline)
> -				p = &parent->rb_right;
> -			else
> -				p = &parent->rb_left;
> +			p = &parent->rb_left;
>   		}
>   		rb_link_node(&node->node, parent, p);
>   		rb_insert_color(&node->node, &ref->tree);
> diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> index 2b31a4ee0b4c..a841d3f9bedc 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
> @@ -15,6 +15,7 @@ selftest(workarounds, intel_workarounds_live_selftests)
>   selftest(timelines, intel_timeline_live_selftests)
>   selftest(requests, i915_request_live_selftests)
>   selftest(active, i915_active_live_selftests)
> +selftest(gt_contexts, intel_context_live_selftests)
>   selftest(objects, i915_gem_object_live_selftests)
>   selftest(mman, i915_gem_mman_live_selftests)
>   selftest(dmabuf, i915_gem_dmabuf_live_selftests)
> @@ -24,7 +25,7 @@ selftest(gtt, i915_gem_gtt_live_selftests)
>   selftest(gem, i915_gem_live_selftests)
>   selftest(evict, i915_gem_evict_live_selftests)
>   selftest(hugepages, i915_gem_huge_page_live_selftests)
> -selftest(contexts, i915_gem_context_live_selftests)
> +selftest(gem_contexts, i915_gem_context_live_selftests)
>   selftest(blt, i915_gem_object_blt_live_selftests)
>   selftest(client, i915_gem_client_blt_live_selftests)
>   selftest(reset, intel_reset_live_selftests)



* [PATCH] drm/i915: Unshare the idle-barrier from other kernel requests
  2019-07-25  7:43 [PATCH] drm/i915: Unshare the idle-barrier from other kernel requests Chris Wilson
@ 2019-07-25  9:38 ` Chris Wilson
  2019-07-25 10:26   ` Lionel Landwerlin
  2019-07-25 13:19   ` Tvrtko Ursulin
  0 siblings, 2 replies; 8+ messages in thread
From: Chris Wilson @ 2019-07-25  9:38 UTC (permalink / raw)
  To: intel-gfx

Under some circumstances (see intel_context_prepare_remote_request), we
may use a request on the kernel context to modify the logical state of
another context. To keep the target context in place while the request
executes, we take an active reference on it using the kernel timeline.
This is the same timeline as we use for the idle-barrier, and so we end
up reusing the same active node. Except that the idle-barrier is special
and cannot be reused in this manner! Give the idle-barrier a reserved
timeline index (0) so that it will always be unique (give or take that
we may issue multiple idle-barriers across multiple engines).
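
For illustration, a minimal userspace model of the reserved-index rule;
the names here (barrier_node, lookup_node) are made up for the sketch
and are not the i915 symbols:

	#include <assert.h>
	#include <stdio.h>

	struct barrier_node {
		unsigned long long timeline; /* 0 reserved for idle-barrier */
	};

	/*
	 * A lookup on behalf of a real timeline must never return the
	 * idle-barrier; this mirrors the GEM_BUG_ON(!timeline) added to
	 * i915_active_ref() below.
	 */
	static struct barrier_node *
	lookup_node(struct barrier_node *nodes, int count,
		    unsigned long long timeline)
	{
		int i;

		assert(timeline != 0); /* index 0 can never be looked up */
		for (i = 0; i < count; i++)
			if (nodes[i].timeline == timeline)
				return &nodes[i];
		return NULL;
	}

	int main(void)
	{
		struct barrier_node nodes[] = {
			{ .timeline = 0 },  /* the idle-barrier */
			{ .timeline = 42 }, /* a regular active reference */
		};

		/* Timeline 42 finds its own node, never the barrier. */
		printf("found %llu\n", lookup_node(nodes, 2, 42)->timeline);
		return 0;
	}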

Reported-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Fixes: ce476c80b8bf ("drm/i915: Keep contexts pinned until after the next kernel context switch")
Fixes: a9877da2d629 ("drm/i915/oa: Reconfigure contexts on the fly")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_context.c       |  42 ++-
 drivers/gpu/drm/i915/gt/intel_context.h       |  13 +-
 drivers/gpu/drm/i915/gt/selftest_context.c    | 321 ++++++++++++++++++
 drivers/gpu/drm/i915/i915_active.c            |  67 +++-
 .../drm/i915/selftests/i915_live_selftests.h  |   3 +-
 5 files changed, 408 insertions(+), 38 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/selftest_context.c

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 9292b6ca5e9c..bc20161bab9f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -162,18 +162,8 @@ static int __intel_context_active(struct i915_active *active)
 	if (err)
 		goto err_ring;
 
-	/* Preallocate tracking nodes */
-	if (!i915_gem_context_is_kernel(ce->gem_context)) {
-		err = i915_active_acquire_preallocate_barrier(&ce->active,
-							      ce->engine);
-		if (err)
-			goto err_state;
-	}
-
 	return 0;
 
-err_state:
-	__context_unpin_state(ce->state);
 err_ring:
 	intel_ring_unpin(ce->ring);
 err_put:
@@ -181,6 +171,34 @@ static int __intel_context_active(struct i915_active *active)
 	return err;
 }
 
+int intel_context_active_acquire(struct intel_context *ce)
+{
+	int err;
+
+	err = i915_active_acquire(&ce->active);
+	if (err)
+		return err;
+
+	/* Preallocate tracking nodes */
+	if (ce->state && !i915_gem_context_is_kernel(ce->gem_context)) {
+		err = i915_active_acquire_preallocate_barrier(&ce->active,
+							      ce->engine);
+		if (err) {
+			i915_active_release(&ce->active);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+void intel_context_active_release(struct intel_context *ce)
+{
+	/* Nodes preallocated in intel_context_active() */
+	i915_active_acquire_barrier(&ce->active);
+	i915_active_release(&ce->active);
+}
+
 void
 intel_context_init(struct intel_context *ce,
 		   struct i915_gem_context *ctx,
@@ -284,3 +302,7 @@ struct i915_request *intel_context_create_request(struct intel_context *ce)
 
 	return rq;
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_context.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 23c7e4c0ce7c..07f9924de48f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -104,17 +104,8 @@ static inline void intel_context_exit(struct intel_context *ce)
 		ce->ops->exit(ce);
 }
 
-static inline int intel_context_active_acquire(struct intel_context *ce)
-{
-	return i915_active_acquire(&ce->active);
-}
-
-static inline void intel_context_active_release(struct intel_context *ce)
-{
-	/* Nodes preallocated in intel_context_active() */
-	i915_active_acquire_barrier(&ce->active);
-	i915_active_release(&ce->active);
-}
+int intel_context_active_acquire(struct intel_context *ce);
+void intel_context_active_release(struct intel_context *ce);
 
 static inline struct intel_context *intel_context_get(struct intel_context *ce)
 {
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
new file mode 100644
index 000000000000..4d251faafcc0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -0,0 +1,321 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "intel_gt.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_drm.h"
+
+static int request_sync(struct i915_request *rq)
+{
+	long timeout;
+	int err = 0;
+
+	i915_request_get(rq);
+
+	i915_request_add(rq);
+	timeout = i915_request_wait(rq, 0, HZ / 10);
+	if (timeout < 0)
+		err = timeout;
+	else
+		i915_request_retire_upto(rq);
+
+	i915_request_put(rq);
+
+	return err;
+}
+
+static int context_sync(struct intel_context *ce)
+{
+	struct intel_timeline *tl = ce->ring->timeline;
+	int err = 0;
+
+	do {
+		struct i915_request *rq;
+		long timeout;
+
+		rcu_read_lock();
+		rq = rcu_dereference(tl->last_request.request);
+		if (rq)
+			rq = i915_request_get_rcu(rq);
+		rcu_read_unlock();
+		if (!rq)
+			break;
+
+		timeout = i915_request_wait(rq, 0, HZ / 10);
+		if (timeout < 0)
+			err = timeout;
+		else
+			i915_request_retire_upto(rq);
+
+		i915_request_put(rq);
+	} while (!err);
+
+	return err;
+}
+
+static int __live_active_context(struct intel_engine_cs *engine,
+				 struct i915_gem_context *fixme)
+{
+	struct intel_context *ce;
+	int pass;
+	int err;
+
+	/*
+	 * We keep active contexts alive until after a subsequent context
+	 * switch as the final write from the context-save will be after
+	 * we retire the final request. We track when we unpin the context,
+	 * under the presumption that the final pin is from the last request,
+	 * and instead of immediately unpinning the context, we add a task
+	 * to unpin the context from the next idle-barrier.
+	 *
+	 * This test makes sure that the context is kept alive until a
+	 * subsequent idle-barrier (emitted when the engine wakeref hits 0
+	 * with no more outstanding requests).
+	 */
+
+	if (intel_engine_pm_is_awake(engine)) {
+		pr_err("%s is awake before starting %s!\n",
+		       engine->name, __func__);
+		return -EINVAL;
+	}
+
+	ce = intel_context_create(fixme, engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	for (pass = 0; pass <= 2; pass++) {
+		struct i915_request *rq;
+
+		rq = intel_context_create_request(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err;
+		}
+
+		err = request_sync(rq);
+		if (err)
+			goto err;
+
+		/* Context will be kept active until after an idle-barrier. */
+		if (i915_active_is_idle(&ce->active)) {
+			pr_err("context is not active; expected idle-barrier (pass %d)\n", pass);
+			err = -EINVAL;
+			goto err;
+		}
+
+		if (!intel_engine_pm_is_awake(engine)) {
+			pr_err("%s is asleep before idle-barrier %s!\n",
+			       engine->name, __func__);
+			err = -EINVAL;
+			goto err;
+		}
+	}
+
+	/* Now make sure our idle-barriers are flushed */
+	err = context_sync(engine->kernel_context);
+	if (err)
+		goto err;
+
+	if (!i915_active_is_idle(&ce->active)) {
+		pr_err("context is still active!");
+		err = -EINVAL;
+	}
+
+	if (intel_engine_pm_is_awake(engine)) {
+		struct drm_printer p = drm_debug_printer(__func__);
+
+		intel_engine_dump(engine, &p,
+				  "%s is still awake after idle-barriers\n",
+				  engine->name);
+		GEM_TRACE_DUMP();
+
+		err = -EINVAL;
+		goto err;
+	}
+
+err:
+	intel_context_put(ce);
+	return err;
+}
+
+static int live_active_context(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	struct i915_gem_context *fixme;
+	enum intel_engine_id id;
+	struct drm_file *file;
+	int err = 0;
+
+	file = mock_file(gt->i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&gt->i915->drm.struct_mutex);
+
+	fixme = live_context(gt->i915, file);
+	if (IS_ERR(fixme)) {
+		err = PTR_ERR(fixme);
+		goto unlock;
+	}
+
+	for_each_engine(engine, gt->i915, id) {
+		err = __live_active_context(engine, fixme);
+		if (err)
+			break;
+
+		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+		if (err)
+			break;
+	}
+
+unlock:
+	mutex_unlock(&gt->i915->drm.struct_mutex);
+	mock_file_free(gt->i915, file);
+	return err;
+}
+
+static int __live_remote_context(struct intel_engine_cs *engine,
+				 struct i915_gem_context *fixme)
+{
+	struct intel_context *local, *remote;
+	struct i915_request *rq;
+	int pass;
+	int err;
+
+	/*
+	 * Check that our idle barriers do not interfere with normal
+	 * activity tracking. In particular, check that the foreign
+	 * fences inserted into intel_context.active by operating on the
+	 * context image remotely (intel_context_prepare_remote_request)
+	 * are not clobbered.
+	 */
+
+	remote = intel_context_create(fixme, engine);
+	if (IS_ERR(remote))
+		return PTR_ERR(remote);
+
+	local = intel_context_create(fixme, engine);
+	if (IS_ERR(local)) {
+		err = PTR_ERR(local);
+		goto err_remote;
+	}
+
+	for (pass = 0; pass <= 2; pass++) {
+		err = intel_context_pin(remote);
+		if (err)
+			goto err_local;
+
+		rq = intel_context_create_request(local);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_unpin;
+		}
+
+		err = intel_context_prepare_remote_request(remote, rq);
+		if (err) {
+			i915_request_add(rq);
+			goto err_unpin;
+		}
+
+		err = request_sync(rq);
+		if (err)
+			goto err_unpin;
+
+		intel_context_unpin(remote);
+		err = intel_context_pin(remote);
+		if (err)
+			goto err_local;
+
+		rq = i915_request_create(engine->kernel_context);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_unpin;
+		}
+
+		err = intel_context_prepare_remote_request(remote, rq);
+		if (err) {
+			i915_request_add(rq);
+			goto err_unpin;
+		}
+
+		err = request_sync(rq);
+		if (err)
+			goto err_unpin;
+
+		intel_context_unpin(remote);
+
+		if (i915_active_is_idle(&remote->active)) {
+			pr_err("remote context is not active; expected idle-barrier (pass %d)\n", pass);
+			err = -EINVAL;
+			goto err_local;
+		}
+	}
+
+	goto err_local;
+
+err_unpin:
+	intel_context_unpin(remote);
+err_local:
+	intel_context_put(local);
+err_remote:
+	intel_context_put(remote);
+	return err;
+}
+
+static int live_remote_context(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	struct i915_gem_context *fixme;
+	enum intel_engine_id id;
+	struct drm_file *file;
+	int err = 0;
+
+	file = mock_file(gt->i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&gt->i915->drm.struct_mutex);
+
+	fixme = live_context(gt->i915, file);
+	if (IS_ERR(fixme)) {
+		err = PTR_ERR(fixme);
+		goto unlock;
+	}
+
+	for_each_engine(engine, gt->i915, id) {
+		err = __live_remote_context(engine, fixme);
+		if (err)
+			break;
+
+		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+		if (err)
+			break;
+	}
+
+unlock:
+	mutex_unlock(&gt->i915->drm.struct_mutex);
+	mock_file_free(gt->i915, file);
+	return err;
+}
+
+int intel_context_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(live_active_context),
+		SUBTEST(live_remote_context),
+	};
+	struct intel_gt *gt = &i915->gt;
+
+	if (intel_gt_is_wedged(gt))
+		return 0;
+
+	return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 13f304a29fc8..e04afb519264 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -184,6 +184,7 @@ active_instance(struct i915_active *ref, u64 idx)
 	ref->cache = node;
 	mutex_unlock(&ref->mutex);
 
+	BUILD_BUG_ON(offsetof(typeof(*node), base));
 	return &node->base;
 }
 
@@ -212,6 +213,8 @@ int i915_active_ref(struct i915_active *ref,
 	struct i915_active_request *active;
 	int err;
 
+	GEM_BUG_ON(!timeline); /* reserved for idle-barrier */
+
 	/* Prevent reaping in case we malloc/wait while building the tree */
 	err = i915_active_acquire(ref);
 	if (err)
@@ -222,6 +225,7 @@ int i915_active_ref(struct i915_active *ref,
 		err = -ENOMEM;
 		goto out;
 	}
+	GEM_BUG_ON(IS_ERR(active->request));
 
 	if (!i915_active_request_isset(active))
 		atomic_inc(&ref->count);
@@ -342,6 +346,34 @@ void i915_active_fini(struct i915_active *ref)
 }
 #endif
 
+static struct active_node *idle_barrier(struct i915_active *ref)
+{
+	struct active_node *idle = NULL;
+	struct rb_node *rb;
+
+	if (RB_EMPTY_ROOT(&ref->tree))
+		return NULL;
+
+	mutex_lock(&ref->mutex);
+	for (rb = rb_first(&ref->tree); rb; rb = rb_next(rb)) {
+		struct active_node *node;
+
+		node = rb_entry(rb, typeof(*node), node);
+		if (node->timeline)
+			break;
+
+		if (!i915_active_request_isset(&node->base)) {
+			GEM_BUG_ON(!list_empty(&node->base.link));
+			rb_erase(rb, &ref->tree);
+			idle = node;
+			break;
+		}
+	}
+	mutex_unlock(&ref->mutex);
+
+	return idle;
+}
+
 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 					    struct intel_engine_cs *engine)
 {
@@ -352,22 +384,29 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 
 	GEM_BUG_ON(!engine->mask);
 	for_each_engine_masked(engine, i915, engine->mask, tmp) {
-		struct intel_context *kctx = engine->kernel_context;
 		struct active_node *node;
 
-		node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
-		if (unlikely(!node)) {
-			err = -ENOMEM;
-			goto unwind;
+		node = idle_barrier(ref);
+		if (!node) {
+			node = kmem_cache_alloc(global.slab_cache,
+						GFP_KERNEL |
+						__GFP_RETRY_MAYFAIL |
+						__GFP_NOWARN);
+			if (unlikely(!node)) {
+				err = -ENOMEM;
+				goto unwind;
+			}
+
+			node->ref = ref;
+			node->timeline = 0;
+			node->base.retire = node_retire;
 		}
 
-		i915_active_request_init(&node->base,
-					 (void *)engine, node_retire);
-		node->timeline = kctx->ring->timeline->fence_context;
-		node->ref = ref;
+		intel_engine_pm_get(engine);
+
+		RCU_INIT_POINTER(node->base.request, (void *)engine);
 		atomic_inc(&ref->count);
 
-		intel_engine_pm_get(engine);
 		llist_add((struct llist_node *)&node->base.link,
 			  &ref->barriers);
 	}
@@ -402,6 +441,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 
 		node = container_of((struct list_head *)pos,
 				    typeof(*node), base.link);
+		GEM_BUG_ON(node->timeline);
 
 		engine = (void *)rcu_access_pointer(node->base.request);
 		RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
@@ -410,12 +450,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 		p = &ref->tree.rb_node;
 		while (*p) {
 			parent = *p;
-			if (rb_entry(parent,
-				     struct active_node,
-				     node)->timeline < node->timeline)
-				p = &parent->rb_right;
-			else
-				p = &parent->rb_left;
+			p = &parent->rb_left;
 		}
 		rb_link_node(&node->node, parent, p);
 		rb_insert_color(&node->node, &ref->tree);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 2b31a4ee0b4c..a841d3f9bedc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -15,6 +15,7 @@ selftest(workarounds, intel_workarounds_live_selftests)
 selftest(timelines, intel_timeline_live_selftests)
 selftest(requests, i915_request_live_selftests)
 selftest(active, i915_active_live_selftests)
+selftest(gt_contexts, intel_context_live_selftests)
 selftest(objects, i915_gem_object_live_selftests)
 selftest(mman, i915_gem_mman_live_selftests)
 selftest(dmabuf, i915_gem_dmabuf_live_selftests)
@@ -24,7 +25,7 @@ selftest(gtt, i915_gem_gtt_live_selftests)
 selftest(gem, i915_gem_live_selftests)
 selftest(evict, i915_gem_evict_live_selftests)
 selftest(hugepages, i915_gem_huge_page_live_selftests)
-selftest(contexts, i915_gem_context_live_selftests)
+selftest(gem_contexts, i915_gem_context_live_selftests)
 selftest(blt, i915_gem_object_blt_live_selftests)
 selftest(client, i915_gem_client_blt_live_selftests)
 selftest(reset, intel_reset_live_selftests)
-- 
2.22.0


* [PATCH] drm/i915: Unshare the idle-barrier from other kernel requests
@ 2019-07-25  7:43 Chris Wilson
  2019-07-25  9:38 ` Chris Wilson
  0 siblings, 1 reply; 8+ messages in thread
From: Chris Wilson @ 2019-07-25  7:43 UTC (permalink / raw)
  To: intel-gfx

Under some circumstances (see intel_context_prepare_remote_request), we
may use a request on the kernel context to modify the logical state of
another context. To keep the target context in place while the request
executes, we take an active reference on it using the kernel timeline.
This is the same timeline as we use for the idle-barrier, and so we end
up reusing the same active node. Except that the idle-barrier is special
and cannot be reused in this manner! Give the idle-barrier a reserved
timeline index (0) so that it will always be unique (give or take that
we may issue multiple idle-barriers across multiple engines).
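
Since barriers use the reserved index and the tree is kept ordered by
timeline, a retired idle-barrier always sorts leftmost and reclaiming
one is a single rb_first() peek. A toy model of that check, assuming a
sorted array in place of the rbtree and a bool in place of
i915_active_request_isset():

	#include <stdbool.h>
	#include <stdio.h>

	struct node {
		unsigned long long timeline; /* 0 == idle-barrier */
		bool busy; /* stands in for i915_active_request_isset() */
	};

	/* Reclaim a completed idle-barrier, if one sits at the head. */
	static struct node *reclaim_idle_barrier(struct node *sorted,
						 int count)
	{
		if (count == 0)
			return NULL;
		if (sorted[0].timeline != 0 || sorted[0].busy)
			return NULL; /* real timeline, or still in flight */
		return &sorted[0];
	}

	int main(void)
	{
		struct node tree[] = {
			{ .timeline = 0, .busy = false }, /* retired barrier */
			{ .timeline = 7, .busy = true },
		};

		printf("reusable barrier: %s\n",
		       reclaim_idle_barrier(tree, 2) ? "yes" : "no");
		return 0;
	}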

Reported-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Fixes: ce476c80b8bf ("drm/i915: Keep contexts pinned until after the next kernel context switch")
Fixes: a9877da2d629 ("drm/i915/oa: Reconfigure contexts on the fly")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
Now with a successful testcase!
---
 drivers/gpu/drm/i915/gt/intel_context.c       |  42 ++-
 drivers/gpu/drm/i915/gt/intel_context.h       |  13 +-
 drivers/gpu/drm/i915/gt/selftest_context.c    | 321 ++++++++++++++++++
 drivers/gpu/drm/i915/i915_active.c            |  59 +++-
 .../drm/i915/selftests/i915_live_selftests.h  |   3 +-
 5 files changed, 400 insertions(+), 38 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/selftest_context.c

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 9292b6ca5e9c..bc20161bab9f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -162,18 +162,8 @@ static int __intel_context_active(struct i915_active *active)
 	if (err)
 		goto err_ring;
 
-	/* Preallocate tracking nodes */
-	if (!i915_gem_context_is_kernel(ce->gem_context)) {
-		err = i915_active_acquire_preallocate_barrier(&ce->active,
-							      ce->engine);
-		if (err)
-			goto err_state;
-	}
-
 	return 0;
 
-err_state:
-	__context_unpin_state(ce->state);
 err_ring:
 	intel_ring_unpin(ce->ring);
 err_put:
@@ -181,6 +171,34 @@ static int __intel_context_active(struct i915_active *active)
 	return err;
 }
 
+int intel_context_active_acquire(struct intel_context *ce)
+{
+	int err;
+
+	err = i915_active_acquire(&ce->active);
+	if (err)
+		return err;
+
+	/* Preallocate tracking nodes */
+	if (ce->state && !i915_gem_context_is_kernel(ce->gem_context)) {
+		err = i915_active_acquire_preallocate_barrier(&ce->active,
+							      ce->engine);
+		if (err) {
+			i915_active_release(&ce->active);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+void intel_context_active_release(struct intel_context *ce)
+{
+	/* Nodes preallocated in intel_context_active() */
+	i915_active_acquire_barrier(&ce->active);
+	i915_active_release(&ce->active);
+}
+
 void
 intel_context_init(struct intel_context *ce,
 		   struct i915_gem_context *ctx,
@@ -284,3 +302,7 @@ struct i915_request *intel_context_create_request(struct intel_context *ce)
 
 	return rq;
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_context.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 23c7e4c0ce7c..07f9924de48f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -104,17 +104,8 @@ static inline void intel_context_exit(struct intel_context *ce)
 		ce->ops->exit(ce);
 }
 
-static inline int intel_context_active_acquire(struct intel_context *ce)
-{
-	return i915_active_acquire(&ce->active);
-}
-
-static inline void intel_context_active_release(struct intel_context *ce)
-{
-	/* Nodes preallocated in intel_context_active() */
-	i915_active_acquire_barrier(&ce->active);
-	i915_active_release(&ce->active);
-}
+int intel_context_active_acquire(struct intel_context *ce);
+void intel_context_active_release(struct intel_context *ce);
 
 static inline struct intel_context *intel_context_get(struct intel_context *ce)
 {
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
new file mode 100644
index 000000000000..d92cebadf97d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -0,0 +1,321 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "intel_gt.h"
+
+#include "gem/selftests/mock_context.h"
+#include "selftests/igt_flush_test.h"
+#include "selftests/mock_drm.h"
+
+static int request_sync(struct i915_request *rq)
+{
+	long timeout;
+	int err = 0;
+
+	i915_request_get(rq);
+
+	i915_request_add(rq);
+	timeout = i915_request_wait(rq, 0, HZ / 10);
+	if (timeout < 0)
+		err = timeout;
+	else
+		i915_request_retire_upto(rq);
+
+	i915_request_put(rq);
+
+	return err;
+}
+
+static int context_sync(struct intel_context *ce)
+{
+	struct intel_timeline *tl = ce->ring->timeline;
+	int err = 0;
+
+	do {
+		struct i915_request *rq;
+		long timeout;
+
+		rcu_read_lock();
+		rq = rcu_dereference(tl->last_request.request);
+		if (rq)
+			rq = i915_request_get_rcu(rq);
+		rcu_read_unlock();
+		if (!rq)
+			return 0;
+
+		timeout = i915_request_wait(rq, 0, HZ / 10);
+		if (timeout < 0)
+			err = timeout;
+		else
+			i915_request_retire_upto(rq);
+
+		i915_request_put(rq);
+	} while (!err);
+
+	return err;
+}
+
+static int __live_active_context(struct intel_engine_cs *engine,
+				 struct i915_gem_context *fixme)
+{
+	struct intel_context *ce;
+	int pass;
+	int err;
+
+	/*
+	 * We keep active contexts alive until after a subsequent context
+	 * switch as the final write from the context-save will be after
+	 * we retire the final request. We track when we unpin the context,
+	 * under the presumption that the final pin is from the last request,
+	 * and instead of immediately unpinning the context, we add a task
+	 * to unpin the context from the next idle-barrier.
+	 *
+	 * This test makes sure that the context is kept alive until a
+	 * subsequent idle-barrier (emitted when the engine wakeref hits 0
+	 * with no more outstanding requests).
+	 */
+
+	if (intel_engine_pm_is_awake(engine)) {
+		pr_err("%s is awake before starting %s!\n",
+		       engine->name, __func__);
+		return -EINVAL;
+	}
+
+	ce = intel_context_create(fixme, engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	for (pass = 0; pass <= 2; pass++) {
+		struct i915_request *rq;
+
+		rq = intel_context_create_request(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err;
+		}
+
+		err = request_sync(rq);
+		if (err)
+			goto err;
+
+		/* Context will be kept active until after an idle-barrier. */
+		if (i915_active_is_idle(&ce->active)) {
+			pr_err("context is not active; expected idle-barrier (pass %d)\n", pass);
+			err = -EINVAL;
+			goto err;
+		}
+
+		if (!intel_engine_pm_is_awake(engine)) {
+			pr_err("%s is asleep before idle-barrier %s!\n",
+			       engine->name, __func__);
+			err = -EINVAL;
+			goto err;
+		}
+	}
+
+	/* Now make sure our idle-barriers are flushed */
+	err = context_sync(engine->kernel_context);
+	if (err)
+		goto err;
+
+	if (!i915_active_is_idle(&ce->active)) {
+		pr_err("context is still active!");
+		err = -EINVAL;
+	}
+
+	if (intel_engine_pm_is_awake(engine)) {
+		struct drm_printer p = drm_debug_printer(__func__);
+
+		intel_engine_dump(engine, &p,
+				  "%s is still awake after idle-barriers\n",
+				  engine->name);
+		GEM_TRACE_DUMP();
+
+		err = -EINVAL;
+		goto err;
+	}
+
+err:
+	intel_context_put(ce);
+	return err;
+}
+
+static int live_active_context(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	struct i915_gem_context *fixme;
+	enum intel_engine_id id;
+	struct drm_file *file;
+	int err = 0;
+
+	file = mock_file(gt->i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&gt->i915->drm.struct_mutex);
+
+	fixme = live_context(gt->i915, file);
+	if (IS_ERR(fixme)) {
+		err = PTR_ERR(fixme);
+		goto unlock;
+	}
+
+	for_each_engine(engine, gt->i915, id) {
+		err = __live_active_context(engine, fixme);
+		if (err)
+			break;
+
+		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+		if (err)
+			break;
+	}
+
+unlock:
+	mutex_unlock(&gt->i915->drm.struct_mutex);
+	mock_file_free(gt->i915, file);
+	return err;
+}
+
+static int __live_remote_context(struct intel_engine_cs *engine,
+				 struct i915_gem_context *fixme)
+{
+	struct intel_context *local, *remote;
+	struct i915_request *rq;
+	int pass;
+	int err;
+
+	/*
+	 * Check that our idle barriers do not interfere with normal
+	 * activity tracking. In particular, check that the foreign
+	 * fences inserted into intel_context.active by operating on the
+	 * context image remotely (intel_context_prepare_remote_request)
+	 * are not clobbered.
+	 */
+
+	remote = intel_context_create(fixme, engine);
+	if (IS_ERR(remote))
+		return PTR_ERR(remote);
+
+	local = intel_context_create(fixme, engine);
+	if (IS_ERR(local)) {
+		err = PTR_ERR(local);
+		goto err_remote;
+	}
+
+	for (pass = 0; pass <= 2; pass++) {
+		err = intel_context_pin(remote);
+		if (err)
+			goto err_local;
+
+		rq = intel_context_create_request(local);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_unpin;
+		}
+
+		err = intel_context_prepare_remote_request(remote, rq);
+		if (err) {
+			i915_request_add(rq);
+			goto err_unpin;
+		}
+
+		err = request_sync(rq);
+		if (err)
+			goto err_unpin;
+
+		intel_context_unpin(remote);
+		err = intel_context_pin(remote);
+		if (err)
+			goto err_local;
+
+		rq = i915_request_create(engine->kernel_context);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_unpin;
+		}
+
+		err = intel_context_prepare_remote_request(remote, rq);
+		if (err) {
+			i915_request_add(rq);
+			goto err_unpin;
+		}
+
+		err = request_sync(rq);
+		if (err)
+			goto err_unpin;
+
+		intel_context_unpin(remote);
+
+		if (i915_active_is_idle(&remote->active)) {
+			pr_err("remote context is not active; expected idle-barrier (pass %d)\n", pass);
+			err = -EINVAL;
+			goto err_local;
+		}
+	}
+
+	goto err_local;
+
+err_unpin:
+	intel_context_unpin(remote);
+err_local:
+	intel_context_put(local);
+err_remote:
+	intel_context_put(remote);
+	return err;
+}
+
+static int live_remote_context(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	struct i915_gem_context *fixme;
+	enum intel_engine_id id;
+	struct drm_file *file;
+	int err = 0;
+
+	file = mock_file(gt->i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&gt->i915->drm.struct_mutex);
+
+	fixme = live_context(gt->i915, file);
+	if (IS_ERR(fixme)) {
+		err = PTR_ERR(fixme);
+		goto unlock;
+	}
+
+	for_each_engine(engine, gt->i915, id) {
+		err = __live_remote_context(engine, fixme);
+		if (err)
+			break;
+
+		err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+		if (err)
+			break;
+	}
+
+unlock:
+	mutex_unlock(&gt->i915->drm.struct_mutex);
+	mock_file_free(gt->i915, file);
+	return err;
+}
+
+int intel_context_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(live_active_context),
+		SUBTEST(live_remote_context),
+	};
+	struct intel_gt *gt = &i915->gt;
+
+	if (intel_gt_is_wedged(gt))
+		return 0;
+
+	return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 13f304a29fc8..057b33eb7055 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -184,6 +184,7 @@ active_instance(struct i915_active *ref, u64 idx)
 	ref->cache = node;
 	mutex_unlock(&ref->mutex);
 
+	BUILD_BUG_ON(offsetof(typeof(*node), base));
 	return &node->base;
 }
 
@@ -212,6 +213,8 @@ int i915_active_ref(struct i915_active *ref,
 	struct i915_active_request *active;
 	int err;
 
+	GEM_BUG_ON(!timeline); /* reserved for idle-barrier */
+
 	/* Prevent reaping in case we malloc/wait while building the tree */
 	err = i915_active_acquire(ref);
 	if (err)
@@ -222,6 +225,7 @@ int i915_active_ref(struct i915_active *ref,
 		err = -ENOMEM;
 		goto out;
 	}
+	GEM_BUG_ON(IS_ERR(active->request));
 
 	if (!i915_active_request_isset(active))
 		atomic_inc(&ref->count);
@@ -342,6 +346,26 @@ void i915_active_fini(struct i915_active *ref)
 }
 #endif
 
+static struct active_node *idle_barrier(struct i915_active *ref)
+{
+	struct active_node *node;
+	struct rb_node *rb;
+
+	lockdep_assert_held(&ref->mutex);
+	rb = rb_first(&ref->tree);
+	if (!rb)
+		return NULL;
+
+	node = rb_entry(rb, typeof(*node), node);
+	if (node->timeline || i915_active_request_isset(&node->base))
+		return NULL;
+
+	GEM_BUG_ON(!list_empty(&node->base.link));
+	rb_erase(rb, &ref->tree);
+
+	return node;
+}
+
 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 					    struct intel_engine_cs *engine)
 {
@@ -352,22 +376,29 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 
 	GEM_BUG_ON(!engine->mask);
 	for_each_engine_masked(engine, i915, engine->mask, tmp) {
-		struct intel_context *kctx = engine->kernel_context;
 		struct active_node *node;
 
-		node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
-		if (unlikely(!node)) {
-			err = -ENOMEM;
-			goto unwind;
+		node = idle_barrier(ref);
+		if (!node) {
+			node = kmem_cache_alloc(global.slab_cache,
+						GFP_KERNEL |
+						__GFP_RETRY_MAYFAIL |
+						__GFP_NOWARN);
+			if (unlikely(!node)) {
+				err = -ENOMEM;
+				goto unwind;
+			}
+
+			node->ref = ref;
+			node->timeline = 0;
+			node->base.retire = node_retire;
 		}
 
-		i915_active_request_init(&node->base,
-					 (void *)engine, node_retire);
-		node->timeline = kctx->ring->timeline->fence_context;
-		node->ref = ref;
+		intel_engine_pm_get(engine);
+
+		RCU_INIT_POINTER(node->base.request, (void *)engine);
 		atomic_inc(&ref->count);
 
-		intel_engine_pm_get(engine);
 		llist_add((struct llist_node *)&node->base.link,
 			  &ref->barriers);
 	}
@@ -402,6 +433,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 
 		node = container_of((struct list_head *)pos,
 				    typeof(*node), base.link);
+		GEM_BUG_ON(node->timeline);
 
 		engine = (void *)rcu_access_pointer(node->base.request);
 		RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
@@ -410,12 +442,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 		p = &ref->tree.rb_node;
 		while (*p) {
 			parent = *p;
-			if (rb_entry(parent,
-				     struct active_node,
-				     node)->timeline < node->timeline)
-				p = &parent->rb_right;
-			else
-				p = &parent->rb_left;
+			p = &parent->rb_left;
 		}
 		rb_link_node(&node->node, parent, p);
 		rb_insert_color(&node->node, &ref->tree);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 2b31a4ee0b4c..a841d3f9bedc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -15,6 +15,7 @@ selftest(workarounds, intel_workarounds_live_selftests)
 selftest(timelines, intel_timeline_live_selftests)
 selftest(requests, i915_request_live_selftests)
 selftest(active, i915_active_live_selftests)
+selftest(gt_contexts, intel_context_live_selftests)
 selftest(objects, i915_gem_object_live_selftests)
 selftest(mman, i915_gem_mman_live_selftests)
 selftest(dmabuf, i915_gem_dmabuf_live_selftests)
@@ -24,7 +25,7 @@ selftest(gtt, i915_gem_gtt_live_selftests)
 selftest(gem, i915_gem_live_selftests)
 selftest(evict, i915_gem_evict_live_selftests)
 selftest(hugepages, i915_gem_huge_page_live_selftests)
-selftest(contexts, i915_gem_context_live_selftests)
+selftest(gem_contexts, i915_gem_context_live_selftests)
 selftest(blt, i915_gem_object_blt_live_selftests)
 selftest(client, i915_gem_client_blt_live_selftests)
 selftest(reset, intel_reset_live_selftests)
-- 
2.22.0


Thread overview: 8+ messages
2019-07-24 12:43 [PATCH] drm/i915: Unshare the idle-barrier from other kernel requests Chris Wilson
2019-07-24 12:50 ` Chris Wilson
2019-07-24 16:35 ` ✗ Fi.CI.BAT: failure for drm/i915: Unshare the idle-barrier from other kernel requests (rev2) Patchwork
2019-07-25  7:43 [PATCH] drm/i915: Unshare the idle-barrier from other kernel requests Chris Wilson
2019-07-25  9:38 ` Chris Wilson
2019-07-25 10:26   ` Lionel Landwerlin
2019-07-25 13:19   ` Tvrtko Ursulin
2019-07-25 13:26     ` Chris Wilson
