* [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active
@ 2020-03-06 13:38 Chris Wilson
  2020-03-06 13:38   ` [Intel-gfx] " Chris Wilson
                   ` (19 more replies)
  0 siblings, 20 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

Due to the ordering of cmpxchg()/dma_fence_signal() inside node_retire(),
we must also use xchg() as our primary memory barrier to flush the
outstanding callbacks after the expected completion of the i915_active.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
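Note: a rough illustration of the race being flushed (not part of the
patch; active_looks_idle() is hypothetical). Sampling the fence slot on
its own is not a sufficient barrier, because node_retire() clears the
slot with cmpxchg() before the rest of the signaling/retirement has run:

static bool active_looks_idle(struct i915_active_fence *active)
{
	/*
	 * A NULL slot only tells us that the cmpxchg() in node_retire()
	 * has happened; the callback and its bookkeeping may still be in
	 * flight. Hence the xchg() plus cb.node removal under fence->lock
	 * in active_flush() below.
	 */
	return !rcu_access_pointer(active->fence);
}
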
 drivers/gpu/drm/i915/selftests/i915_active.c | 29 ++++++++++++++------
 1 file changed, 21 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 3a37c67ab6c4..68bbb1580162 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -311,20 +311,33 @@ static void spin_unlock_wait(spinlock_t *lock)
 	spin_unlock_irq(lock);
 }
 
+static void active_flush(struct i915_active *ref,
+			 struct i915_active_fence *active)
+{
+	struct dma_fence *fence;
+
+	fence = xchg(__active_fence_slot(active), NULL);
+	if (!fence)
+		return;
+
+	spin_lock_irq(fence->lock);
+	__list_del_entry(&active->cb.node);
+	spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
+	atomic_dec(&ref->count);
+
+	GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+}
+
 void i915_active_unlock_wait(struct i915_active *ref)
 {
 	if (i915_active_acquire_if_busy(ref)) {
 		struct active_node *it, *n;
 
+		/* Wait for all active callbacks */
 		rcu_read_lock();
-		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-			struct dma_fence *f;
-
-			/* Wait for all active callbacks */
-			f = rcu_dereference(it->base.fence);
-			if (f)
-				spin_unlock_wait(f->lock);
-		}
+		active_flush(ref, &ref->excl);
+		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
+			active_flush(ref, &it->base);
 		rcu_read_unlock();
 
 		i915_active_release(ref);
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [PATCH 02/17] drm/i915/execlists: Enable timeslice on partial virtual engine dequeue
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
@ 2020-03-06 13:38   ` Chris Wilson
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 03/17] drm/i915: Improve the start alignment of bonded pairs Chris Wilson
                     ` (18 subsequent siblings)
  19 siblings, 0 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx; +Cc: mika.kuoppala, tvrtko.ursulin, Chris Wilson, stable

If we stop filling the ELSP due to an incompatible virtual engine
request, check if we should enable the timeslice on behalf of the queue.

This fixes the case where we inspected the last->next element when we
knew that the last element was the last request in the execution queue,
and so decided we did not need to enable timeslicing despite the intent
to do so!

Fixes: 8ee36e048c98 ("drm/i915/execlists: Minimalistic timeslicing")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: <stable@vger.kernel.org> # v5.4+
---
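Note: a simplified sketch of the corrected decision (want_timeslice() is
a hypothetical stand-in for need_timeslice()): the queue priority hint
is considered even when rq is the last request on the engine, so queued
work left behind by an incompatible virtual engine can still earn a
timeslice:

static bool want_timeslice(const struct i915_request *rq,
			   int queue_hint, int next_prio, bool rq_is_last)
{
	int hint = queue_hint;		/* the queue may deserve a slice... */

	if (!rq_is_last)		/* ...as may the next active request */
		hint = max(hint, next_prio);

	return hint >= effective_prio(rq);
}
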
 drivers/gpu/drm/i915/gt/intel_lrc.c | 29 ++++++++++++++++++-----------
 1 file changed, 18 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 13941d1c0a4a..a1d268880cfe 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1757,11 +1757,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
 	if (!intel_engine_has_timeslices(engine))
 		return false;
 
-	if (list_is_last(&rq->sched.link, &engine->active.requests))
-		return false;
-
-	hint = max(rq_prio(list_next_entry(rq, sched.link)),
-		   engine->execlists.queue_priority_hint);
+	hint = engine->execlists.queue_priority_hint;
+	if (!list_is_last(&rq->sched.link, &engine->active.requests))
+		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 
 	return hint >= effective_prio(rq);
 }
@@ -1803,6 +1801,18 @@ static void set_timeslice(struct intel_engine_cs *engine)
 	set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
 }
 
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists *execlists = &engine->execlists;
+
+	execlists->switch_priority_hint = execlists->queue_priority_hint;
+
+	if (timer_pending(&execlists->timer))
+		return;
+
+	set_timer_ms(&execlists->timer, timeslice(engine));
+}
+
 static void record_preemption(struct intel_engine_execlists *execlists)
 {
 	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
@@ -1966,11 +1976,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
 				 */
-				if (!execlists->timer.expires &&
-				    need_timeslice(engine, last))
-					set_timer_ms(&execlists->timer,
-						     timeslice(engine));
-
+				start_timeslice(engine);
 				return;
 			}
 		}
@@ -2005,7 +2011,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 			if (last && !can_merge_rq(last, rq)) {
 				spin_unlock(&ve->base.active.lock);
-				return; /* leave this for another */
+				start_timeslice(engine);
+				return; /* leave this for another sibling */
 			}
 
 			ENGINE_TRACE(engine,
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 03/17] drm/i915: Improve the start alignment of bonded pairs
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
  2020-03-06 13:38   ` [Intel-gfx] " Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-10  9:59   ` Tvrtko Ursulin
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 04/17] drm/i915: Tweak scheduler's kick_submission() Chris Wilson
                   ` (17 subsequent siblings)
  19 siblings, 1 reply; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

Always wait on the start of the signaler request to reduce the problem
of dequeueing the bonded pair too early -- we want both payloads to
start at the same time, with no latency, and yet still allow others to
make full use of the slack in the system. This reduces the amount of
time we spend waiting on the semaphore used to synchronise the start of
the bonded payload.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
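Note: the resulting ordering, condensed from the hunk below (error
handling trimmed):

	/* 1. Queue both at roughly the same time (await the start). */
	err = i915_request_await_start(to, from);

	/*
	 * 2. Do not start the payload until the signaler has started:
	 *    seeing (from->fence.seqno - 1) complete on the signaler's
	 *    timeline means 'from' has reached the active slot.
	 */
	if (intel_engine_has_semaphores(to->engine))
		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
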
 drivers/gpu/drm/i915/i915_request.c | 41 +++++++++++++++++++++++++----
 1 file changed, 36 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 66efd16c4850..db11006b4ac9 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1128,14 +1128,45 @@ __i915_request_await_execution(struct i915_request *to,
 					  &from->fence))
 		return 0;
 
-	/* Ensure both start together [after all semaphores in signal] */
-	if (intel_engine_has_semaphores(to->engine))
-		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
-	else
-		err = i915_request_await_start(to, from);
+	/*
+	 * Wait until the start of this request.
+	 *
+	 * The execution cb fires when we submit the request to HW. But in
+	 * many cases this may be long before the request itself is ready to
+	 * run (consider that we submit 2 requests for the same context, where
+	 * the request of interest is behind an indefinite spinner). So we hook
+	 * up to both to reduce our queues and keep the execution lag minimised
+	 * in the worst case, though we hope that the await_start is elided.
+	 */
+	err = i915_request_await_start(to, from);
 	if (err < 0)
 		return err;
 
+	/*
+	 * Ensure both start together [after all semaphores in signal]
+	 *
+	 * Now that we are queued to the HW at roughly the same time (thanks
+	 * to the execute cb) and are ready to run at roughly the same time
+	 * (thanks to the await start), our signaler may still be indefinitely
+	 * delayed by waiting on a semaphore from a remote engine. If our
+	 * signaler depends on a semaphore, so indirectly do we, and we do not
+	 * want to start our payload until our signaler also starts theirs.
+	 * So we wait.
+	 *
+	 * However, there is also a second condition for which we need to wait
+	 * for the precise start of the signaler. Consider that the signaler
+	 * was submitted in a chain of requests following another context
+	 * (with just an ordinary intra-engine fence dependency between the
+	 * two). In this case the signaler is queued to HW, but not for
+	 * immediate execution, and so we must wait until it reaches the
+	 * active slot.
+	 */
+	if (intel_engine_has_semaphores(to->engine)) {
+		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+		if (err < 0)
+			return err;
+	}
+
 	/* Couple the dependency tree for PI on this exposed to->fence */
 	if (to->engine->schedule) {
 		err = i915_sched_node_add_dependency(&to->sched, &from->sched);
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 04/17] drm/i915: Tweak scheduler's kick_submission()
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
  2020-03-06 13:38   ` [Intel-gfx] " Chris Wilson
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 03/17] drm/i915: Improve the start alignment of bonded pairs Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-10 10:07   ` Tvrtko Ursulin
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 05/17] drm/i915: Wrap i915_active in a simple kreffed struct Chris Wilson
                   ` (16 subsequent siblings)
  19 siblings, 1 reply; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

Skip useless priority bumping when adding a new dependency, but otherwise
prevent tasklet scheduling until we have completed all of the potential
rescheduling.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
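Note: a generic sketch of the bottom-half batching used below
(update_priorities() is a placeholder for the rescheduling walk). Any
tasklet_hi_schedule() issued while bottom halves are disabled is
deferred, so the submission tasklet runs once, after all the priority
updates are in place:

	local_bh_disable();	/* defer softirq/tasklet execution */

	update_priorities();	/* placeholder: may call tasklet_hi_schedule() */

	local_bh_enable();	/* deferred submission tasklet runs here */
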
 drivers/gpu/drm/i915/i915_scheduler.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 52f71e83e088..603cba36d6a4 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -209,6 +209,8 @@ static void kick_submission(struct intel_engine_cs *engine,
 	if (!inflight)
 		goto unlock;
 
+	engine->execlists.queue_priority_hint = prio;
+
 	/*
 	 * If we are already the currently executing context, don't
 	 * bother evaluating if we should preempt ourselves.
@@ -216,7 +218,6 @@ static void kick_submission(struct intel_engine_cs *engine,
 	if (inflight->context == rq->context)
 		goto unlock;
 
-	engine->execlists.queue_priority_hint = prio;
 	if (need_preempt(prio, rq_prio(inflight)))
 		tasklet_hi_schedule(&engine->execlists.tasklet);
 
@@ -463,11 +464,15 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
 	if (!dep)
 		return -ENOMEM;
 
+	local_bh_disable();
+
 	if (!__i915_sched_node_add_dependency(node, signal, dep,
 					      I915_DEPENDENCY_EXTERNAL |
 					      I915_DEPENDENCY_ALLOC))
 		i915_dependency_free(dep);
 
+	local_bh_enable(); /* kick submission tasklet */
+
 	return 0;
 }
 
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 05/17] drm/i915: Wrap i915_active in a simple kreffed struct
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (2 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 04/17] drm/i915: Tweak scheduler's kick_submission() Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-06 14:44   ` Mika Kuoppala
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 06/17] drm/i915: Extend i915_request_await_active to use all timelines Chris Wilson
                   ` (15 subsequent siblings)
  19 siblings, 1 reply; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

For the convenience of callers that just want to use an i915_active to
track a wide array of concurrent timelines, wrap the base i915_active
struct inside a kref. This i915_active will self-destruct after use.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
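Note: a usage sketch for the new interface (error handling trimmed; rq
is just some request to track). The embedded kref is also taken while
the i915_active is in use (auto_active/auto_retire), so the struct only
frees itself once the tracker has idled and the last reference is gone:

	struct i915_active *ref;

	ref = i915_active_create();
	if (!ref)
		return -ENOMEM;

	err = i915_active_add_request(ref, rq);

	i915_active_put(ref);	/* self-destructs after the tracker retires */
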
 drivers/gpu/drm/i915/i915_active.c | 53 ++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_active.h |  4 +++
 2 files changed, 57 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 7b3d6c12ad61..1826de14d2da 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -881,6 +881,59 @@ void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
 	active_fence_cb(fence, cb);
 }
 
+struct auto_active {
+	struct i915_active base;
+	struct kref ref;
+};
+
+struct i915_active *i915_active_get(struct i915_active *ref)
+{
+	struct auto_active *aa = container_of(ref, typeof(*aa), base);
+
+	kref_get(&aa->ref);
+	return &aa->base;
+}
+
+static void auto_release(struct kref *ref)
+{
+	struct auto_active *aa = container_of(ref, typeof(*aa), ref);
+
+	i915_active_fini(&aa->base);
+	kfree(aa);
+}
+
+void i915_active_put(struct i915_active *ref)
+{
+	struct auto_active *aa = container_of(ref, typeof(*aa), base);
+
+	kref_put(&aa->ref, auto_release);
+}
+
+static int auto_active(struct i915_active *ref)
+{
+	i915_active_get(ref);
+	return 0;
+}
+
+static void auto_retire(struct i915_active *ref)
+{
+	i915_active_put(ref);
+}
+
+struct i915_active *i915_active_create(void)
+{
+	struct auto_active *aa;
+
+	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
+	if (!aa)
+		return NULL;
+
+	kref_init(&aa->ref);
+	i915_active_init(&aa->base, auto_active, auto_retire);
+
+	return &aa->base;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/i915_active.c"
 #endif
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 973ff0447c6c..7e438501333e 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -215,4 +215,8 @@ void i915_request_add_active_barriers(struct i915_request *rq);
 void i915_active_print(struct i915_active *ref, struct drm_printer *m);
 void i915_active_unlock_wait(struct i915_active *ref);
 
+struct i915_active *i915_active_create(void);
+struct i915_active *i915_active_get(struct i915_active *ref);
+void i915_active_put(struct i915_active *ref);
+
 #endif /* _I915_ACTIVE_H_ */
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 06/17] drm/i915: Extend i915_request_await_active to use all timelines
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (3 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 05/17] drm/i915: Wrap i915_active in a simple kreffed struct Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-10 10:18   ` Tvrtko Ursulin
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 07/17] drm/i915/perf: Schedule oa_config after modifying the contexts Chris Wilson
                   ` (14 subsequent siblings)
  19 siblings, 1 reply; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

Extend i915_request_await_active() to be able to asynchronously wait on
all the tracked timelines simultaneously.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
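Note: caller-side sketch of the new flag (vma->active used purely as an
example):

	/* Previous behaviour: wait only for the exclusive fence. */
	err = i915_request_await_active(rq, &vma->active, 0);

	/* New: also wait for every tracked timeline. */
	err = i915_request_await_active(rq, &vma->active, I915_ACTIVE_AWAIT_ALL);
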
 drivers/gpu/drm/i915/i915_active.c | 51 +++++++++++++++++++++++-------
 drivers/gpu/drm/i915/i915_active.h |  5 ++-
 drivers/gpu/drm/i915/i915_vma.c    |  2 +-
 3 files changed, 45 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 1826de14d2da..e659688db043 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -518,23 +518,52 @@ int i915_active_wait(struct i915_active *ref)
 	return 0;
 }
 
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+static int await_active(struct i915_request *rq,
+			struct i915_active_fence *active)
+{
+	struct dma_fence *fence;
+
+	if (is_barrier(active))
+		return 0;
+
+	fence = i915_active_fence_get(active);
+	if (fence) {
+		int err;
+
+		err = i915_request_await_dma_fence(rq, fence);
+		dma_fence_put(fence);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
+int i915_request_await_active(struct i915_request *rq,
+			      struct i915_active *ref,
+			      unsigned int flags)
 {
 	int err = 0;
 
+	/* We must always wait for the exclusive fence! */
 	if (rcu_access_pointer(ref->excl.fence)) {
-		struct dma_fence *fence;
-
-		rcu_read_lock();
-		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
-		rcu_read_unlock();
-		if (fence) {
-			err = i915_request_await_dma_fence(rq, fence);
-			dma_fence_put(fence);
-		}
+		err = await_active(rq, &ref->excl);
+		if (err)
+			return err;
 	}
 
-	/* In the future we may choose to await on all fences */
+	if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+		struct active_node *it, *n;
+
+		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+			err = await_active(rq, &it->base);
+			if (err)
+				break;
+		}
+		i915_active_release(ref);
+		if (err)
+			return err;
+	}
 
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 7e438501333e..e3c13060c4c7 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -183,7 +183,10 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
 
 int i915_active_wait(struct i915_active *ref);
 
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
+int i915_request_await_active(struct i915_request *rq,
+			      struct i915_active *ref,
+			      unsigned int flags);
+#define I915_ACTIVE_AWAIT_ALL BIT(0)
 
 int i915_active_acquire(struct i915_active *ref);
 bool i915_active_acquire_if_busy(struct i915_active *ref);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 3dde671145f7..5b3efb43a8ef 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1173,7 +1173,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 
 	/* Wait for the vma to be bound before we start! */
-	err = i915_request_await_active(rq, &vma->active);
+	err = i915_request_await_active(rq, &vma->active, 0);
 	if (err)
 		return err;
 
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 07/17] drm/i915/perf: Schedule oa_config after modifying the contexts
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (4 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 06/17] drm/i915: Extend i915_request_await_active to use all timelines Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-06 14:20   ` Lionel Landwerlin
  2020-03-10 11:17   ` Chris Wilson
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 08/17] drm/i915/selftests: Add request throughput measurement to perf Chris Wilson
                   ` (13 subsequent siblings)
  19 siblings, 2 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

We want the scheduler to emit the context modification commands prior
to enabling the oa_config, for which we must explicitly inform it of the
ordering constraints. This is especially important as we now wait for
the final oa_config setup to be completed, and as this wait may be on a
distinct context to the state modifications, we need that command packet
to always be last in the queue.

We borrow the i915_active for its ability to track multiple timelines
and the last dma_fence on each; in effect, a flexible dma_resv. Keeping
track of each dma_fence is important for us so that we can efficiently
schedule the requests and reprioritise as required.

Reported-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
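Note: the ordering pattern used in this patch, as a condensed sketch
(modify_rq and config_rq are placeholders for the per-context
modification requests and the final oa_config request; error handling
trimmed):

	struct i915_active *active;

	active = i915_active_create();
	if (!active)
		return -ENOMEM;

	/* Each context modification request is tracked on the i915_active. */
	err = i915_active_add_request(active, modify_rq);

	/* The oa_config request runs only after all tracked timelines... */
	err = i915_request_await_active(config_rq, active, I915_ACTIVE_AWAIT_ALL);
	/* ...and is itself tracked so we can wait for the whole sequence. */
	err = i915_active_add_request(active, config_rq);

	i915_active_wait(active, TASK_UNINTERRUPTIBLE);
	i915_active_put(active);
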
 drivers/gpu/drm/i915/display/intel_overlay.c  |   8 +-
 drivers/gpu/drm/i915/gt/intel_context_param.c |   2 +-
 drivers/gpu/drm/i915/i915_active.c            |   6 +-
 drivers/gpu/drm/i915/i915_active.h            |   2 +-
 drivers/gpu/drm/i915/i915_perf.c              | 154 +++++++++++-------
 drivers/gpu/drm/i915/i915_perf_types.h        |   5 +-
 drivers/gpu/drm/i915/i915_vma.h               |   2 +-
 drivers/gpu/drm/i915/selftests/i915_active.c  |   4 +-
 8 files changed, 115 insertions(+), 68 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 3b0cb3534e2a..733dcfc28a05 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -272,7 +272,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	i915_request_add(rq);
 
-	return i915_active_wait(&overlay->last_flip);
+	return i915_active_wait(&overlay->last_flip, TASK_INTERRUPTIBLE);
 }
 
 static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
@@ -429,14 +429,14 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	intel_overlay_flip_prepare(overlay, NULL);
 	i915_request_add(rq);
 
-	return i915_active_wait(&overlay->last_flip);
+	return i915_active_wait(&overlay->last_flip, TASK_INTERRUPTIBLE);
 }
 
 /* recover from an interruption due to a signal
  * We have to be careful not to repeat work forever an make forward progess. */
 static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
-	return i915_active_wait(&overlay->last_flip);
+	return i915_active_wait(&overlay->last_flip, TASK_INTERRUPTIBLE);
 }
 
 /* Wait for pending overlay flip and release old frame.
@@ -477,7 +477,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 
 	i915_request_add(rq);
 
-	return i915_active_wait(&overlay->last_flip);
+	return i915_active_wait(&overlay->last_flip, TASK_INTERRUPTIBLE);
 }
 
 void intel_overlay_reset(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.c b/drivers/gpu/drm/i915/gt/intel_context_param.c
index 65dcd090245d..903cce8c23c4 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_param.c
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.c
@@ -15,7 +15,7 @@ int intel_context_set_ring_size(struct intel_context *ce, long sz)
 	if (intel_context_lock_pinned(ce))
 		return -EINTR;
 
-	err = i915_active_wait(&ce->active);
+	err = i915_active_wait(&ce->active, TASK_INTERRUPTIBLE);
 	if (err < 0)
 		goto unlock;
 
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index e659688db043..12943301df3d 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -496,7 +496,7 @@ static int flush_lazy_signals(struct i915_active *ref)
 	return err;
 }
 
-int i915_active_wait(struct i915_active *ref)
+int i915_active_wait(struct i915_active *ref, int state)
 {
 	int err;
 
@@ -511,7 +511,9 @@ int i915_active_wait(struct i915_active *ref)
 	if (err)
 		return err;
 
-	if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
+	if (!i915_active_is_idle(ref) &&
+	    ___wait_var_event(ref, i915_active_is_idle(ref),
+			      state, 0, 0, schedule()))
 		return -EINTR;
 
 	flush_work(&ref->work);
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index e3c13060c4c7..69b5f7a76488 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -181,7 +181,7 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
 	return rcu_access_pointer(ref->excl.fence);
 }
 
-int i915_active_wait(struct i915_active *ref);
+int i915_active_wait(struct i915_active *ref, int state);
 
 int i915_request_await_active(struct i915_request *rq,
 			      struct i915_active *ref,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 1b074bb4a7fe..214e72670738 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1972,10 +1972,11 @@ get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config)
 	return i915_vma_get(oa_bo->vma);
 }
 
-static struct i915_request *
+static int
 emit_oa_config(struct i915_perf_stream *stream,
 	       struct i915_oa_config *oa_config,
-	       struct intel_context *ce)
+	       struct intel_context *ce,
+	       struct i915_active *active)
 {
 	struct i915_request *rq;
 	struct i915_vma *vma;
@@ -1983,7 +1984,7 @@ emit_oa_config(struct i915_perf_stream *stream,
 
 	vma = get_oa_vma(stream, oa_config);
 	if (IS_ERR(vma))
-		return ERR_CAST(vma);
+		return PTR_ERR(vma);
 
 	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
 	if (err)
@@ -1997,6 +1998,18 @@ emit_oa_config(struct i915_perf_stream *stream,
 		goto err_vma_unpin;
 	}
 
+	if (!IS_ERR_OR_NULL(active)) {
+		/* After all individual context modifications */
+		err = i915_request_await_active(rq, active,
+						I915_ACTIVE_AWAIT_ALL);
+		if (err)
+			goto err_add_request;
+
+		err = i915_active_add_request(active, rq);
+		if (err)
+			goto err_add_request;
+	}
+
 	i915_vma_lock(vma);
 	err = i915_request_await_object(rq, vma->obj, 0);
 	if (!err)
@@ -2011,14 +2024,13 @@ emit_oa_config(struct i915_perf_stream *stream,
 	if (err)
 		goto err_add_request;
 
-	i915_request_get(rq);
 err_add_request:
 	i915_request_add(rq);
 err_vma_unpin:
 	i915_vma_unpin(vma);
 err_vma_put:
 	i915_vma_put(vma);
-	return err ? ERR_PTR(err) : rq;
+	return err;
 }
 
 static struct intel_context *oa_context(struct i915_perf_stream *stream)
@@ -2026,8 +2038,9 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
 	return stream->pinned_ctx ?: stream->engine->kernel_context;
 }
 
-static struct i915_request *
-hsw_enable_metric_set(struct i915_perf_stream *stream)
+static int
+hsw_enable_metric_set(struct i915_perf_stream *stream,
+		      struct i915_active *active)
 {
 	struct intel_uncore *uncore = stream->uncore;
 
@@ -2046,7 +2059,9 @@ hsw_enable_metric_set(struct i915_perf_stream *stream)
 	intel_uncore_rmw(uncore, GEN6_UCGCTL1,
 			 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
 
-	return emit_oa_config(stream, stream->oa_config, oa_context(stream));
+	return emit_oa_config(stream,
+			      stream->oa_config, oa_context(stream),
+			      active);
 }
 
 static void hsw_disable_metric_set(struct i915_perf_stream *stream)
@@ -2196,8 +2211,10 @@ static int gen8_modify_context(struct intel_context *ce,
 	return err;
 }
 
-static int gen8_modify_self(struct intel_context *ce,
-			    const struct flex *flex, unsigned int count)
+static int
+gen8_modify_self(struct intel_context *ce,
+		 const struct flex *flex, unsigned int count,
+		 struct i915_active *active)
 {
 	struct i915_request *rq;
 	int err;
@@ -2208,8 +2225,17 @@ static int gen8_modify_self(struct intel_context *ce,
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
+	if (!IS_ERR_OR_NULL(active)) {
+		err = i915_active_add_request(active, rq);
+		if (err)
+			goto err_add_request;
+	}
+
 	err = gen8_load_flex(rq, ce, flex, count);
+	if (err)
+		goto err_add_request;
 
+err_add_request:
 	i915_request_add(rq);
 	return err;
 }
@@ -2243,7 +2269,8 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
 	return err;
 }
 
-static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool enable)
+static int gen12_configure_oar_context(struct i915_perf_stream *stream,
+				       struct i915_active *active)
 {
 	int err;
 	struct intel_context *ce = stream->pinned_ctx;
@@ -2252,7 +2279,7 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
 		{
 			GEN8_OACTXCONTROL,
 			stream->perf->ctx_oactxctrl_offset + 1,
-			enable ? GEN8_OA_COUNTER_RESUME : 0,
+			active ? GEN8_OA_COUNTER_RESUME : 0,
 		},
 	};
 	/* Offsets in regs_lri are not used since this configuration is only
@@ -2264,13 +2291,13 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
 			GEN12_OAR_OACONTROL,
 			GEN12_OAR_OACONTROL_OFFSET + 1,
 			(format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) |
-			(enable ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
+			(active ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0)
 		},
 		{
 			RING_CONTEXT_CONTROL(ce->engine->mmio_base),
 			CTX_CONTEXT_CONTROL,
 			_MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
-				      enable ?
+				      active ?
 				      GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
 				      0)
 		},
@@ -2287,7 +2314,7 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
 		return err;
 
 	/* Apply regs_lri using LRI with pinned context */
-	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri));
+	return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
 }
 
 /*
@@ -2315,9 +2342,11 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream, bool ena
  * Note: it's only the RCS/Render context that has any OA state.
  * Note: the first flex register passed must always be R_PWR_CLK_STATE
  */
-static int oa_configure_all_contexts(struct i915_perf_stream *stream,
-				     struct flex *regs,
-				     size_t num_regs)
+static int
+oa_configure_all_contexts(struct i915_perf_stream *stream,
+			  struct flex *regs,
+			  size_t num_regs,
+			  struct i915_active *active)
 {
 	struct drm_i915_private *i915 = stream->perf->i915;
 	struct intel_engine_cs *engine;
@@ -2374,7 +2403,7 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
 
 		regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
 
-		err = gen8_modify_self(ce, regs, num_regs);
+		err = gen8_modify_self(ce, regs, num_regs, active);
 		if (err)
 			return err;
 	}
@@ -2382,8 +2411,10 @@ static int oa_configure_all_contexts(struct i915_perf_stream *stream,
 	return 0;
 }
 
-static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
-					const struct i915_oa_config *oa_config)
+static int
+gen12_configure_all_contexts(struct i915_perf_stream *stream,
+			     const struct i915_oa_config *oa_config,
+			     struct i915_active *active)
 {
 	struct flex regs[] = {
 		{
@@ -2392,11 +2423,15 @@ static int gen12_configure_all_contexts(struct i915_perf_stream *stream,
 		},
 	};
 
-	return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+	return oa_configure_all_contexts(stream,
+					 regs, ARRAY_SIZE(regs),
+					 active);
 }
 
-static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
-				      const struct i915_oa_config *oa_config)
+static int
+lrc_configure_all_contexts(struct i915_perf_stream *stream,
+			   const struct i915_oa_config *oa_config,
+			   struct i915_active *active)
 {
 	/* The MMIO offsets for Flex EU registers aren't contiguous */
 	const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset;
@@ -2429,11 +2464,14 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
 	for (i = 2; i < ARRAY_SIZE(regs); i++)
 		regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
 
-	return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
+	return oa_configure_all_contexts(stream,
+					 regs, ARRAY_SIZE(regs),
+					 active);
 }
 
-static struct i915_request *
-gen8_enable_metric_set(struct i915_perf_stream *stream)
+static int
+gen8_enable_metric_set(struct i915_perf_stream *stream,
+		       struct i915_active *active)
 {
 	struct intel_uncore *uncore = stream->uncore;
 	struct i915_oa_config *oa_config = stream->oa_config;
@@ -2473,11 +2511,13 @@ gen8_enable_metric_set(struct i915_perf_stream *stream)
 	 * to make sure all slices/subslices are ON before writing to NOA
 	 * registers.
 	 */
-	ret = lrc_configure_all_contexts(stream, oa_config);
+	ret = lrc_configure_all_contexts(stream, oa_config, active);
 	if (ret)
-		return ERR_PTR(ret);
+		return ret;
 
-	return emit_oa_config(stream, oa_config, oa_context(stream));
+	return emit_oa_config(stream,
+			      stream->oa_config, oa_context(stream),
+			      active);
 }
 
 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
@@ -2487,8 +2527,9 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
 			     0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
 }
 
-static struct i915_request *
-gen12_enable_metric_set(struct i915_perf_stream *stream)
+static int
+gen12_enable_metric_set(struct i915_perf_stream *stream,
+			struct i915_active *active)
 {
 	struct intel_uncore *uncore = stream->uncore;
 	struct i915_oa_config *oa_config = stream->oa_config;
@@ -2517,9 +2558,9 @@ gen12_enable_metric_set(struct i915_perf_stream *stream)
 	 * to make sure all slices/subslices are ON before writing to NOA
 	 * registers.
 	 */
-	ret = gen12_configure_all_contexts(stream, oa_config);
+	ret = gen12_configure_all_contexts(stream, oa_config, active);
 	if (ret)
-		return ERR_PTR(ret);
+		return ret;
 
 	/*
 	 * For Gen12, performance counters are context
@@ -2527,12 +2568,14 @@ gen12_enable_metric_set(struct i915_perf_stream *stream)
 	 * requested this.
 	 */
 	if (stream->ctx) {
-		ret = gen12_configure_oar_context(stream, true);
+		ret = gen12_configure_oar_context(stream, active);
 		if (ret)
-			return ERR_PTR(ret);
+			return ret;
 	}
 
-	return emit_oa_config(stream, oa_config, oa_context(stream));
+	return emit_oa_config(stream,
+			      stream->oa_config, oa_context(stream),
+			      active);
 }
 
 static void gen8_disable_metric_set(struct i915_perf_stream *stream)
@@ -2540,7 +2583,7 @@ static void gen8_disable_metric_set(struct i915_perf_stream *stream)
 	struct intel_uncore *uncore = stream->uncore;
 
 	/* Reset all contexts' slices/subslices configurations. */
-	lrc_configure_all_contexts(stream, NULL);
+	lrc_configure_all_contexts(stream, NULL, NULL);
 
 	intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
 }
@@ -2550,7 +2593,7 @@ static void gen10_disable_metric_set(struct i915_perf_stream *stream)
 	struct intel_uncore *uncore = stream->uncore;
 
 	/* Reset all contexts' slices/subslices configurations. */
-	lrc_configure_all_contexts(stream, NULL);
+	lrc_configure_all_contexts(stream, NULL, NULL);
 
 	/* Make sure we disable noa to save power. */
 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2561,11 +2604,11 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
 	struct intel_uncore *uncore = stream->uncore;
 
 	/* Reset all contexts' slices/subslices configurations. */
-	gen12_configure_all_contexts(stream, NULL);
+	gen12_configure_all_contexts(stream, NULL, NULL);
 
 	/* disable the context save/restore or OAR counters */
 	if (stream->ctx)
-		gen12_configure_oar_context(stream, false);
+		gen12_configure_oar_context(stream, NULL);
 
 	/* Make sure we disable noa to save power. */
 	intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0);
@@ -2729,16 +2772,19 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
 
 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
 {
-	struct i915_request *rq;
+	struct i915_active *active;
+	int err;
 
-	rq = stream->perf->ops.enable_metric_set(stream);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
+	active = i915_active_create();
+	if (!active)
+		return -ENOMEM;
 
-	i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
-	i915_request_put(rq);
+	err = stream->perf->ops.enable_metric_set(stream, active);
+	if (err == 0)
+		i915_active_wait(active, TASK_UNINTERRUPTIBLE);
 
-	return 0;
+	i915_active_put(active);
+	return err;
 }
 
 /**
@@ -3192,7 +3238,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
 		return -EINVAL;
 
 	if (config != stream->oa_config) {
-		struct i915_request *rq;
+		int err;
 
 		/*
 		 * If OA is bound to a specific context, emit the
@@ -3203,13 +3249,11 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
 		 * When set globally, we use a low priority kernel context,
 		 * so it will effectively take effect when idle.
 		 */
-		rq = emit_oa_config(stream, config, oa_context(stream));
-		if (!IS_ERR(rq)) {
+		err = emit_oa_config(stream, config, oa_context(stream), NULL);
+		if (!err)
 			config = xchg(&stream->oa_config, config);
-			i915_request_put(rq);
-		} else {
-			ret = PTR_ERR(rq);
-		}
+		else
+			ret = err;
 	}
 
 	i915_oa_config_put(config);
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index a0e22f00f6cf..5eaf874a0d25 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -21,6 +21,7 @@
 
 struct drm_i915_private;
 struct file;
+struct i915_active;
 struct i915_gem_context;
 struct i915_perf;
 struct i915_vma;
@@ -339,8 +340,8 @@ struct i915_oa_ops {
 	 * counter reports being sampled. May apply system constraints such as
 	 * disabling EU clock gating as required.
 	 */
-	struct i915_request *
-		(*enable_metric_set)(struct i915_perf_stream *stream);
+	int (*enable_metric_set)(struct i915_perf_stream *stream,
+				 struct i915_active *active);
 
 	/**
 	 * @disable_metric_set: Remove system constraints associated with using
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index e1ced1df13e1..3baa98fa5009 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -380,7 +380,7 @@ int i915_vma_wait_for_bind(struct i915_vma *vma);
 static inline int i915_vma_sync(struct i915_vma *vma)
 {
 	/* Wait for the asynchronous bindings and pending GPU reads */
-	return i915_active_wait(&vma->active);
+	return i915_active_wait(&vma->active, TASK_INTERRUPTIBLE);
 }
 
 #endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 68bbb1580162..7357d2130024 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -153,7 +153,7 @@ static int live_active_wait(void *arg)
 	if (IS_ERR(active))
 		return PTR_ERR(active);
 
-	i915_active_wait(&active->base);
+	i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
 	if (!READ_ONCE(active->retired)) {
 		struct drm_printer p = drm_err_printer(__func__);
 
@@ -230,7 +230,7 @@ static int live_active_barrier(void *arg)
 	i915_active_release(&active->base);
 
 	if (err == 0)
-		err = i915_active_wait(&active->base);
+		err = i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
 
 	if (err == 0 && !READ_ONCE(active->retired)) {
 		pr_err("i915_active not retired after flushing barriers!\n");
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 08/17] drm/i915/selftests: Add request throughput measurement to perf
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (5 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 07/17] drm/i915/perf: Schedule oa_config after modifying the contexts Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-10 10:38   ` Tvrtko Ursulin
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 09/17] dma-buf: Prettify typecasts for dma-fence-chain Chris Wilson
                   ` (12 subsequent siblings)
  19 siblings, 1 reply; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

Under ideal circumstances, the driver should be able to keep the GPU
fully saturated with work. Measure how close to ideal we get under the
harshest of conditions with no user payload.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
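Note on the reporting arithmetic at the end of perf_parallel_engines():
with no floating point available in the kernel, the busy percentage is
split into whole and two-decimal parts using 64-bit division, roughly:

	u64 busy = 100 * ktime_to_ns(p->busy);
	u64 dt = ktime_to_ns(p->time);
	int integer = div64_u64(busy, dt);			/* whole % */
	int decimal = div64_u64(100 * (busy - integer * dt), dt);	/* .xx */
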
 .../drm/i915/selftests/i915_perf_selftests.h  |   1 +
 drivers/gpu/drm/i915/selftests/i915_request.c | 285 +++++++++++++++++-
 2 files changed, 285 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
index 3bf7f53e9924..d8da142985eb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -16,5 +16,6 @@
  * Tests are executed in order by igt/i915_selftest
  */
 selftest(engine_cs, intel_engine_cs_perf_selftests)
+selftest(request, i915_request_perf_selftests)
 selftest(blt, i915_gem_object_blt_perf_selftests)
 selftest(region, intel_memory_region_perf_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index f89d9c42f1fa..d4c088cfe4e1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -23,6 +23,7 @@
  */
 
 #include <linux/prime_numbers.h>
+#include <linux/pm_qos.h>
 
 #include "gem/i915_gem_pm.h"
 #include "gem/selftests/mock_context.h"
@@ -1233,7 +1234,7 @@ static int live_parallel_engines(void *arg)
 		struct igt_live_test t;
 		unsigned int idx;
 
-		snprintf(name, sizeof(name), "%pS", fn);
+		snprintf(name, sizeof(name), "%ps", *fn);
 		err = igt_live_test_begin(&t, i915, __func__, name);
 		if (err)
 			break;
@@ -1470,3 +1471,285 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
 
 	return i915_subtests(tests, i915);
 }
+
+struct perf_parallel {
+	struct intel_engine_cs *engine;
+	unsigned long count;
+	ktime_t time;
+	ktime_t busy;
+	u64 runtime;
+};
+
+static int switch_to_kernel_sync(struct intel_context *ce, int err)
+{
+	struct i915_request *rq;
+	struct dma_fence *fence;
+
+	rq = intel_engine_create_kernel_request(ce->engine);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	fence = i915_active_fence_get(&ce->timeline->last_request);
+	if (fence) {
+		i915_request_await_dma_fence(rq, fence);
+		dma_fence_put(fence);
+	}
+
+	rq = i915_request_get(rq);
+	i915_request_add(rq);
+	if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err)
+		err = -ETIME;
+	i915_request_put(rq);
+
+	while (!err && !intel_engine_is_idle(ce->engine))
+		intel_engine_flush_submission(ce->engine);
+
+	return err;
+}
+
+static int perf_sync(void *arg)
+{
+	struct perf_parallel *p = arg;
+	struct intel_engine_cs *engine = p->engine;
+	struct intel_context *ce;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+	bool busy;
+	int err = 0;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	err = intel_context_pin(ce);
+	if (err) {
+		intel_context_put(ce);
+		return err;
+	}
+
+	busy = false;
+	if (intel_engine_supports_stats(engine) &&
+	    !intel_enable_engine_stats(engine)) {
+		p->busy = intel_engine_get_busy_time(engine);
+		busy = true;
+	}
+
+	p->time = ktime_get();
+	count = 0;
+	do {
+		struct i915_request *rq;
+
+		rq = i915_request_create(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			break;
+		}
+
+		i915_request_get(rq);
+		i915_request_add(rq);
+
+		err = 0;
+		if (i915_request_wait(rq, 0, HZ / 5) < 0)
+			err = -ETIME;
+		i915_request_put(rq);
+		if (err)
+			break;
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+	p->time = ktime_sub(ktime_get(), p->time);
+
+	if (busy) {
+		p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+				    p->busy);
+		intel_disable_engine_stats(engine);
+	}
+
+	err = switch_to_kernel_sync(ce, err);
+	p->runtime = intel_context_get_total_runtime_ns(ce);
+	p->count = count;
+
+	intel_context_unpin(ce);
+	intel_context_put(ce);
+	return err;
+}
+
+static int perf_many(void *arg)
+{
+	struct perf_parallel *p = arg;
+	struct intel_engine_cs *engine = p->engine;
+	struct intel_context *ce;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+	int err = 0;
+	bool busy;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	err = intel_context_pin(ce);
+	if (err) {
+		intel_context_put(ce);
+		return err;
+	}
+
+	busy = false;
+	if (intel_engine_supports_stats(engine) &&
+	    !intel_enable_engine_stats(engine)) {
+		p->busy = intel_engine_get_busy_time(engine);
+		busy = true;
+	}
+
+	count = 0;
+	p->time = ktime_get();
+	do {
+		struct i915_request *rq;
+
+		rq = i915_request_create(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			break;
+		}
+
+		i915_request_add(rq);
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+	p->time = ktime_sub(ktime_get(), p->time);
+
+	if (busy) {
+		p->busy = ktime_sub(intel_engine_get_busy_time(engine),
+				    p->busy);
+		intel_disable_engine_stats(engine);
+	}
+
+	err = switch_to_kernel_sync(ce, err);
+	p->runtime = intel_context_get_total_runtime_ns(ce);
+	p->count = count;
+
+	intel_context_unpin(ce);
+	intel_context_put(ce);
+	return err;
+}
+
+static int perf_parallel_engines(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	static int (* const func[])(void *arg) = {
+		perf_sync,
+		perf_many,
+		NULL,
+	};
+	const unsigned int nengines = num_uabi_engines(i915);
+	struct intel_engine_cs *engine;
+	int (* const *fn)(void *arg);
+	struct pm_qos_request *qos;
+	struct {
+		struct perf_parallel p;
+		struct task_struct *tsk;
+	} *engines;
+	int err = 0;
+
+	engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
+	if (!engines)
+		return -ENOMEM;
+
+	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
+	if (qos)
+		pm_qos_add_request(qos, PM_QOS_CPU_DMA_LATENCY, 0);
+
+	for (fn = func; *fn; fn++) {
+		char name[KSYM_NAME_LEN];
+		struct igt_live_test t;
+		unsigned int idx;
+
+		snprintf(name, sizeof(name), "%ps", *fn);
+		err = igt_live_test_begin(&t, i915, __func__, name);
+		if (err)
+			break;
+
+		atomic_set(&i915->selftest.counter, nengines);
+
+		idx = 0;
+		for_each_uabi_engine(engine, i915) {
+			intel_engine_pm_get(engine);
+
+			memset(&engines[idx].p, 0, sizeof(engines[idx].p));
+			engines[idx].p.engine = engine;
+
+			engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
+						       "igt:%s", engine->name);
+			if (IS_ERR(engines[idx].tsk)) {
+				err = PTR_ERR(engines[idx].tsk);
+				intel_engine_pm_put(engine);
+				break;
+			}
+			get_task_struct(engines[idx++].tsk);
+		}
+
+		yield(); /* start all threads before we kthread_stop() */
+
+		idx = 0;
+		for_each_uabi_engine(engine, i915) {
+			int status;
+
+			if (IS_ERR(engines[idx].tsk))
+				break;
+
+			status = kthread_stop(engines[idx].tsk);
+			if (status && !err)
+				err = status;
+
+			intel_engine_pm_put(engine);
+			put_task_struct(engines[idx++].tsk);
+		}
+
+		if (igt_live_test_end(&t))
+			err = -EIO;
+		if (err)
+			break;
+
+		idx = 0;
+		for_each_uabi_engine(engine, i915) {
+			struct perf_parallel *p = &engines[idx].p;
+			u64 busy = 100 * ktime_to_ns(p->busy);
+			u64 dt = ktime_to_ns(p->time);
+			int integer, decimal;
+
+			if (dt) {
+				integer = div64_u64(busy, dt);
+				busy -= integer * dt;
+				decimal = div64_u64(100 * busy, dt);
+			} else {
+				integer = 0;
+				decimal = 0;
+			}
+
+			GEM_BUG_ON(engine != p->engine);
+			pr_info("%s %5s: { count:%lu, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
+				name, engine->name, p->count, integer, decimal,
+				div_u64(p->runtime, 1000 * 1000),
+				div_u64(ktime_to_ns(p->time), 1000 * 1000));
+			idx++;
+		}
+	}
+
+	if (qos) {
+		pm_qos_remove_request(qos);
+		kfree(qos);
+	}
+	kfree(engines);
+	return err;
+}
+
+int i915_request_perf_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(perf_parallel_engines),
+	};
+
+	if (intel_gt_is_wedged(&i915->gt))
+		return 0;
+
+	return i915_subtests(tests, i915);
+}
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 09/17] dma-buf: Prettify typecasts for dma-fence-chain
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (6 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 08/17] drm/i915/selftests: Add request throughput measurement to perf Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 10/17] dma-buf: Report signaled links inside dma-fence-chain Chris Wilson
                   ` (11 subsequent siblings)
  19 siblings, 0 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

Inside dma-fence-chain, we use a cmpxchg on an RCU-protected pointer. To
avoid the sparse warning for using the RCU pointer directly, we have to
cast away the __rcu annotation. However, we don't need to use void*
everywhere and can stick to the dma_fence*.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
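Note: a minimal fragment of the pattern (generic, not tied to
dma-fence-chain): only the lvalue's address-space annotation needs the
__force cast for sparse; the values being exchanged remain strongly
typed dma_fence pointers:

	struct dma_fence __rcu *slot;
	struct dma_fence *prev, *replacement, *tmp;

	tmp = cmpxchg((struct dma_fence __force **)&slot, prev, replacement);
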
 drivers/dma-buf/dma-fence-chain.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 44a741677d25..3d123502ff12 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -62,7 +62,8 @@ struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
 			replacement = NULL;
 		}
 
-		tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement);
+		tmp = cmpxchg((struct dma_fence __force **)&chain->prev,
+			      prev, replacement);
 		if (tmp == prev)
 			dma_fence_put(tmp);
 		else
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 10/17] dma-buf: Report signaled links inside dma-fence-chain
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (7 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 09/17] dma-buf: Prettify typecasts for dma-fence-chain Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 11/17] dma-buf: Exercise dma-fence-chain under selftests Chris Wilson
                   ` (10 subsequent siblings)
  19 siblings, 0 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

Whenever we walk along the dma-fence-chain, we prune signaled links to
keep the chain nice and tidy. This leads to situations where we can
prune a link and report the earlier fence as the target seqno --
violating our own consistency checks that the seqno is not more advanced
than the last element in a dma-fence-chain.

Report a NULL fence and success if the seqno has already been signaled.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
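Note: caller-side sketch of the new behaviour (chain and seqno are
placeholders). An already-signaled point now yields *pfence == NULL with
success, so callers should treat NULL as "nothing left to wait on"
rather than as an error:

	struct dma_fence *fence = dma_fence_get(&chain->base);
	int err;

	err = dma_fence_chain_find_seqno(&fence, seqno);
	if (err)
		return err;

	if (!fence)
		return 0;	/* seqno already signaled */

	/* otherwise wait on, or attach a callback to, 'fence' ... */
	dma_fence_put(fence);
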
 drivers/dma-buf/dma-fence-chain.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 3d123502ff12..c435bbba851c 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -99,6 +99,12 @@ int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
 		return -EINVAL;
 
 	dma_fence_chain_for_each(*pfence, &chain->base) {
+		if ((*pfence)->seqno < seqno) { /* already signaled */
+			dma_fence_put(*pfence);
+			*pfence = NULL;
+			break;
+		}
+
 		if ((*pfence)->context != chain->base.context ||
 		    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
 			break;
@@ -222,6 +228,7 @@ EXPORT_SYMBOL(dma_fence_chain_ops);
  * @chain: the chain node to initialize
  * @prev: the previous fence
  * @fence: the current fence
+ * @seqno: the sequence number (syncpt) of the fence within the chain
  *
  * Initialize a new chain node and either start a new chain or add the node to
  * the existing chain of the previous fence.
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 11/17] dma-buf: Exercise dma-fence-chain under selftests
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (8 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 10/17] dma-buf: Report signaled links inside dma-fence-chain Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 12/17] dma-buf: Proxy fence, an unsignaled fence placeholder Chris Wilson
                   ` (9 subsequent siblings)
  19 siblings, 0 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

A few very simple testcases to exercise the dma-fence-chain API.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/dma-buf/Makefile             |   3 +-
 drivers/dma-buf/selftests.h          |   1 +
 drivers/dma-buf/st-dma-fence-chain.c | 713 +++++++++++++++++++++++++++
 3 files changed, 716 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma-buf/st-dma-fence-chain.c

diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 9c190026bfab..995e05f609ff 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_UDMABUF)		+= udmabuf.o
 
 dmabuf_selftests-y := \
 	selftest.o \
-	st-dma-fence.o
+	st-dma-fence.o \
+	st-dma-fence-chain.o
 
 obj-$(CONFIG_DMABUF_SELFTESTS)	+= dmabuf_selftests.o
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
index 5320386f02e5..55918ef9adab 100644
--- a/drivers/dma-buf/selftests.h
+++ b/drivers/dma-buf/selftests.h
@@ -11,3 +11,4 @@
  */
 selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
 selftest(dma_fence, dma_fence)
+selftest(dma_fence_chain, dma_fence_chain)
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
new file mode 100644
index 000000000000..bd08ba67b03b
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+
+#include "selftest.h"
+
+static struct kmem_cache *slab_fences;
+
+static inline struct mock_fence {
+	struct dma_fence base;
+	spinlock_t lock;
+} *to_mock_fence(struct dma_fence *f) {
+	return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+	return "mock";
+}
+
+static void mock_fence_release(struct dma_fence *f)
+{
+	kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+static const struct dma_fence_ops mock_ops = {
+	.get_driver_name = mock_name,
+	.get_timeline_name = mock_name,
+	.release = mock_fence_release,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+	struct mock_fence *f;
+
+	f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+	if (!f)
+		return NULL;
+
+	spin_lock_init(&f->lock);
+	dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+	return &f->base;
+}
+
+static inline struct mock_chain {
+	struct dma_fence_chain base;
+} *to_mock_chain(struct dma_fence *f) {
+	return container_of(f, struct mock_chain, base.base);
+}
+
+static struct dma_fence *mock_chain(struct dma_fence *prev,
+				    struct dma_fence *fence,
+				    u64 seqno)
+{
+	struct mock_chain *f;
+
+	f = kmalloc(sizeof(*f), GFP_KERNEL);
+	if (!f)
+		return NULL;
+
+	dma_fence_chain_init(&f->base,
+			     dma_fence_get(prev),
+			     dma_fence_get(fence),
+			     seqno);
+
+	return &f->base.base;
+}
+
+static int sanitycheck(void *arg)
+{
+	struct dma_fence *f, *chain;
+	int err = 0;
+
+	f = mock_fence();
+	if (!f)
+		return -ENOMEM;
+
+	chain = mock_chain(NULL, f, 1);
+	if (!chain)
+		err = -ENOMEM;
+
+	dma_fence_signal(f);
+	dma_fence_put(f);
+
+	dma_fence_put(chain);
+
+	return err;
+}
+
+struct fence_chains {
+	unsigned int chain_length;
+	struct dma_fence **fences;
+	struct dma_fence **chains;
+
+	struct dma_fence *tail;
+};
+
+static uint64_t seqno_inc(unsigned int i)
+{
+	return i + 1;
+}
+
+static int fence_chains_init(struct fence_chains *fc, unsigned int count,
+			     uint64_t (*seqno_fn)(unsigned int))
+{
+	unsigned int i;
+	int err = 0;
+
+	fc->chains = kvmalloc_array(count, sizeof(*fc->chains),
+				    GFP_KERNEL | __GFP_ZERO);
+	if (!fc->chains)
+		return -ENOMEM;
+
+	fc->fences = kvmalloc_array(count, sizeof(*fc->fences),
+				    GFP_KERNEL | __GFP_ZERO);
+	if (!fc->fences) {
+		err = -ENOMEM;
+		goto err_chains;
+	}
+
+	fc->tail = NULL;
+	for (i = 0; i < count; i++) {
+		fc->fences[i] = mock_fence();
+		if (!fc->fences[i]) {
+			err = -ENOMEM;
+			goto unwind;
+		}
+
+		fc->chains[i] = mock_chain(fc->tail,
+					   fc->fences[i],
+					   seqno_fn(i));
+		if (!fc->chains[i]) {
+			err = -ENOMEM;
+			goto unwind;
+		}
+
+		fc->tail = fc->chains[i];
+	}
+
+	fc->chain_length = i;
+	return 0;
+
+unwind:
+	for (i = 0; i < count; i++) {
+		dma_fence_put(fc->fences[i]);
+		dma_fence_put(fc->chains[i]);
+	}
+	kvfree(fc->fences);
+err_chains:
+	kvfree(fc->chains);
+	return err;
+}
+
+static void fence_chains_fini(struct fence_chains *fc)
+{
+	unsigned int i;
+
+	for (i = 0; i < fc->chain_length; i++) {
+		dma_fence_signal(fc->fences[i]);
+		dma_fence_put(fc->fences[i]);
+	}
+	kvfree(fc->fences);
+
+	for (i = 0; i < fc->chain_length; i++)
+		dma_fence_put(fc->chains[i]);
+	kvfree(fc->chains);
+}
+
+static int find_seqno(void *arg)
+{
+	struct fence_chains fc;
+	struct dma_fence *fence;
+	int err;
+	int i;
+
+	err = fence_chains_init(&fc, 64, seqno_inc);
+	if (err)
+		return err;
+
+	fence = dma_fence_get(fc.tail);
+	err = dma_fence_chain_find_seqno(&fence, 0);
+	dma_fence_put(fence);
+	if (err) {
+		pr_err("Reported %d for find_seqno(0)!\n", err);
+		goto err;
+	}
+
+	for (i = 0; i < fc.chain_length; i++) {
+		fence = dma_fence_get(fc.tail);
+		err = dma_fence_chain_find_seqno(&fence, i + 1);
+		dma_fence_put(fence);
+		if (err) {
+			pr_err("Reported %d for find_seqno(%d:%d)!\n",
+			       err, fc.chain_length + 1, i + 1);
+			goto err;
+		}
+		if (fence != fc.chains[i]) {
+			pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
+			       fc.chain_length + 1, i + 1);
+			err = -EINVAL;
+			goto err;
+		}
+
+		dma_fence_get(fence);
+		err = dma_fence_chain_find_seqno(&fence, i + 1);
+		dma_fence_put(fence);
+		if (err) {
+			pr_err("Error reported for finding self\n");
+			goto err;
+		}
+		if (fence != fc.chains[i]) {
+			pr_err("Incorrect fence reported by find self\n");
+			err = -EINVAL;
+			goto err;
+		}
+
+		dma_fence_get(fence);
+		err = dma_fence_chain_find_seqno(&fence, i + 2);
+		dma_fence_put(fence);
+		if (!err) {
+			pr_err("Error not reported for future fence: find_seqno(%d:%d)!\n",
+			       i + 1, i + 2);
+			err = -EINVAL;
+			goto err;
+		}
+
+		dma_fence_get(fence);
+		err = dma_fence_chain_find_seqno(&fence, i);
+		dma_fence_put(fence);
+		if (err) {
+			pr_err("Error reported for previous fence!\n");
+			goto err;
+		}
+		if (i > 0 && fence != fc.chains[i - 1]) {
+			pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
+			       i + 1, i);
+			err = -EINVAL;
+			goto err;
+		}
+	}
+
+err:
+	fence_chains_fini(&fc);
+	return err;
+}
+
+static int find_signaled(void *arg)
+{
+	struct fence_chains fc;
+	struct dma_fence *fence;
+	int err;
+
+	err = fence_chains_init(&fc, 2, seqno_inc);
+	if (err)
+		return err;
+
+	dma_fence_signal(fc.fences[0]);
+
+	fence = dma_fence_get(fc.tail);
+	err = dma_fence_chain_find_seqno(&fence, 1);
+	dma_fence_put(fence);
+	if (err) {
+		pr_err("Reported %d for find_seqno()!\n", err);
+		goto err;
+	}
+
+	if (fence && fence != fc.chains[0]) {
+		pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:1\n",
+		       fence->seqno);
+
+		dma_fence_get(fence);
+		err = dma_fence_chain_find_seqno(&fence, 1);
+		dma_fence_put(fence);
+		if (err)
+			pr_err("Reported %d for finding self!\n", err);
+
+		err = -EINVAL;
+	}
+
+err:
+	fence_chains_fini(&fc);
+	return err;
+}
+
+static int find_out_of_order(void *arg)
+{
+	struct fence_chains fc;
+	struct dma_fence *fence;
+	int err;
+
+	err = fence_chains_init(&fc, 3, seqno_inc);
+	if (err)
+		return err;
+
+	dma_fence_signal(fc.fences[1]);
+
+	fence = dma_fence_get(fc.tail);
+	err = dma_fence_chain_find_seqno(&fence, 2);
+	dma_fence_put(fence);
+	if (err) {
+		pr_err("Reported %d for find_seqno()!\n", err);
+		goto err;
+	}
+
+	if (fence && fence != fc.chains[1]) {
+		pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:2\n",
+		       fence->seqno);
+
+		dma_fence_get(fence);
+		err = dma_fence_chain_find_seqno(&fence, 2);
+		dma_fence_put(fence);
+		if (err)
+			pr_err("Reported %d for finding self!\n", err);
+
+		err = -EINVAL;
+	}
+
+err:
+	fence_chains_fini(&fc);
+	return err;
+}
+
+static uint64_t seqno_inc2(unsigned int i)
+{
+	return 2 * i + 2;
+}
+
+static int find_gap(void *arg)
+{
+	struct fence_chains fc;
+	struct dma_fence *fence;
+	int err;
+	int i;
+
+	err = fence_chains_init(&fc, 64, seqno_inc2);
+	if (err)
+		return err;
+
+	for (i = 0; i < fc.chain_length; i++) {
+		fence = dma_fence_get(fc.tail);
+		err = dma_fence_chain_find_seqno(&fence, 2 * i + 1);
+		dma_fence_put(fence);
+		if (err) {
+			pr_err("Reported %d for find_seqno(%d:%d)!\n",
+			       err, fc.chain_length + 1, 2 * i + 1);
+			goto err;
+		}
+		if (fence != fc.chains[i]) {
+			pr_err("Incorrect fence.seqno:%lld reported by find_seqno(%d:%d)\n",
+			       fence->seqno,
+			       fc.chain_length + 1,
+			       2 * i + 1);
+			err = -EINVAL;
+			goto err;
+		}
+
+		dma_fence_get(fence);
+		err = dma_fence_chain_find_seqno(&fence, 2 * i + 2);
+		dma_fence_put(fence);
+		if (err) {
+			pr_err("Error reported for finding self\n");
+			goto err;
+		}
+		if (fence != fc.chains[i]) {
+			pr_err("Incorrect fence reported by find self\n");
+			err = -EINVAL;
+			goto err;
+		}
+	}
+
+err:
+	fence_chains_fini(&fc);
+	return err;
+}
+
+struct find_race {
+	struct fence_chains fc;
+	atomic_t children;
+};
+
+static int __find_race(void *arg)
+{
+	struct find_race *data = arg;
+	int err = 0;
+
+	while (!kthread_should_stop()) {
+		struct dma_fence *fence = dma_fence_get(data->fc.tail);
+		int seqno;
+
+		seqno = prandom_u32_max(data->fc.chain_length) + 1;
+
+		err = dma_fence_chain_find_seqno(&fence, seqno);
+		if (err) {
+			pr_err("Failed to find fence seqno:%d\n",
+			       seqno);
+			dma_fence_put(fence);
+			break;
+		}
+		if (!fence)
+			goto signal;
+
+		err = dma_fence_chain_find_seqno(&fence, seqno);
+		if (err) {
+			pr_err("Reported an invalid fence for find-self:%d\n",
+			       seqno);
+			dma_fence_put(fence);
+			break;
+		}
+
+		if (fence->seqno < seqno) {
+			pr_err("Reported an earlier fence.seqno:%lld for seqno:%d\n",
+			       fence->seqno, seqno);
+			err = -EINVAL;
+			dma_fence_put(fence);
+			break;
+		}
+
+		dma_fence_put(fence);
+
+signal:
+		seqno = prandom_u32_max(data->fc.chain_length - 1);
+		dma_fence_signal(data->fc.fences[seqno]);
+		cond_resched();
+	}
+
+	if (atomic_dec_and_test(&data->children))
+		wake_up_var(&data->children);
+	return err;
+}
+
+static int find_race(void *arg)
+{
+	struct find_race data;
+	int ncpus = num_online_cpus();
+	struct task_struct **threads;
+	unsigned long count;
+	int err;
+	int i;
+
+	err = fence_chains_init(&data.fc, 64 << 10, seqno_inc);
+	if (err)
+		return err;
+
+	threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+	if (!threads) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	atomic_set(&data.children, 0);
+	for (i = 0; i < ncpus; i++) {
+		threads[i] = kthread_run(__find_race, &data, "dmabuf/%d", i);
+		if (IS_ERR(threads[i])) {
+			ncpus = i;
+			break;
+		}
+		atomic_inc(&data.children);
+		get_task_struct(threads[i]);
+	}
+
+	wait_var_event_timeout(&data.children,
+			       !atomic_read(&data.children),
+			       5 * HZ);
+
+	for (i = 0; i < ncpus; i++) {
+		int ret;
+
+		ret = kthread_stop(threads[i]);
+		if (ret && !err)
+			err = ret;
+		put_task_struct(threads[i]);
+	}
+	kfree(threads);
+
+	count = 0;
+	for (i = 0; i < data.fc.chain_length; i++)
+		if (dma_fence_is_signaled(data.fc.fences[i]))
+			count++;
+	pr_info("Completed %lu cycles\n", count);
+
+err:
+	fence_chains_fini(&data.fc);
+	return err;
+}
+
+static int signal_forward(void *arg)
+{
+	struct fence_chains fc;
+	int err;
+	int i;
+
+	err = fence_chains_init(&fc, 64, seqno_inc);
+	if (err)
+		return err;
+
+	for (i = 0; i < fc.chain_length; i++) {
+		dma_fence_signal(fc.fences[i]);
+
+		if (!dma_fence_is_signaled(fc.chains[i])) {
+			pr_err("chain[%d] not signaled!\n", i);
+			err = -EINVAL;
+			goto err;
+		}
+
+		if (i + 1 < fc.chain_length &&
+		    dma_fence_is_signaled(fc.chains[i + 1])) {
+			pr_err("chain[%d] is signaled!\n", i);
+			err = -EINVAL;
+			goto err;
+		}
+	}
+
+err:
+	fence_chains_fini(&fc);
+	return err;
+}
+
+static int signal_backward(void *arg)
+{
+	struct fence_chains fc;
+	int err;
+	int i;
+
+	err = fence_chains_init(&fc, 64, seqno_inc);
+	if (err)
+		return err;
+
+	for (i = fc.chain_length; i--; ) {
+		dma_fence_signal(fc.fences[i]);
+
+		if (i > 0 && dma_fence_is_signaled(fc.chains[i])) {
+			pr_err("chain[%d] is signaled!\n", i);
+			err = -EINVAL;
+			goto err;
+		}
+	}
+
+	for (i = 0; i < fc.chain_length; i++) {
+		if (!dma_fence_is_signaled(fc.chains[i])) {
+			pr_err("chain[%d] was not signaled!\n", i);
+			err = -EINVAL;
+			goto err;
+		}
+	}
+
+err:
+	fence_chains_fini(&fc);
+	return err;
+}
+
+static int __wait_fence_chains(void *arg)
+{
+	struct fence_chains *fc = arg;
+
+	if (dma_fence_wait(fc->tail, false))
+		return -EIO;
+
+	return 0;
+}
+
+static int wait_forward(void *arg)
+{
+	struct fence_chains fc;
+	struct task_struct *tsk;
+	int err;
+	int i;
+
+	err = fence_chains_init(&fc, 64 << 10, seqno_inc);
+	if (err)
+		return err;
+
+	tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+	if (IS_ERR(tsk)) {
+		err = PTR_ERR(tsk);
+		goto err;
+	}
+	get_task_struct(tsk);
+	yield_to(tsk, true);
+
+	for (i = 0; i < fc.chain_length; i++)
+		dma_fence_signal(fc.fences[i]);
+
+	err = kthread_stop(tsk);
+	put_task_struct(tsk);
+
+err:
+	fence_chains_fini(&fc);
+	return err;
+}
+
+static int wait_backward(void *arg)
+{
+	struct fence_chains fc;
+	struct task_struct *tsk;
+	int err;
+	int i;
+
+	err = fence_chains_init(&fc, 64 << 10, seqno_inc);
+	if (err)
+		return err;
+
+	tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+	if (IS_ERR(tsk)) {
+		err = PTR_ERR(tsk);
+		goto err;
+	}
+	get_task_struct(tsk);
+	yield_to(tsk, true);
+
+	for (i = fc.chain_length; i--; )
+		dma_fence_signal(fc.fences[i]);
+
+	err = kthread_stop(tsk);
+	put_task_struct(tsk);
+
+err:
+	fence_chains_fini(&fc);
+	return err;
+}
+
+static void randomise_fences(struct fence_chains *fc)
+{
+	unsigned int count = fc->chain_length;
+
+	/* Fisher-Yates shuffle courtesy of Knuth */
+	while (--count) {
+		unsigned int swp;
+
+		swp = prandom_u32_max(count + 1);
+		if (swp == count)
+			continue;
+
+		swap(fc->fences[count], fc->fences[swp]);
+	}
+}
+
+static int wait_random(void *arg)
+{
+	struct fence_chains fc;
+	struct task_struct *tsk;
+	int err;
+	int i;
+
+	err = fence_chains_init(&fc, 64 << 10, seqno_inc);
+	if (err)
+		return err;
+
+	randomise_fences(&fc);
+
+	tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+	if (IS_ERR(tsk)) {
+		err = PTR_ERR(tsk);
+		goto err;
+	}
+	get_task_struct(tsk);
+	yield_to(tsk, true);
+
+	for (i = 0; i < fc.chain_length; i++)
+		dma_fence_signal(fc.fences[i]);
+
+	err = kthread_stop(tsk);
+	put_task_struct(tsk);
+
+err:
+	fence_chains_fini(&fc);
+	return err;
+}
+
+int dma_fence_chain(void)
+{
+	static const struct subtest tests[] = {
+		SUBTEST(sanitycheck),
+		SUBTEST(find_seqno),
+		SUBTEST(find_signaled),
+		SUBTEST(find_out_of_order),
+		SUBTEST(find_gap),
+		SUBTEST(find_race),
+		SUBTEST(signal_forward),
+		SUBTEST(signal_backward),
+		SUBTEST(wait_forward),
+		SUBTEST(wait_backward),
+		SUBTEST(wait_random),
+	};
+	int ret;
+
+	pr_info("sizeof(dma_fence_chain)=%zu\n",
+		sizeof(struct dma_fence_chain));
+
+	slab_fences = KMEM_CACHE(mock_fence,
+				 SLAB_TYPESAFE_BY_RCU |
+				 SLAB_HWCACHE_ALIGN);
+	if (!slab_fences)
+		return -ENOMEM;
+
+	ret = subtests(tests, NULL);
+
+	kmem_cache_destroy(slab_fences);
+	return ret;
+}
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 12/17] dma-buf: Proxy fence, an unsignaled fence placeholder
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (9 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 11/17] dma-buf: Exercise dma-fence-chain under selftests Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 13/17] drm/syncobj: Allow use of dma-fence-proxy Chris Wilson
                   ` (8 subsequent siblings)
  19 siblings, 0 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

Often we need to create a fence for a future event that has not yet been
associated with a fence. We can store a proxy fence, a placeholder, in
the timeline and replace it later when the real fence is known. Any
listeners that attach to the proxy fence will automatically be signaled
when the real fence completes, and any future listeners will instead be
attached directly to the real fence, avoiding any indirection overhead.
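
A rough sketch of the intended flow (the slot, the real fence, the callback
and its dma_fence_cb are stand-ins for caller state, not part of this patch):

    struct dma_fence __rcu *slot;
    struct dma_fence *proxy, *old;

    proxy = dma_fence_create_proxy();   /* placeholder, initially unsignaled */
    if (!proxy)
            return -ENOMEM;
    RCU_INIT_POINTER(slot, proxy);

    /* consumers treat the proxy like any other fence */
    dma_fence_add_callback(proxy, &cb, callback);

    /* ... later, once the real fence is known ... */
    old = dma_fence_replace_proxy(&slot, real);
    dma_fence_put(old);

    /* when 'real' signals, the proxy and its callbacks are signaled in turn */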

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
 drivers/dma-buf/Makefile             |  13 +-
 drivers/dma-buf/dma-fence-private.h  |  20 +
 drivers/dma-buf/dma-fence-proxy.c    | 189 +++++++++
 drivers/dma-buf/dma-fence.c          |   4 +-
 drivers/dma-buf/selftests.h          |   1 +
 drivers/dma-buf/st-dma-fence-proxy.c | 581 +++++++++++++++++++++++++++
 include/linux/dma-fence-proxy.h      |  20 +
 7 files changed, 824 insertions(+), 4 deletions(-)
 create mode 100644 drivers/dma-buf/dma-fence-private.h
 create mode 100644 drivers/dma-buf/dma-fence-proxy.c
 create mode 100644 drivers/dma-buf/st-dma-fence-proxy.c
 create mode 100644 include/linux/dma-fence-proxy.h

diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 995e05f609ff..afaf6dadd9a3 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
-	 dma-resv.o seqno-fence.o
+obj-y := \
+	dma-buf.o \
+	dma-fence.o \
+	dma-fence-array.o \
+	dma-fence-chain.o \
+	dma-fence-proxy.o \
+	dma-resv.o \
+	seqno-fence.o
 obj-$(CONFIG_DMABUF_HEAPS)	+= dma-heap.o
 obj-$(CONFIG_DMABUF_HEAPS)	+= heaps/
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
@@ -10,6 +16,7 @@ obj-$(CONFIG_UDMABUF)		+= udmabuf.o
 dmabuf_selftests-y := \
 	selftest.o \
 	st-dma-fence.o \
-	st-dma-fence-chain.o
+	st-dma-fence-chain.o \
+	st-dma-fence-proxy.o
 
 obj-$(CONFIG_DMABUF_SELFTESTS)	+= dmabuf_selftests.o
diff --git a/drivers/dma-buf/dma-fence-private.h b/drivers/dma-buf/dma-fence-private.h
new file mode 100644
index 000000000000..6924d28af0fa
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-private.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Fence mechanism for dma-buf and to allow for asynchronous dma access
+ *
+ * Copyright (C) 2012 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ */
+
+#ifndef DMA_FENCE_PRIVATE_H
+#define DMA_FENCE_PRIVATE_H
+
+struct dma_fence;
+
+bool __dma_fence_enable_signaling(struct dma_fence *fence);
+
+#endif /* DMA_FENCE_PRIVATE_H */
diff --git a/drivers/dma-buf/dma-fence-proxy.c b/drivers/dma-buf/dma-fence-proxy.c
new file mode 100644
index 000000000000..6dce543d0757
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-proxy.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dma-fence-proxy: placeholder unsignaled fence
+ *
+ * Copyright (C) 2017-2019 Intel Corporation
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-proxy.h>
+#include <linux/export.h>
+#include <linux/irq_work.h>
+#include <linux/slab.h>
+
+#include "dma-fence-private.h"
+
+struct dma_fence_proxy {
+	struct dma_fence base;
+	spinlock_t lock;
+
+	struct dma_fence *real;
+	struct dma_fence_cb cb;
+	struct irq_work work;
+};
+
+static const char *proxy_get_driver_name(struct dma_fence *fence)
+{
+	struct dma_fence_proxy *p = container_of(fence, typeof(*p), base);
+	struct dma_fence *real = READ_ONCE(p->real);
+
+	return real ? real->ops->get_driver_name(real) : "proxy";
+}
+
+static const char *proxy_get_timeline_name(struct dma_fence *fence)
+{
+	struct dma_fence_proxy *p = container_of(fence, typeof(*p), base);
+	struct dma_fence *real = READ_ONCE(p->real);
+
+	return real ? real->ops->get_timeline_name(real) : "unset";
+}
+
+static void proxy_irq_work(struct irq_work *work)
+{
+	struct dma_fence_proxy *p = container_of(work, typeof(*p), work);
+
+	dma_fence_signal(&p->base);
+	dma_fence_put(&p->base);
+}
+
+static void proxy_callback(struct dma_fence *real, struct dma_fence_cb *cb)
+{
+	struct dma_fence_proxy *p = container_of(cb, typeof(*p), cb);
+
+	if (real->error)
+		dma_fence_set_error(&p->base, real->error);
+
+	/* Lower the height of the proxy chain -> single stack frame */
+	irq_work_queue(&p->work);
+}
+
+static bool proxy_enable_signaling(struct dma_fence *fence)
+{
+	struct dma_fence_proxy *p = container_of(fence, typeof(*p), base);
+	struct dma_fence *real = READ_ONCE(p->real);
+	bool ret = true;
+
+	if (real) {
+		spin_lock_nested(real->lock, SINGLE_DEPTH_NESTING);
+		ret = __dma_fence_enable_signaling(real);
+		spin_unlock(real->lock);
+	}
+
+	return ret;
+}
+
+static void proxy_release(struct dma_fence *fence)
+{
+	struct dma_fence_proxy *p = container_of(fence, typeof(*p), base);
+
+	dma_fence_put(p->real);
+	dma_fence_free(&p->base);
+}
+
+static const struct dma_fence_ops dma_fence_proxy_ops = {
+	.get_driver_name = proxy_get_driver_name,
+	.get_timeline_name = proxy_get_timeline_name,
+	.enable_signaling = proxy_enable_signaling,
+	.wait = dma_fence_default_wait,
+	.release = proxy_release,
+};
+
+/**
+ * dma_fence_create_proxy - Create an unset dma-fence
+ *
+ * dma_fence_create_proxy() creates a new dma_fence stub that is initially
+ * unsignaled and may later be replaced with a real fence. Any listeners
+ * to the proxy fence will be signaled when the target fence signals its
+ * completion.
+ */
+struct dma_fence *dma_fence_create_proxy(void)
+{
+	struct dma_fence_proxy *p;
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return NULL;
+
+	spin_lock_init(&p->lock);
+	dma_fence_init(&p->base, &dma_fence_proxy_ops, &p->lock,
+		       dma_fence_context_alloc(1), 0);
+	init_irq_work(&p->work, proxy_irq_work);
+
+	return &p->base;
+}
+EXPORT_SYMBOL(dma_fence_create_proxy);
+
+static void wrap_signal_locked(struct dma_fence *fence, struct dma_fence *real)
+{
+	if (real->error)
+		dma_fence_set_error(fence, real->error);
+	dma_fence_signal_locked(fence);
+}
+
+static void proxy_assign(struct dma_fence *fence, struct dma_fence *real)
+{
+	struct dma_fence_proxy *p = container_of(fence, typeof(*p), base);
+	unsigned long flags;
+
+	if (WARN_ON(fence == real))
+		return;
+
+	if (WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
+		return;
+
+	if (WARN_ON(p->real))
+		return;
+
+	spin_lock_irqsave(p->base.lock, flags);
+
+	if (unlikely(!real)) {
+		dma_fence_signal_locked(&p->base);
+		goto unlock;
+	}
+
+	p->real = dma_fence_get(real);
+
+	spin_lock_nested(real->lock, SINGLE_DEPTH_NESTING);
+	if (dma_fence_is_signaled(real)) {
+		wrap_signal_locked(&p->base, real);
+	} else if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+			    &p->base.flags) &&
+		   !__dma_fence_enable_signaling(real)) {
+		wrap_signal_locked(&p->base, real);
+	} else {
+		dma_fence_get(&p->base);
+		p->cb.func = proxy_callback;
+		list_add_tail(&p->cb.node, &real->cb_list);
+	}
+	spin_unlock(real->lock);
+
+unlock:
+	spin_unlock_irqrestore(p->base.lock, flags);
+}
+
+/**
+ * dma_fence_replace_proxy - Replace the proxy fence with the real target
+ * @slot: pointer to location of fence to update
+ * @fence: the new fence to store in @slot
+ *
+ * Once the real dma_fence is known, we can replace the proxy fence holder
+ * with a pointer to the real dma fence. Future listeners will attach to
+ * the real fence, avoiding any indirection overhead. Previous listeners
+ * will remain attached to the proxy fence, and be signaled in turn when
+ * the target fence completes.
+ */
+struct dma_fence *
+dma_fence_replace_proxy(struct dma_fence __rcu **slot, struct dma_fence *fence)
+{
+	struct dma_fence *old;
+
+	if (fence)
+		dma_fence_get(fence);
+
+	old = rcu_replace_pointer(*slot, fence, true);
+	if (old && old->ops == &dma_fence_proxy_ops)
+		proxy_assign(old, fence);
+
+	return old;
+}
+EXPORT_SYMBOL(dma_fence_replace_proxy);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 052a41e2451c..fa7bedc6703d 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -19,6 +19,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/dma_fence.h>
 
+#include "dma-fence-private.h"
+
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
 EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
@@ -273,7 +275,7 @@ void dma_fence_free(struct dma_fence *fence)
 }
 EXPORT_SYMBOL(dma_fence_free);
 
-static bool __dma_fence_enable_signaling(struct dma_fence *fence)
+bool __dma_fence_enable_signaling(struct dma_fence *fence)
 {
 	bool was_set;
 
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
index 55918ef9adab..616eca70e2d8 100644
--- a/drivers/dma-buf/selftests.h
+++ b/drivers/dma-buf/selftests.h
@@ -12,3 +12,4 @@
 selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
 selftest(dma_fence, dma_fence)
 selftest(dma_fence_chain, dma_fence_chain)
+selftest(dma_fence_proxy, dma_fence_proxy)
diff --git a/drivers/dma-buf/st-dma-fence-proxy.c b/drivers/dma-buf/st-dma-fence-proxy.c
new file mode 100644
index 000000000000..658f6b90abc4
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence-proxy.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-proxy.h>
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "selftest.h"
+
+static struct kmem_cache *slab_fences;
+
+static struct mock_fence {
+	struct dma_fence base;
+	spinlock_t lock;
+} *to_mock_fence(struct dma_fence *f) {
+	return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+	return "mock";
+}
+
+static void mock_fence_release(struct dma_fence *f)
+{
+	kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+static const struct dma_fence_ops mock_ops = {
+	.get_driver_name = mock_name,
+	.get_timeline_name = mock_name,
+	.release = mock_fence_release,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+	struct mock_fence *f;
+
+	f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+	if (!f)
+		return NULL;
+
+	spin_lock_init(&f->lock);
+	dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+	return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+	struct dma_fence *f;
+
+	f = dma_fence_create_proxy();
+	if (!f)
+		return -ENOMEM;
+
+	dma_fence_signal(f);
+	dma_fence_put(f);
+
+	return 0;
+}
+
+struct fences {
+	struct dma_fence *real;
+	struct dma_fence *proxy;
+	struct dma_fence __rcu *slot;
+};
+
+static int create_fences(struct fences *f, bool attach)
+{
+	f->proxy = dma_fence_create_proxy();
+	if (!f->proxy)
+		return -ENOMEM;
+
+	RCU_INIT_POINTER(f->slot, f->proxy);
+
+	f->real = mock_fence();
+	if (!f->real) {
+		dma_fence_put(f->proxy);
+		return -ENOMEM;
+	}
+
+	if (attach)
+		dma_fence_replace_proxy(&f->slot, f->real);
+
+	return 0;
+}
+
+static void free_fences(struct fences *f)
+{
+	dma_fence_put(dma_fence_replace_proxy(&f->slot, NULL));
+	dma_fence_put(f->real);
+	dma_fence_put(f->proxy);
+}
+
+static int wrap_signaling(void *arg)
+{
+	struct fences f;
+	int err = -EINVAL;
+
+	if (create_fences(&f, true))
+		return -ENOMEM;
+
+	if (dma_fence_is_signaled(f.proxy)) {
+		pr_err("Fence unexpectedly signaled on creation\n");
+		goto err_free;
+	}
+
+	if (dma_fence_signal(f.real)) {
+		pr_err("Fence reported being already signaled\n");
+		goto err_free;
+	}
+
+	if (!dma_fence_is_signaled(f.proxy)) {
+		pr_err("Fence not reporting signaled\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	free_fences(&f);
+	return err;
+}
+
+static int wrap_signaling_recurse(void *arg)
+{
+	struct fences f;
+	struct dma_fence *chain;
+	int err = -EINVAL;
+
+	if (create_fences(&f, false))
+		return -ENOMEM;
+
+	chain = dma_fence_create_proxy();
+	if (!chain) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	dma_fence_replace_proxy(&f.slot, chain);
+	dma_fence_put(dma_fence_replace_proxy(&f.slot, f.real));
+	dma_fence_put(chain);
+
+	/* f.real <- chain <- f.proxy */
+
+	if (dma_fence_is_signaled(f.proxy)) {
+		pr_err("Fence unexpectedly signaled on creation\n");
+		goto err_free;
+	}
+
+	if (dma_fence_signal(f.real)) {
+		pr_err("Fence reported being already signaled\n");
+		goto err_free;
+	}
+
+	if (!dma_fence_is_signaled(f.proxy)) {
+		pr_err("Fence not reporting signaled\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	free_fences(&f);
+	return err;
+}
+
+struct simple_cb {
+	struct dma_fence_cb cb;
+	bool seen;
+};
+
+static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+	smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
+}
+
+static int wrap_add_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct fences f;
+	int err = -EINVAL;
+
+	if (create_fences(&f, true))
+		return -ENOMEM;
+
+	if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f.real);
+	if (!cb.seen) {
+		pr_err("Callback failed!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	free_fences(&f);
+	return err;
+}
+
+static int wrap_add_callback_recurse(void *arg)
+{
+	struct simple_cb cb = {};
+	struct dma_fence *chain;
+	struct fences f;
+	int err = -EINVAL;
+
+	if (create_fences(&f, false))
+		return -ENOMEM;
+
+	chain = dma_fence_create_proxy();
+	if (!chain) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	dma_fence_replace_proxy(&f.slot, chain);
+	dma_fence_put(dma_fence_replace_proxy(&f.slot, f.real));
+	dma_fence_put(chain);
+
+	/* f.real <- chain <- f.proxy */
+
+	if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f.real);
+	if (!cb.seen) {
+		pr_err("Callback failed!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	free_fences(&f);
+	return err;
+}
+
+static int wrap_late_add_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct fences f;
+	int err = -EINVAL;
+
+	if (create_fences(&f, true))
+		return -ENOMEM;
+
+	dma_fence_signal(f.real);
+
+	if (!dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+		pr_err("Added callback, but fence was already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f.real);
+	if (cb.seen) {
+		pr_err("Callback called after failed attachment!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	free_fences(&f);
+	return err;
+}
+
+static int wrap_early_add_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct fences f;
+	int err = -EINVAL;
+
+	if (create_fences(&f, false))
+		return -ENOMEM;
+
+	if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_replace_proxy(&f.slot, f.real);
+	dma_fence_signal(f.real);
+	if (!cb.seen) {
+		pr_err("Callback failed!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	free_fences(&f);
+	return err;
+}
+
+static int wrap_early_add_callback_late(void *arg)
+{
+	struct simple_cb cb = {};
+	struct fences f;
+	int err = -EINVAL;
+
+	if (create_fences(&f, false))
+		return -ENOMEM;
+
+	dma_fence_signal(f.real);
+
+	if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_replace_proxy(&f.slot, f.real);
+	dma_fence_signal(f.real);
+	if (!cb.seen) {
+		pr_err("Callback failed!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	free_fences(&f);
+	return err;
+}
+
+static int wrap_early_add_callback_early(void *arg)
+{
+	struct simple_cb cb = {};
+	struct fences f;
+	int err = -EINVAL;
+
+	if (create_fences(&f, false))
+		return -ENOMEM;
+
+	if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_replace_proxy(&f.slot, f.real);
+	dma_fence_signal(f.real);
+	if (!cb.seen) {
+		pr_err("Callback failed!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	free_fences(&f);
+	return err;
+}
+
+static int wrap_rm_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct fences f;
+	int err = -EINVAL;
+
+	if (create_fences(&f, true))
+		return -ENOMEM;
+
+	if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	if (!dma_fence_remove_callback(f.proxy, &cb.cb)) {
+		pr_err("Failed to remove callback!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f.real);
+	if (cb.seen) {
+		pr_err("Callback still signaled after removal!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	free_fences(&f);
+	return err;
+}
+
+static int wrap_late_rm_callback(void *arg)
+{
+	struct simple_cb cb = {};
+	struct fences f;
+	int err = -EINVAL;
+
+	if (create_fences(&f, true))
+		return -ENOMEM;
+
+	if (dma_fence_add_callback(f.proxy, &cb.cb, simple_callback)) {
+		pr_err("Failed to add callback, fence already signaled!\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f.real);
+	if (!cb.seen) {
+		pr_err("Callback failed!\n");
+		goto err_free;
+	}
+
+	if (dma_fence_remove_callback(f.proxy, &cb.cb)) {
+		pr_err("Callback removal succeeded after being executed!\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	free_fences(&f);
+	return err;
+}
+
+static int wrap_status(void *arg)
+{
+	struct fences f;
+	int err = -EINVAL;
+
+	if (create_fences(&f, true))
+		return -ENOMEM;
+
+	if (dma_fence_get_status(f.proxy)) {
+		pr_err("Fence unexpectedly has signaled status on creation\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f.real);
+	if (!dma_fence_get_status(f.proxy)) {
+		pr_err("Fence not reporting signaled status\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	free_fences(&f);
+	return err;
+}
+
+static int wrap_error(void *arg)
+{
+	struct fences f;
+	int err = -EINVAL;
+
+	if (create_fences(&f, true))
+		return -ENOMEM;
+
+	dma_fence_set_error(f.real, -EIO);
+
+	if (dma_fence_get_status(f.proxy)) {
+		pr_err("Fence unexpectedly has error status before signal\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f.real);
+	if (dma_fence_get_status(f.proxy) != -EIO) {
+		pr_err("Fence not reporting error status, got %d\n",
+		       dma_fence_get_status(f.proxy));
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	free_fences(&f);
+	return err;
+}
+
+static int wrap_wait(void *arg)
+{
+	struct fences f;
+	int err = -EINVAL;
+
+	if (create_fences(&f, true))
+		return -ENOMEM;
+
+	if (dma_fence_wait_timeout(f.proxy, false, 0) != 0) {
+		pr_err("Wait reported complete before being signaled\n");
+		goto err_free;
+	}
+
+	dma_fence_signal(f.real);
+
+	if (dma_fence_wait_timeout(f.proxy, false, 0) == 0) {
+		pr_err("Wait reported incomplete after being signaled\n");
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	dma_fence_signal(f.real);
+	free_fences(&f);
+	return err;
+}
+
+struct wait_timer {
+	struct timer_list timer;
+	struct fences f;
+};
+
+static void wait_timer(struct timer_list *timer)
+{
+	struct wait_timer *wt = from_timer(wt, timer, timer);
+
+	dma_fence_signal(wt->f.real);
+}
+
+static int wrap_wait_timeout(void *arg)
+{
+	struct wait_timer wt;
+	int err = -EINVAL;
+
+	if (create_fences(&wt.f, true))
+		return -ENOMEM;
+
+	timer_setup_on_stack(&wt.timer, wait_timer, 0);
+
+	if (dma_fence_wait_timeout(wt.f.proxy, false, 1) != 0) {
+		pr_err("Wait reported complete before being signaled\n");
+		goto err_free;
+	}
+
+	mod_timer(&wt.timer, jiffies + 1);
+
+	if (dma_fence_wait_timeout(wt.f.proxy, false, 2) != 0) {
+		if (timer_pending(&wt.timer)) {
+			pr_notice("Timer did not fire within the jiffie!\n");
+			err = 0; /* not our fault! */
+		} else {
+			pr_err("Wait reported incomplete after timeout\n");
+		}
+		goto err_free;
+	}
+
+	err = 0;
+err_free:
+	del_timer_sync(&wt.timer);
+	destroy_timer_on_stack(&wt.timer);
+	dma_fence_signal(wt.f.real);
+	free_fences(&wt.f);
+	return err;
+}
+
+int dma_fence_proxy(void)
+{
+	static const struct subtest tests[] = {
+		SUBTEST(sanitycheck),
+		SUBTEST(wrap_signaling),
+		SUBTEST(wrap_signaling_recurse),
+		SUBTEST(wrap_add_callback),
+		SUBTEST(wrap_add_callback_recurse),
+		SUBTEST(wrap_late_add_callback),
+		SUBTEST(wrap_early_add_callback),
+		SUBTEST(wrap_early_add_callback_late),
+		SUBTEST(wrap_early_add_callback_early),
+		SUBTEST(wrap_rm_callback),
+		SUBTEST(wrap_late_rm_callback),
+		SUBTEST(wrap_status),
+		SUBTEST(wrap_error),
+		SUBTEST(wrap_wait),
+		SUBTEST(wrap_wait_timeout),
+	};
+	int ret;
+
+	slab_fences = KMEM_CACHE(mock_fence,
+				 SLAB_TYPESAFE_BY_RCU |
+				 SLAB_HWCACHE_ALIGN);
+	if (!slab_fences)
+		return -ENOMEM;
+
+	ret = subtests(tests, NULL);
+
+	kmem_cache_destroy(slab_fences);
+
+	return ret;
+}
diff --git a/include/linux/dma-fence-proxy.h b/include/linux/dma-fence-proxy.h
new file mode 100644
index 000000000000..587d5044f0bf
--- /dev/null
+++ b/include/linux/dma-fence-proxy.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * dma-fence-proxy: allows waiting upon unset and future fences
+ *
+ * Copyright (C) 2017 Intel Corporation
+ */
+
+#ifndef __LINUX_DMA_FENCE_PROXY_H
+#define __LINUX_DMA_FENCE_PROXY_H
+
+#include <linux/kernel.h>
+
+struct dma_fence;
+
+struct dma_fence *dma_fence_create_proxy(void);
+
+struct dma_fence *
+dma_fence_replace_proxy(struct dma_fence __rcu **slot, struct dma_fence *fence);
+
+#endif /* __LINUX_DMA_FENCE_PROXY_H */
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 13/17] drm/syncobj: Allow use of dma-fence-proxy
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (10 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 12/17] dma-buf: Proxy fence, an unsignaled fence placeholder Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 14/17] drm/i915/gem: Teach execbuf how to wait on future syncobj Chris Wilson
                   ` (7 subsequent siblings)
  19 siblings, 0 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

Allow the callers to supply a dma-fence-proxy for asynchronous waiting on
future fences.
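
The caller-visible effect is roughly the following (sketch only, error
handling elided; 'proxy' and 'real' are stand-ins for caller state):

    proxy = dma_fence_create_proxy();
    drm_syncobj_replace_fence(syncobj, proxy);  /* placeholder installed */

    /* waiters may already be attached to 'proxy' at this point */

    drm_syncobj_replace_fence(syncobj, real);   /* proxy retargeted to 'real' */
    /* existing waiters signal once 'real' signals; new waiters see 'real' */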

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/drm_syncobj.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 42d46414f767..e141db0e1eb6 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -184,6 +184,7 @@
  */
 
 #include <linux/anon_inodes.h>
+#include <linux/dma-fence-proxy.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/sched/signal.h>
@@ -324,14 +325,9 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 	struct dma_fence *old_fence;
 	struct syncobj_wait_entry *cur, *tmp;
 
-	if (fence)
-		dma_fence_get(fence);
-
 	spin_lock(&syncobj->lock);
 
-	old_fence = rcu_dereference_protected(syncobj->fence,
-					      lockdep_is_held(&syncobj->lock));
-	rcu_assign_pointer(syncobj->fence, fence);
+	old_fence = dma_fence_replace_proxy(&syncobj->fence, fence);
 
 	if (fence != old_fence) {
 		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 14/17] drm/i915/gem: Teach execbuf how to wait on future syncobj
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (11 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 13/17] drm/syncobj: Allow use of dma-fence-proxy Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 15/17] drm/i915/gem: Allow combining submit-fences with syncobj Chris Wilson
                   ` (6 subsequent siblings)
  19 siblings, 0 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

If a syncobj has not yet been assigned, treat it as a future fence and
install and wait upon a dma-fence-proxy. The proxy will be replaced by
the real fence later, and that fence will be responsible for signaling
our waiter.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 21 +++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 0893ce781a84..3c427bdfbb2d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -5,6 +5,7 @@
  */
 
 #include <linux/intel-iommu.h>
+#include <linux/dma-fence-proxy.h>
 #include <linux/dma-resv.h>
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>
@@ -2491,8 +2492,24 @@ await_fence_array(struct i915_execbuffer *eb,
 			continue;
 
 		fence = drm_syncobj_fence_get(syncobj);
-		if (!fence)
-			return -EINVAL;
+		if (!fence) {
+			struct dma_fence *old;
+
+			fence = dma_fence_create_proxy();
+			if (!fence)
+				return -ENOMEM;
+
+			spin_lock(&syncobj->lock);
+			old = rcu_dereference_protected(syncobj->fence, true);
+			if (unlikely(old)) {
+				dma_fence_put(fence);
+				fence = dma_fence_get(old);
+			} else {
+				rcu_assign_pointer(syncobj->fence,
+						   dma_fence_get(fence));
+			}
+			spin_unlock(&syncobj->lock);
+		}
 
 		err = i915_request_await_dma_fence(eb->request, fence);
 		dma_fence_put(fence);
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 15/17] drm/i915/gem: Allow combining submit-fences with syncobj
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (12 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 14/17] drm/i915/gem: Teach execbuf how to wait on future syncobj Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 16/17] drm/i915/gt: Declare when we enabled timeslicing Chris Wilson
                   ` (5 subsequent siblings)
  19 siblings, 0 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx

Fixes: a88b6e4cbafd ("drm/i915: Allow specification of parallel execbuf")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 10 +++++++---
 include/uapi/drm/i915_drm.h                    |  7 ++++---
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 3c427bdfbb2d..96a1f36f161c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2456,7 +2456,7 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
 		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
 			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
 
-		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
+		fences[n] = ptr_pack_bits(syncobj, fence.flags, 3);
 	}
 
 	return fences;
@@ -2487,7 +2487,7 @@ await_fence_array(struct i915_execbuffer *eb,
 		struct dma_fence *fence;
 		unsigned int flags;
 
-		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
+		syncobj = ptr_unpack_bits(fences[n], &flags, 3);
 		if (!(flags & I915_EXEC_FENCE_WAIT))
 			continue;
 
@@ -2511,7 +2511,11 @@ await_fence_array(struct i915_execbuffer *eb,
 			spin_unlock(&syncobj->lock);
 		}
 
-		err = i915_request_await_dma_fence(eb->request, fence);
+		if (flags & I915_EXEC_FENCE_WAIT_SUBMIT)
+			err = i915_request_await_execution(eb->request, fence,
+							   eb->engine->bond_execute);
+		else
+			err = i915_request_await_dma_fence(eb->request, fence);
 		dma_fence_put(fence);
 		if (err < 0)
 			return err;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 2813e579b480..3a24817ca25b 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1040,9 +1040,10 @@ struct drm_i915_gem_exec_fence {
 	 */
 	__u32 handle;
 
-#define I915_EXEC_FENCE_WAIT            (1<<0)
-#define I915_EXEC_FENCE_SIGNAL          (1<<1)
-#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
+#define I915_EXEC_FENCE_WAIT            (1u << 0)
+#define I915_EXEC_FENCE_SIGNAL          (1u << 1)
+#define I915_EXEC_FENCE_WAIT_SUBMIT     (1u << 2)
+#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_WAIT_SUBMIT << 1))
 	__u32 flags;
 };
 
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 16/17] drm/i915/gt: Declare when we enabled timeslicing
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (13 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 15/17] drm/i915/gem: Allow combining submit-fences with syncobj Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 17/17] drm/i915/gt: Yield the timeslice if caught waiting on a user semaphore Chris Wilson
                   ` (4 subsequent siblings)
  19 siblings, 0 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx; +Cc: Kenneth Graunke

Let userspace know whether it can trust timeslicing by reporting it as part
of the I915_PARAM_HAS_SCHEDULER::I915_SCHEDULER_CAP_TIMESLICING capability
bits.

v2: Only declare timeslicing if we can safely preempt userspace.
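
A rough userspace sketch of probing the new capability bit (header paths
and error handling simplified, not a definitive libdrm recipe):

    #include <stdbool.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static bool has_timeslicing(int fd)
    {
            int caps = 0;
            struct drm_i915_getparam gp = {
                    .param = I915_PARAM_HAS_SCHEDULER,
                    .value = &caps,
            };

            if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return false;

            return caps & I915_SCHEDULER_CAP_TIMESLICING;
    }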

Fixes: 8ee36e048c98 ("drm/i915/execlists: Minimalistic timeslicing")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Kenneth Graunke <kenneth@whitecape.org>
---
 drivers/gpu/drm/i915/gt/intel_engine.h      | 3 ++-
 drivers/gpu/drm/i915/gt/intel_engine_user.c | 5 +++++
 include/uapi/drm/i915_drm.h                 | 1 +
 3 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 29c8c03c5caa..a32dc82a90d4 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -326,7 +326,8 @@ intel_engine_has_timeslices(const struct intel_engine_cs *engine)
 	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
 		return false;
 
-	return intel_engine_has_semaphores(engine);
+	return (intel_engine_has_semaphores(engine) &&
+		intel_engine_has_preemption(engine));
 }
 
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 848decee9066..b84fdd722781 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -121,6 +121,11 @@ static void set_scheduler_caps(struct drm_i915_private *i915)
 			else
 				disabled |= BIT(map[i].sched);
 		}
+
+		if (intel_engine_has_timeslices(engine))
+			enabled |= I915_SCHEDULER_CAP_TIMESLICING;
+		else
+			disabled |= I915_SCHEDULER_CAP_TIMESLICING;
 	}
 
 	i915->caps.scheduler = enabled & ~disabled;
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 3a24817ca25b..ff7be293ec31 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -523,6 +523,7 @@ typedef struct drm_i915_irq_wait {
 #define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
 #define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
 #define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
+#define   I915_SCHEDULER_CAP_TIMESLICING	(1ul << 5)
 
 #define I915_PARAM_HUC_STATUS		 42
 
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* [Intel-gfx] [PATCH 17/17] drm/i915/gt: Yield the timeslice if caught waiting on a user semaphore
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (14 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 16/17] drm/i915/gt: Declare when we enabled timeslicing Chris Wilson
@ 2020-03-06 13:38 ` Chris Wilson
  2020-03-06 14:35 ` [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Mika Kuoppala
                   ` (3 subsequent siblings)
  19 siblings, 0 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-06 13:38 UTC (permalink / raw)
  To: intel-gfx; +Cc: Kenneth Graunke

If we find ourselves waiting on a MI_SEMAPHORE_WAIT, either within the
user batch or in our own preamble, the engine raises a
GT_WAIT_ON_SEMAPHORE interrupt. We can unmask that interrupt and so
respond to a semaphore wait by yielding the timeslice, if we have
another context to yield to!

The only real complication is that the interrupt is only generated for
the start of the semaphore wait, and is asynchronous to our
process_csb() -- that is, we may not have registered the timeslice before
we see the interrupt. To ensure we don't miss a potential semaphore
blocking forward progress (e.g. selftests/live_timeslice_preempt) we mark
the interrupt and apply it to the next timeslice regardless of whether it
was active at the time.

v2: We use semaphores in preempt-to-busy, within the timeslicing
implementation itself! Ergo, when we do insert a preemption due to an
expired timeslice, the new context may start with the missed semaphore
flagged by the retired context and be yielded, ad infinitum. To avoid
this, read the context id at the time of the semaphore interrupt and
only yield if that context is still active.
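
Condensed, the handshake implemented below looks roughly like this
(simplified from the hunks that follow, not the literal code):

    /* interrupt handler: note which context is busy-waiting */
    if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
            WRITE_ONCE(engine->execlists.yield,
                       ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
            if (del_timer(&engine->execlists.timer))
                    tasklet = true;     /* kick the dequeue early */
    }

    /* dequeue: only yield if that context is still the one on the HW */
    yield = upper_32_bits(rq->context->lrc_desc) == READ_ONCE(el->yield);
    if (timer_expired(&el->timer) || yield) {
            ring_set_paused(engine, 1);
            defer_active(engine);
            /* resubmit, granting the next context a fresh timeslice */
    }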

Fixes: 8ee36e048c98 ("drm/i915/execlists: Minimalistic timeslicing")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Kenneth Graunke <kenneth@whitecape.org>
---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c    |  6 +++
 drivers/gpu/drm/i915/gt/intel_engine_types.h |  9 +++++
 drivers/gpu/drm/i915/gt/intel_gt_irq.c       | 13 ++++++-
 drivers/gpu/drm/i915/gt/intel_lrc.c          | 40 +++++++++++++++++---
 drivers/gpu/drm/i915/i915_reg.h              |  1 +
 5 files changed, 61 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 53ac3f00909a..c93a805eddfb 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1290,6 +1290,12 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 
 	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
 		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
+	if (HAS_EXECLISTS(dev_priv)) {
+		drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
+			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
+		drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
+			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
+	}
 	drm_printf(m, "\tRING_START: 0x%08x\n",
 		   ENGINE_READ(engine, RING_START));
 	drm_printf(m, "\tRING_HEAD:  0x%08x\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 80cdde712842..ac283ab5d89c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -156,6 +156,15 @@ struct intel_engine_execlists {
 	 */
 	struct i915_priolist default_priolist;
 
+	/**
+	 * @yield: CCID at the time of the last semaphore-wait interrupt.
+	 *
+	 * Instead of leaving a semaphore busy-spinning on an engine, we would
+	 * like to switch to another ready context, i.e. yielding the semaphore
+	 * timeslice.
+	 */
+	u32 yield;
+
 	/**
 	 * @error_interrupt: CS Master EIR
 	 *
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index f0e7fd95165a..875bd0392ffc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -39,6 +39,13 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 		}
 	}
 
+	if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+		WRITE_ONCE(engine->execlists.yield,
+			   ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+		if (del_timer(&engine->execlists.timer))
+			tasklet = true;
+	}
+
 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
 		tasklet = true;
 
@@ -228,7 +235,8 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
 	const u32 irqs =
 		GT_CS_MASTER_ERROR_INTERRUPT |
 		GT_RENDER_USER_INTERRUPT |
-		GT_CONTEXT_SWITCH_INTERRUPT;
+		GT_CONTEXT_SWITCH_INTERRUPT |
+		GT_WAIT_SEMAPHORE_INTERRUPT;
 	struct intel_uncore *uncore = gt->uncore;
 	const u32 dmask = irqs << 16 | irqs;
 	const u32 smask = irqs << 16;
@@ -366,7 +374,8 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
 	const u32 irqs =
 		GT_CS_MASTER_ERROR_INTERRUPT |
 		GT_RENDER_USER_INTERRUPT |
-		GT_CONTEXT_SWITCH_INTERRUPT;
+		GT_CONTEXT_SWITCH_INTERRUPT |
+		GT_WAIT_SEMAPHORE_INTERRUPT;
 	const u32 gt_interrupts[] = {
 		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
 		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index a1d268880cfe..2558b8afed44 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1750,7 +1750,8 @@ static void defer_active(struct intel_engine_cs *engine)
 }
 
 static bool
-need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+need_timeslice(const struct intel_engine_cs *engine,
+	       const struct i915_request *rq)
 {
 	int hint;
 
@@ -1764,6 +1765,31 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
 	return hint >= effective_prio(rq);
 }
 
+static bool
+timeslice_yield(const struct intel_engine_execlists *el,
+		const struct i915_request *rq)
+{
+	/*
+	 * Once bitten, forever smitten!
+	 *
+	 * If the active context ever busy-waited on a semaphore,
+	 * it will be treated as a hog until the end of its timeslice.
+	 * The HW only sends an interrupt on the first miss, and we
+	 * do not know if that semaphore has been signaled, or even if it
+	 * is now stuck on another semaphore. Play safe, yield if it
+	 * might be stuck -- it will be given a fresh timeslice in
+	 * the near future.
+	 */
+	return upper_32_bits(rq->context->lrc_desc) == READ_ONCE(el->yield);
+}
+
+static bool
+timeslice_expired(const struct intel_engine_execlists *el,
+		  const struct i915_request *rq)
+{
+	return timer_expired(&el->timer) || timeslice_yield(el, rq);
+}
+
 static int
 switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
 {
@@ -1779,8 +1805,7 @@ timeslice(const struct intel_engine_cs *engine)
 	return READ_ONCE(engine->props.timeslice_duration_ms);
 }
 
-static unsigned long
-active_timeslice(const struct intel_engine_cs *engine)
+static unsigned long active_timeslice(const struct intel_engine_cs *engine)
 {
 	const struct i915_request *rq = *engine->execlists.active;
 
@@ -1935,13 +1960,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 			last = NULL;
 		} else if (need_timeslice(engine, last) &&
-			   timer_expired(&engine->execlists.timer)) {
+			   timeslice_expired(execlists, last)) {
 			ENGINE_TRACE(engine,
-				     "expired last=%llx:%lld, prio=%d, hint=%d\n",
+				     "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
 				     last->fence.context,
 				     last->fence.seqno,
 				     last->sched.attr.priority,
-				     execlists->queue_priority_hint);
+				     execlists->queue_priority_hint,
+				     yesno(timeslice_yield(execlists, last)));
 
 			ring_set_paused(engine, 1);
 			defer_active(engine);
@@ -2201,6 +2227,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		}
 		clear_ports(port + 1, last_port - port);
 
+		WRITE_ONCE(execlists->yield, -1);
 		execlists_submit_ports(engine);
 		set_preempt_timeout(engine);
 	} else {
@@ -4454,6 +4481,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
 	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
 	engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
+	engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
 }
 
 static void rcs_submission_override(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 80cf02a6eec1..7dd94227c4b8 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3092,6 +3092,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GT_BSD_CS_ERROR_INTERRUPT		(1 << 15)
 #define GT_BSD_USER_INTERRUPT			(1 << 12)
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1	(1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
+#define GT_WAIT_SEMAPHORE_INTERRUPT		REG_BIT(11) /* bdw+ */
 #define GT_CONTEXT_SWITCH_INTERRUPT		(1 <<  8)
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT	(1 <<  5) /* !snb */
 #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT	(1 <<  4)
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 37+ messages in thread

* Re: [Intel-gfx] [PATCH 07/17] drm/i915/perf: Schedule oa_config after modifying the contexts
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 07/17] drm/i915/perf: Schedule oa_config after modifying the contexts Chris Wilson
@ 2020-03-06 14:20   ` Lionel Landwerlin
  2020-03-10 11:17   ` Chris Wilson
  1 sibling, 0 replies; 37+ messages in thread
From: Lionel Landwerlin @ 2020-03-06 14:20 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On 06/03/2020 15:38, Chris Wilson wrote:
> We wish that the scheduler emit the context modification commands prior
> to enabling the oa_config, for which we must explicitly inform it of the
> ordering constraints. This is especially important as we now wait for
> the final oa_config setup to be completed and as this wait may be on a
> distinct context to the state modifications, we need that command packet
> to be always last in the queue.
>
> We borrow the i915_active for its ability to track multiple timelines
> and the last dma_fence on each; a flexible dma_resv. Keeping track of
> each dma_fence is important for us so that we can efficiently schedule
> the requests and reprioritise as required.
>
> Reported-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> ---
>   drivers/gpu/drm/i915/display/intel_overlay.c  |   8 +-
>   drivers/gpu/drm/i915/gt/intel_context_param.c |   2 +-
>   drivers/gpu/drm/i915/i915_active.c            |   6 +-
>   drivers/gpu/drm/i915/i915_active.h            |   2 +-
>   drivers/gpu/drm/i915/i915_perf.c              | 154 +++++++++++-------
>   drivers/gpu/drm/i915/i915_perf_types.h        |   5 +-
>   drivers/gpu/drm/i915/i915_vma.h               |   2 +-
>   drivers/gpu/drm/i915/selftests/i915_active.c  |   4 +-
>   8 files changed, 115 insertions(+), 68 deletions(-)
>
...
> @@ -2729,16 +2772,19 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
>   
>   static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
>   {
> -	struct i915_request *rq;
> +	struct i915_active *active;
> +	int err;
>   
> -	rq = stream->perf->ops.enable_metric_set(stream);
> -	if (IS_ERR(rq))
> -		return PTR_ERR(rq);
> +	active = i915_active_create();
> +	if (!active)
> +		return -ENOMEM;
>   
> -	i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
> -	i915_request_put(rq);
> +	err = stream->perf->ops.enable_metric_set(stream, active);
> +	if (err == 0)
> +		i915_active_wait(active, TASK_UNINTERRUPTIBLE);


Why not? :


err = i915_active_wait(active, TASK_INTERRUPTIBLE);
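
A minimal sketch of how that suggestion would slot into the hunk above,
assuming the error from the interruptible wait is simply propagated before
the final i915_active_put() (illustrative only, not the actual patch):

	err = stream->perf->ops.enable_metric_set(stream, active);
	if (err == 0)
		err = i915_active_wait(active, TASK_INTERRUPTIBLE); /* sketch: interruptible wait, error propagated */

	i915_active_put(active);
	return err;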


-Lionel


>   
> -	return 0;
> +	i915_active_put(active);
> +	return err;
>   }
>   
>   /**
> @@ -3192,7 +3238,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
>   		return -EINVAL;
>   
>   	if (config != stream->oa_config) {
> -		struct i915_request *rq;
> +		int err;
>   
>   		/*
>   		 * If OA is bound to a specific context, emit the
> @@ -3203,13 +3249,11 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
>   		 * When set globally, we use a low priority kernel context,
>   		 * so it will effectively take effect when idle.
>   		 */
> -		rq = emit_oa_config(stream, config, oa_context(stream));
> -		if (!IS_ERR(rq)) {
> +		err = emit_oa_config(stream, config, oa_context(stream), NULL);
> +		if (!err)
>   			config = xchg(&stream->oa_config, config);
> -			i915_request_put(rq);
> -		} else {
> -			ret = PTR_ERR(rq);
> -		}
> +		else
> +			ret = err;
>   	}
>   
>   	i915_oa_config_put(config);
> diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
> index a0e22f00f6cf..5eaf874a0d25 100644
> --- a/drivers/gpu/drm/i915/i915_perf_types.h
> +++ b/drivers/gpu/drm/i915/i915_perf_types.h
> @@ -21,6 +21,7 @@
>   
>   struct drm_i915_private;
>   struct file;
> +struct i915_active;
>   struct i915_gem_context;
>   struct i915_perf;
>   struct i915_vma;
> @@ -339,8 +340,8 @@ struct i915_oa_ops {
>   	 * counter reports being sampled. May apply system constraints such as
>   	 * disabling EU clock gating as required.
>   	 */
> -	struct i915_request *
> -		(*enable_metric_set)(struct i915_perf_stream *stream);
> +	int (*enable_metric_set)(struct i915_perf_stream *stream,
> +				 struct i915_active *active);
>   
>   	/**
>   	 * @disable_metric_set: Remove system constraints associated with using
> diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
> index e1ced1df13e1..3baa98fa5009 100644
> --- a/drivers/gpu/drm/i915/i915_vma.h
> +++ b/drivers/gpu/drm/i915/i915_vma.h
> @@ -380,7 +380,7 @@ int i915_vma_wait_for_bind(struct i915_vma *vma);
>   static inline int i915_vma_sync(struct i915_vma *vma)
>   {
>   	/* Wait for the asynchronous bindings and pending GPU reads */
> -	return i915_active_wait(&vma->active);
> +	return i915_active_wait(&vma->active, TASK_INTERRUPTIBLE);
>   }
>   
>   #endif
> diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
> index 68bbb1580162..7357d2130024 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_active.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_active.c
> @@ -153,7 +153,7 @@ static int live_active_wait(void *arg)
>   	if (IS_ERR(active))
>   		return PTR_ERR(active);
>   
> -	i915_active_wait(&active->base);
> +	i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
>   	if (!READ_ONCE(active->retired)) {
>   		struct drm_printer p = drm_err_printer(__func__);
>   
> @@ -230,7 +230,7 @@ static int live_active_barrier(void *arg)
>   	i915_active_release(&active->base);
>   
>   	if (err == 0)
> -		err = i915_active_wait(&active->base);
> +		err = i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
>   
>   	if (err == 0 && !READ_ONCE(active->retired)) {
>   		pr_err("i915_active not retired after flushing barriers!\n");


_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (15 preceding siblings ...)
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 17/17] drm/i915/gt: Yield the timeslice if caught waiting on a user semaphore Chris Wilson
@ 2020-03-06 14:35 ` Mika Kuoppala
  2020-03-06 21:13 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/17] " Patchwork
                   ` (2 subsequent siblings)
  19 siblings, 0 replies; 37+ messages in thread
From: Mika Kuoppala @ 2020-03-06 14:35 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> Due to the ordering of cmpxchg()/dma_fence_signal() inside node_retire(),
> we must also use the xchg() as our primary memory barrier to flush the
> outstanding callbacks after expected completion of the i915_active.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/selftests/i915_active.c | 29 ++++++++++++++------
>  1 file changed, 21 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
> index 3a37c67ab6c4..68bbb1580162 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_active.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_active.c
> @@ -311,20 +311,33 @@ static void spin_unlock_wait(spinlock_t *lock)
>  	spin_unlock_irq(lock);
>  }
>  
> +static void active_flush(struct i915_active *ref,
> +			 struct i915_active_fence *active)
> +{
> +	struct dma_fence *fence;
> +
> +	fence = xchg(__active_fence_slot(active), NULL);
> +	if (!fence)
> +		return;
> +
> +	spin_lock_irq(fence->lock);
> +	__list_del_entry(&active->cb.node);
> +	spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
> +	atomic_dec(&ref->count);
> +
> +	GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
> +}
> +
>  void i915_active_unlock_wait(struct i915_active *ref)
>  {
>  	if (i915_active_acquire_if_busy(ref)) {
>  		struct active_node *it, *n;
>  
> +		/* Wait for all active callbacks */
>  		rcu_read_lock();
> -		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
> -			struct dma_fence *f;
> -
> -			/* Wait for all active callbacks */
> -			f = rcu_dereference(it->base.fence);
> -			if (f)
> -				spin_unlock_wait(f->lock);
> -		}
> +		active_flush(ref, &ref->excl);
> +		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
> +			active_flush(ref, &it->base);
>  		rcu_read_unlock();
>  
>  		i915_active_release(ref);
> -- 
> 2.25.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [Intel-gfx] [PATCH 05/17] drm/i915: Wrap i915_active in a simple kreffed struct
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 05/17] drm/i915: Wrap i915_active in a simple kreffed struct Chris Wilson
@ 2020-03-06 14:44   ` Mika Kuoppala
  0 siblings, 0 replies; 37+ messages in thread
From: Mika Kuoppala @ 2020-03-06 14:44 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> For the convenience of callers that just want to use an i915_active to
> track a wide array of concurrent timelines, wrap the base i915_active
> struct inside a kref. This i915_active will self-destruct after use.
>

Found the user,
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_active.c | 53 ++++++++++++++++++++++++++++++
>  drivers/gpu/drm/i915/i915_active.h |  4 +++
>  2 files changed, 57 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
> index 7b3d6c12ad61..1826de14d2da 100644
> --- a/drivers/gpu/drm/i915/i915_active.c
> +++ b/drivers/gpu/drm/i915/i915_active.c
> @@ -881,6 +881,59 @@ void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
>  	active_fence_cb(fence, cb);
>  }
>  
> +struct auto_active {
> +	struct i915_active base;
> +	struct kref ref;
> +};
> +
> +struct i915_active *i915_active_get(struct i915_active *ref)
> +{
> +	struct auto_active *aa = container_of(ref, typeof(*aa), base);
> +
> +	kref_get(&aa->ref);
> +	return &aa->base;
> +}
> +
> +static void auto_release(struct kref *ref)
> +{
> +	struct auto_active *aa = container_of(ref, typeof(*aa), ref);
> +
> +	i915_active_fini(&aa->base);
> +	kfree(aa);
> +}
> +
> +void i915_active_put(struct i915_active *ref)
> +{
> +	struct auto_active *aa = container_of(ref, typeof(*aa), base);
> +
> +	kref_put(&aa->ref, auto_release);
> +}
> +
> +static int auto_active(struct i915_active *ref)
> +{
> +	i915_active_get(ref);
> +	return 0;
> +}
> +
> +static void auto_retire(struct i915_active *ref)
> +{
> +	i915_active_put(ref);
> +}
> +
> +struct i915_active *i915_active_create(void)
> +{
> +	struct auto_active *aa;
> +
> +	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
> +	if (!aa)
> +		return NULL;
> +
> +	kref_init(&aa->ref);
> +	i915_active_init(&aa->base, auto_active, auto_retire);
> +
> +	return &aa->base;
> +}
> +
>  #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
>  #include "selftests/i915_active.c"
>  #endif
> diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
> index 973ff0447c6c..7e438501333e 100644
> --- a/drivers/gpu/drm/i915/i915_active.h
> +++ b/drivers/gpu/drm/i915/i915_active.h
> @@ -215,4 +215,8 @@ void i915_request_add_active_barriers(struct i915_request *rq);
>  void i915_active_print(struct i915_active *ref, struct drm_printer *m);
>  void i915_active_unlock_wait(struct i915_active *ref);
>  
> +struct i915_active *i915_active_create(void);
> +struct i915_active *i915_active_get(struct i915_active *ref);
> +void i915_active_put(struct i915_active *ref);
> +
>  #endif /* _I915_ACTIVE_H_ */
> -- 
> 2.25.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (16 preceding siblings ...)
  2020-03-06 14:35 ` [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Mika Kuoppala
@ 2020-03-06 21:13 ` Patchwork
  2020-03-06 21:33 ` [Intel-gfx] ✗ Fi.CI.DOCS: " Patchwork
  2020-03-06 21:59 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
  19 siblings, 0 replies; 37+ messages in thread
From: Patchwork @ 2020-03-06 21:13 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active
URL   : https://patchwork.freedesktop.org/series/74392/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
982908c93abd drm/i915/selftests: Apply a heavy handed flush to i915_active
031f94155df6 drm/i915/execlists: Enable timeslice on partial virtual engine dequeue
9b61d8749081 drm/i915: Improve the start alignment of bonded pairs
5d9fda3690f4 drm/i915: Tweak scheduler's kick_submission()
c0d1c65cb89a drm/i915: Wrap i915_active in a simple kreffed struct
3109c8f9d4d0 drm/i915: Extend i915_request_await_active to use all timelines
5ae4e5feccba drm/i915/perf: Schedule oa_config after modifying the contexts
d0db810d00b0 drm/i915/selftests: Add request throughput measurement to perf
-:90: WARNING:LINE_SPACING: Missing a blank line after declarations
#90: FILE: drivers/gpu/drm/i915/selftests/i915_request.c:1515:
+	struct intel_context *ce;
+	IGT_TIMEOUT(end_time);

-:157: WARNING:LINE_SPACING: Missing a blank line after declarations
#157: FILE: drivers/gpu/drm/i915/selftests/i915_request.c:1582:
+	struct intel_context *ce;
+	IGT_TIMEOUT(end_time);

-:213: WARNING:LINE_SPACING: Missing a blank line after declarations
#213: FILE: drivers/gpu/drm/i915/selftests/i915_request.c:1638:
+	struct drm_i915_private *i915 = arg;
+	static int (* const func[])(void *arg) = {

-:220: WARNING:LINE_SPACING: Missing a blank line after declarations
#220: FILE: drivers/gpu/drm/i915/selftests/i915_request.c:1645:
+	struct intel_engine_cs *engine;
+	int (* const *fn)(void *arg);

-:265: WARNING:YIELD: Using yield() is generally wrong. See yield() kernel-doc (sched/core.c)
#265: FILE: drivers/gpu/drm/i915/selftests/i915_request.c:1690:
+		yield(); /* start all threads before we kthread_stop() */

total: 0 errors, 5 warnings, 0 checks, 306 lines checked
0a74ccdc875c dma-buf: Prettify typecasts for dma-fence-chain
3702ebf022c1 dma-buf: Report signaled links inside dma-fence-chain
e3df6c598dbd dma-buf: Exercise dma-fence-chain under selftests
-:33: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#33: 
new file mode 100644

-:61: CHECK:UNCOMMENTED_DEFINITION: spinlock_t definition without comment
#61: FILE: drivers/dma-buf/st-dma-fence-chain.c:24:
+	spinlock_t lock;

-:235: WARNING:EMBEDDED_FUNCTION_NAME: Prefer using '"%s...", __func__' to using 'find_seqno', this function's name, in a string
#235: FILE: drivers/dma-buf/st-dma-fence-chain.c:198:
+		pr_err("Reported %d for find_seqno(0)!\n", err);

-:244: WARNING:EMBEDDED_FUNCTION_NAME: Prefer using '"%s...", __func__' to using 'find_seqno', this function's name, in a string
#244: FILE: drivers/dma-buf/st-dma-fence-chain.c:207:
+			pr_err("Reported %d for find_seqno(%d:%d)!\n",

-:249: WARNING:EMBEDDED_FUNCTION_NAME: Prefer using '"%s...", __func__' to using 'find_seqno', this function's name, in a string
#249: FILE: drivers/dma-buf/st-dma-fence-chain.c:212:
+			pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",

-:272: WARNING:EMBEDDED_FUNCTION_NAME: Prefer using '"%s...", __func__' to using 'find_seqno', this function's name, in a string
#272: FILE: drivers/dma-buf/st-dma-fence-chain.c:235:
+			pr_err("Error not reported for future fence: find_seqno(%d:%d)!\n",

-:286: WARNING:EMBEDDED_FUNCTION_NAME: Prefer using '"%s...", __func__' to using 'find_seqno', this function's name, in a string
#286: FILE: drivers/dma-buf/st-dma-fence-chain.c:249:
+			pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",

-:737: WARNING:EMBEDDED_FUNCTION_NAME: Prefer using '"%s...", __func__' to using 'dma_fence_chain', this function's name, in a string
#737: FILE: drivers/dma-buf/st-dma-fence-chain.c:700:
+	pr_info("sizeof(dma_fence_chain)=%zu\n",

total: 0 errors, 7 warnings, 1 checks, 725 lines checked
dfcab52a6de6 dma-buf: Proxy fence, an unsignaled fence placeholder
-:45: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#45: 
new file mode 100644

-:93: CHECK:UNCOMMENTED_DEFINITION: spinlock_t definition without comment
#93: FILE: drivers/dma-buf/dma-fence-proxy.c:18:
+	spinlock_t lock;

-:321: CHECK:UNCOMMENTED_DEFINITION: spinlock_t definition without comment
#321: FILE: drivers/dma-buf/st-dma-fence-proxy.c:20:
+	spinlock_t lock;

-:481: WARNING:MEMORY_BARRIER: memory barrier without comment
#481: FILE: drivers/dma-buf/st-dma-fence-proxy.c:180:
+	smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);

total: 0 errors, 2 warnings, 2 checks, 852 lines checked
72c5da66b118 drm/syncobj: Allow use of dma-fence-proxy
2bf6c69d342d drm/i915/gem: Teach execbuf how to wait on future syncobj
e7ace1917afa drm/i915/gem: Allow combining submit-fences with syncobj
232c8948625d drm/i915/gt: Declare when we enabled timeslicing
62e2ea9500a8 drm/i915/gt: Yield the timeslice if caught waiting on a user semaphore

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [Intel-gfx] ✗ Fi.CI.DOCS: warning for series starting with [01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (17 preceding siblings ...)
  2020-03-06 21:13 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/17] " Patchwork
@ 2020-03-06 21:33 ` Patchwork
  2020-03-06 21:59 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
  19 siblings, 0 replies; 37+ messages in thread
From: Patchwork @ 2020-03-06 21:33 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active
URL   : https://patchwork.freedesktop.org/series/74392/
State : warning

== Summary ==

$ make htmldocs 2>&1 > /dev/null | grep i915
./drivers/gpu/drm/i915/display/intel_dpll_mgr.h:285: warning: Function parameter or member 'get_freq' not described in 'intel_shared_dpll_funcs'

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 37+ messages in thread

* [Intel-gfx] ✗ Fi.CI.BAT: failure for series starting with [01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active
  2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
                   ` (18 preceding siblings ...)
  2020-03-06 21:33 ` [Intel-gfx] ✗ Fi.CI.DOCS: " Patchwork
@ 2020-03-06 21:59 ` Patchwork
  19 siblings, 0 replies; 37+ messages in thread
From: Patchwork @ 2020-03-06 21:59 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active
URL   : https://patchwork.freedesktop.org/series/74392/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_8086 -> Patchwork_16862
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_16862 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_16862, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/index.html

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_16862:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_selftest@live@execlists:
    - fi-kbl-x1275:       [PASS][1] -> [DMESG-FAIL][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-kbl-x1275/igt@i915_selftest@live@execlists.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-kbl-x1275/igt@i915_selftest@live@execlists.html
    - fi-skl-6770hq:      [PASS][3] -> [DMESG-FAIL][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-skl-6770hq/igt@i915_selftest@live@execlists.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-skl-6770hq/igt@i915_selftest@live@execlists.html

  
New tests
---------

  New tests have been introduced between CI_DRM_8086 and Patchwork_16862:

### New IGT tests (2) ###

  * igt@dmabuf@all@dma_fence_chain:
    - Statuses : 42 pass(s)
    - Exec time: [7.38, 78.64] s

  * igt@dmabuf@all@dma_fence_proxy:
    - Statuses : 42 pass(s)
    - Exec time: [0.02, 0.19] s

  

Known issues
------------

  Here are the changes found in Patchwork_16862 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_exec_parallel@contexts:
    - fi-apl-guc:         [PASS][5] -> [INCOMPLETE][6] ([fdo#103927])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-apl-guc/igt@gem_exec_parallel@contexts.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-apl-guc/igt@gem_exec_parallel@contexts.html

  * igt@gem_mmap_gtt@basic:
    - fi-tgl-y:           [PASS][7] -> [DMESG-WARN][8] ([CI#94] / [i915#402])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-tgl-y/igt@gem_mmap_gtt@basic.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-tgl-y/igt@gem_mmap_gtt@basic.html

  * igt@i915_module_load@reload:
    - fi-skl-6770hq:      [PASS][9] -> [DMESG-WARN][10] ([i915#92]) +1 similar issue
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-skl-6770hq/igt@i915_module_load@reload.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-skl-6770hq/igt@i915_module_load@reload.html

  * igt@kms_chamelium@dp-edid-read:
    - fi-icl-u2:          [PASS][11] -> [FAIL][12] ([fdo#109635] / [i915#217])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-icl-u2/igt@kms_chamelium@dp-edid-read.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-icl-u2/igt@kms_chamelium@dp-edid-read.html

  * igt@kms_pipe_crc_basic@read-crc-pipe-a-frame-sequence:
    - fi-skl-6770hq:      [PASS][13] -> [SKIP][14] ([fdo#109271]) +5 similar issues
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-skl-6770hq/igt@kms_pipe_crc_basic@read-crc-pipe-a-frame-sequence.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-skl-6770hq/igt@kms_pipe_crc_basic@read-crc-pipe-a-frame-sequence.html

  * igt@kms_pipe_crc_basic@read-crc-pipe-b:
    - fi-skl-6770hq:      [PASS][15] -> [DMESG-WARN][16] ([i915#106])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-skl-6770hq/igt@kms_pipe_crc_basic@read-crc-pipe-b.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-skl-6770hq/igt@kms_pipe_crc_basic@read-crc-pipe-b.html

  
#### Possible fixes ####

  * igt@gem_exec_suspend@basic-s4-devices:
    - fi-tgl-y:           [FAIL][17] ([CI#94]) -> [PASS][18]
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-tgl-y/igt@gem_exec_suspend@basic-s4-devices.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-tgl-y/igt@gem_exec_suspend@basic-s4-devices.html

  * igt@gem_mmap@basic:
    - fi-tgl-y:           [DMESG-WARN][19] ([CI#94] / [i915#402]) -> [PASS][20]
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-tgl-y/igt@gem_mmap@basic.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-tgl-y/igt@gem_mmap@basic.html

  * igt@i915_selftest@live@dmabuf:
    - fi-ivb-3770:        [DMESG-WARN][21] -> [PASS][22]
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-ivb-3770/igt@i915_selftest@live@dmabuf.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-ivb-3770/igt@i915_selftest@live@dmabuf.html

  * igt@i915_selftest@live@hangcheck:
    - fi-ivb-3770:        [INCOMPLETE][23] -> [PASS][24]
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-ivb-3770/igt@i915_selftest@live@hangcheck.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-ivb-3770/igt@i915_selftest@live@hangcheck.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-icl-u2:          [FAIL][25] ([i915#217]) -> [PASS][26]
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8086/fi-icl-u2/igt@kms_chamelium@hdmi-hpd-fast.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/fi-icl-u2/igt@kms_chamelium@hdmi-hpd-fast.html

  
  [CI#94]: https://gitlab.freedesktop.org/gfx-ci/i915-infra/issues/94
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109635]: https://bugs.freedesktop.org/show_bug.cgi?id=109635
  [i915#106]: https://gitlab.freedesktop.org/drm/intel/issues/106
  [i915#217]: https://gitlab.freedesktop.org/drm/intel/issues/217
  [i915#402]: https://gitlab.freedesktop.org/drm/intel/issues/402
  [i915#92]: https://gitlab.freedesktop.org/drm/intel/issues/92


Participating hosts (50 -> 44)
------------------------------

  Additional (2): fi-skl-guc fi-skl-lmem 
  Missing    (8): fi-ilk-m540 fi-hsw-4200u fi-bdw-gvtdvm fi-byt-squawks fi-bsw-cyan fi-kbl-7560u fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_8086 -> Patchwork_16862

  CI-20190529: 20190529
  CI_DRM_8086: 3a1e69684036738b540510ffcc31964600bc0b3f @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5498: 1bb7a25a09fe3e653d310e8bdfbdde4a1934b326 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_16862: 62e2ea9500a8a90b6f1822b5a80366be4b412b1a @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

62e2ea9500a8 drm/i915/gt: Yield the timeslice if caught waiting on a user semaphore
232c8948625d drm/i915/gt: Declare when we enabled timeslicing
e7ace1917afa drm/i915/gem: Allow combining submit-fences with syncobj
2bf6c69d342d drm/i915/gem: Teach execbuf how to wait on future syncobj
72c5da66b118 drm/syncobj: Allow use of dma-fence-proxy
dfcab52a6de6 dma-buf: Proxy fence, an unsignaled fence placeholder
e3df6c598dbd dma-buf: Exercise dma-fence-chain under selftests
3702ebf022c1 dma-buf: Report signaled links inside dma-fence-chain
0a74ccdc875c dma-buf: Prettify typecasts for dma-fence-chain
d0db810d00b0 drm/i915/selftests: Add request throughput measurement to perf
5ae4e5feccba drm/i915/perf: Schedule oa_config after modifying the contexts
3109c8f9d4d0 drm/i915: Extend i915_request_await_active to use all timelines
c0d1c65cb89a drm/i915: Wrap i915_active in a simple kreffed struct
5d9fda3690f4 drm/i915: Tweak scheduler's kick_submission()
9b61d8749081 drm/i915: Improve the start alignment of bonded pairs
031f94155df6 drm/i915/execlists: Enable timeslice on partial virtual engine dequeue
982908c93abd drm/i915/selftests: Apply a heavy handed flush to i915_active

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16862/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [PATCH 02/17] drm/i915/execlists: Enable timeslice on partial virtual engine dequeue
  2020-03-06 13:38   ` [Intel-gfx] " Chris Wilson
@ 2020-03-07 23:20     ` Sasha Levin
  -1 siblings, 0 replies; 37+ messages in thread
From: Sasha Levin @ 2020-03-07 23:20 UTC (permalink / raw)
  To: Sasha Levin, Chris Wilson, intel-gfx
  Cc: mika.kuoppala, tvrtko.ursulin, Mika Kuoppala, Tvrtko Ursulin, stable

Hi

[This is an automated email]

This commit has been processed because it contains a "Fixes:" tag
fixing commit: 8ee36e048c98 ("drm/i915/execlists: Minimalistic timeslicing").

The bot has tested the following trees: v5.5.8, v5.4.24.

v5.5.8: Build OK!
v5.4.24: Failed to apply! Possible dependencies:
    16ffe73c186b ("drm/i915/pmu: Use GT parked for estimating RC6 while asleep")
    253a774bb08b ("drm/i915/execlists: Don't merely skip submission if maybe timeslicing")
    3c00660db183 ("drm/i915/execlists: Assert tasklet is locked for process_csb()")
    42014f69bb23 ("drm/i915: Hook up GT power management")
    5d904e3c5d40 ("drm/i915: Pass in intel_gt at some for_each_engine sites")
    61fa60ff6e6a ("drm/i915: Move GT init to intel_gt.c")
    c113236718e8 ("drm/i915: Extract GT render sleep (rc6) management")


NOTE: The patch will not be queued to stable trees until it is upstream.

How should we proceed with this patch?

-- 
Thanks
Sasha

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [Intel-gfx] [PATCH 02/17] drm/i915/execlists: Enable timeslice on partial virtual engine dequeue
@ 2020-03-07 23:20     ` Sasha Levin
  0 siblings, 0 replies; 37+ messages in thread
From: Sasha Levin @ 2020-03-07 23:20 UTC (permalink / raw)
  To: Sasha Levin, Chris Wilson, intel-gfx; +Cc: stable

Hi

[This is an automated email]

This commit has been processed because it contains a "Fixes:" tag
fixing commit: 8ee36e048c98 ("drm/i915/execlists: Minimalistic timeslicing").

The bot has tested the following trees: v5.5.8, v5.4.24.

v5.5.8: Build OK!
v5.4.24: Failed to apply! Possible dependencies:
    16ffe73c186b ("drm/i915/pmu: Use GT parked for estimating RC6 while asleep")
    253a774bb08b ("drm/i915/execlists: Don't merely skip submission if maybe timeslicing")
    3c00660db183 ("drm/i915/execlists: Assert tasklet is locked for process_csb()")
    42014f69bb23 ("drm/i915: Hook up GT power management")
    5d904e3c5d40 ("drm/i915: Pass in intel_gt at some for_each_engine sites")
    61fa60ff6e6a ("drm/i915: Move GT init to intel_gt.c")
    c113236718e8 ("drm/i915: Extract GT render sleep (rc6) management")


NOTE: The patch will not be queued to stable trees until it is upstream.

How should we proceed with this patch?

-- 
Thanks
Sasha
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [Intel-gfx] [PATCH 03/17] drm/i915: Improve the start alignment of bonded pairs
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 03/17] drm/i915: Improve the start alignment of bonded pairs Chris Wilson
@ 2020-03-10  9:59   ` Tvrtko Ursulin
  0 siblings, 0 replies; 37+ messages in thread
From: Tvrtko Ursulin @ 2020-03-10  9:59 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 06/03/2020 13:38, Chris Wilson wrote:
> Always wait on the start of the signaler request to reduce the problem
> of dequeueing the bonded pair too early -- we want both payloads to
> start at the same time, with no latency, and yet still allow others to
> make full use of the slack in the system. This reduces the amount of time
> we spend waiting on the semaphore used to synchronise the start of the
> bonded payload.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_request.c | 41 +++++++++++++++++++++++++----
>   1 file changed, 36 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 66efd16c4850..db11006b4ac9 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -1128,14 +1128,45 @@ __i915_request_await_execution(struct i915_request *to,
>   					  &from->fence))
>   		return 0;
>   
> -	/* Ensure both start together [after all semaphores in signal] */
> -	if (intel_engine_has_semaphores(to->engine))
> -		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
> -	else
> -		err = i915_request_await_start(to, from);
> +	/*
> +	 * Wait until the start of this request.
> +	 *
> +	 * The execution cb fires when we submit the request to HW. But in
> +	 * many cases this may be long before the request itself is ready to
> +	 * run (consider that we submit 2 requests for the same context, where
> +	 * the request of interest is behind an indefinite spinner). So we hook
> +	 * up to both to reduce our queues and keep the execution lag minimised
> +	 * in the worst case, though we hope that the await_start is elided.
> +	 */
> +	err = i915_request_await_start(to, from);
>   	if (err < 0)
>   		return err;
>   
> +	/*
> +	 * Ensure both start together [after all semaphores in signal]
> +	 *
> +	 * Now that we are queued to the HW at roughly the same time (thanks
> +	 * to the execute cb) and are ready to run at roughly the same time
> +	 * (thanks to the await start), our signaler may still be indefinitely
> +	 * delayed by waiting on a semaphore from a remote engine. If our
> +	 * signaler depends on a semaphore, so indirectly do we, and we do not
> +	 * want to start our payload until our signaler also starts theirs.
> +	 * So we wait.
> +	 *
> +	 * However, there is also a second condition for which we need to wait
> +	 * for the precise start of the signaler. Consider that the signaler
> +	 * was submitted in a chain of requests following another context
> +	 * (with just an ordinary intra-engine fence dependency between the
> +	 * two). In this case the signaler is queued to HW, but not for
> +	 * immediate execution, and so we must wait until it reaches the
> +	 * active slot.
> +	 */
> +	if (intel_engine_has_semaphores(to->engine)) {
> +		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
> +		if (err < 0)
> +			return err;
> +	}
> +
>   	/* Couple the dependency tree for PI on this exposed to->fence */
>   	if (to->engine->schedule) {
>   		err = i915_sched_node_add_dependency(&to->sched, &from->sched);
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [Intel-gfx] [PATCH 04/17] drm/i915: Tweak scheduler's kick_submission()
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 04/17] drm/i915: Tweak scheduler's kick_submission() Chris Wilson
@ 2020-03-10 10:07   ` Tvrtko Ursulin
  2020-03-10 11:00     ` Chris Wilson
  0 siblings, 1 reply; 37+ messages in thread
From: Tvrtko Ursulin @ 2020-03-10 10:07 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 06/03/2020 13:38, Chris Wilson wrote:
> Skip useless priority bumping on adding a new dependency, but otherwise
> prevent tasklet scheduling until we have completed all the potential
> rescheduling.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_scheduler.c | 7 ++++++-
>   1 file changed, 6 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index 52f71e83e088..603cba36d6a4 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -209,6 +209,8 @@ static void kick_submission(struct intel_engine_cs *engine,
>   	if (!inflight)
>   		goto unlock;
>   
> +	engine->execlists.queue_priority_hint = prio;
> +

What is the significance of moving this up? I couldn't correlate it to 
the commit message.

>   	/*
>   	 * If we are already the currently executing context, don't
>   	 * bother evaluating if we should preempt ourselves.
> @@ -216,7 +218,6 @@ static void kick_submission(struct intel_engine_cs *engine,
>   	if (inflight->context == rq->context)
>   		goto unlock;
>   
> -	engine->execlists.queue_priority_hint = prio;
>   	if (need_preempt(prio, rq_prio(inflight)))
>   		tasklet_hi_schedule(&engine->execlists.tasklet);
>   
> @@ -463,11 +464,15 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
>   	if (!dep)
>   		return -ENOMEM;
>   
> +	local_bh_disable();
> +
>   	if (!__i915_sched_node_add_dependency(node, signal, dep,
>   					      I915_DEPENDENCY_EXTERNAL |
>   					      I915_DEPENDENCY_ALLOC))
>   		i915_dependency_free(dep);
>   
> +	local_bh_enable(); /* kick submission tasklet */
> +

And this presumably postpones the tasklet until __bump_priority -> 
__i915_schedule is finished. But then why does the request submission path,
which calls __i915_sched_node_add_dependency directly, not need this
treatment?

Regards,

Tvrtko

>   	return 0;
>   }
>   
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [Intel-gfx] [PATCH 06/17] drm/i915: Extend i915_request_await_active to use all timelines
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 06/17] drm/i915: Extend i915_request_await_active to use all timelines Chris Wilson
@ 2020-03-10 10:18   ` Tvrtko Ursulin
  0 siblings, 0 replies; 37+ messages in thread
From: Tvrtko Ursulin @ 2020-03-10 10:18 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 06/03/2020 13:38, Chris Wilson wrote:
> Extend i915_request_await_active() to be able to asynchronously wait on
> all the tracked timelines simultaneously.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_active.c | 51 +++++++++++++++++++++++-------
>   drivers/gpu/drm/i915/i915_active.h |  5 ++-
>   drivers/gpu/drm/i915/i915_vma.c    |  2 +-
>   3 files changed, 45 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
> index 1826de14d2da..e659688db043 100644
> --- a/drivers/gpu/drm/i915/i915_active.c
> +++ b/drivers/gpu/drm/i915/i915_active.c
> @@ -518,23 +518,52 @@ int i915_active_wait(struct i915_active *ref)
>   	return 0;
>   }
>   
> -int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
> +static int await_active(struct i915_request *rq,
> +			struct i915_active_fence *active)
> +{
> +	struct dma_fence *fence;
> +
> +	if (is_barrier(active))
> +		return 0;
> +
> +	fence = i915_active_fence_get(active);
> +	if (fence) {
> +		int err;
> +
> +		err = i915_request_await_dma_fence(rq, fence);
> +		dma_fence_put(fence);
> +		if (err < 0)
> +			return err;
> +	}
> +
> +	return 0;
> +}
> +
> +int i915_request_await_active(struct i915_request *rq,
> +			      struct i915_active *ref,
> +			      unsigned int flags)
>   {
>   	int err = 0;
>   
> +	/* We must always wait for the exclusive fence! */
>   	if (rcu_access_pointer(ref->excl.fence)) {
> -		struct dma_fence *fence;
> -
> -		rcu_read_lock();
> -		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
> -		rcu_read_unlock();
> -		if (fence) {
> -			err = i915_request_await_dma_fence(rq, fence);
> -			dma_fence_put(fence);
> -		}
> +		err = await_active(rq, &ref->excl);
> +		if (err)
> +			return err;
>   	}
>   
> -	/* In the future we may choose to await on all fences */
> +	if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
> +		struct active_node *it, *n;
> +
> +		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
> +			err = await_active(rq, &it->base);
> +			if (err)
> +				break;
> +		}
> +		i915_active_release(ref);
> +		if (err)
> +			return err;
> +	}
>   
>   	return err;
>   }
> diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
> index 7e438501333e..e3c13060c4c7 100644
> --- a/drivers/gpu/drm/i915/i915_active.h
> +++ b/drivers/gpu/drm/i915/i915_active.h
> @@ -183,7 +183,10 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
>   
>   int i915_active_wait(struct i915_active *ref);
>   
> -int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
> +int i915_request_await_active(struct i915_request *rq,
> +			      struct i915_active *ref,
> +			      unsigned int flags);
> +#define I915_ACTIVE_AWAIT_ALL BIT(0)
>   
>   int i915_active_acquire(struct i915_active *ref);
>   bool i915_active_acquire_if_busy(struct i915_active *ref);
> diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
> index 3dde671145f7..5b3efb43a8ef 100644
> --- a/drivers/gpu/drm/i915/i915_vma.c
> +++ b/drivers/gpu/drm/i915/i915_vma.c
> @@ -1173,7 +1173,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
>   	GEM_BUG_ON(!i915_vma_is_pinned(vma));
>   
>   	/* Wait for the vma to be bound before we start! */
> -	err = i915_request_await_active(rq, &vma->active);
> +	err = i915_request_await_active(rq, &vma->active, 0);
>   	if (err)
>   		return err;
>   
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [Intel-gfx] [PATCH 08/17] drm/i915/selftests: Add request throughput measurement to perf
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 08/17] drm/i915/selftests: Add request throughput measurement to perf Chris Wilson
@ 2020-03-10 10:38   ` Tvrtko Ursulin
  2020-03-10 11:09     ` Chris Wilson
  0 siblings, 1 reply; 37+ messages in thread
From: Tvrtko Ursulin @ 2020-03-10 10:38 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 06/03/2020 13:38, Chris Wilson wrote:
> Under ideal circumstances, the driver should be able to keep the GPU
> fully saturated with work. Measure how close to ideal we get under the
> harshest of conditions with no user payload.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   .../drm/i915/selftests/i915_perf_selftests.h  |   1 +
>   drivers/gpu/drm/i915/selftests/i915_request.c | 285 +++++++++++++++++-
>   2 files changed, 285 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
> index 3bf7f53e9924..d8da142985eb 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
> +++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
> @@ -16,5 +16,6 @@
>    * Tests are executed in order by igt/i915_selftest
>    */
>   selftest(engine_cs, intel_engine_cs_perf_selftests)
> +selftest(request, i915_request_perf_selftests)
>   selftest(blt, i915_gem_object_blt_perf_selftests)
>   selftest(region, intel_memory_region_perf_selftests)
> diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
> index f89d9c42f1fa..d4c088cfe4e1 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_request.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_request.c
> @@ -23,6 +23,7 @@
>    */
>   
>   #include <linux/prime_numbers.h>
> +#include <linux/pm_qos.h>
>   
>   #include "gem/i915_gem_pm.h"
>   #include "gem/selftests/mock_context.h"
> @@ -1233,7 +1234,7 @@ static int live_parallel_engines(void *arg)
>   		struct igt_live_test t;
>   		unsigned int idx;
>   
> -		snprintf(name, sizeof(name), "%pS", fn);
> +		snprintf(name, sizeof(name), "%ps", *fn);
>   		err = igt_live_test_begin(&t, i915, __func__, name);
>   		if (err)
>   			break;
> @@ -1470,3 +1471,285 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
>   
>   	return i915_subtests(tests, i915);
>   }
> +
> +struct perf_parallel {
> +	struct intel_engine_cs *engine;
> +	unsigned long count;
> +	ktime_t time;
> +	ktime_t busy;
> +	u64 runtime;
> +};
> +
> +static int switch_to_kernel_sync(struct intel_context *ce, int err)
> +{
> +	struct i915_request *rq;
> +	struct dma_fence *fence;
> +
> +	rq = intel_engine_create_kernel_request(ce->engine);
> +	if (IS_ERR(rq))
> +		return PTR_ERR(rq);
> +
> +	fence = i915_active_fence_get(&ce->timeline->last_request);
> +	if (fence) {
> +		i915_request_await_dma_fence(rq, fence);
> +		dma_fence_put(fence);
> +	}
> +
> +	rq = i915_request_get(rq);
> +	i915_request_add(rq);
> +	if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err)
> +		err = -ETIME;
> +	i915_request_put(rq);
> +
> +	while (!err && !intel_engine_is_idle(ce->engine))
> +		intel_engine_flush_submission(ce->engine);
> +
> +	return err;
> +}
> +
> +static int perf_sync(void *arg)
> +{
> +	struct perf_parallel *p = arg;
> +	struct intel_engine_cs *engine = p->engine;
> +	struct intel_context *ce;
> +	IGT_TIMEOUT(end_time);
> +	unsigned long count;
> +	bool busy;
> +	int err = 0;
> +
> +	ce = intel_context_create(engine);
> +	if (IS_ERR(ce))
> +		return PTR_ERR(ce);
> +
> +	err = intel_context_pin(ce);
> +	if (err) {
> +		intel_context_put(ce);
> +		return err;
> +	}
> +
> +	busy = false;
> +	if (intel_engine_supports_stats(engine) &&
> +	    !intel_enable_engine_stats(engine)) {
> +		p->busy = intel_engine_get_busy_time(engine);
> +		busy = true;
> +	}
> +
> +	p->time = ktime_get();
> +	count = 0;
> +	do {
> +		struct i915_request *rq;
> +
> +		rq = i915_request_create(ce);
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			break;
> +		}
> +
> +		i915_request_get(rq);
> +		i915_request_add(rq);
> +
> +		err = 0;
> +		if (i915_request_wait(rq, 0, HZ / 5) < 0)
> +			err = -ETIME;
> +		i915_request_put(rq);
> +		if (err)
> +			break;
> +
> +		count++;
> +	} while (!__igt_timeout(end_time, NULL));
> +	p->time = ktime_sub(ktime_get(), p->time);
> +
> +	if (busy) {
> +		p->busy = ktime_sub(intel_engine_get_busy_time(engine),
> +				    p->busy);
> +		intel_disable_engine_stats(engine);
> +	}
> +
> +	err = switch_to_kernel_sync(ce, err);
> +	p->runtime = intel_context_get_total_runtime_ns(ce);
> +	p->count = count;
> +
> +	intel_context_unpin(ce);
> +	intel_context_put(ce);
> +	return err;
> +}
> +
> +static int perf_many(void *arg)
> +{
> +	struct perf_parallel *p = arg;
> +	struct intel_engine_cs *engine = p->engine;
> +	struct intel_context *ce;
> +	IGT_TIMEOUT(end_time);
> +	unsigned long count;
> +	int err = 0;
> +	bool busy;
> +
> +	ce = intel_context_create(engine);
> +	if (IS_ERR(ce))
> +		return PTR_ERR(ce);
> +
> +	err = intel_context_pin(ce);
> +	if (err) {
> +		intel_context_put(ce);
> +		return err;
> +	}
> +
> +	busy = false;
> +	if (intel_engine_supports_stats(engine) &&
> +	    !intel_enable_engine_stats(engine)) {
> +		p->busy = intel_engine_get_busy_time(engine);
> +		busy = true;
> +	}
> +
> +	count = 0;
> +	p->time = ktime_get();
> +	do {
> +		struct i915_request *rq;
> +
> +		rq = i915_request_create(ce);
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			break;
> +		}
> +
> +		i915_request_add(rq);

Any concerns on ring size here and maybe managing the wait explicitly?

> +		count++;
> +	} while (!__igt_timeout(end_time, NULL));
> +	p->time = ktime_sub(ktime_get(), p->time);
> +
> +	if (busy) {
> +		p->busy = ktime_sub(intel_engine_get_busy_time(engine),
> +				    p->busy);
> +		intel_disable_engine_stats(engine);
> +	}
> +
> +	err = switch_to_kernel_sync(ce, err);
> +	p->runtime = intel_context_get_total_runtime_ns(ce);
> +	p->count = count;
> +
> +	intel_context_unpin(ce);
> +	intel_context_put(ce);
> +	return err;
> +}
> +
> +static int perf_parallel_engines(void *arg)
> +{
> +	struct drm_i915_private *i915 = arg;
> +	static int (* const func[])(void *arg) = {
> +		perf_sync,
> +		perf_many,
> +		NULL,
> +	};
> +	const unsigned int nengines = num_uabi_engines(i915);
> +	struct intel_engine_cs *engine;
> +	int (* const *fn)(void *arg);
> +	struct pm_qos_request *qos;
> +	struct {
> +		struct perf_parallel p;
> +		struct task_struct *tsk;
> +	} *engines;
> +	int err = 0;
> +
> +	engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
> +	if (!engines)
> +		return -ENOMEM;
> +
> +	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
> +	if (qos)
> +		pm_qos_add_request(qos, PM_QOS_CPU_DMA_LATENCY, 0);
> +
> +	for (fn = func; *fn; fn++) {
> +		char name[KSYM_NAME_LEN];
> +		struct igt_live_test t;
> +		unsigned int idx;
> +
> +		snprintf(name, sizeof(name), "%ps", *fn);

Is this any better than just storing the name in a local static array?
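
For illustration, a minimal sketch of that alternative -- a static table
pairing each phase with its name instead of formatting it with "%ps" (the
struct and names here are made up for the example, not proposed code):

	static const struct {
		int (*fn)(void *arg);
		const char *name;
	} phases[] = {
		{ perf_sync, "sync" },
		{ perf_many, "many" },
		{}
	};
	const typeof(*phases) *p;

	for (p = phases; p->fn; p++) {
		err = igt_live_test_begin(&t, i915, __func__, p->name);
		if (err)
			break;
		/* spawn one kthread per engine running p->fn, as before */
	}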

> +		err = igt_live_test_begin(&t, i915, __func__, name);
> +		if (err)
> +			break;
> +
> +		atomic_set(&i915->selftest.counter, nengines);
> +
> +		idx = 0;
> +		for_each_uabi_engine(engine, i915) {

For a pure driver overhead test I would suggest this to be a gt live test.

> +			intel_engine_pm_get(engine);
> +
> +			memset(&engines[idx].p, 0, sizeof(engines[idx].p));
> +			engines[idx].p.engine = engine;
> +
> +			engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
> +						       "igt:%s", engine->name);

The test will be affected by the host CPU core count. How about we only 
measure num_cpu engines? That might be even more important with discrete.
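
For example, one way to decouple the measurement from the CPU count would be
to cap the number of concurrent submitters (a sketch of the idea only, not
proposed code; num_uabi_engines() is the helper the patch already uses):

	/* sketch: never spawn more submitter threads than there are CPUs */
	const unsigned int nengines = min_t(unsigned int,
					    num_uabi_engines(i915),
					    num_online_cpus());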

> +			if (IS_ERR(engines[idx].tsk)) {
> +				err = PTR_ERR(engines[idx].tsk);
> +				intel_engine_pm_put(engine);
> +				break;
> +			}
> +			get_task_struct(engines[idx++].tsk);
> +		}
> +
> +		yield(); /* start all threads before we kthread_stop() */
> +
> +		idx = 0;
> +		for_each_uabi_engine(engine, i915) {
> +			int status;
> +
> +			if (IS_ERR(engines[idx].tsk))
> +				break;
> +
> +			status = kthread_stop(engines[idx].tsk);
> +			if (status && !err)
> +				err = status;
> +
> +			intel_engine_pm_put(engine);
> +			put_task_struct(engines[idx++].tsk);
> +		}
> +
> +		if (igt_live_test_end(&t))
> +			err = -EIO;
> +		if (err)
> +			break;
> +
> +		idx = 0;
> +		for_each_uabi_engine(engine, i915) {
> +			struct perf_parallel *p = &engines[idx].p;
> +			u64 busy = 100 * ktime_to_ns(p->busy);
> +			u64 dt = ktime_to_ns(p->time);
> +			int integer, decimal;
> +
> +			if (dt) {
> +				integer = div64_u64(busy, dt);
> +				busy -= integer * dt;
> +				decimal = div64_u64(100 * busy, dt);
> +			} else {
> +				integer = 0;
> +				decimal = 0;
> +			}
> +
> +			GEM_BUG_ON(engine != p->engine);
> +			pr_info("%s %5s: { count:%lu, busy:%d.%02d%%, runtime:%lldms, walltime:%lldms }\n",
> +				name, engine->name, p->count, integer, decimal,
> +				div_u64(p->runtime, 1000 * 1000),
> +				div_u64(ktime_to_ns(p->time), 1000 * 1000));
> +			idx++;
> +		}
> +	}
> +
> +	if (qos) {
> +		pm_qos_remove_request(qos);
> +		kfree(qos);
> +	}
> +	kfree(engines);
> +	return err;
> +}
> +
> +int i915_request_perf_selftests(struct drm_i915_private *i915)
> +{
> +	static const struct i915_subtest tests[] = {
> +		SUBTEST(perf_parallel_engines),
> +	};
> +
> +	if (intel_gt_is_wedged(&i915->gt))
> +		return 0;
> +
> +	return i915_subtests(tests, i915);
> +}
> 

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 37+ messages in thread

* Re: [Intel-gfx] [PATCH 04/17] drm/i915: Tweak scheduler's kick_submission()
  2020-03-10 10:07   ` Tvrtko Ursulin
@ 2020-03-10 11:00     ` Chris Wilson
  2020-03-10 11:47       ` Tvrtko Ursulin
  0 siblings, 1 reply; 37+ messages in thread
From: Chris Wilson @ 2020-03-10 11:00 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2020-03-10 10:07:33)
> 
> On 06/03/2020 13:38, Chris Wilson wrote:
> > Skip useless priority bumping on adding a new dependency, but otherwise
> > prevent tasklet scheduling until we have completed all the potential
> > rescheduling.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> >   drivers/gpu/drm/i915/i915_scheduler.c | 7 ++++++-
> >   1 file changed, 6 insertions(+), 1 deletion(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> > index 52f71e83e088..603cba36d6a4 100644
> > --- a/drivers/gpu/drm/i915/i915_scheduler.c
> > +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> > @@ -209,6 +209,8 @@ static void kick_submission(struct intel_engine_cs *engine,
> >       if (!inflight)
> >               goto unlock;
> >   
> > +     engine->execlists.queue_priority_hint = prio;
> > +
> 
> What is the significance of moving this up? I couldn't correlate it to 
> the commit message.

It's correcting the priority bumping. If we have the same context as the
one active, we should not skip the hint update, and so we avoid the useless
bump on a later dependency.

> >       /*
> >        * If we are already the currently executing context, don't
> >        * bother evaluating if we should preempt ourselves.
> > @@ -216,7 +218,6 @@ static void kick_submission(struct intel_engine_cs *engine,
> >       if (inflight->context == rq->context)
> >               goto unlock;
> >   
> > -     engine->execlists.queue_priority_hint = prio;
> >       if (need_preempt(prio, rq_prio(inflight)))
> >               tasklet_hi_schedule(&engine->execlists.tasklet);
> >   
> > @@ -463,11 +464,15 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
> >       if (!dep)
> >               return -ENOMEM;
> >   
> > +     local_bh_disable();
> > +
> >       if (!__i915_sched_node_add_dependency(node, signal, dep,
> >                                             I915_DEPENDENCY_EXTERNAL |
> >                                             I915_DEPENDENCY_ALLOC))
> >               i915_dependency_free(dep);
> >   
> > +     local_bh_enable(); /* kick submission tasklet */
> > +
> 
> And this presumably postpones the tasklet until __bump_priority ->
> __i915_schedule is finished. But then why does the request submission path,
> which calls __i915_sched_node_add_dependency directly, not need this
> treatment?

Because we haven't completed the updates by that point, and upon actual
request submission the tasklet is flushed anyway. Besides, it would not be
legal on all request submission paths.
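
To illustrate the postponement, a minimal sketch of the generic tasklet
semantics being relied upon (not the i915 code itself):

        struct tasklet_struct *t = &engine->execlists.tasklet;

        local_bh_disable();       /* bottom halves blocked on this CPU */
        tasklet_hi_schedule(t);   /* marked pending, does not run yet */
        /* ... finish walking the dependency graph and bumping priorities ... */
        local_bh_enable();        /* pending tasklets run here, after all
                                   * the rescheduling is complete */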
-Chris

* Re: [Intel-gfx] [PATCH 08/17] drm/i915/selftests: Add request throughput measurement to perf
  2020-03-10 10:38   ` Tvrtko Ursulin
@ 2020-03-10 11:09     ` Chris Wilson
  2020-03-10 11:58       ` Tvrtko Ursulin
  0 siblings, 1 reply; 37+ messages in thread
From: Chris Wilson @ 2020-03-10 11:09 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2020-03-10 10:38:21)
> 
> On 06/03/2020 13:38, Chris Wilson wrote:
> > +static int perf_many(void *arg)
> > +{
> > +     struct perf_parallel *p = arg;
> > +     struct intel_engine_cs *engine = p->engine;
> > +     struct intel_context *ce;
> > +     IGT_TIMEOUT(end_time);
> > +     unsigned long count;
> > +     int err = 0;
> > +     bool busy;
> > +
> > +     ce = intel_context_create(engine);
> > +     if (IS_ERR(ce))
> > +             return PTR_ERR(ce);
> > +
> > +     err = intel_context_pin(ce);
> > +     if (err) {
> > +             intel_context_put(ce);
> > +             return err;
> > +     }
> > +
> > +     busy = false;
> > +     if (intel_engine_supports_stats(engine) &&
> > +         !intel_enable_engine_stats(engine)) {
> > +             p->busy = intel_engine_get_busy_time(engine);
> > +             busy = true;
> > +     }
> > +
> > +     count = 0;
> > +     p->time = ktime_get();
> > +     do {
> > +             struct i915_request *rq;
> > +
> > +             rq = i915_request_create(ce);
> > +             if (IS_ERR(rq)) {
> > +                     err = PTR_ERR(rq);
> > +                     break;
> > +             }
> > +
> > +             i915_request_add(rq);
> 
> Any concerns on ring size here and maybe managing the wait explicitly?

No concern, the intention is to flood the ring. If we are able to wait
on the ring, we have succeeded in submitting faster than the engine can
retire. (Which might be another issue for us to resolve, as it may be
our own interrupt latency that is then the bottleneck.)

If we did a sync0, sync1, many; that could give us some more insight
into the interrupt latency in comparison to engine latency.
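
For comparison, a rough sketch of what a fully synchronous loop would look
like (same context setup as perf_many above; the timeout and error handling
here are illustrative, not the perf_sync from the patch):

        do {
                struct i915_request *rq;

                rq = i915_request_create(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        break;
                }

                i915_request_get(rq);
                i915_request_add(rq);

                /* Wait for retirement, so interrupt latency is included */
                if (i915_request_wait(rq, 0, HZ) < 0)
                        err = -ETIME;
                i915_request_put(rq);
                if (err)
                        break;

                count++;
        } while (!__igt_timeout(end_time, NULL));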

> 
> > +             count++;
> > +     } while (!__igt_timeout(end_time, NULL));
> > +     p->time = ktime_sub(ktime_get(), p->time);
> > +
> > +     if (busy) {
> > +             p->busy = ktime_sub(intel_engine_get_busy_time(engine),
> > +                                 p->busy);
> > +             intel_disable_engine_stats(engine);
> > +     }
> > +
> > +     err = switch_to_kernel_sync(ce, err);
> > +     p->runtime = intel_context_get_total_runtime_ns(ce);
> > +     p->count = count;
> > +
> > +     intel_context_unpin(ce);
> > +     intel_context_put(ce);
> > +     return err;
> > +}
> > +
> > +static int perf_parallel_engines(void *arg)
> > +{
> > +     struct drm_i915_private *i915 = arg;
> > +     static int (* const func[])(void *arg) = {
> > +             perf_sync,
> > +             perf_many,
> > +             NULL,
> > +     };
> > +     const unsigned int nengines = num_uabi_engines(i915);
> > +     struct intel_engine_cs *engine;
> > +     int (* const *fn)(void *arg);
> > +     struct pm_qos_request *qos;
> > +     struct {
> > +             struct perf_parallel p;
> > +             struct task_struct *tsk;
> > +     } *engines;
> > +     int err = 0;
> > +
> > +     engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
> > +     if (!engines)
> > +             return -ENOMEM;
> > +
> > +     qos = kzalloc(sizeof(*qos), GFP_KERNEL);
> > +     if (qos)
> > +             pm_qos_add_request(qos, PM_QOS_CPU_DMA_LATENCY, 0);
> > +
> > +     for (fn = func; *fn; fn++) {
> > +             char name[KSYM_NAME_LEN];
> > +             struct igt_live_test t;
> > +             unsigned int idx;
> > +
> > +             snprintf(name, sizeof(name), "%ps", *fn);
> 
> Is this any better than just storing the name in local static array?

It's easier for sure, and since the name is already in a static array,
why not use it :)
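
For reference, the alternative being suggested would look roughly like this
(hypothetical sketch; the names table is illustrative only):

        static const char * const names[] = { "sync", "many" };

        for (fn = func; *fn; fn++) {
                const char *name = names[fn - func];

                err = igt_live_test_begin(&t, i915, __func__, name);
                if (err)
                        break;
                /* ... rest of the loop unchanged ... */
        }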

> > +             err = igt_live_test_begin(&t, i915, __func__, name);
> > +             if (err)
> > +                     break;
> > +
> > +             atomic_set(&i915->selftest.counter, nengines);
> > +
> > +             idx = 0;
> > +             for_each_uabi_engine(engine, i915) {
> 
> For a pure driver overhead test I would suggest this to be a gt live test.

It's a request performance test, so it sits above the gt. My thinking is
that this is more of a high-level request/scheduler test than an
execlists/guc one (though it depends on those backends).
 
> > +                     intel_engine_pm_get(engine);
> > +
> > +                     memset(&engines[idx].p, 0, sizeof(engines[idx].p));
> > +                     engines[idx].p.engine = engine;
> > +
> > +                     engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
> > +                                                    "igt:%s", engine->name);
> 
> Test will get affected by the host CPU core count. How about we only 
> measure num_cpu engines? Might be even more important with discrete.

No. We want to be able to fill the GPU with the different processors.
Comparing glk to kbl helps highlight any inefficiencies we have -- we
have to be efficient enough that core count is simply not a critical
factor to offset our submission overhead.

So we can run the same test on different machines, see how it scales with
engines vs cpus, and look for problems.
-Chris

* Re: [Intel-gfx] [PATCH 07/17] drm/i915/perf: Schedule oa_config after modifying the contexts
  2020-03-06 13:38 ` [Intel-gfx] [PATCH 07/17] drm/i915/perf: Schedule oa_config after modifying the contexts Chris Wilson
  2020-03-06 14:20   ` Lionel Landwerlin
@ 2020-03-10 11:17   ` Chris Wilson
  2020-03-10 12:01     ` Lionel Landwerlin
  1 sibling, 1 reply; 37+ messages in thread
From: Chris Wilson @ 2020-03-10 11:17 UTC (permalink / raw)
  To: intel-gfx

Quoting Chris Wilson (2020-03-06 13:38:42)
>  static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
>  {
> -       struct i915_request *rq;
> +       struct i915_active *active;
> +       int err;
>  
> -       rq = stream->perf->ops.enable_metric_set(stream);
> -       if (IS_ERR(rq))
> -               return PTR_ERR(rq);
> +       active = i915_active_create();
> +       if (!active)
> +               return -ENOMEM;
>  
> -       i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
> -       i915_request_put(rq);
> +       err = stream->perf->ops.enable_metric_set(stream, active);
> +       if (err == 0)
> +               i915_active_wait(active, TASK_UNINTERRUPTIBLE);

Why UNINTERRUPTIBLE you might ask?

Because you've demonstrated that if we schedule the oa config update but do
not wait for the change, the machine becomes unusable; that seems like a
risk not worth taking.

Hence why the i915_request_wait() was uninterruptible and the
i915_active_wait() keeps the uninterruptible nature.
-Chris

* Re: [Intel-gfx] [PATCH 04/17] drm/i915: Tweak scheduler's kick_submission()
  2020-03-10 11:00     ` Chris Wilson
@ 2020-03-10 11:47       ` Tvrtko Ursulin
  0 siblings, 0 replies; 37+ messages in thread
From: Tvrtko Ursulin @ 2020-03-10 11:47 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 10/03/2020 11:00, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2020-03-10 10:07:33)
>>
>> On 06/03/2020 13:38, Chris Wilson wrote:
>>> Skip useless priority bumping on adding a new dependency, but otherwise
>>> prevent tasklet scheduling until we have completed all the potential
>>> rescheduling.
>>>
>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> ---
>>>    drivers/gpu/drm/i915/i915_scheduler.c | 7 ++++++-
>>>    1 file changed, 6 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
>>> index 52f71e83e088..603cba36d6a4 100644
>>> --- a/drivers/gpu/drm/i915/i915_scheduler.c
>>> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
>>> @@ -209,6 +209,8 @@ static void kick_submission(struct intel_engine_cs *engine,
>>>        if (!inflight)
>>>                goto unlock;
>>>    
>>> +     engine->execlists.queue_priority_hint = prio;
>>> +
>>
>> What is the significance of moving this up? I couldn't correlate it to
>> the commit message.
> 
> It's correcting the priority bumping. If the active context is the same as
> ours, we should not skip the hint update; that way we avoid a useless bump
> on a later dependency.
> 
>>>        /*
>>>         * If we are already the currently executing context, don't
>>>         * bother evaluating if we should preempt ourselves.
>>> @@ -216,7 +218,6 @@ static void kick_submission(struct intel_engine_cs *engine,
>>>        if (inflight->context == rq->context)
>>>                goto unlock;
>>>    
>>> -     engine->execlists.queue_priority_hint = prio;
>>>        if (need_preempt(prio, rq_prio(inflight)))
>>>                tasklet_hi_schedule(&engine->execlists.tasklet);
>>>    
>>> @@ -463,11 +464,15 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
>>>        if (!dep)
>>>                return -ENOMEM;
>>>    
>>> +     local_bh_disable();
>>> +
>>>        if (!__i915_sched_node_add_dependency(node, signal, dep,
>>>                                              I915_DEPENDENCY_EXTERNAL |
>>>                                              I915_DEPENDENCY_ALLOC))
>>>                i915_dependency_free(dep);
>>>    
>>> +     local_bh_enable(); /* kick submission tasklet */
>>> +
>>
>> And this presumably postpones the tasklet until __bump_priority ->
>> __i915_schedule is finished. But then why does the request submission path,
>> which calls __i915_sched_node_add_dependency directly, not need this
>> treatment?
> 
> Because we haven't completed the updates by that point, and upon actual
> request submission the tasklet is flushed anyway. Besides, it would not be
> legal on all request submission paths.

Okay,

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko



* Re: [Intel-gfx] [PATCH 08/17] drm/i915/selftests: Add request throughput measurement to perf
  2020-03-10 11:09     ` Chris Wilson
@ 2020-03-10 11:58       ` Tvrtko Ursulin
  2020-03-10 12:06         ` Chris Wilson
  0 siblings, 1 reply; 37+ messages in thread
From: Tvrtko Ursulin @ 2020-03-10 11:58 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 10/03/2020 11:09, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2020-03-10 10:38:21)
>>
>> On 06/03/2020 13:38, Chris Wilson wrote:
>>> +static int perf_many(void *arg)
>>> +{
>>> +     struct perf_parallel *p = arg;
>>> +     struct intel_engine_cs *engine = p->engine;
>>> +     struct intel_context *ce;
>>> +     IGT_TIMEOUT(end_time);
>>> +     unsigned long count;
>>> +     int err = 0;
>>> +     bool busy;
>>> +
>>> +     ce = intel_context_create(engine);
>>> +     if (IS_ERR(ce))
>>> +             return PTR_ERR(ce);
>>> +
>>> +     err = intel_context_pin(ce);
>>> +     if (err) {
>>> +             intel_context_put(ce);
>>> +             return err;
>>> +     }
>>> +
>>> +     busy = false;
>>> +     if (intel_engine_supports_stats(engine) &&
>>> +         !intel_enable_engine_stats(engine)) {
>>> +             p->busy = intel_engine_get_busy_time(engine);
>>> +             busy = true;
>>> +     }
>>> +
>>> +     count = 0;
>>> +     p->time = ktime_get();
>>> +     do {
>>> +             struct i915_request *rq;
>>> +
>>> +             rq = i915_request_create(ce);
>>> +             if (IS_ERR(rq)) {
>>> +                     err = PTR_ERR(rq);
>>> +                     break;
>>> +             }
>>> +
>>> +             i915_request_add(rq);
>>
>> Any concerns on ring size here and maybe managing the wait explicitly?
> 
> No concern, the intention is to flood the ring. If we are able to wait
> on the ring, we have succeeded in submitting faster than the engine can
> retire. (Which might be another issue for us to resolve, as it may be
> our own interrupt latency that is then the bottleneck.)
> 
> If we did a sync0, sync1, many; that could give us some more insight
> into the interrupt latency in comparison to engine latency.
> 
>>
>>> +             count++;
>>> +     } while (!__igt_timeout(end_time, NULL));
>>> +     p->time = ktime_sub(ktime_get(), p->time);
>>> +
>>> +     if (busy) {
>>> +             p->busy = ktime_sub(intel_engine_get_busy_time(engine),
>>> +                                 p->busy);
>>> +             intel_disable_engine_stats(engine);
>>> +     }
>>> +
>>> +     err = switch_to_kernel_sync(ce, err);
>>> +     p->runtime = intel_context_get_total_runtime_ns(ce);
>>> +     p->count = count;
>>> +
>>> +     intel_context_unpin(ce);
>>> +     intel_context_put(ce);
>>> +     return err;
>>> +}
>>> +
>>> +static int perf_parallel_engines(void *arg)
>>> +{
>>> +     struct drm_i915_private *i915 = arg;
>>> +     static int (* const func[])(void *arg) = {
>>> +             perf_sync,
>>> +             perf_many,
>>> +             NULL,
>>> +     };
>>> +     const unsigned int nengines = num_uabi_engines(i915);
>>> +     struct intel_engine_cs *engine;
>>> +     int (* const *fn)(void *arg);
>>> +     struct pm_qos_request *qos;
>>> +     struct {
>>> +             struct perf_parallel p;
>>> +             struct task_struct *tsk;
>>> +     } *engines;
>>> +     int err = 0;
>>> +
>>> +     engines = kcalloc(nengines, sizeof(*engines), GFP_KERNEL);
>>> +     if (!engines)
>>> +             return -ENOMEM;
>>> +
>>> +     qos = kzalloc(sizeof(*qos), GFP_KERNEL);
>>> +     if (qos)
>>> +             pm_qos_add_request(qos, PM_QOS_CPU_DMA_LATENCY, 0);
>>> +
>>> +     for (fn = func; *fn; fn++) {
>>> +             char name[KSYM_NAME_LEN];
>>> +             struct igt_live_test t;
>>> +             unsigned int idx;
>>> +
>>> +             snprintf(name, sizeof(name), "%ps", *fn);
>>
>> Is this any better than just storing the name in local static array?
> 
> It's easier for sure, and since the name is already in a static array,
> why not use it :)

It looks weird, it needs KSYM_NAME_LEN of stack space and the special 
%ps. But okay.

> 
>>> +             err = igt_live_test_begin(&t, i915, __func__, name);
>>> +             if (err)
>>> +                     break;
>>> +
>>> +             atomic_set(&i915->selftest.counter, nengines);
>>> +
>>> +             idx = 0;
>>> +             for_each_uabi_engine(engine, i915) {
>>
>> For a pure driver overhead test I would suggest this to be a gt live test.
> 
> It's a request performance test, so it sits above the gt. My thinking is
> that this is more of a high-level request/scheduler test than an
> execlists/guc one (though it depends on those backends).

Okay, yeah, it makes sense.

>   
>>> +                     intel_engine_pm_get(engine);
>>> +
>>> +                     memset(&engines[idx].p, 0, sizeof(engines[idx].p));
>>> +                     engines[idx].p.engine = engine;
>>> +
>>> +                     engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
>>> +                                                    "igt:%s", engine->name);
>>
>> Test will get affected by the host CPU core count. How about we only
>> measure num_cpu engines? Might be even more important with discrete.
> 
> No. We want to be able to fill the GPU with the different processors.
> Comparing glk to kbl helps highlight any inefficiencies we have -- we
> have to be efficient enough that core count is simply not a critical
> factor to offset our submission overhead.
> 
> So we can run the same test on different machines, see how it scales with
> engines vs cpus, and look for problems.

Normally you would expect one core per engine is enough to saturate the 
engine. I am afraid adding more combinations will be confusing when 
reading test results. (Same GPU, same engine count, different CPU core 
count.) How about two subtest variants? One is 1:1 CPU core to engine, 
and another can be all engines like here?

Or possibly:

1. 1 CPU core - 1 engine - purest latency/overhead
2. 1 CPU core - N engines (N = all engines) - more
> 3. N CPU cores - N engines (N = min(engines, cores)) - global lock
> contention, stable setup
4. M CPU cores - N engines (N, M = max) - lock contention stress
5. N CPU cores - 1 engine (N = all cores) - more extreme lock contention

Regards,

Tvrtko

* Re: [Intel-gfx] [PATCH 07/17] drm/i915/perf: Schedule oa_config after modifying the contexts
  2020-03-10 11:17   ` Chris Wilson
@ 2020-03-10 12:01     ` Lionel Landwerlin
  0 siblings, 0 replies; 37+ messages in thread
From: Lionel Landwerlin @ 2020-03-10 12:01 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On 10/03/2020 13:17, Chris Wilson wrote:
> Quoting Chris Wilson (2020-03-06 13:38:42)
>>   static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
>>   {
>> -       struct i915_request *rq;
>> +       struct i915_active *active;
>> +       int err;
>>   
>> -       rq = stream->perf->ops.enable_metric_set(stream);
>> -       if (IS_ERR(rq))
>> -               return PTR_ERR(rq);
>> +       active = i915_active_create();
>> +       if (!active)
>> +               return -ENOMEM;
>>   
>> -       i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
>> -       i915_request_put(rq);
>> +       err = stream->perf->ops.enable_metric_set(stream, active);
>> +       if (err == 0)
>> +               i915_active_wait(active, TASK_UNINTERRUPTIBLE);
> Why UNINTERRUPTIBLE you might ask?
>
> Because you've demonstrated that if we schedule the oa config update but do
> not wait for the change, the machine becomes unusable; that seems like a
> risk not worth taking.


Just to confirm, the risk would be that the task could be interrupted and
that we would schedule another configuration request without any wait, and
that would bring us back to the buggy scenario we saw.


-Lionel


>
> Hence why the i915_request_wait() was uninterruptible and the
> i915_active_wait() keeps the uninterruptible nature.
> -Chris



* Re: [Intel-gfx] [PATCH 08/17] drm/i915/selftests: Add request throughput measurement to perf
  2020-03-10 11:58       ` Tvrtko Ursulin
@ 2020-03-10 12:06         ` Chris Wilson
  0 siblings, 0 replies; 37+ messages in thread
From: Chris Wilson @ 2020-03-10 12:06 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2020-03-10 11:58:26)
> 
> On 10/03/2020 11:09, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2020-03-10 10:38:21)
> >>
> >> On 06/03/2020 13:38, Chris Wilson wrote:
> >>> +                     intel_engine_pm_get(engine);
> >>> +
> >>> +                     memset(&engines[idx].p, 0, sizeof(engines[idx].p));
> >>> +                     engines[idx].p.engine = engine;
> >>> +
> >>> +                     engines[idx].tsk = kthread_run(*fn, &engines[idx].p,
> >>> +                                                    "igt:%s", engine->name);
> >>
> >> Test will get affected by the host CPU core count. How about we only
> >> measure num_cpu engines? Might be even more important with discrete.
> > 
> > No. We want to be able to fill the GPU with the different processors.
> > Comparing glk to kbl helps highlight any inefficiencies we have -- we
> > have to be efficient enough that core count is simply not a critical
> > factor to offset our submission overhead.
> > 
> So we can run the same test on different machines, see how it scales with
> engines vs cpus, and look for problems.
> 
> Normally you would expect one core per engine is enough to saturate the 
> engine. I am afraid adding more combinations will be confusing when 
> reading test results. (Same GPU, same engine count, different CPU core 
> count.) How about two subtest variants? One is 1:1 CPU core to engine, 
> and another can be all engines like here?

Each machine will have its own consistent configuration. The question I
have in mind is "can we saturate this machine"? This machine remains
constant for all the runs. And our goal is that the driver is not a
bottleneck on any machine.
 
> Or possibly:
> 
> 1. 1 CPU core - 1 engine - purest latency/overhead
> 2. 1 CPU core - N engines (N = all engines) - more
> 3. N CPU cores - N engines (N = min(engines, cores)) - global lock
> contention, stable setup
> 4. M CPU cores - N engines (N, M = max) - lock contention stress
> 5. N CPU cores - 1 engine (N = all cores) - more extreme lock contention

I hear you in that you would like to have a serial test as well, where we
just use 1 cpu thread to submit to all engines as fast as we can and see
how close we get with just "1 core". (There will still be some parallelism,
one hopes, from our interrupt handler.)
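
A rough sketch of such a serial variant (hypothetical helper, using the
engines' kernel contexts for simplicity and ignoring engine power management
and error unwinding, so not the shape a real patch would take):

        static int perf_serial(void *arg)
        {
                struct drm_i915_private *i915 = arg;
                struct intel_engine_cs *engine;
                IGT_TIMEOUT(end_time);
                unsigned long count = 0;
                int err = 0;

                do {
                        /* One CPU thread feeding every engine in turn */
                        for_each_uabi_engine(engine, i915) {
                                struct i915_request *rq;

                                rq = i915_request_create(engine->kernel_context);
                                if (IS_ERR(rq)) {
                                        err = PTR_ERR(rq);
                                        break;
                                }

                                i915_request_add(rq);
                                count++;
                        }
                } while (!err && !__igt_timeout(end_time, NULL));

                pr_info("serial: %lu requests submitted\n", count);
                return err;
        }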
-Chris

end of thread, other threads:[~2020-03-10 12:06 UTC | newest]

Thread overview: 37+ messages (download: mbox.gz / follow: Atom feed)
2020-03-06 13:38 [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Chris Wilson
2020-03-06 13:38 ` [PATCH 02/17] drm/i915/execlists: Enable timeslice on partial virtual engine dequeue Chris Wilson
2020-03-06 13:38   ` [Intel-gfx] " Chris Wilson
2020-03-07 23:20   ` Sasha Levin
2020-03-07 23:20     ` [Intel-gfx] " Sasha Levin
2020-03-06 13:38 ` [Intel-gfx] [PATCH 03/17] drm/i915: Improve the start alignment of bonded pairs Chris Wilson
2020-03-10  9:59   ` Tvrtko Ursulin
2020-03-06 13:38 ` [Intel-gfx] [PATCH 04/17] drm/i915: Tweak scheduler's kick_submission() Chris Wilson
2020-03-10 10:07   ` Tvrtko Ursulin
2020-03-10 11:00     ` Chris Wilson
2020-03-10 11:47       ` Tvrtko Ursulin
2020-03-06 13:38 ` [Intel-gfx] [PATCH 05/17] drm/i915: Wrap i915_active in a simple kreffed struct Chris Wilson
2020-03-06 14:44   ` Mika Kuoppala
2020-03-06 13:38 ` [Intel-gfx] [PATCH 06/17] drm/i915: Extend i915_request_await_active to use all timelines Chris Wilson
2020-03-10 10:18   ` Tvrtko Ursulin
2020-03-06 13:38 ` [Intel-gfx] [PATCH 07/17] drm/i915/perf: Schedule oa_config after modifying the contexts Chris Wilson
2020-03-06 14:20   ` Lionel Landwerlin
2020-03-10 11:17   ` Chris Wilson
2020-03-10 12:01     ` Lionel Landwerlin
2020-03-06 13:38 ` [Intel-gfx] [PATCH 08/17] drm/i915/selftests: Add request throughput measurement to perf Chris Wilson
2020-03-10 10:38   ` Tvrtko Ursulin
2020-03-10 11:09     ` Chris Wilson
2020-03-10 11:58       ` Tvrtko Ursulin
2020-03-10 12:06         ` Chris Wilson
2020-03-06 13:38 ` [Intel-gfx] [PATCH 09/17] dma-buf: Prettify typecasts for dma-fence-chain Chris Wilson
2020-03-06 13:38 ` [Intel-gfx] [PATCH 10/17] dma-buf: Report signaled links inside dma-fence-chain Chris Wilson
2020-03-06 13:38 ` [Intel-gfx] [PATCH 11/17] dma-buf: Exercise dma-fence-chain under selftests Chris Wilson
2020-03-06 13:38 ` [Intel-gfx] [PATCH 12/17] dma-buf: Proxy fence, an unsignaled fence placeholder Chris Wilson
2020-03-06 13:38 ` [Intel-gfx] [PATCH 13/17] drm/syncobj: Allow use of dma-fence-proxy Chris Wilson
2020-03-06 13:38 ` [Intel-gfx] [PATCH 14/17] drm/i915/gem: Teach execbuf how to wait on future syncobj Chris Wilson
2020-03-06 13:38 ` [Intel-gfx] [PATCH 15/17] drm/i915/gem: Allow combining submit-fences with syncobj Chris Wilson
2020-03-06 13:38 ` [Intel-gfx] [PATCH 16/17] drm/i915/gt: Declare when we enabled timeslicing Chris Wilson
2020-03-06 13:38 ` [Intel-gfx] [PATCH 17/17] drm/i915/gt: Yield the timeslice if caught waiting on a user semaphore Chris Wilson
2020-03-06 14:35 ` [Intel-gfx] [PATCH 01/17] drm/i915/selftests: Apply a heavy handed flush to i915_active Mika Kuoppala
2020-03-06 21:13 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/17] " Patchwork
2020-03-06 21:33 ` [Intel-gfx] ✗ Fi.CI.DOCS: " Patchwork
2020-03-06 21:59 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
