From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 16/28] drm/i915: Unpeel awaits on a proxy fence
Date: Sun,  7 Jun 2020 23:20:56 +0100
Message-ID: <20200607222108.14401-16-chris@chris-wilson.co.uk>
In-Reply-To: <20200607222108.14401-1-chris@chris-wilson.co.uk>

If the real target for a proxy fence is known at the time we are
attaching our awaits, use the real target in preference to hooking up to
the proxy. By using the real target instead, we can optimise the awaits;
e.g. if it is along the same engine, we can order the submission and
avoid the wait-for-completion.
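
For illustration only (not part of the change itself), the per-fence
dispatch this enables in i915_request_await_dma_fence() ends up looking
roughly like the sketch below; it assumes dma_fence_proxy_get_real()
returns the real fence once the proxy has been resolved, and the proxy
itself otherwise:

	/* Sketch only: per-fence dispatch after unpeeling the proxy */
	fence = dma_fence_proxy_get_real(fence);
	if (dma_fence_is_i915(fence))
		/* native fence: order submission along the engine/timeline */
		ret = i915_request_await_request(rq, to_request(fence));
	else if (dma_fence_is_proxy(fence))
		/* still unresolved: defer the await until the real fence is set */
		ret = i915_request_await_proxy(rq, fence);
	else
		/* foreign fence: fall back to waiting for completion */
		ret = i915_request_await_external(rq, fence);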

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_request.c   | 155 ++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_scheduler.c |  41 +++++++
 drivers/gpu/drm/i915/i915_scheduler.h |   3 +
 3 files changed, 199 insertions(+)

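A note on the -EDEADLK handling below (illustration only; the names in
this sketch are invented for the example and are not part of any API):
a proxy fence lets userspace introduce a dependency cycle that the
normal in-order (DAG) submission rules cannot, e.g.

	/*
	 * Hypothetical cycle:
	 *
	 *	rq_A = submit(batch_A, wait_on = proxy_B);	signals fence_A
	 *	rq_B = submit(batch_B, wait_on = proxy_A);	signals fence_B
	 *	proxy_A is later resolved to fence_A
	 *	proxy_B is later resolved to fence_B
	 *
	 * Once both proxies resolve, the deferred attach would make rq_A
	 * wait on rq_B and rq_B wait on rq_A, so neither can be submitted.
	 * Before attaching, i915_sched_node_verify_dag() checks whether the
	 * request is already a (transitive) signaler of the fence it is
	 * about to wait on and rejects the attach with -EDEADLK, which is
	 * then ignored only for awaits marked I915_DEPENDENCY_WEAK.
	 */
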
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 3bb7320249ae..f04f91b4d879 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -24,6 +24,7 @@
 
 #include <linux/dma-fence-array.h>
 #include <linux/dma-fence-chain.h>
+#include <linux/dma-fence-proxy.h>
 #include <linux/irq_work.h>
 #include <linux/prefetch.h>
 #include <linux/sched.h>
@@ -461,6 +462,7 @@ static bool fatal_error(int error)
 	case 0: /* not an error! */
 	case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
 	case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
+	case -EDEADLK: /* cyclic fence lockup (await_proxy)  */
 		return false;
 	default:
 		return true;
@@ -1241,6 +1243,136 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
 	return err;
 }
 
+struct await_proxy {
+	struct wait_queue_entry base;
+	struct i915_request *request;
+	struct dma_fence *fence;
+	struct timer_list timer;
+	struct work_struct work;
+	int (*attach)(struct await_proxy *ap);
+	void *data;
+};
+
+static void await_proxy_work(struct work_struct *work)
+{
+	struct await_proxy *ap = container_of(work, typeof(*ap), work);
+	struct i915_request *rq = ap->request;
+
+	del_timer_sync(&ap->timer);
+
+	if (ap->fence) {
+		int err = 0;
+
+		/*
+		 * If the fence is external, we impose a 10s timeout.
+		 * However, if the fence is internal, we skip the timeout in
+		 * the belief that all fences are in-order (DAG, no cycles)
+		 * and we can enforce forward progress by resetting the GPU
+		 * if necessary. A future fence, provided by userspace, can
+		 * trivially generate a cycle in the dependency graph, and so
+		 * cause that entire cycle to become deadlocked, with no
+		 * forward progress being made and the driver kept eternally
+		 * awake.
+		 */
+		if (dma_fence_is_i915(ap->fence) &&
+		    !i915_sched_node_verify_dag(&rq->sched,
+						&to_request(ap->fence)->sched))
+			err = -EDEADLK;
+
+		if (!err) {
+			mutex_lock(&rq->context->timeline->mutex);
+			err = ap->attach(ap);
+			mutex_unlock(&rq->context->timeline->mutex);
+		}
+
+		/* Don't flag an error for co-dependent scheduling */
+		if (err == -EDEADLK) {
+			struct i915_sched_node *waiter =
+				&to_request(ap->fence)->sched;
+			struct i915_dependency *p;
+
+			for_each_waiter(p, rq) {
+				if (p->waiter == waiter &&
+				    p->flags & I915_DEPENDENCY_WEAK) {
+					err = 0;
+					break;
+				}
+			}
+		}
+
+		if (err < 0)
+			i915_sw_fence_set_error_once(&rq->submit, err);
+	}
+
+	i915_sw_fence_complete(&rq->submit);
+
+	dma_fence_put(ap->fence);
+	kfree(ap);
+}
+
+static int
+await_proxy_wake(struct wait_queue_entry *entry,
+		 unsigned int mode,
+		 int flags,
+		 void *fence)
+{
+	struct await_proxy *ap = container_of(entry, typeof(*ap), base);
+
+	ap->fence = dma_fence_get(fence);
+	schedule_work(&ap->work);
+
+	return 0;
+}
+
+static void
+await_proxy_timer(struct timer_list *t)
+{
+	struct await_proxy *ap = container_of(t, typeof(*ap), timer);
+
+	if (dma_fence_remove_proxy_listener(ap->base.private, &ap->base)) {
+		struct i915_request *rq = ap->request;
+
+		pr_notice("Asynchronous wait on unset proxy fence by %s:%s:%llx timed out\n",
+			  rq->fence.ops->get_driver_name(&rq->fence),
+			  rq->fence.ops->get_timeline_name(&rq->fence),
+			  rq->fence.seqno);
+		i915_sw_fence_set_error_once(&rq->submit, -ETIMEDOUT);
+
+		schedule_work(&ap->work);
+	}
+}
+
+static int
+__i915_request_await_proxy(struct i915_request *rq,
+			   struct dma_fence *fence,
+			   unsigned long timeout,
+			   int (*attach)(struct await_proxy *ap),
+			   void *data)
+{
+	struct await_proxy *ap;
+
+	ap = kzalloc(sizeof(*ap), I915_FENCE_GFP);
+	if (!ap)
+		return -ENOMEM;
+
+	i915_sw_fence_await(&rq->submit);
+	mark_external(rq);
+
+	ap->base.private = fence;
+	ap->base.func = await_proxy_wake;
+	ap->request = rq;
+	INIT_WORK(&ap->work, await_proxy_work);
+	ap->attach = attach;
+	ap->data = data;
+
+	timer_setup(&ap->timer, await_proxy_timer, 0);
+	if (timeout)
+		mod_timer(&ap->timer, round_jiffies_up(jiffies + timeout));
+
+	dma_fence_add_proxy_listener(fence, &ap->base);
+	return 0;
+}
+
 int
 i915_request_await_execution(struct i915_request *rq,
 			     struct dma_fence *fence,
@@ -1339,6 +1471,24 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 	return 0;
 }
 
+static int await_proxy(struct await_proxy *ap)
+{
+	return i915_request_await_dma_fence(ap->request, ap->fence);
+}
+
+static int
+i915_request_await_proxy(struct i915_request *rq, struct dma_fence *fence)
+{
+	/*
+	 * Wait until we know the real fence so that we can optimise the
+	 * inter-fence synchronisation.
+	 */
+	return __i915_request_await_proxy(rq, fence,
+					  i915_fence_context_timeout(rq->engine->i915,
+								     fence->context),
+					  await_proxy, NULL);
+}
+
 int
 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 {
@@ -1346,6 +1496,9 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 	unsigned int nchild = 1;
 	int ret;
 
+	/* Unpeel the proxy fence if the real target is already known */
+	fence = dma_fence_proxy_get_real(fence);
+
 	/*
 	 * Note that if the fence-array was created in signal-on-any mode,
 	 * we should *not* decompose it into its individual fences. However,
@@ -1385,6 +1538,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
 
 		if (dma_fence_is_i915(fence))
 			ret = i915_request_await_request(rq, to_request(fence));
+		else if (dma_fence_is_proxy(fence))
+			ret = i915_request_await_proxy(rq, fence);
 		else
 			ret = i915_request_await_external(rq, fence);
 		if (ret < 0)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index cbb880b10c65..250832768279 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -469,6 +469,47 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
 	return 0;
 }
 
+bool i915_sched_node_verify_dag(struct i915_sched_node *waiter,
+				struct i915_sched_node *signaler)
+{
+	struct i915_dependency *dep, *p;
+	struct i915_dependency stack;
+	bool result = false;
+	LIST_HEAD(dfs);
+
+	if (list_empty(&waiter->waiters_list))
+		return true;
+
+	spin_lock_irq(&schedule_lock);
+
+	stack.signaler = signaler;
+	list_add(&stack.dfs_link, &dfs);
+
+	list_for_each_entry(dep, &dfs, dfs_link) {
+		struct i915_sched_node *node = dep->signaler;
+
+		if (node_signaled(node))
+			continue;
+
+		list_for_each_entry(p, &node->signalers_list, signal_link) {
+			if (p->signaler == waiter)
+				goto out;
+
+			if (list_empty(&p->dfs_link))
+				list_add_tail(&p->dfs_link, &dfs);
+		}
+	}
+
+	result = true;
+out:
+	list_for_each_entry_safe(dep, p, &dfs, dfs_link)
+		INIT_LIST_HEAD(&dep->dfs_link);
+
+	spin_unlock_irq(&schedule_lock);
+
+	return result;
+}
+
 void i915_sched_node_fini(struct i915_sched_node *node)
 {
 	struct i915_dependency *dep, *tmp;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 6f0bf00fc569..13432add8929 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -28,6 +28,9 @@
 void i915_sched_node_init(struct i915_sched_node *node);
 void i915_sched_node_reinit(struct i915_sched_node *node);
 
+bool i915_sched_node_verify_dag(struct i915_sched_node *waiter,
+				struct i915_sched_node *signal);
+
 bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
 				      struct i915_sched_node *signal,
 				      struct i915_dependency *dep,
-- 
2.20.1
