From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 31/38] drm/i915: Allow contexts to share a single timeline across all engines
Date: Fri, 18 Jan 2019 14:01:02 +0000
Message-ID: <20190118140109.25261-32-chris@chris-wilson.co.uk>
In-Reply-To: <20190118140109.25261-1-chris@chris-wilson.co.uk>

Previously, our view has always been to run the engines independently
within a context. (Multiple engines predate contexts and timelines, so
they always operated independently and that behaviour persisted into
contexts.) However, at the user level the context often represents a
single timeline (e.g. GL contexts), and userspace must ensure that the
individual engines are serialised to present that ordering to the
client (or forget about this detail entirely and hope no one notices -
a fair ploy if the client can only directly control one engine itself ;)

In the next patch, we will want to construct a set of engines that
operate as one, with a single timeline interwoven between them, to
present a single virtual engine to the user. (Userspace submits to the
virtual engine, and we then decide which physical engine to execute
on.)

To that end, we want to be able to create contexts which have a single
timeline (fence context) shared between all engines, rather than multiple
timelines.
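
To make the new uAPI concrete, below is a minimal userspace sketch of
requesting such a context. The structure and flag come from the
i915_drm.h hunk in this patch; the ioctl macro name, the helper name
and the libdrm plumbing are illustrative assumptions (the extended
CONTEXT_CREATE ioctl itself comes from the previous patch in this
series):

    #include <string.h>
    #include <xf86drm.h>
    #include <drm/i915_drm.h>

    /* Hypothetical helper: create a context whose engines all share a
     * single timeline, so requests submitted to different engines
     * execute in overall submission order.  Returns the context id, or
     * 0 on failure (e.g. the kernel lacks execlists and rejects the
     * flag with -EINVAL).
     */
    static __u32 create_single_timeline_ctx(int fd)
    {
            struct drm_i915_gem_context_create_ext arg;

            memset(&arg, 0, sizeof(arg));
            arg.flags = I915_GEM_CONTEXT_SINGLE_TIMELINE;

            if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &arg))
                    return 0;

            return arg.ctx_id;
    }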

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_context.c       | 33 +++++++++++++---
 drivers/gpu/drm/i915/i915_gem_context.h       |  3 ++
 drivers/gpu/drm/i915/i915_request.c           | 10 ++++-
 drivers/gpu/drm/i915/i915_request.h           |  5 ++-
 drivers/gpu/drm/i915/i915_sw_fence.c          | 39 ++++++++++++++++---
 drivers/gpu/drm/i915/i915_sw_fence.h          | 13 ++++++-
 drivers/gpu/drm/i915/intel_lrc.c              |  5 ++-
 .../gpu/drm/i915/selftests/i915_gem_context.c | 17 ++++----
 drivers/gpu/drm/i915/selftests/mock_context.c |  2 +-
 include/uapi/drm/i915_drm.h                   |  1 +
 10 files changed, 103 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index ec5e3e1c6402..e28be242399d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -225,6 +225,9 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 			ce->ops->destroy(ce);
 	}
 
+	if (ctx->timeline)
+		i915_timeline_put(ctx->timeline);
+
 	kfree(ctx->name);
 	put_pid(ctx->pid);
 
@@ -425,12 +428,17 @@ static void __set_ppgtt(struct i915_gem_context *ctx,
 
 static struct i915_gem_context *
 i915_gem_create_context(struct drm_i915_private *dev_priv,
-			struct drm_i915_file_private *file_priv)
+			struct drm_i915_file_private *file_priv,
+			unsigned int flags)
 {
 	struct i915_gem_context *ctx;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
+	if (flags & I915_GEM_CONTEXT_SINGLE_TIMELINE &&
+	    !HAS_EXECLISTS(dev_priv))
+		return ERR_PTR(-EINVAL);
+
 	/* Reap the most stale context */
 	contexts_free_first(dev_priv);
 
@@ -453,6 +461,18 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
 		i915_ppgtt_put(ppgtt);
 	}
 
+	if (flags & I915_GEM_CONTEXT_SINGLE_TIMELINE) {
+		struct i915_timeline *timeline;
+
+		timeline = i915_timeline_create(dev_priv, ctx->name, NULL);
+		if (IS_ERR(timeline)) {
+			__destroy_hw_context(ctx, file_priv);
+			return ERR_CAST(timeline);
+		}
+
+		ctx->timeline = timeline;
+	}
+
 	trace_i915_context_create(ctx);
 
 	return ctx;
@@ -481,7 +501,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
 	if (ret)
 		return ERR_PTR(ret);
 
-	ctx = i915_gem_create_context(to_i915(dev), NULL);
+	ctx = i915_gem_create_context(to_i915(dev), NULL, 0);
 	if (IS_ERR(ctx))
 		goto out;
 
@@ -517,7 +537,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 	struct i915_gem_context *ctx;
 	int err;
 
-	ctx = i915_gem_create_context(i915, NULL);
+	ctx = i915_gem_create_context(i915, NULL, 0);
 	if (IS_ERR(ctx))
 		return ctx;
 
@@ -638,7 +658,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	idr_init_base(&file_priv->vm_idr, 1);
 
 	mutex_lock(&i915->drm.struct_mutex);
-	ctx = i915_gem_create_context(i915, file_priv);
+	ctx = i915_gem_create_context(i915, file_priv, 0);
 	mutex_unlock(&i915->drm.struct_mutex);
 	if (IS_ERR(ctx)) {
 		idr_destroy(&file_priv->context_idr);
@@ -992,7 +1012,8 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
 		return -ENODEV;
 
-	if (args->flags)
+	if (args->flags &
+	    ~(I915_GEM_CONTEXT_SINGLE_TIMELINE))
 		return -EINVAL;
 
 	if (client_is_banned(file_priv)) {
@@ -1007,7 +1028,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	ctx = i915_gem_create_context(dev_priv, file_priv);
+	ctx = i915_gem_create_context(dev_priv, file_priv, args->flags);
 	mutex_unlock(&dev->struct_mutex);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 9786f86b659d..b3a840747330 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -41,6 +41,7 @@ struct drm_i915_private;
 struct drm_i915_file_private;
 struct i915_hw_ppgtt;
 struct i915_request;
+struct i915_timeline;
 struct i915_vma;
 struct intel_ring;
 
@@ -66,6 +67,8 @@ struct i915_gem_context {
 	/** file_priv: owning file descriptor */
 	struct drm_i915_file_private *file_priv;
 
+	struct i915_timeline *timeline;
+
 	/**
 	 * @ppgtt: unique address space (GTT)
 	 *
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 7bccf578cd65..ca432d3d8211 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -860,8 +860,14 @@ void i915_request_add(struct i915_request *request)
 	prev = i915_gem_active_raw(&timeline->last_request,
 				   &request->i915->drm.struct_mutex);
 	if (prev && !i915_request_completed(prev)) {
-		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
-					     &request->submitq);
+		if (prev->engine == engine)
+			i915_sw_fence_await_sw_fence(&request->submit,
+						     &prev->submit,
+						     &request->submitq);
+		else
+			__i915_sw_fence_await_dma_fence(&request->submit,
+							&prev->fence,
+							&request->dmaq);
 		if (engine->schedule)
 			__i915_sched_node_add_dependency(&request->sched,
 							 &prev->sched,
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 679b4663f774..f715384ff485 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -108,7 +108,10 @@ struct i915_request {
 	 * It is used by the driver to then queue the request for execution.
 	 */
 	struct i915_sw_fence submit;
-	wait_queue_entry_t submitq;
+	union {
+		wait_queue_entry_t submitq;
+		struct i915_sw_dma_fence_cb dmaq;
+	};
 
 	/*
 	 * A list of everyone we wait upon, and everyone who waits upon us.
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 7c58b049ecb5..7bb64437dbbe 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -359,11 +359,6 @@ int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
 	return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
 }
 
-struct i915_sw_dma_fence_cb {
-	struct dma_fence_cb base;
-	struct i915_sw_fence *fence;
-};
-
 struct i915_sw_dma_fence_cb_timer {
 	struct i915_sw_dma_fence_cb base;
 	struct dma_fence *dma;
@@ -480,6 +475,40 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 	return ret;
 }
 
+static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
+				     struct dma_fence_cb *data)
+{
+	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+
+	i915_sw_fence_complete(cb->fence);
+}
+
+int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+				    struct dma_fence *dma,
+				    struct i915_sw_dma_fence_cb *cb)
+{
+	int ret;
+
+	debug_fence_assert(fence);
+
+	if (dma_fence_is_signaled(dma))
+		return 0;
+
+	cb->fence = fence;
+	i915_sw_fence_await(fence);
+
+	ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake);
+	if (ret == 0) {
+		ret = 1;
+	} else {
+		i915_sw_fence_complete(fence);
+		if (ret == -ENOENT) /* fence already signaled */
+			ret = 0;
+	}
+
+	return ret;
+}
+
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    struct reservation_object *resv,
 				    const struct dma_fence_ops *exclude,
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 0e055ea0179f..b420ceadb813 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -9,14 +9,13 @@
 #ifndef _I915_SW_FENCE_H_
 #define _I915_SW_FENCE_H_
 
+#include <linux/dma-fence.h>
 #include <linux/gfp.h>
 #include <linux/kref.h>
 #include <linux/notifier.h> /* for NOTIFY_DONE */
 #include <linux/wait.h>
 
 struct completion;
-struct dma_fence;
-struct dma_fence_ops;
 struct reservation_object;
 
 struct i915_sw_fence {
@@ -68,10 +67,20 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
 int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
 				     struct i915_sw_fence *after,
 				     gfp_t gfp);
+
+struct i915_sw_dma_fence_cb {
+	struct dma_fence_cb base;
+	struct i915_sw_fence *fence;
+};
+
+int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+				    struct dma_fence *dma,
+				    struct i915_sw_dma_fence_cb *cb);
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
 				  struct dma_fence *dma,
 				  unsigned long timeout,
 				  gfp_t gfp);
+
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
 				    struct reservation_object *resv,
 				    const struct dma_fence_ops *exclude,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 323341e9bf2d..10c42820bb46 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2644,7 +2644,10 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		goto error_deref_obj;
 	}
 
-	timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
+	if (ctx->timeline)
+		timeline = i915_timeline_get(ctx->timeline);
+	else
+		timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
 	if (IS_ERR(timeline)) {
 		ret = PTR_ERR(timeline);
 		goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 2864cfb82325..3e68c2888b9c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -143,7 +143,7 @@ static int live_nop_switch(void *arg)
 	}
 
 	for (n = 0; n < nctx; n++) {
-		ctx[n] = i915_gem_create_context(i915, file->driver_priv);
+		ctx[n] = i915_gem_create_context(i915, file->driver_priv, 0);
 		if (IS_ERR(ctx[n])) {
 			err = PTR_ERR(ctx[n]);
 			goto out_unlock;
@@ -601,7 +601,8 @@ static int igt_ctx_exec(void *arg)
 			struct i915_gem_context *ctx;
 			intel_wakeref_t wakeref;
 
-			ctx = i915_gem_create_context(i915, file->driver_priv);
+			ctx = i915_gem_create_context(i915,
+						      file->driver_priv, 0);
 			if (IS_ERR(ctx)) {
 				err = PTR_ERR(ctx);
 				goto out_unlock;
@@ -698,7 +699,8 @@ static int igt_shared_ctx_exec(void *arg)
 		if (err)
 			goto out_unlock;
 
-		parent = i915_gem_create_context(i915, file->driver_priv);
+		parent = i915_gem_create_context(i915,
+						 file->driver_priv, 0);
 		if (IS_ERR(parent)) {
 			err = PTR_ERR(parent);
 			if (err == -ENODEV) /* no logical ctx support */
@@ -720,7 +722,8 @@ static int igt_shared_ctx_exec(void *arg)
 			if (ctx)
 				__destroy_hw_context(ctx, file->driver_priv);
 
-			ctx = i915_gem_create_context(i915, file->driver_priv);
+			ctx = i915_gem_create_context(i915,
+						      file->driver_priv, 0);
 			if (IS_ERR(ctx)) {
 				err = PTR_ERR(ctx);
 				goto out_unlock;
@@ -813,7 +816,7 @@ static int igt_ctx_readonly(void *arg)
 	if (err)
 		goto out_unlock;
 
-	ctx = i915_gem_create_context(i915, file->driver_priv);
+	ctx = i915_gem_create_context(i915, file->driver_priv, 0);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
 		goto out_unlock;
@@ -1139,13 +1142,13 @@ static int igt_vm_isolation(void *arg)
 	if (err)
 		goto out_unlock;
 
-	ctx_a = i915_gem_create_context(i915, file->driver_priv);
+	ctx_a = i915_gem_create_context(i915, file->driver_priv, 0);
 	if (IS_ERR(ctx_a)) {
 		err = PTR_ERR(ctx_a);
 		goto out_unlock;
 	}
 
-	ctx_b = i915_gem_create_context(i915, file->driver_priv);
+	ctx_b = i915_gem_create_context(i915, file->driver_priv, 0);
 	if (IS_ERR(ctx_b)) {
 		err = PTR_ERR(ctx_b);
 		goto out_unlock;
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index b9fd2a4b95e9..f13f9c726034 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -99,7 +99,7 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
 {
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
-	return i915_gem_create_context(i915, file->driver_priv);
+	return i915_gem_create_context(i915, file->driver_priv, 0);
 }
 
 struct i915_gem_context *
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 704e9d2fe2d6..72749dc9801e 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1444,6 +1444,7 @@ struct drm_i915_gem_context_create {
 struct drm_i915_gem_context_create_ext {
 	__u32 ctx_id; /* output: id of new context*/
 	__u32 flags;
+#define I915_GEM_CONTEXT_SINGLE_TIMELINE	0x1
 	__u64 extensions;
 };
 
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Thread overview: 66+ messages
2019-01-18 14:00 Keeping Tvrtko busy Chris Wilson
2019-01-18 14:00 ` [PATCH 01/38] drm/i915/execlists: Store the highest priority context Chris Wilson
2019-01-18 14:00 ` [PATCH 02/38] drm/i915: Make all GPU resets atomic Chris Wilson
2019-01-18 14:22   ` Mika Kuoppala
2019-01-18 14:00 ` [PATCH 03/38] drm/i915/guc: Disable global reset Chris Wilson
2019-01-18 14:00 ` [PATCH 04/38] drm/i915: Remove GPU reset dependence on struct_mutex Chris Wilson
2019-01-18 14:00 ` [PATCH 05/38] drm/i915/selftests: Trim struct_mutex duration for set-wedged selftest Chris Wilson
2019-01-18 14:29   ` Mika Kuoppala
2019-01-18 14:00 ` [PATCH 06/38] drm/i915: Issue engine resets onto idle engines Chris Wilson
2019-01-18 14:00 ` [PATCH 07/38] drm/i915: Stop tracking MRU activity on VMA Chris Wilson
2019-01-18 16:03   ` Tvrtko Ursulin
2019-01-18 16:06     ` Chris Wilson
2019-01-22 14:19     ` Chris Wilson
2019-01-25 10:46       ` Tvrtko Ursulin
2019-01-25 13:38         ` Chris Wilson
2019-01-25 13:46           ` Chris Wilson
2019-01-25 14:08             ` Tvrtko Ursulin
2019-01-18 14:00 ` [PATCH 08/38] drm/i915: Pull VM lists under the VM mutex Chris Wilson
2019-01-18 16:04   ` Tvrtko Ursulin
2019-01-18 14:00 ` [PATCH 09/38] drm/i915: Move vma lookup to its own lock Chris Wilson
2019-01-18 16:14   ` Tvrtko Ursulin
2019-01-18 14:00 ` [PATCH 10/38] drm/i915/selftests: Allocate mock ring/timeline per context Chris Wilson
2019-01-18 16:26   ` Tvrtko Ursulin
2019-01-18 14:00 ` [PATCH 11/38] drm/i915: Always allocate an object/vma for the HWSP Chris Wilson
2019-01-18 14:00 ` [PATCH 12/38] drm/i915: Move list of timelines under its own lock Chris Wilson
2019-01-18 16:28   ` Tvrtko Ursulin
2019-01-18 14:00 ` [PATCH 13/38] drm/i915: Introduce concept of per-timeline (context) HWSP Chris Wilson
2019-01-18 14:00 ` [PATCH 14/38] drm/i915: Enlarge vma->pin_count Chris Wilson
2019-01-18 14:00 ` [PATCH 15/38] drm/i915: Allocate a status page for each timeline Chris Wilson
2019-01-21 11:18   ` Tvrtko Ursulin
2019-01-18 14:00 ` [PATCH 16/38] drm/i915: Share per-timeline HWSP using a slab suballocator Chris Wilson
2019-01-18 14:00 ` [PATCH 17/38] drm/i915: Keep all partially allocated HWSP on a freelist Chris Wilson
2019-01-18 14:00 ` [PATCH 18/38] drm/i915: Track the context's seqno in its own timeline HWSP Chris Wilson
2019-01-18 14:00 ` [PATCH 19/38] drm/i915: Identify active requests Chris Wilson
2019-01-18 14:00 ` [PATCH 20/38] drm/i915: Remove the intel_engine_notify tracepoint Chris Wilson
2019-01-18 14:00 ` [PATCH 21/38] drm/i915: Replace global breadcrumbs with per-context interrupt tracking Chris Wilson
2019-01-18 14:00 ` [PATCH 22/38] drm/i915: Drop fake breadcrumb irq Chris Wilson
2019-01-18 14:00 ` [PATCH 23/38] drm/i915: Replace global_seqno with a hangcheck heartbeat seqno Chris Wilson
2019-01-18 14:00 ` [PATCH 24/38] drm/i915: Avoid presumption of execution ordering for kernel context switching Chris Wilson
2019-01-18 14:00 ` [PATCH 25/38] drm/i915/pmu: Always sample an active ringbuffer Chris Wilson
2019-01-22  9:20   ` Tvrtko Ursulin
2019-01-18 14:00 ` [PATCH 26/38] drm/i915: Remove the global per-engine execution timeline Chris Wilson
2019-01-18 14:00 ` [PATCH 27/38] drm/i915: Introduce the i915_user_extension_method Chris Wilson
2019-01-22  9:31   ` Tvrtko Ursulin
2019-01-22 10:47     ` Chris Wilson
2019-01-22 11:05       ` Tvrtko Ursulin
2019-01-18 14:00 ` [PATCH 28/38] drm/i915: Create/destroy VM (ppGTT) for use with contexts Chris Wilson
2019-01-23 11:30   ` Tvrtko Ursulin
2019-01-23 11:51     ` Chris Wilson
2019-01-23 12:03       ` Tvrtko Ursulin
2019-01-24 15:58     ` [PATCH v3] " Chris Wilson
2019-01-18 14:01 ` [PATCH 29/38] drm/i915: Expose user control over the ppGTT associated with a context Chris Wilson
2019-01-23 12:00   ` Tvrtko Ursulin
2019-01-23 12:15     ` Chris Wilson
2019-01-18 14:01 ` [PATCH 30/38] drm/i915: Extend CONTEXT_CREATE to set parameters upon construction Chris Wilson
2019-01-18 14:01 ` Chris Wilson [this message]
2019-01-24 17:35   ` [PATCH 31/38] drm/i915: Allow contexts to share a single timeline across all engines Tvrtko Ursulin
2019-01-18 14:01 ` [PATCH 32/38] drm/i915: Fix I915_EXEC_RING_MASK Chris Wilson
2019-01-18 14:01 ` [PATCH 33/38] drm/i915: Remove last traces of exec-id (GEM_BUSY) Chris Wilson
2019-01-18 14:01 ` [PATCH 34/38] drm/i915: Re-arrange execbuf so context is known before engine Chris Wilson
2019-01-18 14:01 ` [PATCH 35/38] drm/i915: Allow a context to define its set of engines Chris Wilson
2019-01-18 14:01 ` [PATCH 36/38] drm/i915/execlists: Refactor out can_merge_rq() Chris Wilson
2019-01-18 14:01 ` [PATCH 37/38] drm/i915: Store the BIT(engine->id) as the engine's mask Chris Wilson
2019-01-18 14:01 ` [PATCH 38/38] drm/i915: Load balancing across a virtual engine Chris Wilson
2019-01-18 14:17 ` ✗ Fi.CI.BAT: failure for series starting with [01/38] drm/i915/execlists: Store the highest priority context Patchwork
2019-01-24 16:28 ` ✗ Fi.CI.BAT: failure for series starting with [01/38] drm/i915/execlists: Store the highest priority context (rev2) Patchwork
