From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 32/39] drm/i915: Drop the deferred active reference
Date: Wed, 13 Mar 2019 14:43:54 +0000
Message-ID: <20190313144401.17735-32-chris@chris-wilson.co.uk>
In-Reply-To: <20190313144401.17735-1-chris@chris-wilson.co.uk>

An old optimisation to reduce the number of atomics per batch sadly
relies on struct_mutex for coordination. In order to remove struct_mutex
from serialising object/context closing, drop the deferred active
reference and instead always take and release a reference on the
object's first use / last use; this greatly simplifies the locking.
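
In short, the lifetime rule becomes (a simplified sketch of the change
below, with locking and error handling elided):

	/* i915_vma_move_to_active(): first activation pins the object */
	if (!vma->active.count && !obj->active_count++)
		i915_gem_object_get(obj); /* once more for the active ref */

	/* __i915_vma_retire(): the last vma to retire drops that extra ref */
	if (!--obj->active_count)
		i915_gem_object_put(obj); /* and drop the active reference */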

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.c    | 13 +---------
 drivers/gpu/drm/i915/gem/i915_gem_object.h    | 24 +------------------
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  8 -------
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |  3 +--
 .../i915/gem/selftests/i915_gem_coherency.c   |  4 ++--
 .../drm/i915/gem/selftests/i915_gem_context.c | 11 +++++----
 .../drm/i915/gem/selftests/i915_gem_mman.c    |  2 +-
 drivers/gpu/drm/i915/gvt/scheduler.c          |  2 +-
 drivers/gpu/drm/i915/i915_gem_batch_pool.c    |  2 +-
 drivers/gpu/drm/i915/i915_gem_render_state.c  |  2 +-
 drivers/gpu/drm/i915/i915_vma.c               | 15 +++++-------
 drivers/gpu/drm/i915/intel_engine_cs.c        |  2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c       |  3 +--
 drivers/gpu/drm/i915/selftests/i915_request.c |  8 -------
 drivers/gpu/drm/i915/selftests/igt_spinner.c  |  9 +------
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |  9 +------
 .../drm/i915/selftests/intel_workarounds.c    |  3 ---
 18 files changed, 26 insertions(+), 96 deletions(-)
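
For callers (mostly the selftests) the pattern becomes correspondingly
simpler: instead of tagging the batch with I915_BO_ACTIVE_REF and leaving
the final put to retirement, they drop their own reference as soon as the
work is queued and rely on the active reference to keep the backing store
alive. A sketch of the resulting pattern, condensed from the hunks below:

	err = i915_vma_move_to_active(batch, rq, 0);
	...
	i915_request_add(rq);

	i915_vma_unpin(batch);
	i915_vma_close(batch);
	i915_vma_put(batch); /* the active ref keeps it alive until retirement */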

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index f3c55c99eb1e..af96b57f9d1d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -129,7 +129,7 @@ static void lut_close(struct i915_gem_context *ctx)
 		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
 
 		vma->open_count--;
-		__i915_gem_object_release_unless_active(vma->obj);
+		i915_vma_put(vma);
 	}
 	rcu_read_unlock();
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index ebed1898a6cc..d21a7095092c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -156,7 +156,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 		list_del(&lut->ctx_link);
 
 		i915_lut_handle_free(lut);
-		__i915_gem_object_release_unless_active(obj);
+		i915_gem_object_put(obj);
 	}
 
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -348,17 +348,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
 }
 
-void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
-{
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
-	if (!i915_gem_object_has_active_reference(obj) &&
-	    i915_gem_object_is_active(obj))
-		i915_gem_object_set_active_reference(obj);
-	else
-		i915_gem_object_put(obj);
-}
-
 static inline enum fb_op_origin
 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
 {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index d229a8d675d1..afc665359e58 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -161,31 +161,9 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
 static inline bool
 i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
 {
-	return obj->active_count;
+	return READ_ONCE(obj->active_count);
 }
 
-static inline bool
-i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
-{
-	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-static inline void
-i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
-{
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-static inline void
-i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
-{
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
-}
-
-void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
-
 static inline bool
 i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index da6a33e2395f..9f61220795a6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -120,14 +120,6 @@ struct drm_i915_gem_object {
 	struct list_head batch_pool_link;
 	I915_SELFTEST_DECLARE(struct list_head st_link);
 
-	unsigned long flags;
-
-	/**
-	 * Have we taken a reference for the object for incomplete GPU
-	 * activity?
-	 */
-#define I915_BO_ACTIVE_REF 0
-
 	/*
 	 * Is the object to be mapped as read-only to the GPU
 	 * Only honoured if hardware has relevant pte bit
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 189fdb5aece8..295e2dc30840 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -979,8 +979,6 @@ static int gpu_write(struct i915_vma *vma,
 	if (err)
 		goto err_request;
 
-	i915_gem_object_set_active_reference(batch->obj);
-
 	i915_vma_lock(vma);
 	err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
 	if (err == 0)
@@ -999,6 +997,7 @@ static int gpu_write(struct i915_vma *vma,
 err_batch:
 	i915_vma_unpin(batch);
 	i915_vma_close(batch);
+	i915_vma_put(batch);
 
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index aebc79500ca1..ec93febcf0fb 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -365,7 +365,7 @@ static int igt_gem_coherency(void *arg)
 						}
 					}
 
-					__i915_gem_object_release_unless_active(obj);
+					i915_gem_object_put(obj);
 				}
 			}
 		}
@@ -377,7 +377,7 @@ static int igt_gem_coherency(void *arg)
 	return err;
 
 put_object:
-	__i915_gem_object_release_unless_active(obj);
+	i915_gem_object_put(obj);
 	goto unlock;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 9be75739c9fa..9448d1b434a6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -314,14 +314,14 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 	if (err)
 		goto skip_request;
 
-	i915_gem_object_set_active_reference(batch->obj);
+	i915_request_add(rq);
+
 	i915_vma_unpin(batch);
 	i915_vma_close(batch);
+	i915_vma_put(batch);
 
 	i915_vma_unpin(vma);
 
-	i915_request_add(rq);
-
 	return 0;
 
 skip_request:
@@ -803,9 +803,9 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 	if (err)
 		goto skip_request;
 
-	i915_gem_object_set_active_reference(batch->obj);
 	i915_vma_unpin(batch);
 	i915_vma_close(batch);
+	i915_vma_put(batch);
 
 	i915_vma_unpin(vma);
 
@@ -821,6 +821,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 	i915_request_add(rq);
 err_batch:
 	i915_vma_unpin(batch);
+	i915_vma_put(batch);
 err_vma:
 	i915_vma_unpin(vma);
 
@@ -1368,9 +1369,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
 	if (err)
 		goto skip_request;
 
-	i915_gem_object_set_active_reference(obj);
 	i915_vma_unpin(vma);
 	i915_vma_close(vma);
+	i915_vma_put(vma);
 
 	i915_request_add(rq);
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 062c6bdea3d8..50d26069889e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -353,8 +353,8 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 
 	i915_request_add(rq);
 
-	__i915_gem_object_release_unless_active(obj);
 	i915_vma_unpin(vma);
+	i915_gem_object_put(obj); /* leave it alive only via its active ref */
 
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d6b9b8b46f17..9cef54e699cf 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -608,7 +608,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 				i915_vma_unpin(bb->vma);
 				i915_vma_close(bb->vma);
 			}
-			__i915_gem_object_release_unless_active(bb->obj);
+			i915_gem_object_put(bb->obj);
 		}
 		list_del(&bb->list);
 		kfree(bb);
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index f3890b664e3f..56adfdcaed3e 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -55,7 +55,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 		list_for_each_entry_safe(obj, next,
 					 &pool->cache_list[n],
 					 batch_pool_link)
-			__i915_gem_object_release_unless_active(obj);
+			i915_gem_object_put(obj);
 
 		INIT_LIST_HEAD(&pool->cache_list[n]);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index a608ddf74b5c..80a9a134ee60 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -230,6 +230,6 @@ int i915_gem_render_state_emit(struct i915_request *rq)
 err_vma:
 	i915_vma_close(so.vma);
 err_obj:
-	__i915_gem_object_release_unless_active(so.obj);
+	i915_gem_object_put(so.obj);
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 51a8d2b0d7b3..741ae66eeff8 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -114,10 +114,7 @@ static void __i915_vma_retire(struct i915_active *ref)
 	 */
 	obj_bump_mru(obj);
 
-	if (i915_gem_object_has_active_reference(obj)) {
-		i915_gem_object_clear_active_reference(obj);
-		i915_gem_object_put(obj);
-	}
+	i915_gem_object_put(obj); /* and drop the active reference */
 }
 
 static struct i915_vma *
@@ -442,7 +439,7 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
 	if (flags & I915_VMA_RELEASE_MAP)
 		i915_gem_object_unpin_map(obj);
 
-	__i915_gem_object_release_unless_active(obj);
+	i915_gem_object_put(obj);
 }
 
 bool i915_vma_misplaced(const struct i915_vma *vma,
@@ -939,12 +936,12 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
-	if (!vma->active.count)
-		obj->active_count++;
+	if (!vma->active.count && !obj->active_count++)
+		i915_gem_object_get(obj); /* once more for the active ref */
 
 	if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
-		if (!vma->active.count)
-			obj->active_count--;
+		if (!vma->active.count && !--obj->active_count)
+			i915_gem_object_put(obj);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 381b52a26fe3..4eb0e879b76a 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -481,7 +481,7 @@ static void cleanup_status_page(struct intel_engine_cs *engine)
 		i915_vma_unpin(vma);
 
 	i915_gem_object_unpin_map(vma->obj);
-	__i915_gem_object_release_unless_active(vma->obj);
+	i915_gem_object_put(vma->obj);
 }
 
 static int pin_ggtt_status_page(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 07212e955304..b51824baa069 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1340,10 +1340,9 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 void intel_ring_free(struct kref *ref)
 {
 	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
-	struct drm_i915_gem_object *obj = ring->vma->obj;
 
 	i915_vma_close(ring->vma);
-	__i915_gem_object_release_unless_active(obj);
+	i915_vma_put(ring->vma);
 
 	i915_timeline_put(ring->timeline);
 	kfree(ring);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 1162d37bc106..da19543f7c4e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -877,11 +877,6 @@ static int live_all_engines(void *arg)
 		GEM_BUG_ON(err);
 		request[id]->batch = batch;
 
-		if (!i915_gem_object_has_active_reference(batch->obj)) {
-			i915_gem_object_get(batch->obj);
-			i915_gem_object_set_active_reference(batch->obj);
-		}
-
 		i915_vma_lock(batch);
 		err = i915_vma_move_to_active(batch, request[id], 0);
 		i915_vma_unlock(batch);
@@ -1004,9 +999,6 @@ static int live_sequential_engines(void *arg)
 		i915_vma_unlock(batch);
 		GEM_BUG_ON(err);
 
-		i915_gem_object_set_active_reference(batch->obj);
-		i915_vma_get(batch);
-
 		i915_request_get(request[id]);
 		i915_request_add(request[id]);
 
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index d822072fe8df..b9e6ce180271 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -77,15 +77,8 @@ static int move_to_active(struct i915_vma *vma,
 	i915_vma_lock(vma);
 	err = i915_vma_move_to_active(vma, rq, flags);
 	i915_vma_unlock(vma);
-	if (err)
-		return err;
-
-	if (!i915_gem_object_has_active_reference(vma->obj)) {
-		i915_gem_object_get(vma->obj);
-		i915_gem_object_set_active_reference(vma->obj);
-	}
 
-	return 0;
+	return err;
 }
 
 struct i915_request *
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index df1cb01ffb9c..7c25c056579c 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -117,15 +117,8 @@ static int move_to_active(struct i915_vma *vma,
 	i915_vma_lock(vma);
 	err = i915_vma_move_to_active(vma, rq, flags);
 	i915_vma_unlock(vma);
-	if (err)
-		return err;
-
-	if (!i915_gem_object_has_active_reference(vma->obj)) {
-		i915_gem_object_get(vma->obj);
-		i915_gem_object_set_active_reference(vma->obj);
-	}
 
-	return 0;
+	return err;
 }
 
 static struct i915_request *
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index c068d64543d9..783f0244ac49 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -136,9 +136,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 	}
 	intel_ring_advance(rq, cs);
 
-	i915_gem_object_get(result);
-	i915_gem_object_set_active_reference(result);
-
 	i915_request_add(rq);
 	i915_vma_unpin(vma);
 
-- 
2.20.1
