From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 23/66] drm/i915/gem: Include cmdparser in common execbuf pinning
Date: Wed, 15 Jul 2020 12:51:04 +0100
Message-ID: <20200715115147.11866-23-chris@chris-wilson.co.uk>
In-Reply-To: <20200715115147.11866-1-chris@chris-wilson.co.uk>

Pull the cmdparser allocations into the reservation phase so that they
are included in the common vma pinning pass.
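
For illustration only (not part of the patch), the reordered control
flow can be sketched with the function names used in the diff below;
the eb_execbuf_flow() wrapper is hypothetical and all error unwinding
is elided:

static int eb_execbuf_flow(struct i915_execbuffer *eb)
{
	int err;

	/*
	 * eb_lookup_vmas() now tail-calls eb_alloc_cmdparser(), so the
	 * shadow (and, for GGTT-based parsers, trampoline) buffers are
	 * allocated up front and linked into eb->bind_list and
	 * eb->submit_list alongside the user objects.
	 */
	err = eb_lookup_vmas(eb);
	if (err)
		return err;

	/* A single common pass then reserves and pins every vma. */
	err = eb_reserve_vm(eb);
	if (err)
		return err;

	/* eb_parse() only consumes the already-pinned shadow vma. */
	return eb_parse(eb);
}

This way the parser buffers obey the same binding and eviction rules
as the rest of the execbuf objects, instead of being pinned ad hoc
after reservation.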

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 360 +++++++++++-------
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  10 +
 drivers/gpu/drm/i915/i915_cmd_parser.c        |  21 +-
 3 files changed, 230 insertions(+), 161 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index af2b4aeb6df0..8c1f3528b1e9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -25,6 +25,7 @@
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
 #include "i915_gem_ioctls.h"
+#include "i915_memcpy.h"
 #include "i915_sw_fence_work.h"
 #include "i915_trace.h"
 
@@ -52,6 +53,7 @@ struct eb_bind_vma {
 
 struct eb_vma_array {
 	struct kref kref;
+	struct list_head aux_list;
 	struct eb_vma vma[];
 };
 
@@ -246,7 +248,6 @@ struct i915_execbuffer {
 
 	struct i915_request *request; /** our request to build */
 	struct eb_vma *batch; /** identity of the batch obj/vma */
-	struct i915_vma *trampoline; /** trampoline used for chaining */
 
 	/** actual size of execobj[] as we may extend it for the cmdparser */
 	unsigned int buffer_count;
@@ -281,6 +282,11 @@ struct i915_execbuffer {
 		unsigned int rq_size;
 	} reloc_cache;
 
+	struct eb_cmdparser {
+		struct eb_vma *shadow;
+		struct eb_vma *trampoline;
+	} parser;
+
 	u64 invalid_flags; /** Set of execobj.flags that are invalid */
 	u32 context_flags; /** Set of execobj.flags to insert from the ctx */
 
@@ -298,6 +304,10 @@ struct i915_execbuffer {
 	struct eb_vma_array *array;
 };
 
+static struct drm_i915_gem_exec_object2 no_entry = {
+	.offset = -1ull
+};
+
 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
 {
 	return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -314,6 +324,7 @@ static struct eb_vma_array *eb_vma_array_create(unsigned int count)
 		return NULL;
 
 	kref_init(&arr->kref);
+	INIT_LIST_HEAD(&arr->aux_list);
 	arr->vma[0].vma = NULL;
 
 	return arr;
@@ -339,16 +350,31 @@ static inline void eb_unreserve_vma(struct eb_vma *ev)
 		       __EXEC_OBJECT_HAS_FENCE);
 }
 
+static void eb_vma_destroy(struct eb_vma *ev)
+{
+	eb_unreserve_vma(ev);
+	i915_vma_put(ev->vma);
+}
+
+static void eb_destroy_aux(struct eb_vma_array *arr)
+{
+	struct eb_vma *ev, *en;
+
+	list_for_each_entry_safe(ev, en, &arr->aux_list, reloc_link) {
+		eb_vma_destroy(ev);
+		kfree(ev);
+	}
+}
+
 static void eb_vma_array_destroy(struct kref *kref)
 {
 	struct eb_vma_array *arr = container_of(kref, typeof(*arr), kref);
-	struct eb_vma *ev = arr->vma;
+	struct eb_vma *ev;
 
-	while (ev->vma) {
-		eb_unreserve_vma(ev);
-		i915_vma_put(ev->vma);
-		ev++;
-	}
+	eb_destroy_aux(arr);
+
+	for (ev = arr->vma; ev->vma; ev++)
+		eb_vma_destroy(ev);
 
 	kvfree(arr);
 }
@@ -396,8 +422,8 @@ eb_lock_vma(struct i915_execbuffer *eb, struct ww_acquire_ctx *acquire)
 
 static int eb_create(struct i915_execbuffer *eb)
 {
-	/* Allocate an extra slot for use by the command parser + sentinel */
-	eb->array = eb_vma_array_create(eb->buffer_count + 2);
+	/* Allocate an extra slot for use by the sentinel */
+	eb->array = eb_vma_array_create(eb->buffer_count + 1);
 	if (!eb->array)
 		return -ENOMEM;
 
@@ -1078,7 +1104,7 @@ static int eb_reserve_vma(struct eb_vm_work *work, struct eb_bind_vma *bind)
 	GEM_BUG_ON(!(drm_mm_node_allocated(&vma->node) ^
 		     drm_mm_node_allocated(&bind->hole)));
 
-	if (entry->offset != vma->node.start) {
+	if (entry != &no_entry && entry->offset != vma->node.start) {
 		entry->offset = vma->node.start | UPDATE;
 		*work->p_flags |= __EXEC_HAS_RELOC;
 	}
@@ -1371,7 +1397,8 @@ static int eb_reserve_vm(struct i915_execbuffer *eb)
 		struct i915_vma *vma = ev->vma;
 
 		if (eb_pin_vma_inplace(eb, entry, ev)) {
-			if (entry->offset != vma->node.start) {
+			if (entry != &no_entry &&
+			    entry->offset != vma->node.start) {
 				entry->offset = vma->node.start | UPDATE;
 				eb->args->flags |= __EXEC_HAS_RELOC;
 			}
@@ -1542,6 +1569,113 @@ static int eb_reserve_vm(struct i915_execbuffer *eb)
 	} while (1);
 }
 
+static int eb_alloc_cmdparser(struct i915_execbuffer *eb)
+{
+	struct intel_gt_buffer_pool_node *pool;
+	struct i915_vma *vma;
+	struct eb_vma *ev;
+	unsigned int len;
+	int err;
+
+	if (range_overflows_t(u64,
+			      eb->batch_start_offset, eb->batch_len,
+			      eb->batch->vma->size)) {
+		drm_dbg(&eb->i915->drm,
+			"Attempting to use out-of-bounds batch\n");
+		return -EINVAL;
+	}
+
+	if (eb->batch_len == 0)
+		eb->batch_len = eb->batch->vma->size - eb->batch_start_offset;
+
+	if (!eb_use_cmdparser(eb))
+		return 0;
+
+	len = eb->batch_len;
+	if (!CMDPARSER_USES_GGTT(eb->i915)) {
+		/*
+		 * ppGTT backed shadow buffers must be mapped RO, to prevent
+		 * post-scan tampering
+		 */
+		if (!eb->context->vm->has_read_only) {
+			drm_dbg(&eb->i915->drm,
+				"Cannot prevent post-scan tampering without RO capable vm\n");
+			return -EINVAL;
+		}
+	} else {
+		len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
+	}
+
+	pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
+	if (IS_ERR(pool))
+		return PTR_ERR(pool);
+
+	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+	if (!ev) {
+		err = -ENOMEM;
+		goto err_pool;
+	}
+
+	vma = i915_vma_instance(pool->obj, eb->context->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_ev;
+	}
+	i915_gem_object_set_readonly(vma->obj);
+	i915_gem_object_set_cache_coherency(vma->obj, I915_CACHE_LLC);
+	vma->private = pool;
+
+	ev->vma = i915_vma_get(vma);
+	ev->exec = &no_entry;
+	list_add(&ev->reloc_link, &eb->array->aux_list);
+	list_add(&ev->bind_link, &eb->bind_list);
+	list_add(&ev->submit_link, &eb->submit_list);
+
+	if (CMDPARSER_USES_GGTT(eb->i915)) {
+		eb->parser.trampoline = ev;
+
+		/*
+		 * Special care when binding will be required for full-ppgtt
+		 * as there will be distinct vm involved, and we will need to
+		 * separate the binding/eviction passes (different vm->mutex).
+		 */
+		if (GEM_WARN_ON(eb->context->vm != &eb->engine->gt->ggtt->vm)) {
+			ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+			if (!ev) {
+				err = -ENOMEM;
+				goto err_pool;
+			}
+
+			vma = i915_vma_instance(pool->obj,
+						&eb->engine->gt->ggtt->vm,
+						NULL);
+			if (IS_ERR(vma)) {
+				err = PTR_ERR(vma);
+				goto err_ev;
+			}
+			vma->private = pool;
+
+			ev->vma = i915_vma_get(vma);
+			ev->exec = &no_entry;
+			list_add(&ev->reloc_link, &eb->array->aux_list);
+			list_add(&ev->bind_link, &eb->bind_list);
+			list_add(&ev->submit_link, &eb->submit_list);
+		}
+
+		ev->flags = EXEC_OBJECT_NEEDS_GTT;
+		eb->batch_flags |= I915_DISPATCH_SECURE;
+	}
+
+	eb->parser.shadow = ev;
+	return 0;
+
+err_ev:
+	kfree(ev);
+err_pool:
+	intel_gt_buffer_pool_put(pool);
+	return err;
+}
+
 static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
 {
 	if (eb->args->flags & I915_EXEC_BATCH_FIRST)
@@ -1683,9 +1817,15 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 
 		eb_add_vma(eb, i, batch, vma);
 	}
-
 	eb->vma[i].vma = NULL;
-	return err;
+	if (err)
+		return err;
+
+	err = eb_alloc_cmdparser(eb);
+	if (err)
+		return err;
+
+	return 0;
 }
 
 static struct eb_vma *
@@ -1712,9 +1852,7 @@ static void eb_destroy(const struct i915_execbuffer *eb)
 {
 	GEM_BUG_ON(eb->reloc_cache.rq);
 
-	if (eb->array)
-		eb_vma_array_put(eb->array);
-
+	eb_vma_array_put(eb->array);
 	if (eb->lut_size > 0)
 		kfree(eb->buckets);
 }
@@ -2416,8 +2554,6 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 	}
 	ww_acquire_fini(&acquire);
 
-	eb_vma_array_put(fetch_and_zero(&eb->array));
-
 	if (unlikely(err))
 		goto err_skip;
 
@@ -2481,25 +2617,6 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 	return 0;
 }
 
-static struct i915_vma *
-shadow_batch_pin(struct drm_i915_gem_object *obj,
-		 struct i915_address_space *vm,
-		 unsigned int flags)
-{
-	struct i915_vma *vma;
-	int err;
-
-	vma = i915_vma_instance(obj, vm, NULL);
-	if (IS_ERR(vma))
-		return vma;
-
-	err = i915_vma_pin(vma, 0, 0, flags);
-	if (err)
-		return ERR_PTR(err);
-
-	return vma;
-}
-
 struct eb_parse_work {
 	struct dma_fence_work base;
 	struct intel_engine_cs *engine;
@@ -2522,9 +2639,18 @@ static int __eb_parse(struct dma_fence_work *work)
 				       pw->trampoline);
 }
 
+static void __eb_parse_release(struct dma_fence_work *work)
+{
+	struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
+
+	i915_gem_object_unpin_pages(pw->shadow->obj);
+	i915_gem_object_unpin_pages(pw->batch->obj);
+}
+
 static const struct dma_fence_work_ops eb_parse_ops = {
 	.name = "eb_parse",
 	.work = __eb_parse,
+	.release = __eb_parse_release,
 };
 
 static inline int
@@ -2542,36 +2668,51 @@ parser_mark_active(struct eb_parse_work *pw, struct intel_timeline *tl)
 {
 	int err;
 
+	GEM_BUG_ON(pw->trampoline &&
+		   pw->trampoline->private != pw->shadow->private);
+
 	err = i915_active_ref(&pw->batch->active,
 			      tl->fence_context,
 			      &pw->base.dma);
 	if (err)
 		return err;
 
-	err = __parser_mark_active(pw->shadow, tl, &pw->base.dma);
-	if (err)
-		return err;
-
-	if (pw->trampoline) {
-		err = __parser_mark_active(pw->trampoline, tl, &pw->base.dma);
-		if (err)
-			return err;
-	}
-
-	return 0;
+	return __parser_mark_active(pw->shadow, tl, &pw->base.dma);
 }
 
 static int eb_parse_pipeline(struct i915_execbuffer *eb,
 			     struct i915_vma *shadow,
 			     struct i915_vma *trampoline)
 {
+	struct i915_vma *batch = eb->batch->vma;
 	struct eb_parse_work *pw;
+	void *ptr;
 	int err;
 
+	GEM_BUG_ON(!i915_vma_is_pinned(shadow));
+	GEM_BUG_ON(trampoline && !i915_vma_is_pinned(trampoline));
+
 	pw = kzalloc(sizeof(*pw), GFP_KERNEL);
 	if (!pw)
 		return -ENOMEM;
 
+	ptr = i915_gem_object_pin_map(shadow->obj, I915_MAP_FORCE_WB);
+	if (IS_ERR(ptr)) {
+		err = PTR_ERR(ptr);
+		goto err_free;
+	}
+
+	if (!(batch->obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ) &&
+	    i915_has_memcpy_from_wc()) {
+		ptr = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+		if (IS_ERR(ptr)) {
+			err = PTR_ERR(ptr);
+			goto err_dst;
+		}
+	} else {
+		__i915_gem_object_pin_pages(batch->obj);
+	}
+
 	dma_fence_work_init(&pw->base, &eb_parse_ops);
 
 	pw->engine = eb->engine;
@@ -2620,86 +2761,36 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
 	i915_sw_fence_set_error_once(&pw->base.chain, err);
 	dma_fence_work_commit_imm(&pw->base);
 	return err;
+
+err_dst:
+	i915_gem_object_unpin_pages(shadow->obj);
+err_free:
+	kfree(pw);
+	return err;
 }
 
 static int eb_parse(struct i915_execbuffer *eb)
 {
-	struct drm_i915_private *i915 = eb->i915;
-	struct intel_gt_buffer_pool_node *pool;
-	struct i915_vma *shadow, *trampoline;
-	unsigned int len;
 	int err;
 
-	if (!eb_use_cmdparser(eb))
-		return 0;
-
-	len = eb->batch_len;
-	if (!CMDPARSER_USES_GGTT(eb->i915)) {
-		/*
-		 * ppGTT backed shadow buffers must be mapped RO, to prevent
-		 * post-scan tampering
-		 */
-		if (!eb->context->vm->has_read_only) {
-			drm_dbg(&i915->drm,
-				"Cannot prevent post-scan tampering without RO capable vm\n");
-			return -EINVAL;
-		}
-	} else {
-		len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
-	}
-
-	pool = intel_gt_get_buffer_pool(eb->engine->gt, len);
-	if (IS_ERR(pool))
-		return PTR_ERR(pool);
-
-	shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
-	if (IS_ERR(shadow)) {
-		err = PTR_ERR(shadow);
-		goto err;
+	if (unlikely(eb->batch->flags & EXEC_OBJECT_WRITE)) {
+		drm_dbg(&eb->i915->drm,
+			"Attempting to use self-modifying batch buffer\n");
+		return -EINVAL;
 	}
-	i915_gem_object_set_readonly(shadow->obj);
-	shadow->private = pool;
-
-	trampoline = NULL;
-	if (CMDPARSER_USES_GGTT(eb->i915)) {
-		trampoline = shadow;
-
-		shadow = shadow_batch_pin(pool->obj,
-					  &eb->engine->gt->ggtt->vm,
-					  PIN_GLOBAL);
-		if (IS_ERR(shadow)) {
-			err = PTR_ERR(shadow);
-			shadow = trampoline;
-			goto err_shadow;
-		}
-		shadow->private = pool;
 
-		eb->batch_flags |= I915_DISPATCH_SECURE;
-	}
+	if (!eb->parser.shadow)
+		return 0;
 
-	err = eb_parse_pipeline(eb, shadow, trampoline);
+	err = eb_parse_pipeline(eb,
+				eb->parser.shadow->vma,
+				eb->parser.trampoline ? eb->parser.trampoline->vma : NULL);
 	if (err)
-		goto err_trampoline;
-
-	eb->batch = &eb->vma[eb->buffer_count++];
-	eb->batch->vma = i915_vma_get(shadow);
-	eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
-	list_add_tail(&eb->batch->submit_link, &eb->submit_list);
-	eb->vma[eb->buffer_count].vma = NULL;
+		return err;
 
-	eb->trampoline = trampoline;
+	eb->batch = eb->parser.shadow;
 	eb->batch_start_offset = 0;
-
 	return 0;
-
-err_trampoline:
-	if (trampoline)
-		i915_vma_unpin(trampoline);
-err_shadow:
-	i915_vma_unpin(shadow);
-err:
-	intel_gt_buffer_pool_put(pool);
-	return err;
 }
 
 static void
@@ -2748,10 +2839,10 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
 	if (err)
 		return err;
 
-	if (eb->trampoline) {
+	if (eb->parser.trampoline) {
 		GEM_BUG_ON(eb->batch_start_offset);
 		err = eb->engine->emit_bb_start(eb->request,
-						eb->trampoline->node.start +
+						eb->parser.trampoline->vma->node.start +
 						eb->batch_len,
 						0, 0);
 		if (err)
@@ -3242,7 +3333,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	eb.buffer_count = args->buffer_count;
 	eb.batch_start_offset = args->batch_start_offset;
 	eb.batch_len = args->batch_len;
-	eb.trampoline = NULL;
+	memset(&eb.parser, 0, sizeof(eb.parser));
 
 	eb.batch_flags = 0;
 	if (args->flags & I915_EXEC_SECURE) {
@@ -3317,24 +3408,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		goto err_vma;
 	}
 
-	if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
-		drm_dbg(&i915->drm,
-			"Attempting to use self-modifying batch buffer\n");
-		err = -EINVAL;
-		goto err_vma;
-	}
-
-	if (range_overflows_t(u64,
-			      eb.batch_start_offset, eb.batch_len,
-			      eb.batch->vma->size)) {
-		drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
-		err = -EINVAL;
-		goto err_vma;
-	}
-
-	if (eb.batch_len == 0)
-		eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;
-
 	err = eb_parse(&eb);
 	if (err)
 		goto err_vma;
@@ -3360,7 +3433,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 		vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
 		if (IS_ERR(vma)) {
 			err = PTR_ERR(vma);
-			goto err_parse;
+			goto err_vma;
 		}
 
 		GEM_BUG_ON(vma->obj != batch->obj);
@@ -3412,8 +3485,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	 * to explicitly hold another reference here.
 	 */
 	eb.request->batch = batch;
-	if (batch->private)
-		intel_gt_buffer_pool_mark_active(batch->private, eb.request);
+	if (eb.parser.shadow)
+		intel_gt_buffer_pool_mark_active(eb.parser.shadow->vma->private,
+						 eb.request);
 
 	trace_i915_request_queue(eb.request, eb.batch_flags);
 	err = eb_submit(&eb, batch);
@@ -3430,18 +3504,14 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_batch_unpin:
 	if (eb.batch_flags & I915_DISPATCH_SECURE)
 		i915_vma_unpin(batch);
-err_parse:
-	if (batch->private)
-		intel_gt_buffer_pool_put(batch->private);
-	i915_vma_put(batch);
 err_vma:
-	if (eb.trampoline)
-		i915_vma_unpin(eb.trampoline);
 	eb_unlock_engine(&eb);
 	/* *** TIMELINE UNLOCK *** */
 err_engine:
 	eb_unpin_engine(&eb);
 err_context:
+	if (eb.parser.shadow)
+		intel_gt_buffer_pool_put(eb.parser.shadow->vma->private);
 	i915_gem_context_put(eb.gem_context);
 err_destroy:
 	eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index e5b9276d254c..6f60687b6be2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -368,6 +368,16 @@ enum i915_map_type {
 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 					   enum i915_map_type type);
 
+static inline void *__i915_gem_object_mapping(struct drm_i915_gem_object *obj)
+{
+	return page_mask_bits(obj->mm.mapping);
+}
+
+static inline int __i915_gem_object_mapping_type(struct drm_i915_gem_object *obj)
+{
+	return page_unmask_bits(obj->mm.mapping);
+}
+
 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
 				 unsigned long offset,
 				 unsigned long size);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 372354d33f55..dc8770206bb8 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1140,29 +1140,22 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 {
 	bool needs_clflush;
 	void *dst, *src;
-	int ret;
 
-	dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
-	if (IS_ERR(dst))
-		return dst;
+	GEM_BUG_ON(!i915_gem_object_has_pages(src_obj));
 
-	ret = i915_gem_object_pin_pages(src_obj);
-	if (ret) {
-		i915_gem_object_unpin_map(dst_obj);
-		return ERR_PTR(ret);
-	}
+	dst = __i915_gem_object_mapping(dst_obj);
+	GEM_BUG_ON(!dst);
 
 	needs_clflush =
 		!(src_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ);
 
 	src = ERR_PTR(-ENODEV);
 	if (needs_clflush && i915_has_memcpy_from_wc()) {
-		src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
-		if (!IS_ERR(src)) {
+		if (__i915_gem_object_mapping_type(src_obj) == I915_MAP_WC) {
+			src = __i915_gem_object_mapping(src_obj);
 			i915_unaligned_memcpy_from_wc(dst,
 						      src + offset,
 						      length);
-			i915_gem_object_unpin_map(src_obj);
 		}
 	}
 	if (IS_ERR(src)) {
@@ -1198,9 +1191,6 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 		}
 	}
 
-	i915_gem_object_unpin_pages(src_obj);
-
-	/* dst_obj is returned with vmap pinned */
 	return dst;
 }
 
@@ -1546,7 +1536,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 
 	if (!IS_ERR_OR_NULL(jump_whitelist))
 		kfree(jump_whitelist);
-	i915_gem_object_unpin_map(shadow->obj);
 	return ret;
 }
 
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
