From: Matthew Auld <matthew.auld@intel.com>
To: intel-gfx@lists.freedesktop.org
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>,
	Tvrtko Ursulin <tvrtko.ursulin@intel.com>,
	Sudeep Dutt <sudeep.dutt@intel.com>,
	dri-devel@lists.freedesktop.org, CQ Tang <cq.tang@intel.com>,
	Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>,
	Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
Subject: [RFC PATCH 088/162] drm/i915: support basic object migration
Date: Fri, 27 Nov 2020 12:06:04 +0000	[thread overview]
Message-ID: <20201127120718.454037-89-matthew.auld@intel.com> (raw)
In-Reply-To: <20201127120718.454037-1-matthew.auld@intel.com>

We are going to want to be able to move objects between different
regions, such as system memory and local memory. In the future
everything should just be another region.
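
A rough sketch of the expected calling convention, distilled from the
selftests added below (the helper name move_to_lmem is made up here,
ce is assumed to be a blitter kernel context, and INTEL_REGION_LMEM is
just an example destination):

static int move_to_lmem(struct drm_i915_gem_object *obj,
			struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		/* Drops mmaps, waits for idle and unbinds the object */
		err = i915_gem_object_prepare_move(obj);
		if (err)
			continue;

		/* Copies the backing store over using the blitter */
		err = i915_gem_object_migrate(obj, &ww, ce,
					      INTEL_REGION_LMEM);
	}

	return err;
}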

Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Sudeep Dutt <sudeep.dutt@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Prathap Kumar Valsan <prathap.kumar.valsan@intel.com>
Cc: CQ Tang <cq.tang@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      |  13 ++
 drivers/gpu/drm/i915/gem/i915_gem_mman.h      |   2 +
 drivers/gpu/drm/i915/gem/i915_gem_object.c    | 125 +++++++++++++
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |   9 +
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     |   2 +-
 .../drm/i915/selftests/intel_memory_region.c  | 174 +++++++++++++++++-
 6 files changed, 322 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 2561a2f1e54f..4e8a05c35252 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -546,6 +546,19 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 	spin_unlock(&obj->mmo.lock);
 }
 
+/**
+ * i915_gem_object_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ */
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_release_mmap_gtt(obj);
+	i915_gem_object_release_mmap_offset(obj);
+}
+
 static struct i915_mmap_offset *
 lookup_mmo(struct drm_i915_gem_object *obj,
 	   enum i915_mmap_type mmap_type)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index efee9e0d2508..7c5ccdf59359 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -24,6 +24,8 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
 			      struct drm_device *dev,
 			      u32 handle, u64 *offset);
 
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+
 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
 void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 5326b4b5a9f7..7ff430503497 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -26,11 +26,14 @@
 
 #include "display/intel_frontbuffer.h"
 #include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
 #include "i915_gem_mman.h"
 #include "i915_gem_object.h"
+#include "i915_gem_object_blt.h"
+#include "i915_gem_region.h"
 #include "i915_globals.h"
 #include "i915_trace.h"
 
@@ -311,6 +314,128 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		queue_work(i915->wq, &i915->mm.free_work);
 }
 
+int i915_gem_object_prepare_move(struct drm_i915_gem_object *obj)
+{
+	int err;
+
+	assert_object_held(obj);
+
+	if (obj->mm.madv != I915_MADV_WILLNEED)
+		return -EINVAL;
+
+	if (i915_gem_object_needs_bit17_swizzle(obj))
+		return -EINVAL;
+
+	if (i915_gem_object_is_framebuffer(obj))
+		return -EBUSY;
+
+	i915_gem_object_release_mmap(obj);
+
+	GEM_BUG_ON(obj->mm.mapping);
+	GEM_BUG_ON(obj->base.filp && mapping_mapped(obj->base.filp->f_mapping));
+
+	err = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_ALL,
+				   MAX_SCHEDULE_TIMEOUT);
+	if (err)
+		return err;
+
+	return i915_gem_object_unbind(obj,
+				      I915_GEM_OBJECT_UNBIND_ACTIVE);
+}
+
+int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+			    struct i915_gem_ww_ctx *ww,
+			    struct intel_context *ce,
+			    enum intel_region_id id)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct drm_i915_gem_object *donor;
+	struct intel_memory_region *mem;
+	struct sg_table *pages = NULL;
+	unsigned int page_sizes;
+	int err = 0;
+
+	assert_object_held(obj);
+	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
+	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
+	if (obj->mm.region->id == id)
+		return 0;
+
+	mem = i915->mm.regions[id];
+
+	donor = i915_gem_object_create_region(mem, obj->base.size, 0);
+	if (IS_ERR(donor)) {
+		err = PTR_ERR(donor);
+		return err;
+	}
+
+	err = i915_gem_object_lock(donor, ww);
+	if (err)
+		goto err_put_donor;
+
+	/* Copy backing-pages if we have to */
+	if (i915_gem_object_has_pages(obj) ||
+	    obj->base.filp) {
+		err = i915_gem_object_ww_copy_blt(obj, donor, ww, ce);
+		if (err)
+			goto unlock_donor;
+	}
+
+	err = i915_gem_object_set_to_cpu_domain(donor, false);
+	if (err)
+		goto unlock_donor;
+
+	intel_gt_retire_requests(&i915->gt);
+
+	i915_gem_object_unbind(donor, 0);
+	err = i915_gem_object_unbind(obj, 0);
+	if (err)
+		goto unlock_donor;
+
+	pages = __i915_gem_object_unset_pages(obj);
+	if (pages)
+		obj->ops->put_pages(obj, pages);
+
+	page_sizes = donor->mm.page_sizes.phys;
+	pages = __i915_gem_object_unset_pages(donor);
+
+	if (obj->ops->release)
+		obj->ops->release(obj);
+
+	/* We still need a little special casing for shmem */
+	if (obj->base.filp)
+		fput(fetch_and_zero(&obj->base.filp));
+	else if (donor->base.filp) {
+		atomic_long_inc(&donor->base.filp->f_count);
+		obj->base.filp = donor->base.filp;
+	}
+
+	obj->base.size = donor->base.size;
+	obj->mm.region = intel_memory_region_get(mem);
+	obj->flags = donor->flags;
+	obj->ops = donor->ops;
+	obj->cache_level = donor->cache_level;
+	obj->cache_coherent = donor->cache_coherent;
+	obj->cache_dirty = donor->cache_dirty;
+
+	list_replace_init(&donor->mm.blocks, &obj->mm.blocks);
+
+	/* Install the migrated pages on the object */
+	if (pages)
+		__i915_gem_object_set_pages(obj, pages, page_sizes);
+
+	GEM_BUG_ON(i915_gem_object_has_pages(donor));
+	GEM_BUG_ON(i915_gem_object_has_pinned_pages(donor));
+unlock_donor:
+	i915_gem_ww_unlock_single(donor);
+err_put_donor:
+	i915_gem_object_put(donor);
+
+	return err;
+}
+
 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
 	return !(obj->cache_level == I915_CACHE_NONE ||
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index c6c7ab181a65..1a1aa71a4494 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -51,8 +51,17 @@ void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 				    struct sg_table *pages);
 
 
+enum intel_region_id;
+int i915_gem_object_prepare_move(struct drm_i915_gem_object *obj);
+int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+			    struct i915_gem_ww_ctx *ww,
+			    struct intel_context *ce,
+			    enum intel_region_id id);
+
 void i915_gem_flush_free_objects(struct drm_i915_private *i915);
 
+void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj);
+
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index eacad971b955..2cdb7cf63383 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -183,7 +183,7 @@ void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
 		obj->ops->writeback(obj);
 }
 
-static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
+void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 {
 	struct radix_tree_iter iter;
 	void __rcu **slot;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 84525ddba321..7acb94e0e5fe 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -14,6 +14,7 @@
 
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_lmem.h"
+#include "gem/i915_gem_object_blt.h"
 #include "gem/i915_gem_region.h"
 #include "gem/i915_gem_object_blt.h"
 #include "gem/selftests/igt_gem_utils.h"
@@ -476,6 +477,71 @@ static int igt_lmem_create(void *arg)
 	return err;
 }
 
+static int igt_smem_create_migrate(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_context *ce = i915->gt.engine[BCS0]->kernel_context;
+	struct drm_i915_gem_object *obj;
+	struct i915_gem_ww_ctx ww;
+	int err = 0;
+
+	/* Create in lmem, then migrate the backing store to smem */
+	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	for_i915_gem_ww(&ww, err, true) {
+		err = i915_gem_object_lock(obj, &ww);
+		if (err)
+			continue;
+
+		err = i915_gem_object_migrate(obj, &ww, ce, INTEL_REGION_SMEM);
+		if (err)
+			continue;
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			continue;
+
+		i915_gem_object_unpin_pages(obj);
+	}
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
+static int igt_lmem_create_migrate(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_context *ce = i915->gt.engine[BCS0]->kernel_context;
+	struct drm_i915_gem_object *obj;
+	struct i915_gem_ww_ctx ww;
+	int err = 0;
+
+	/* Create in smem (shmem), then migrate the backing store to lmem */
+	obj = i915_gem_object_create_shmem(i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	for_i915_gem_ww(&ww, err, true) {
+		err = i915_gem_object_lock(obj, &ww);
+		if (err)
+			continue;
+
+		err = i915_gem_object_migrate(obj, &ww, ce, INTEL_REGION_LMEM);
+		if (err)
+			continue;
+
+		err = i915_gem_object_pin_pages(obj);
+		if (err)
+			continue;
+
+		i915_gem_object_unpin_pages(obj);
+	}
+	i915_gem_object_put(obj);
+
+	return err;
+}
 static int igt_lmem_write_gpu(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -880,7 +946,7 @@ static int igt_mock_shrink(void *arg)
 
 		list_add(&obj->st_link, &objects);
 
-		err = i915_gem_object_pin_pages(obj);
+		err = i915_gem_object_pin_pages_unlocked(obj);
 		if (err)
 			goto err_close_objects;
 
@@ -902,7 +968,7 @@ static int igt_mock_shrink(void *arg)
 		list_add(&obj->st_link, &objects);
 
 		/* Provoke the shrinker to start violently swinging its axe! */
-		err = i915_gem_object_pin_pages(obj);
+		err = i915_gem_object_pin_pages_unlocked(obj);
 		if (err) {
 			pr_err("failed to shrink for target=%pa", &target);
 			goto err_close_objects;
@@ -923,6 +989,107 @@ static int igt_mock_shrink(void *arg)
 	return err;
 }
 
+static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
+				  struct intel_context *ce,
+				  struct drm_i915_gem_object *obj)
+{
+	int err;
+
+	err = i915_gem_object_lock(obj, ww);
+	if (err)
+		return err;
+
+	err = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE |
+				   I915_WAIT_PRIORITY |
+				   I915_WAIT_ALL,
+				   MAX_SCHEDULE_TIMEOUT);
+	if (err)
+		return err;
+
+	err = i915_gem_object_prepare_move(obj);
+	if (err)
+		return err;
+
+	if (i915_gem_object_is_lmem(obj)) {
+		err = i915_gem_object_migrate(obj, ww, ce, INTEL_REGION_SMEM);
+		if (err)
+			return err;
+
+		if (i915_gem_object_is_lmem(obj)) {
+			pr_err("object still backed by lmem\n");
+			err = -EINVAL;
+		}
+
+		if (!list_empty(&obj->mm.blocks)) {
+			pr_err("object leaking memory region\n");
+			err = -EINVAL;
+		}
+
+		if (!i915_gem_object_has_struct_page(obj)) {
+			pr_err("object not backed by struct page\n");
+			err = -EINVAL;
+		}
+
+	} else {
+		err = i915_gem_object_migrate(obj, ww, ce, INTEL_REGION_LMEM);
+		if (err)
+			return err;
+
+		if (i915_gem_object_has_struct_page(obj)) {
+			pr_err("object still backed by struct page\n");
+			err = -EINVAL;
+		}
+
+		if (!i915_gem_object_is_lmem(obj)) {
+			pr_err("object not backed by lmem\n");
+			err = -EINVAL;
+		}
+	}
+
+	return err;
+}
+
+static int igt_lmem_pages_migrate(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj;
+	struct intel_context *ce;
+	struct i915_gem_ww_ctx ww;
+	int err;
+	int i;
+
+	if (!HAS_ENGINE(&i915->gt, BCS0))
+		return 0;
+
+	ce = i915->gt.engine[BCS0]->kernel_context;
+
+	/* From LMEM to shmem and back again */
+
+	obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	err = i915_gem_object_fill_blt(obj, ce, 0);
+	if (err)
+		goto out_put;
+
+	for (i = 1; i <= 4; ++i) {
+		for_i915_gem_ww(&ww, err, true)
+			err = lmem_pages_migrate_one(&ww, ce, obj);
+		if (err)
+			break;
+
+		err = i915_gem_object_fill_blt(obj, ce, 0xdeadbeaf);
+		if (err)
+			break;
+	}
+out_put:
+	i915_gem_object_put(obj);
+
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -960,6 +1127,9 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_lmem_create),
 		SUBTEST(igt_lmem_write_cpu),
 		SUBTEST(igt_lmem_write_gpu),
+		SUBTEST(igt_smem_create_migrate),
+		SUBTEST(igt_lmem_create_migrate),
+		SUBTEST(igt_lmem_pages_migrate),
 	};
 
 	if (!HAS_LMEM(i915)) {
-- 
2.26.2

Thread overview: 208+ messages
2020-11-27 12:04 [RFC PATCH 000/162] DG1 + LMEM enabling Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 001/162] drm/i915/selftest: also consider non-contiguous objects Matthew Auld
2020-11-27 19:44   ` Chris Wilson
2020-11-27 12:04 ` [RFC PATCH 002/162] drm/i915/selftest: assert we get 2M GTT pages Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 003/162] drm/i915/selftest: handle local-memory in perf_memcpy Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 004/162] drm/i915/gt: Move move context layout registers and offsets to lrc_reg.h Matthew Auld
2020-11-27 19:55   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:04 ` [RFC PATCH 005/162] drm/i915/gt: Rename lrc.c to execlists_submission.c Matthew Auld
2020-11-27 19:56   ` Chris Wilson
2020-11-27 12:04 ` [RFC PATCH 006/162] drm/i915: split gen8+ flush and bb_start emission functions to their own file Matthew Auld
2020-11-27 19:58   ` Chris Wilson
2020-11-27 12:04 ` [RFC PATCH 007/162] drm/i915: split wa_bb code to its " Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 008/162] HAX drm/i915: Work around the selftest timeline lock splat workaround Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 009/162] drm/i915: Introduce drm_i915_lock_isolated Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 010/162] drm/i915: Lock hwsp objects isolated for pinning at create time Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 011/162] drm/i915: Pin timeline map after first timeline pin, v5 Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 012/162] drm/i915: Move cmd parser pinning to execbuffer Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 013/162] drm/i915: Add missing -EDEADLK handling to execbuf pinning, v2 Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 014/162] drm/i915: Ensure we hold the object mutex in pin correctly v2 Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 015/162] drm/i915: Add gem object locking to madvise Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 016/162] drm/i915: Move HAS_STRUCT_PAGE to obj->flags Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 017/162] drm/i915: Rework struct phys attachment handling Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 018/162] drm/i915: Convert i915_gem_object_attach_phys() to ww locking, v2 Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 019/162] drm/i915: make lockdep slightly happier about execbuf Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 020/162] drm/i915: Disable userptr pread/pwrite support Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 021/162] drm/i915: No longer allow exporting userptr through dma-buf Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 022/162] drm/i915: Reject more ioctls for userptr Matthew Auld
2020-11-27 12:04 ` [RFC PATCH 023/162] drm/i915: Reject UNSYNCHRONIZED for userptr, v2 Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 024/162] drm/i915: Make compilation of userptr code depend on MMU_NOTIFIER Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 025/162] drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v5 Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 026/162] drm/i915: Flatten obj->mm.lock Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 027/162] drm/i915: Populate logical context during first pin Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 028/162] drm/i915: Make ring submission compatible with obj->mm.lock removal, v2 Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 029/162] drm/i915: Handle ww locking in init_status_page Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 030/162] drm/i915: Rework clflush to work correctly without obj->mm.lock Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 031/162] drm/i915: Pass ww ctx to intel_pin_to_display_plane Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 032/162] drm/i915: Add object locking to vm_fault_cpu Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 033/162] drm/i915: Move pinning to inside engine_wa_list_verify() Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 034/162] drm/i915: Take reservation lock around i915_vma_pin Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 035/162] drm/i915: Make intel_init_workaround_bb more compatible with ww locking Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 036/162] drm/i915: Make __engine_unpark() compatible with ww locking v2 Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 037/162] drm/i915: Take obj lock around set_domain ioctl Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 038/162] drm/i915: Defer pin calls in buffer pool until first use by caller Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 039/162] drm/i915: Fix pread/pwrite to work with new locking rules Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 040/162] drm/i915: Fix workarounds selftest, part 1 Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 041/162] drm/i915: Prepare for obj->mm.lock removal Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 042/162] drm/i915: Add igt_spinner_pin() to allow for ww locking around spinner Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 043/162] drm/i915: Add ww locking around vm_access() Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 044/162] drm/i915: Increase ww locking for perf Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 045/162] drm/i915: Lock ww in ucode objects correctly Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 046/162] drm/i915: Add ww locking to dma-buf ops Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 047/162] drm/i915: Add missing ww lock in intel_dsb_prepare Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 048/162] drm/i915: Fix ww locking in shmem_create_from_object Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 049/162] drm/i915: Use a single page table lock for each gtt Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 050/162] drm/i915/selftests: Prepare huge_pages testcases for obj->mm.lock removal Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 051/162] drm/i915/selftests: Prepare client blit " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 052/162] drm/i915/selftests: Prepare coherency tests " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 053/162] drm/i915/selftests: Prepare context " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 054/162] drm/i915/selftests: Prepare dma-buf " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 055/162] drm/i915/selftests: Prepare execbuf " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 056/162] drm/i915/selftests: Prepare mman testcases " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 057/162] drm/i915/selftests: Prepare object tests " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 058/162] drm/i915/selftests: Prepare object blit " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 059/162] drm/i915/selftests: Prepare igt_gem_utils " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 060/162] drm/i915/selftests: Prepare context selftest " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 061/162] drm/i915/selftests: Prepare hangcheck " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 062/162] drm/i915/selftests: Prepare execlists " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 063/162] drm/i915/selftests: Prepare mocs tests " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 064/162] drm/i915/selftests: Prepare ring submission " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 065/162] drm/i915/selftests: Prepare timeline tests " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 066/162] drm/i915/selftests: Prepare i915_request " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 067/162] drm/i915/selftests: Prepare memory region " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 068/162] drm/i915/selftests: Prepare cs engine " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 069/162] drm/i915/selftests: Prepare gtt " Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 070/162] drm/i915: Finally remove obj->mm.lock Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 071/162] drm/i915: Keep userpointer bindings if seqcount is unchanged, v2 Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 072/162] drm/i915: Avoid some false positives in assert_object_held() Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 073/162] drm/i915: Reference contending lock objects Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 074/162] drm/i915: Break out dma_resv ww locking utilities to separate files Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 075/162] drm/i915: Introduce a for_i915_gem_ww(){} Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 076/162] drm/i915: Untangle the vma pages_mutex Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 077/162] drm/i915/fbdev: Use lmem physical addresses for fb_mmap() on discrete Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 078/162] drm/i915: Return error value when bo not in LMEM for discrete Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 079/162] drm/i915/dmabuf: Disallow LMEM objects from dma-buf Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 080/162] drm/i915/lmem: Fail driver init if LMEM training failed Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 081/162] HAX drm/i915/lmem: support CPU relocations Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 082/162] HAX drm/i915/lmem: support pread and pwrite Matthew Auld
2020-11-27 12:05 ` [RFC PATCH 083/162] drm/i915: Update the helper to set correct mapping Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 084/162] drm/i915: introduce kernel blitter_context Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 085/162] drm/i915/region: support basic eviction Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 086/162] drm/i915: Add blit functions that can be called from within a WW transaction Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 087/162] drm/i915: Delay publishing objects on the eviction lists Matthew Auld
2020-11-27 12:06 ` Matthew Auld [this message]
2020-11-27 12:06 ` [RFC PATCH 089/162] drm/i915/dg1: Fix occasional migration error Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 090/162] drm/i915/query: Expose memory regions through the query uAPI Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 091/162] drm/i915: Store gt in memory region Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 092/162] drm/i915/uapi: introduce drm_i915_gem_create_ext Matthew Auld
2020-11-27 13:25   ` [Intel-gfx] " Chris Wilson
2020-12-01 15:06     ` Thomas Hellström (Intel)
2020-11-27 19:21   ` Chris Wilson
2020-12-01 12:55   ` Chris Wilson
2020-12-01 13:43     ` Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 093/162] drm/i915/lmem: allocate cmd ring in lmem Matthew Auld
2020-11-27 13:27   ` Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 094/162] drm/i915/dg1: Do not check r->sgt.pfn for NULL Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 095/162] drm/i915/dg1: Introduce dmabuf mmap to LMEM Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 096/162] drm/i915: setup the LMEM region Matthew Auld
2020-11-30 10:14   ` Jani Nikula
2020-11-27 12:06 ` [RFC PATCH 097/162] drm/i915: Distinction of memory regions Matthew Auld
2020-11-27 13:30   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 098/162] drm/i915/gtt: map the PD up front Matthew Auld
2020-11-27 13:31   ` Chris Wilson
2021-01-12 10:47     ` [Intel-gfx] " Matthew Auld
2021-01-12 14:33       ` Daniel Vetter
2020-11-27 12:06 ` [RFC PATCH 099/162] drm/i915/gtt/dgfx: place the PD in LMEM Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 100/162] drm/i915/gtt: make flushing conditional Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 101/162] drm/i915/gtt/dg1: add PTE_LM plumbing for PPGTT Matthew Auld
2020-11-27 13:35   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 102/162] drm/i915/gtt/dg1: add PTE_LM plumbing for GGTT Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 103/162] drm/i915: allocate context from LMEM Matthew Auld
2020-11-27 13:37   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 104/162] drm/i915: move engine scratch to LMEM Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 105/162] drm/i915: Provide a way to disable PCIe relaxed write ordering Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 106/162] drm/i915: i915 returns -EBUSY on thread contention Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 107/162] drm/i915: setup GPU device lmem region Matthew Auld
2020-11-30 11:18   ` Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 108/162] drm/i915: Fix object page offset within a region Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 109/162] drm/i915: add i915_gem_object_is_devmem() function Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 110/162] drm/i915: finish memory region support for stolen objects Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 111/162] drm/i915/lmem: support optional CPU clearing for special internal use Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 112/162] drm/i915/guc: put all guc objects in lmem when available Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 113/162] drm/i915: Create stolen memory region from local memory Matthew Auld
2020-12-07 13:39   ` [Intel-gfx] " Jani Nikula
2020-11-27 12:06 ` [RFC PATCH 114/162] drm/i915/lmem: Bypass aperture when lmem is available Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 115/162] drm/i915/lmem: reset the lmem buffer created by fbdev Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 116/162] drm/i915/dsb: Enable lmem for dsb Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 117/162] drm/i915: Reintroduce mem->reserved Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 118/162] drm/i915/dg1: Reserve first 1MB of local memory Matthew Auld
2020-11-27 13:52   ` [Intel-gfx] " Chris Wilson
2020-11-30 11:09     ` Matthew Auld
2020-11-30 11:22       ` Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 119/162] drm/i915/dg1: Read OPROM via SPI controller Matthew Auld
2020-11-30 10:16   ` [Intel-gfx] " Jani Nikula
2020-11-27 12:06 ` [RFC PATCH 120/162] drm/i915/oprom: Basic sanitization Matthew Auld
2020-11-30 10:24   ` [Intel-gfx] " Jani Nikula
2020-11-27 12:06 ` [RFC PATCH 121/162] drm/i915: WA for zero memory channel Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 122/162] drm/i915/dg1: Compute MEM Bandwidth using MCHBAR Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 123/162] drm/i915/dg1: Double memory bandwidth available Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 124/162] drm/i915/lmem: allocate HWSP in lmem Matthew Auld
2020-11-27 13:55   ` [Intel-gfx] " Chris Wilson
2020-11-30 17:17     ` Matthew Auld
2020-11-30 17:35       ` Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 125/162] drm/i915/lmem: Limit block size to 4G Matthew Auld
2020-11-27 14:02   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 126/162] drm/i915/gem: Update shmem available memory Matthew Auld
2020-11-27 14:04   ` Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 127/162] drm/i915: Allow non-uniform subslices in gen12+ Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 128/162] drm/i915/dg1: intel_memory_region_evict() changes for eviction Matthew Auld
2020-11-27 14:07   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 129/162] drm/i915/dg1: i915_gem_object_memcpy(..) infrastructure Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 130/162] drm/i915/dg1: Eviction logic Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 131/162] drm/i915/dg1: Add enable_eviction modparam Matthew Auld
2020-11-30 12:20   ` Jani Nikula
2020-11-27 12:06 ` [RFC PATCH 132/162] drm/i915/dg1: Add lmem_size modparam Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 133/162] drm/i915/dg1: Track swap in/out stats via debugfs Matthew Auld
2020-11-27 14:09   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 134/162] drm/i915/dg1: Measure swap in/out timing stats Matthew Auld
2020-11-27 14:11   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 135/162] drm/i915: define intel_partial_pages_for_sg_table Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 136/162] drm/i915: create and destroy dummy vma Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 137/162] drm/i915: blt copy between objs using pre-created vma windows Matthew Auld
2020-11-27 14:19   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 138/162] drm/i915/dg1: Eliminate eviction mutex Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 139/162] drm/i915/dg1: Keep engine awake across whole blit Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 140/162] drm/i915: window_blt_copy is used for swapin and swapout Matthew Auld
2020-11-27 14:20   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 141/162] drm/i915: Lmem eviction statistics by category Matthew Auld
2020-11-27 14:21   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:06 ` [RFC PATCH 142/162] drm/i915/gem/selftest: test and measure window based blt cpy Matthew Auld
2020-11-27 12:06 ` [RFC PATCH 143/162] drm/i915: suspend/resume eviction Matthew Auld
2020-11-27 14:22   ` Chris Wilson
2020-11-27 12:07 ` [RFC PATCH 144/162] drm/i915: Reset blitter context when unpark engine Matthew Auld
2020-11-27 14:26   ` Chris Wilson
2020-11-27 12:07 ` [RFC PATCH 145/162] drm/i915/dg1: Add dedicated context for blitter eviction Matthew Auld
2020-11-27 12:07 ` [RFC PATCH 146/162] drm/i915/pm: suspend and restore ppgtt mapping Matthew Auld
2020-11-27 14:29   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:07 ` [RFC PATCH 147/162] drm/i915/gt: Allocate default ctx objects in SMEM Matthew Auld
2020-11-27 14:30   ` Chris Wilson
2020-11-27 12:07 ` [RFC PATCH 148/162] drm/i915: suspend/resume enable blitter eviction Matthew Auld
2020-11-27 14:32   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:07 ` [RFC PATCH 149/162] drm/i915: suspend/resume handling of perma-pinned objects Matthew Auld
2020-11-27 12:07 ` [RFC PATCH 150/162] drm/i915: need consider system BO snoop for dgfx Matthew Auld
2020-11-27 14:36   ` Chris Wilson
2020-11-27 12:07 ` [RFC PATCH 151/162] drm/i915: move eviction to prepare hook Matthew Auld
2020-11-27 12:07 ` [RFC PATCH 152/162] drm/i915: Perform execbuffer object locking as a separate step Matthew Auld
2020-11-27 12:07 ` [RFC PATCH 153/162] drm/i915: Implement eviction locking v2 Matthew Auld
2020-11-27 12:07 ` [RFC PATCH 154/162] drm/i915: Support ww eviction Matthew Auld
2020-11-27 12:07 ` [RFC PATCH 155/162] drm/i915: Use a ww transaction in the fault handler Matthew Auld
2020-11-27 12:07 ` [RFC PATCH 156/162] drm/i915: Use a ww transaction in i915_gem_object_pin_map_unlocked() Matthew Auld
2020-11-27 12:07 ` [RFC PATCH 157/162] drm/i915: Improve accuracy of eviction stats Matthew Auld
2020-11-27 14:40   ` [Intel-gfx] " Chris Wilson
2020-11-30 10:36     ` Tvrtko Ursulin
2020-11-27 12:07 ` [RFC PATCH 158/162] drm/i915: Support ww locks in suspend/resume Matthew Auld
2020-11-27 12:07 ` [RFC PATCH 159/162] drm/i915/dg1: Fix mapping type for default state object Matthew Auld
2020-11-27 12:07 ` [RFC PATCH 160/162] drm/i915/dg1: Fix GPU hang due to shmemfs page drop Matthew Auld
2020-11-27 14:44   ` [Intel-gfx] " Chris Wilson
2020-11-27 12:07 ` [RFC PATCH 161/162] drm/i915/dg1: allow pci to auto probe Matthew Auld
2020-11-27 12:07 ` [RFC PATCH 162/162] drm/i915: drop fake lmem Matthew Auld
