From: Matthew Auld <matthew.auld@intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH v2 11/37] drm/i915/selftests: move gpu-write-dw into utils
Date: Thu, 27 Jun 2019 21:56:07 +0100
Message-ID: <20190627205633.1143-12-matthew.auld@intel.com>
In-Reply-To: <20190627205633.1143-1-matthew.auld@intel.com>

Using the GPU to write a dword to each page of an object is rather
useful, and we already have two copies of such a helper; we don't want
a third, so move it into the selftest utils. There is probably other
common selftest code that could follow.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
---
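
A quick usage sketch for context (hypothetical caller, not part of the
patch): igt_gpu_fill_dw() expects the vma to already be pinned, and it
emits an MI_STORE_DWORD_IMM batch that writes val at the given byte
offset within the first page, stepping by PAGE_SIZE for each of count
pages.

static int example_fill(struct i915_vma *vma,
			struct i915_gem_context *ctx,
			struct intel_engine_cs *engine)
{
	int err;

	/* The helper asserts that the vma is already pinned. */
	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	/* Write 0xdeadbeef into dword 0 of every page backing the vma. */
	err = igt_gpu_fill_dw(vma, ctx, engine,
			      0, /* starting offset within the first page */
			      vma->size >> PAGE_SHIFT, /* one store per page */
			      0xdeadbeef);

	i915_vma_unpin(vma);
	return err;
}
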
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 120 ++--------------
 .../drm/i915/gem/selftests/i915_gem_context.c | 134 ++---------------
 .../drm/i915/gem/selftests/igt_gem_utils.c    | 135 ++++++++++++++++++
 .../drm/i915/gem/selftests/igt_gem_utils.h    |  16 +++
 4 files changed, 169 insertions(+), 236 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index fd547b98ec69..1cdf98b7535e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -960,126 +960,22 @@ static int igt_mock_ppgtt_64K(void *arg)
 	return err;
 }
 
-static struct i915_vma *
-gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
-{
-	struct drm_i915_private *i915 = vma->vm->i915;
-	const int gen = INTEL_GEN(i915);
-	unsigned int count = vma->size >> PAGE_SHIFT;
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *batch;
-	unsigned int size;
-	u32 *cmd;
-	int n;
-	int err;
-
-	size = (1 + 4 * count) * sizeof(u32);
-	size = round_up(size, PAGE_SIZE);
-	obj = i915_gem_object_create_internal(i915, size);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
-	if (IS_ERR(cmd)) {
-		err = PTR_ERR(cmd);
-		goto err;
-	}
-
-	offset += vma->node.start;
-
-	for (n = 0; n < count; n++) {
-		if (gen >= 8) {
-			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
-			*cmd++ = lower_32_bits(offset);
-			*cmd++ = upper_32_bits(offset);
-			*cmd++ = val;
-		} else if (gen >= 4) {
-			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
-				(gen < 6 ? MI_USE_GGTT : 0);
-			*cmd++ = 0;
-			*cmd++ = offset;
-			*cmd++ = val;
-		} else {
-			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-			*cmd++ = offset;
-			*cmd++ = val;
-		}
-
-		offset += PAGE_SIZE;
-	}
-
-	*cmd = MI_BATCH_BUFFER_END;
-	intel_gt_chipset_flush(vma->vm->gt);
-
-	i915_gem_object_unpin_map(obj);
-
-	batch = i915_vma_instance(obj, vma->vm, NULL);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto err;
-	}
-
-	err = i915_vma_pin(batch, 0, 0, PIN_USER);
-	if (err)
-		goto err;
-
-	return batch;
-
-err:
-	i915_gem_object_put(obj);
-
-	return ERR_PTR(err);
-}
-
 static int gpu_write(struct i915_vma *vma,
 		     struct i915_gem_context *ctx,
 		     struct intel_engine_cs *engine,
-		     u32 dword,
-		     u32 value)
+		     u32 dw,
+		     u32 val)
 {
-	struct i915_request *rq;
-	struct i915_vma *batch;
 	int err;
 
-	GEM_BUG_ON(!intel_engine_can_store_dword(engine));
-
-	batch = gpu_write_dw(vma, dword * sizeof(u32), value);
-	if (IS_ERR(batch))
-		return PTR_ERR(batch);
-
-	rq = igt_request_alloc(ctx, engine);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto err_batch;
-	}
-
-	i915_vma_lock(batch);
-	err = i915_vma_move_to_active(batch, rq, 0);
-	i915_vma_unlock(batch);
-	if (err)
-		goto err_request;
-
-	i915_vma_lock(vma);
-	err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
-	if (err == 0)
-		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
+	i915_gem_object_lock(vma->obj);
+	err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
+	i915_gem_object_unlock(vma->obj);
 	if (err)
-		goto err_request;
-
-	err = engine->emit_bb_start(rq,
-				    batch->node.start, batch->node.size,
-				    0);
-err_request:
-	if (err)
-		i915_request_skip(rq, err);
-	i915_request_add(rq);
-err_batch:
-	i915_vma_unpin(batch);
-	i915_vma_close(batch);
-	i915_vma_put(batch);
+		return err;
 
-	return err;
+	return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
+			       vma->size >> PAGE_SHIFT, val);
 }
 
 static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 53c81b5dfd69..db666106c244 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -155,70 +155,6 @@ static int live_nop_switch(void *arg)
 	return err;
 }
 
-static struct i915_vma *
-gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
-{
-	struct drm_i915_gem_object *obj;
-	const int gen = INTEL_GEN(vma->vm->i915);
-	unsigned long n, size;
-	u32 *cmd;
-	int err;
-
-	size = (4 * count + 1) * sizeof(u32);
-	size = round_up(size, PAGE_SIZE);
-	obj = i915_gem_object_create_internal(vma->vm->i915, size);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
-	if (IS_ERR(cmd)) {
-		err = PTR_ERR(cmd);
-		goto err;
-	}
-
-	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
-	offset += vma->node.start;
-
-	for (n = 0; n < count; n++) {
-		if (gen >= 8) {
-			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
-			*cmd++ = lower_32_bits(offset);
-			*cmd++ = upper_32_bits(offset);
-			*cmd++ = value;
-		} else if (gen >= 4) {
-			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
-				(gen < 6 ? MI_USE_GGTT : 0);
-			*cmd++ = 0;
-			*cmd++ = offset;
-			*cmd++ = value;
-		} else {
-			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-			*cmd++ = offset;
-			*cmd++ = value;
-		}
-		offset += PAGE_SIZE;
-	}
-	*cmd = MI_BATCH_BUFFER_END;
-	i915_gem_object_flush_map(obj);
-	i915_gem_object_unpin_map(obj);
-
-	vma = i915_vma_instance(obj, vma->vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		goto err;
-
-	return vma;
-
-err:
-	i915_gem_object_put(obj);
-	return ERR_PTR(err);
-}
-
 static unsigned long real_page_count(struct drm_i915_gem_object *obj)
 {
 	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
@@ -235,10 +171,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 		    unsigned int dw)
 {
 	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
-	struct i915_request *rq;
 	struct i915_vma *vma;
-	struct i915_vma *batch;
-	unsigned int flags;
 	int err;
 
 	GEM_BUG_ON(obj->base.size > vm->total);
@@ -249,7 +182,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 		return PTR_ERR(vma);
 
 	i915_gem_object_lock(obj);
-	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	err = i915_gem_object_set_to_gtt_domain(obj, true);
 	i915_gem_object_unlock(obj);
 	if (err)
 		return err;
@@ -258,70 +191,23 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 	if (err)
 		return err;
 
-	/* Within the GTT the huge objects maps every page onto
+	/*
+	 * Within the GTT the huge objects maps every page onto
 	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
 	 * We set the nth dword within the page using the nth
 	 * mapping via the GTT - this should exercise the GTT mapping
 	 * whilst checking that each context provides a unique view
 	 * into the object.
 	 */
-	batch = gpu_fill_dw(vma,
-			    (dw * real_page_count(obj)) << PAGE_SHIFT |
-			    (dw * sizeof(u32)),
-			    real_page_count(obj),
-			    dw);
-	if (IS_ERR(batch)) {
-		err = PTR_ERR(batch);
-		goto err_vma;
-	}
-
-	rq = igt_request_alloc(ctx, engine);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto err_batch;
-	}
-
-	flags = 0;
-	if (INTEL_GEN(vm->i915) <= 5)
-		flags |= I915_DISPATCH_SECURE;
-
-	err = engine->emit_bb_start(rq,
-				    batch->node.start, batch->node.size,
-				    flags);
-	if (err)
-		goto err_request;
-
-	i915_vma_lock(batch);
-	err = i915_vma_move_to_active(batch, rq, 0);
-	i915_vma_unlock(batch);
-	if (err)
-		goto skip_request;
-
-	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
-	if (err)
-		goto skip_request;
-
-	i915_request_add(rq);
-
-	i915_vma_unpin(batch);
-	i915_vma_close(batch);
-	i915_vma_put(batch);
-
+	err = igt_gpu_fill_dw(vma,
+			      ctx,
+			      engine,
+			      (dw * real_page_count(obj)) << PAGE_SHIFT |
+			      (dw * sizeof(u32)),
+			      real_page_count(obj),
+			      dw);
 	i915_vma_unpin(vma);
 
-	return 0;
-
-skip_request:
-	i915_request_skip(rq, err);
-err_request:
-	i915_request_add(rq);
-err_batch:
-	i915_vma_unpin(batch);
-	i915_vma_put(batch);
-err_vma:
-	i915_vma_unpin(vma);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index b232e6d2cd92..dc47a6e2586c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -9,6 +9,8 @@
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
+#include "i915_vma.h"
+#include "i915_drv.h"
 
 #include "i915_request.h"
 
@@ -32,3 +34,136 @@ igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 
 	return rq;
 }
+
+struct i915_vma *
+igt_emit_store_dw(struct i915_vma *vma,
+		  u64 offset,
+		  unsigned long count,
+		  u32 val)
+{
+	struct drm_i915_gem_object *obj;
+	const int gen = INTEL_GEN(vma->vm->i915);
+	unsigned long n, size;
+	u32 *cmd;
+	int err;
+
+	size = (4 * count + 1) * sizeof(u32);
+	size = round_up(size, PAGE_SIZE);
+	obj = i915_gem_object_create_internal(vma->vm->i915, size);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
+
+	cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto err;
+	}
+
+	GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
+	offset += vma->node.start;
+
+	for (n = 0; n < count; n++) {
+		if (gen >= 8) {
+			*cmd++ = MI_STORE_DWORD_IMM_GEN4;
+			*cmd++ = lower_32_bits(offset);
+			*cmd++ = upper_32_bits(offset);
+			*cmd++ = val;
+		} else if (gen >= 4) {
+			*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+				(gen < 6 ? MI_USE_GGTT : 0);
+			*cmd++ = 0;
+			*cmd++ = offset;
+			*cmd++ = val;
+		} else {
+			*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+			*cmd++ = offset;
+			*cmd++ = val;
+		}
+		offset += PAGE_SIZE;
+	}
+	*cmd = MI_BATCH_BUFFER_END;
+	i915_gem_object_unpin_map(obj);
+
+	vma = i915_vma_instance(obj, vma->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER);
+	if (err)
+		goto err;
+
+	return vma;
+
+err:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
+}
+
+int igt_gpu_fill_dw(struct i915_vma *vma,
+		    struct i915_gem_context *ctx,
+		    struct intel_engine_cs *engine,
+		    u64 offset,
+		    unsigned long count,
+		    u32 val)
+{
+	struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
+	struct i915_request *rq;
+	struct i915_vma *batch;
+	unsigned int flags;
+	int err;
+
+	GEM_BUG_ON(vma->size > vm->total);
+	GEM_BUG_ON(!intel_engine_can_store_dword(engine));
+	GEM_BUG_ON(!i915_vma_is_pinned(vma));
+
+	batch = igt_emit_store_dw(vma, offset, count, val);
+	if (IS_ERR(batch))
+		return PTR_ERR(batch);
+
+	rq = igt_request_alloc(ctx, engine);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_batch;
+	}
+
+	flags = 0;
+	if (INTEL_GEN(vm->i915) <= 5)
+		flags |= I915_DISPATCH_SECURE;
+
+	err = engine->emit_bb_start(rq,
+				    batch->node.start, batch->node.size,
+				    flags);
+	if (err)
+		goto err_request;
+
+	i915_vma_lock(batch);
+	err = i915_vma_move_to_active(batch, rq, 0);
+	i915_vma_unlock(batch);
+	if (err)
+		goto skip_request;
+
+	i915_vma_lock(vma);
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unlock(vma);
+	if (err)
+		goto skip_request;
+
+	i915_request_add(rq);
+
+	i915_vma_unpin(batch);
+	i915_vma_close(batch);
+	i915_vma_put(batch);
+
+	return 0;
+
+skip_request:
+	i915_request_skip(rq, err);
+err_request:
+	i915_request_add(rq);
+err_batch:
+	i915_vma_unpin(batch);
+	i915_vma_put(batch);
+	return err;
+}
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 0f17251cf75d..361a7ef866b0 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -7,11 +7,27 @@
 #ifndef __IGT_GEM_UTILS_H__
 #define __IGT_GEM_UTILS_H__
 
+#include <linux/types.h>
+
 struct i915_request;
 struct i915_gem_context;
 struct intel_engine_cs;
+struct i915_vma;
 
 struct i915_request *
 igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
 
+struct i915_vma *
+igt_emit_store_dw(struct i915_vma *vma,
+		  u64 offset,
+		  unsigned long count,
+		  u32 val);
+
+int igt_gpu_fill_dw(struct i915_vma *vma,
+		    struct i915_gem_context *ctx,
+		    struct intel_engine_cs *engine,
+		    u64 offset,
+		    unsigned long count,
+		    u32 val);
+
 #endif /* __IGT_GEM_UTILS_H__ */
-- 
2.20.1
