* [PATCH 1/4] drm/i915/selftests: Test vm isolation
@ 2018-10-29 10:59 Chris Wilson
  2018-10-29 10:59 ` [PATCH 2/4] drm/i915/gtt: Record the scratch pte Chris Wilson
                   ` (9 more replies)
  0 siblings, 10 replies; 15+ messages in thread
From: Chris Wilson @ 2018-10-29 10:59 UTC
  To: intel-gfx; +Cc: Matthew Auld

The vm of two contexts are supposed to be independent, such that a stray
write by one cannot be detected by another. Normally the GTT is filled
explicitly by userspace, but the space in between objects is filled with
a scratch page -- and that scratch page should not be able to form an
inter-context backchannel.
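
In outline, for each engine the test repeatedly does the following (a
condensed sketch of the loop added below; write_to_scratch() and
read_from_scratch() are the helpers introduced by this patch):

	u32 value = 0xc5c5c5c5;
	u64 offset; /* random dword-aligned offset within the ppgtt */

	err = write_to_scratch(ctx_a, engine, offset, 0xdeadbeef);
	if (err == 0)
		err = read_from_scratch(ctx_b, engine, offset, &value);
	if (err == 0 && value) /* ctx_b observed ctx_a's write */
		err = -EINVAL;

As both offsets lie in unpopulated space, each context resolves them to
its scratch PTE; any non-zero readback means the scratch page forms a
writable channel between the two contexts.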

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 .../gpu/drm/i915/selftests/i915_gem_context.c | 318 ++++++++++++++++++
 1 file changed, 318 insertions(+)

diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 1be3b67a7c48..067303446a1c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -760,6 +760,323 @@ static int igt_ctx_readonly(void *arg)
 	return err;
 }
 
+static int check_scratch(struct i915_gem_context *ctx, u64 offset)
+{
+	struct drm_mm_node *node =
+		__drm_mm_interval_first(&ctx->ppgtt->vm.mm,
+					offset, offset + sizeof(u32) - 1);
+	if (!node || node->start > offset)
+		return 0;
+
+	GEM_BUG_ON(offset >= node->start + node->size);
+
+	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
+	       upper_32_bits(offset), lower_32_bits(offset));
+	return -EINVAL;
+}
+
+static int write_to_scratch(struct i915_gem_context *ctx,
+			    struct intel_engine_cs *engine,
+			    u64 offset, u32 value)
+{
+	struct drm_i915_private *i915 = ctx->i915;
+	struct drm_i915_gem_object *obj;
+	struct i915_request *rq;
+	struct i915_vma *vma;
+	u32 *cmd;
+	int err;
+
+	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+
+	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto err;
+	}
+
+	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
+	if (INTEL_GEN(i915) >= 8) {
+		*cmd++ = lower_32_bits(offset);
+		*cmd++ = upper_32_bits(offset);
+	} else {
+		*cmd++ = 0;
+		*cmd++ = offset;
+	}
+	*cmd++ = value;
+	*cmd = MI_BATCH_BUFFER_END;
+	i915_gem_object_unpin_map(obj);
+
+	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (err)
+		goto err;
+
+	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+	if (err)
+		goto err;
+
+	err = check_scratch(ctx, offset);
+	if (err)
+		goto err_unpin;
+
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_unpin;
+	}
+
+	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+	if (err)
+		goto err_request;
+
+	err = i915_vma_move_to_active(vma, rq, 0);
+	if (err)
+		goto skip_request;
+
+	i915_gem_object_set_active_reference(obj);
+	i915_vma_unpin(vma);
+	i915_vma_close(vma);
+
+	i915_request_add(rq);
+
+	return 0;
+
+skip_request:
+	i915_request_skip(rq, err);
+err_request:
+	i915_request_add(rq);
+err_unpin:
+	i915_vma_unpin(vma);
+err:
+	i915_gem_object_put(obj);
+	return err;
+}
+
+static int read_from_scratch(struct i915_gem_context *ctx,
+			     struct intel_engine_cs *engine,
+			     u64 offset, u32 *value)
+{
+	struct drm_i915_private *i915 = ctx->i915;
+	struct drm_i915_gem_object *obj;
+	struct i915_request *rq;
+	struct i915_vma *vma;
+	const u32 RCS_GPR0 = 0x2600;
+	const u32 result = 0x100;
+	u32 *cmd;
+	int err;
+
+	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+
+	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto err;
+	}
+
+	memset(cmd, POISON_INUSE, PAGE_SIZE);
+	if (INTEL_GEN(i915) >= 8) {
+		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
+		*cmd++ = RCS_GPR0;
+		*cmd++ = lower_32_bits(offset);
+		*cmd++ = upper_32_bits(offset);
+		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
+		*cmd++ = RCS_GPR0;
+		*cmd++ = result;
+		*cmd++ = 0;
+	} else {
+		*cmd++ = MI_LOAD_REGISTER_MEM;
+		*cmd++ = RCS_GPR0;
+		*cmd++ = offset;
+		*cmd++ = MI_STORE_REGISTER_MEM;
+		*cmd++ = RCS_GPR0;
+		*cmd++ = result;
+	}
+	*cmd = MI_BATCH_BUFFER_END;
+	i915_gem_object_unpin_map(obj);
+
+	err = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (err)
+		goto err;
+
+	vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err;
+	}
+
+	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+	if (err)
+		goto err;
+
+	err = check_scratch(ctx, offset);
+	if (err)
+		goto err_unpin;
+
+	rq = i915_request_alloc(engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto err_unpin;
+	}
+
+	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+	if (err)
+		goto err_request;
+
+	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	if (err)
+		goto skip_request;
+
+	i915_vma_unpin(vma);
+	i915_vma_close(vma);
+
+	i915_request_add(rq);
+
+	err = i915_gem_object_set_to_cpu_domain(obj, false);
+	if (err)
+		goto err;
+
+	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto err;
+	}
+
+	*value = cmd[result / sizeof(*cmd)];
+	i915_gem_object_unpin_map(obj);
+	i915_gem_object_put(obj);
+
+	return 0;
+
+skip_request:
+	i915_request_skip(rq, err);
+err_request:
+	i915_request_add(rq);
+err_unpin:
+	i915_vma_unpin(vma);
+err:
+	i915_gem_object_put(obj);
+	return err;
+}
+
+static int igt_vm_isolation(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct i915_gem_context *ctx_a, *ctx_b;
+	struct intel_engine_cs *engine;
+	struct drm_file *file;
+	I915_RND_STATE(prng);
+	unsigned long count;
+	struct live_test t;
+	unsigned int id;
+	u64 vm_total;
+	int err;
+
+	if (INTEL_GEN(i915) < 7)
+		return 0;
+
+	/*
+	 * The simple goal here is that a write into one context is not
+	 * observed in a second (separate page tables and scratch).
+	 */
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	err = begin_live_test(&t, i915, __func__, "");
+	if (err)
+		goto out_unlock;
+
+	ctx_a = i915_gem_create_context(i915, file->driver_priv);
+	if (IS_ERR(ctx_a)) {
+		err = PTR_ERR(ctx_a);
+		goto out_unlock;
+	}
+
+	ctx_b = i915_gem_create_context(i915, file->driver_priv);
+	if (IS_ERR(ctx_b)) {
+		err = PTR_ERR(ctx_b);
+		goto out_unlock;
+	}
+
+	/* We can only test vm isolation if the vm are distinct */
+	if (ctx_a->ppgtt == ctx_b->ppgtt)
+		goto out_unlock;
+
+	vm_total = ctx_a->ppgtt->vm.total;
+	GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
+	vm_total -= I915_GTT_PAGE_SIZE;
+
+	intel_runtime_pm_get(i915);
+
+	count = 0;
+	for_each_engine(engine, i915, id) {
+		IGT_TIMEOUT(end_time);
+		unsigned long this = 0;
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		while (!__igt_timeout(end_time, NULL)) {
+			u32 value = 0xc5c5c5c5;
+			u64 offset;
+
+			div64_u64_rem(i915_prandom_u64_state(&prng),
+				      vm_total, &offset);
+			offset &= -sizeof(u32); /* align down to a dword */
+			offset += I915_GTT_PAGE_SIZE;
+
+			err = write_to_scratch(ctx_a, engine,
+					       offset, 0xdeadbeef);
+			if (err == 0)
+				err = read_from_scratch(ctx_b, engine,
+							offset, &value);
+			if (err)
+				goto out_rpm;
+
+			if (value) {
+				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
+				       engine->name, value,
+				       upper_32_bits(offset),
+				       lower_32_bits(offset),
+				       this);
+				err = -EINVAL;
+				goto out_rpm;
+			}
+
+			this++;
+		}
+		count += this;
+	}
+	pr_info("Checked %lu scratch offsets across %d engines\n",
+		count, INTEL_INFO(i915)->num_rings);
+
+out_rpm:
+	intel_runtime_pm_put(i915);
+out_unlock:
+	if (end_live_test(&t))
+		err = -EIO;
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	mock_file_free(i915, file);
+	return err;
+}
+
 static __maybe_unused const char *
 __engine_name(struct drm_i915_private *i915, unsigned int engines)
 {
@@ -915,6 +1232,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
 		SUBTEST(live_nop_switch),
 		SUBTEST(igt_ctx_exec),
 		SUBTEST(igt_ctx_readonly),
+		SUBTEST(igt_vm_isolation),
 	};
 
 	if (i915_terminally_wedged(&dev_priv->gpu_error))
-- 
2.19.1


* [PATCH 2/4] drm/i915/gtt: Record the scratch pte
  2018-10-29 10:59 [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
@ 2018-10-29 10:59 ` Chris Wilson
  2018-10-29 12:50   ` Matthew Auld
  2018-10-29 13:06   ` [PATCH v2] " Chris Wilson
  2018-10-29 10:59 ` [PATCH 3/4] drm/i915/gtt: Reuse the read-only 64KiB scratch page and directories Chris Wilson
                   ` (8 subsequent siblings)
  9 siblings, 2 replies; 15+ messages in thread
From: Chris Wilson @ 2018-10-29 10:59 UTC
  To: intel-gfx; +Cc: Matthew Auld

Record the scratch PTE encoding upon creation rather than recomputing
the bits every time. This is important for the next patch, where we
forgo having a valid scratch page from which to compute the bits and so
must keep the PTE value instead.
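
The gist of the change, condensed from the gen8 hunks below:

	/* Before: every user re-derived the PTE from the scratch page. */
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);

	/* After: encode once in gen8_init_scratch() and reuse it. */
	vm->scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr,
				I915_CACHE_LLC,
				PTE_READ_ONLY);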

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 83 +++++++++++++++--------------
 drivers/gpu/drm/i915/i915_gem_gtt.h | 11 ++--
 2 files changed, 49 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 19b2d991b5d8..a640f8e39511 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -186,9 +186,9 @@ static void clear_pages(struct i915_vma *vma)
 	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
 }
 
-static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
-				  enum i915_cache_level level,
-				  u32 flags)
+static u64 gen8_pte_encode(dma_addr_t addr,
+			   enum i915_cache_level level,
+			   u32 flags)
 {
 	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
 
@@ -225,9 +225,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
 #define gen8_pdpe_encode gen8_pde_encode
 #define gen8_pml4e_encode gen8_pde_encode
 
-static gen6_pte_t snb_pte_encode(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 unused)
+static u64 snb_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -247,9 +247,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 unused)
+static u64 ivb_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -271,9 +271,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen6_pte_t byt_pte_encode(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 flags)
+static u64 byt_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -287,9 +287,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 unused)
+static u64 hsw_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -300,9 +300,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen6_pte_t iris_pte_encode(dma_addr_t addr,
-				  enum i915_cache_level level,
-				  u32 unused)
+static u64 iris_pte_encode(dma_addr_t addr,
+			   enum i915_cache_level level,
+			   u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -666,14 +666,13 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
 static void gen8_initialize_pt(struct i915_address_space *vm,
 			       struct i915_page_table *pt)
 {
-	fill_px(vm, pt,
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+	fill_px(vm, pt, vm->scratch_pte);
 }
 
-static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
+static void gen6_initialize_pt(struct i915_address_space *vm,
 			       struct i915_page_table *pt)
 {
-	fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
+	fill32_px(vm, pt, vm->scratch_pte);
 }
 
 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
@@ -807,15 +806,13 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
 /* Removes entries from a single page table, releasing it if it's empty.
  * Caller can use the return value to update higher-level entries.
  */
-static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
 				struct i915_page_table *pt,
 				u64 start, u64 length)
 {
 	unsigned int num_entries = gen8_pte_count(start, length);
 	unsigned int pte = gen8_pte_index(start);
 	unsigned int pte_end = pte + num_entries;
-	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
 	gen8_pte_t *vaddr;
 
 	GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -826,7 +823,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
 
 	vaddr = kmap_atomic_px(pt);
 	while (pte < pte_end)
-		vaddr[pte++] = scratch_pte;
+		vaddr[pte++] = vm->scratch_pte;
 	kunmap_atomic(vaddr);
 
 	return false;
@@ -1216,6 +1213,11 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 	if (ret)
 		return ret;
 
+	vm->scratch_pte =
+		gen8_pte_encode(vm->scratch_page.daddr,
+				I915_CACHE_LLC,
+				PTE_READ_ONLY);
+
 	vm->scratch_pt = alloc_pt(vm);
 	if (IS_ERR(vm->scratch_pt)) {
 		ret = PTR_ERR(vm->scratch_pt);
@@ -1524,8 +1526,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
 	struct i915_address_space *vm = &ppgtt->vm;
-	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+	const gen8_pte_t scratch_pte = vm->scratch_pte;
 	u64 start = 0, length = ppgtt->vm.total;
 
 	if (use_4lvl(vm)) {
@@ -1672,7 +1673,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
 {
 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
-	const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+	const gen6_pte_t scratch_pte = base->vm.scratch_pte;
 	struct i915_page_table *pt;
 	u32 pte, pde;
 
@@ -1785,7 +1786,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 	unsigned int pde = first_entry / GEN6_PTES;
 	unsigned int pte = first_entry % GEN6_PTES;
 	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
-	const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+	const gen6_pte_t scratch_pte = vm->scratch_pte;
 
 	while (num_entries) {
 		struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
@@ -1876,7 +1877,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 			if (IS_ERR(pt))
 				goto unwind_out;
 
-			gen6_initialize_pt(ppgtt, pt);
+			gen6_initialize_pt(vm, pt);
 			ppgtt->base.pd.page_table[pde] = pt;
 
 			if (i915_vma_is_bound(ppgtt->vma,
@@ -1914,9 +1915,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
 	if (ret)
 		return ret;
 
-	ppgtt->scratch_pte =
-		vm->pte_encode(vm->scratch_page.daddr,
-			       I915_CACHE_NONE, PTE_READ_ONLY);
+	vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
+					 I915_CACHE_NONE,
+					 PTE_READ_ONLY);
 
 	vm->scratch_pt = alloc_pt(vm);
 	if (IS_ERR(vm->scratch_pt)) {
@@ -1924,7 +1925,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
 		return PTR_ERR(vm->scratch_pt);
 	}
 
-	gen6_initialize_pt(ppgtt, vm->scratch_pt);
+	gen6_initialize_pt(vm, vm->scratch_pt);
 	gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
 		ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
 
@@ -2469,8 +2470,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
 	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
-	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+	const gen8_pte_t scratch_pte = vm->scratch_pte;
 	gen8_pte_t __iomem *gtt_base =
 		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2595,8 +2595,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
-				     I915_CACHE_LLC, 0);
+	scratch_pte = vm->scratch_pte;
 
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
@@ -3002,6 +3001,10 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 		return ret;
 	}
 
+	ggtt->vm.scratch_pte =
+		ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
+				    I915_CACHE_NONE, 0);
+
 	return 0;
 }
 
@@ -3348,6 +3351,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
 	ggtt->vm.vma_ops.clear_pages = clear_pages;
 
+	ggtt->vm.pte_encode = gen8_pte_encode;
+
 	setup_private_pat(dev_priv);
 
 	return ggtt_probe_common(ggtt, size);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 5d2c5ba55ad8..a030299b79cd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -289,6 +289,7 @@ struct i915_address_space {
 
 	struct mutex mutex; /* protects vma and our lists */
 
+	u64 scratch_pte;
 	struct i915_page_dma scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
@@ -335,12 +336,11 @@ struct i915_address_space {
 	/* Some systems support read-only mappings for GGTT and/or PPGTT */
 	bool has_read_only:1;
 
-	/* FIXME: Need a more generic return type */
-	gen6_pte_t (*pte_encode)(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 flags); /* Create a valid PTE */
-	/* flags for pte_encode */
+	u64 (*pte_encode)(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags); /* Create a valid PTE */
 #define PTE_READ_ONLY	(1<<0)
+
 	int (*allocate_va_range)(struct i915_address_space *vm,
 				 u64 start, u64 length);
 	void (*clear_range)(struct i915_address_space *vm,
@@ -422,7 +422,6 @@ struct gen6_hw_ppgtt {
 
 	struct i915_vma *vma;
 	gen6_pte_t __iomem *pd_addr;
-	gen6_pte_t scratch_pte;
 
 	unsigned int pin_count;
 	bool scan_for_unused_pt;
-- 
2.19.1


* [PATCH 3/4] drm/i915/gtt: Reuse the read-only 64KiB scratch page and directories
  2018-10-29 10:59 [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
  2018-10-29 10:59 ` [PATCH 2/4] drm/i915/gtt: Record the scratch pte Chris Wilson
@ 2018-10-29 10:59 ` Chris Wilson
  2018-10-29 12:50   ` Matthew Auld
  2018-10-29 10:59 ` [PATCH 4/4] drm/i915/userptr: Avoid struct_mutex recursion for mmu_invalidate_range_start Chris Wilson
                   ` (7 subsequent siblings)
  9 siblings, 1 reply; 15+ messages in thread
From: Chris Wilson @ 2018-10-29 10:59 UTC
  To: intel-gfx; +Cc: Matthew Auld

If we can prevent stray writes from landing in the scratch page, we can
reuse the same page and same scratch PT for all contexts without fear of
information leaks and side-channels.
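
The sharing is a straight copy of the kernel context's scratch state at
init time, with freeing skipped for such clones (condensed from the two
hunks below):

	/* gen8_init_scratch(): borrow the kernel context's scratch */
	vm->scratch_pte = clone->scratch_pte;
	vm->scratch_pt  = clone->scratch_pt;
	vm->scratch_pd  = clone->scratch_pd;
	vm->scratch_pdp = clone->scratch_pdp;

	/* gen8_free_scratch(): a clone never allocated its own page */
	if (!vm->scratch_page.daddr)
		return;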

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 30 +++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a640f8e39511..cd8454e90e38 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -580,10 +580,9 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 	 * region, including any PTEs which happen to point to scratch.
 	 *
 	 * This is only relevant for the 48b PPGTT where we support
-	 * huge-gtt-pages, see also i915_vma_insert().
-	 *
-	 * TODO: we should really consider write-protecting the scratch-page and
-	 * sharing between ppgtt
+	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
+	 * scratch (read-only) between all vm, we create one 64k scratch page
+	 * for all.
 	 */
 	size = I915_GTT_PAGE_SIZE_4K;
 	if (i915_vm_is_48bit(vm) &&
@@ -1209,6 +1208,26 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 {
 	int ret;
 
+	/*
+	 * If everybody agrees not to write into the scratch page,
+	 * we can reuse it for all vm, keeping contexts and processes separate.
+	 */
+	if (vm->has_read_only &&
+	    vm->i915->kernel_context &&
+	    vm->i915->kernel_context->ppgtt) {
+		struct i915_address_space *clone =
+			&vm->i915->kernel_context->ppgtt->vm;
+
+		GEM_BUG_ON(!clone->has_read_only);
+
+		vm->scratch_page.order = clone->scratch_page.order;
+		vm->scratch_pte = clone->scratch_pte;
+		vm->scratch_pt  = clone->scratch_pt;
+		vm->scratch_pd  = clone->scratch_pd;
+		vm->scratch_pdp = clone->scratch_pdp;
+		return 0;
+	}
+
 	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
 	if (ret)
 		return ret;
@@ -1289,6 +1308,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 
 static void gen8_free_scratch(struct i915_address_space *vm)
 {
+	if (!vm->scratch_page.daddr)
+		return;
+
 	if (use_4lvl(vm))
 		free_pdp(vm, vm->scratch_pdp);
 	free_pd(vm, vm->scratch_pd);
-- 
2.19.1


* [PATCH 4/4] drm/i915/userptr: Avoid struct_mutex recursion for mmu_invalidate_range_start
  2018-10-29 10:59 [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
  2018-10-29 10:59 ` [PATCH 2/4] drm/i915/gtt: Record the scratch pte Chris Wilson
  2018-10-29 10:59 ` [PATCH 3/4] drm/i915/gtt: Reuse the read-only 64KiB scratch page and directories Chris Wilson
@ 2018-10-29 10:59 ` Chris Wilson
  2018-10-29 11:02 ` [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 15+ messages in thread
From: Chris Wilson @ 2018-10-29 10:59 UTC
  To: intel-gfx

Since commit 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu
notifiers") we have been able to report failure from
mmu_invalidate_range_start, which allows us to use a trylock on the
struct_mutex to avoid potential recursion and report -EBUSY instead.
Furthermore, this allows us to pull the work into the main callback and
avoid the sleight-of-hand of using a workqueue to appease lockdep.

However, not all paths to mmu_invalidate_range_start are prepared to
handle failure, so instead of reporting the recursion, deal with it.
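
The heart of the recursion handling is the trylock helper (condensed
from the diff below; callers skip the final mutex_unlock() when the
ERR_PTR is returned):

	switch (mutex_trylock_recursive(m)) {
	case MUTEX_TRYLOCK_FAILED:
		mutex_lock(m); /* not held by us, so block */
		/* fall through */
	case MUTEX_TRYLOCK_SUCCESS:
		return m; /* we took it; caller must unlock */
	case MUTEX_TRYLOCK_RECURSIVE:
		return ERR_PTR(-EEXIST); /* ours already; don't unlock */
	}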

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=108375
References: 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h         |   4 +-
 drivers/gpu/drm/i915/i915_gem.c         |  18 +-
 drivers/gpu/drm/i915/i915_gem_object.h  |   7 +
 drivers/gpu/drm/i915/i915_gem_userptr.c | 217 +++++++++++-------------
 4 files changed, 120 insertions(+), 126 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2d7761b8ac07..c3b94c3f930f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3076,8 +3076,8 @@ enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
 	I915_MM_SHRINKER
 };
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-				 enum i915_mm_subclass subclass);
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				enum i915_mm_subclass subclass);
 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
 
 enum i915_map_type {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 93d09282710d..9a8af9454a53 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2429,8 +2429,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 	struct sg_table *pages;
 
 	pages = fetch_and_zero(&obj->mm.pages);
-	if (!pages)
-		return NULL;
+	if (IS_ERR_OR_NULL(pages))
+		return pages;
 
 	spin_lock(&i915->mm.obj_lock);
 	list_del(&obj->mm.link);
@@ -2454,17 +2454,16 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 	return pages;
 }
 
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
-				 enum i915_mm_subclass subclass)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+				enum i915_mm_subclass subclass)
 {
 	struct sg_table *pages;
+	int ret = -EBUSY;
 
 	if (i915_gem_object_has_pinned_pages(obj))
-		return;
+		return -EBUSY;
 
 	GEM_BUG_ON(obj->bind_count);
-	if (!i915_gem_object_has_pages(obj))
-		return;
 
 	/* May be called by shrinker from within get_pages() (on another bo) */
 	mutex_lock_nested(&obj->mm.lock, subclass);
@@ -2477,11 +2476,16 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	 * lists early.
 	 */
 	pages = __i915_gem_object_unset_pages(obj);
+	if (!pages && !i915_gem_object_needs_async_cancel(obj))
+		pages = ERR_PTR(-EINVAL);
 	if (!IS_ERR(pages))
 		obj->ops->put_pages(obj, pages);
 
+	ret = 0;
 unlock:
 	mutex_unlock(&obj->mm.lock);
+
+	return ret;
 }
 
 bool i915_sg_trim(struct sg_table *orig_st)
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index a6dd7c46de0d..49ce797173b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -56,6 +56,7 @@ struct drm_i915_gem_object_ops {
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE	BIT(0)
 #define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(1)
 #define I915_GEM_OBJECT_IS_PROXY	BIT(2)
+#define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(3)
 
 	/* Interface between the GEM object and its backing storage.
 	 * get_pages() is called once prior to the use of the associated set
@@ -386,6 +387,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
 	return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
 }
 
+static inline bool
+i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
+{
+	return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
+}
+
 static inline bool
 i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 2c9b284036d1..ab5ae426e27b 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -50,79 +50,84 @@ struct i915_mmu_notifier {
 	struct hlist_node node;
 	struct mmu_notifier mn;
 	struct rb_root_cached objects;
-	struct workqueue_struct *wq;
+	struct i915_mm_struct *mm;
 };
 
 struct i915_mmu_object {
 	struct i915_mmu_notifier *mn;
 	struct drm_i915_gem_object *obj;
 	struct interval_tree_node it;
-	struct list_head link;
-	struct work_struct work;
-	bool attached;
 };
 
-static void cancel_userptr(struct work_struct *work)
-{
-	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
-	struct drm_i915_gem_object *obj = mo->obj;
-	struct work_struct *active;
-
-	/* Cancel any active worker and force us to re-evaluate gup */
-	mutex_lock(&obj->mm.lock);
-	active = fetch_and_zero(&obj->userptr.work);
-	mutex_unlock(&obj->mm.lock);
-	if (active)
-		goto out;
-
-	i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
-
-	mutex_lock(&obj->base.dev->struct_mutex);
-
-	/* We are inside a kthread context and can't be interrupted */
-	if (i915_gem_object_unbind(obj) == 0)
-		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-	WARN_ONCE(i915_gem_object_has_pages(obj),
-		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
-		  obj->bind_count,
-		  atomic_read(&obj->mm.pages_pin_count),
-		  obj->pin_global);
-
-	mutex_unlock(&obj->base.dev->struct_mutex);
-
-out:
-	i915_gem_object_put(obj);
-}
-
 static void add_object(struct i915_mmu_object *mo)
 {
-	if (mo->attached)
+	if (!RB_EMPTY_NODE(&mo->it.rb))
 		return;
 
 	interval_tree_insert(&mo->it, &mo->mn->objects);
-	mo->attached = true;
 }
 
 static void del_object(struct i915_mmu_object *mo)
 {
-	if (!mo->attached)
+	if (RB_EMPTY_NODE(&mo->it.rb))
 		return;
 
 	interval_tree_remove(&mo->it, &mo->mn->objects);
-	mo->attached = false;
+	RB_CLEAR_NODE(&mo->it.rb);
+}
+
+static void
+__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
+{
+	struct i915_mmu_object *mo = obj->userptr.mmu_object;
+
+	/*
+	 * During mm_invalidate_range we need to cancel any userptr that
+	 * overlaps the range being invalidated. Doing so requires the
+	 * struct_mutex, and that risks recursion. In order to cause
+	 * recursion, the user must alias the userptr address space with
+	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
+	 * to invalidate that mmapping, mm_invalidate_range is called with
+	 * the userptr address *and* the struct_mutex held.  To prevent that
+	 * we set a flag under the i915_mmu_notifier spinlock to indicate
+	 * whether this object is valid.
+	 */
+	if (!mo)
+		return;
+
+	spin_lock(&mo->mn->lock);
+	if (value)
+		add_object(mo);
+	else
+		del_object(mo);
+	spin_unlock(&mo->mn->lock);
+}
+
+static struct mutex *__i915_mutex_lock_recursive(struct mutex *m)
+{
+	switch (mutex_trylock_recursive(m)) {
+	default:
+	case MUTEX_TRYLOCK_FAILED:
+		mutex_lock(m); /* fall through */
+	case MUTEX_TRYLOCK_SUCCESS:
+		return m;
+
+	case MUTEX_TRYLOCK_RECURSIVE:
+		return ERR_PTR(-EEXIST);
+	}
 }
 
 static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
-						       struct mm_struct *mm,
-						       unsigned long start,
-						       unsigned long end,
-						       bool blockable)
+						      struct mm_struct *mm,
+						      unsigned long start,
+						      unsigned long end,
+						      bool blockable)
 {
 	struct i915_mmu_notifier *mn =
 		container_of(_mn, struct i915_mmu_notifier, mn);
-	struct i915_mmu_object *mo;
 	struct interval_tree_node *it;
-	LIST_HEAD(cancelled);
+	struct mutex *unlock = NULL;
+	int ret = 0;
 
 	if (RB_EMPTY_ROOT(&mn->objects.rb_root))
 		return 0;
@@ -133,11 +138,15 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	spin_lock(&mn->lock);
 	it = interval_tree_iter_first(&mn->objects, start, end);
 	while (it) {
+		struct drm_i915_gem_object *obj;
+
 		if (!blockable) {
-			spin_unlock(&mn->lock);
-			return -EAGAIN;
+			ret = -EAGAIN;
+			break;
 		}
-		/* The mmu_object is released late when destroying the
+
+		/*
+		 * The mmu_object is released late when destroying the
 		 * GEM object so it is entirely possible to gain a
 		 * reference on an object in the process of being freed
 		 * since our serialisation is via the spinlock and not
@@ -146,21 +155,33 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 		 * use-after-free we only acquire a reference on the
 		 * object if it is not in the process of being destroyed.
 		 */
-		mo = container_of(it, struct i915_mmu_object, it);
-		if (kref_get_unless_zero(&mo->obj->base.refcount))
-			queue_work(mn->wq, &mo->work);
-
-		list_add(&mo->link, &cancelled);
-		it = interval_tree_iter_next(it, start, end);
+		obj = container_of(it, struct i915_mmu_object, it)->obj;
+		if (!kref_get_unless_zero(&obj->base.refcount)) {
+			it = interval_tree_iter_next(it, start, end);
+			continue;
+		}
+		spin_unlock(&mn->lock);
+
+		if (!unlock)
+			unlock = __i915_mutex_lock_recursive(&mn->mm->i915->drm.struct_mutex);
+		ret = i915_gem_object_unbind(obj);
+		if (ret == 0)
+			ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+		i915_gem_object_put(obj);
+		if (ret)
+			goto unlock;
+
+		spin_lock(&mn->lock);
+		it = interval_tree_iter_first(&mn->objects, start, end);
 	}
-	list_for_each_entry(mo, &cancelled, link)
-		del_object(mo);
 	spin_unlock(&mn->lock);
 
-	if (!list_empty(&cancelled))
-		flush_workqueue(mn->wq);
+unlock:
+	if (!IS_ERR_OR_NULL(unlock))
+		mutex_unlock(unlock);
+
+	return ret;
 
-	return 0;
 }
 
 static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -168,7 +189,7 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
 };
 
 static struct i915_mmu_notifier *
-i915_mmu_notifier_create(struct mm_struct *mm)
+i915_mmu_notifier_create(struct i915_mm_struct *mm)
 {
 	struct i915_mmu_notifier *mn;
 
@@ -179,13 +200,7 @@ i915_mmu_notifier_create(struct mm_struct *mm)
 	spin_lock_init(&mn->lock);
 	mn->mn.ops = &i915_gem_userptr_notifier;
 	mn->objects = RB_ROOT_CACHED;
-	mn->wq = alloc_workqueue("i915-userptr-release",
-				 WQ_UNBOUND | WQ_MEM_RECLAIM,
-				 0);
-	if (mn->wq == NULL) {
-		kfree(mn);
-		return ERR_PTR(-ENOMEM);
-	}
+	mn->mm = mm;
 
 	return mn;
 }
@@ -195,16 +210,14 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
 	struct i915_mmu_object *mo;
 
-	mo = obj->userptr.mmu_object;
-	if (mo == NULL)
+	mo = fetch_and_zero(&obj->userptr.mmu_object);
+	if (!mo)
 		return;
 
 	spin_lock(&mo->mn->lock);
 	del_object(mo);
 	spin_unlock(&mo->mn->lock);
 	kfree(mo);
-
-	obj->userptr.mmu_object = NULL;
 }
 
 static struct i915_mmu_notifier *
@@ -217,7 +230,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
 	if (mn)
 		return mn;
 
-	mn = i915_mmu_notifier_create(mm->mm);
+	mn = i915_mmu_notifier_create(mm);
 	if (IS_ERR(mn))
 		err = PTR_ERR(mn);
 
@@ -240,10 +253,8 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
 	mutex_unlock(&mm->i915->mm_lock);
 	up_write(&mm->mm->mmap_sem);
 
-	if (mn && !IS_ERR(mn)) {
-		destroy_workqueue(mn->wq);
+	if (mn && !IS_ERR(mn))
 		kfree(mn);
-	}
 
 	return err ? ERR_PTR(err) : mm->mn;
 }
@@ -266,14 +277,14 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 		return PTR_ERR(mn);
 
 	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
-	if (mo == NULL)
+	if (!mo)
 		return -ENOMEM;
 
 	mo->mn = mn;
 	mo->obj = obj;
 	mo->it.start = obj->userptr.ptr;
 	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
-	INIT_WORK(&mo->work, cancel_userptr);
+	RB_CLEAR_NODE(&mo->it.rb);
 
 	obj->userptr.mmu_object = mo;
 	return 0;
@@ -287,12 +298,16 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
 		return;
 
 	mmu_notifier_unregister(&mn->mn, mm);
-	destroy_workqueue(mn->wq);
 	kfree(mn);
 }
 
 #else
 
+static void
+__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
+{
+}
+
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
@@ -461,42 +476,6 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
 	return st;
 }
 
-static int
-__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
-			      bool value)
-{
-	int ret = 0;
-
-	/* During mm_invalidate_range we need to cancel any userptr that
-	 * overlaps the range being invalidated. Doing so requires the
-	 * struct_mutex, and that risks recursion. In order to cause
-	 * recursion, the user must alias the userptr address space with
-	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
-	 * to invalidate that mmaping, mm_invalidate_range is called with
-	 * the userptr address *and* the struct_mutex held.  To prevent that
-	 * we set a flag under the i915_mmu_notifier spinlock to indicate
-	 * whether this object is valid.
-	 */
-#if defined(CONFIG_MMU_NOTIFIER)
-	if (obj->userptr.mmu_object == NULL)
-		return 0;
-
-	spin_lock(&obj->userptr.mmu_object->mn->lock);
-	/* In order to serialise get_pages with an outstanding
-	 * cancel_userptr, we must drop the struct_mutex and try again.
-	 */
-	if (!value)
-		del_object(obj->userptr.mmu_object);
-	else if (!work_pending(&obj->userptr.mmu_object->work))
-		add_object(obj->userptr.mmu_object);
-	else
-		ret = -EAGAIN;
-	spin_unlock(&obj->userptr.mmu_object->mn->lock);
-#endif
-
-	return ret;
-}
-
 static void
 __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
@@ -682,8 +661,11 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 	struct sgt_iter sgt_iter;
 	struct page *page;
 
-	BUG_ON(obj->userptr.work != NULL);
+	/* Cancel any inflight work and force them to restart their gup */
+	obj->userptr.work = NULL;
 	__i915_gem_userptr_set_active(obj, false);
+	if (!pages)
+		return;
 
 	if (obj->mm.madv != I915_MADV_WILLNEED)
 		obj->mm.dirty = false;
@@ -721,7 +703,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 
 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
 	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
-		 I915_GEM_OBJECT_IS_SHRINKABLE,
+		 I915_GEM_OBJECT_IS_SHRINKABLE |
+		 I915_GEM_OBJECT_ASYNC_CANCEL,
 	.get_pages = i915_gem_userptr_get_pages,
 	.put_pages = i915_gem_userptr_put_pages,
 	.dmabuf_export = i915_gem_userptr_dmabuf_export,
-- 
2.19.1


* Re: [PATCH 1/4] drm/i915/selftests: Test vm isolation
  2018-10-29 10:59 [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
                   ` (2 preceding siblings ...)
  2018-10-29 10:59 ` [PATCH 4/4] drm/i915/userptr: Avoid struct_mutex recursion for mmu_invalidate_range_start Chris Wilson
@ 2018-10-29 11:02 ` Chris Wilson
  2018-10-29 11:06 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] " Patchwork
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 15+ messages in thread
From: Chris Wilson @ 2018-10-29 11:02 UTC
  To: intel-gfx; +Cc: Matthew Auld

Quoting Chris Wilson (2018-10-29 10:59:23)
> The vm of two contexts are supposed to be independent, such that a stray
> write by one cannot be detected by another. Normally the GTT is filled
> explicitly by userspace, but the space in between objects is filled with
> a scratch page -- and that scratch page should be able to form an
not! should not be able...
> inter-context backchannel.
-Chris

* ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Test vm isolation
  2018-10-29 10:59 [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
                   ` (3 preceding siblings ...)
  2018-10-29 11:02 ` [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
@ 2018-10-29 11:06 ` Patchwork
  2018-10-29 11:31 ` ✓ Fi.CI.BAT: success " Patchwork
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2018-10-29 11:06 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/selftests: Test vm isolation
URL   : https://patchwork.freedesktop.org/series/51663/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
7c8c266ad618 drm/i915/selftests: Test vm isolation
-:241: WARNING:LINE_SPACING: Missing a blank line after declarations
#241: FILE: drivers/gpu/drm/i915/selftests/i915_gem_context.c:980:
+	struct drm_file *file;
+	I915_RND_STATE(prng);

total: 0 errors, 1 warnings, 0 checks, 330 lines checked
5c70619a3cfb drm/i915/gtt: Record the scratch pte
c01a83f13023 drm/i915/gtt: Reuse the read-only 64KiB scratch page and directories
45836f8b6d00 drm/i915/userptr: Avoid struct_mutex recursion for mmu_invalidate_range_start
-:18: WARNING:COMMIT_LOG_LONG_LINE: Possible unwrapped commit description (prefer a maximum 75 chars per line)
#18: 
References: 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers")

-:18: ERROR:GIT_COMMIT_ID: Please use git commit description style 'commit <12+ chars of sha1> ("<title line>")' - ie: 'commit 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers")'
#18: 
References: 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers")

-:218: ERROR:LOCKING: recursive locking is bad, do not use this ever.
#218: FILE: drivers/gpu/drm/i915/i915_gem_userptr.c:108:
+	switch (mutex_trylock_recursive(m)) {

total: 2 errors, 1 warnings, 0 checks, 423 lines checked


* ✓ Fi.CI.BAT: success for series starting with [1/4] drm/i915/selftests: Test vm isolation
  2018-10-29 10:59 [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
                   ` (4 preceding siblings ...)
  2018-10-29 11:06 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] " Patchwork
@ 2018-10-29 11:31 ` Patchwork
  2018-10-29 12:42 ` ✓ Fi.CI.IGT: " Patchwork
                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2018-10-29 11:31 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/selftests: Test vm isolation
URL   : https://patchwork.freedesktop.org/series/51663/
State : success

== Summary ==

= CI Bug Log - changes from CI_DRM_5044 -> Patchwork_10621 =

== Summary - SUCCESS ==

  No regressions found.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/51663/revisions/1/mbox/

== Known issues ==

  Here are the changes found in Patchwork_10621 that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@gem_exec_suspend@basic-s4-devices:
      fi-blb-e6850:       NOTRUN -> INCOMPLETE (fdo#107718)

    igt@kms_flip@basic-flip-vs-dpms:
      fi-glk-j4005:       PASS -> DMESG-WARN (fdo#106097, fdo#106000)

    igt@kms_flip@basic-flip-vs-wf_vblank:
      fi-glk-j4005:       PASS -> FAIL (fdo#100368)

    igt@kms_flip@basic-plain-flip:
      fi-glk-j4005:       PASS -> DMESG-WARN (fdo#106097) +1

    igt@pm_rpm@module-reload:
      fi-skl-iommu:       PASS -> INCOMPLETE (fdo#107807)
      fi-glk-j4005:       PASS -> FAIL (fdo#104767)

    
    ==== Possible fixes ====

    igt@gem_exec_suspend@basic-s3:
      fi-blb-e6850:       INCOMPLETE (fdo#107718) -> PASS

    igt@gem_sync@basic-each:
      fi-glk-j4005:       DMESG-WARN (fdo#105719) -> PASS

    igt@kms_flip@basic-flip-vs-modeset:
      fi-glk-j4005:       DMESG-WARN (fdo#106000) -> PASS

    igt@kms_pipe_crc_basic@read-crc-pipe-b-frame-sequence:
      fi-byt-clapper:     FAIL (fdo#103191, fdo#107362) -> PASS

    igt@kms_pipe_crc_basic@read-crc-pipe-c-frame-sequence:
      fi-glk-j4005:       DMESG-FAIL (fdo#106000) -> PASS

    
  fdo#100368 https://bugs.freedesktop.org/show_bug.cgi?id=100368
  fdo#103191 https://bugs.freedesktop.org/show_bug.cgi?id=103191
  fdo#104767 https://bugs.freedesktop.org/show_bug.cgi?id=104767
  fdo#105719 https://bugs.freedesktop.org/show_bug.cgi?id=105719
  fdo#106000 https://bugs.freedesktop.org/show_bug.cgi?id=106000
  fdo#106097 https://bugs.freedesktop.org/show_bug.cgi?id=106097
  fdo#107362 https://bugs.freedesktop.org/show_bug.cgi?id=107362
  fdo#107718 https://bugs.freedesktop.org/show_bug.cgi?id=107718
  fdo#107807 https://bugs.freedesktop.org/show_bug.cgi?id=107807


== Participating hosts (47 -> 43) ==

  Additional (5): fi-gdg-551 fi-cfl-8109u fi-ivb-3770 fi-pnv-d510 fi-snb-2600 
  Missing    (9): fi-ilk-m540 fi-hsw-4200u fi-bsw-cyan fi-ilk-650 fi-ctg-p8600 fi-kbl-8809g fi-elk-e7500 fi-icl-u fi-byt-n2820 


== Build changes ==

    * Linux: CI_DRM_5044 -> Patchwork_10621

  CI_DRM_5044: c4487dca27970879bf67f331614142c749984d65 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4698: af57164fcb16950187ad402ed31f565e88c42a78 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_10621: 45836f8b6d00ba865ca054f5d7d2ffab00b6c97f @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

45836f8b6d00 drm/i915/userptr: Avoid struct_mutex recursion for mmu_invalidate_range_start
c01a83f13023 drm/i915/gtt: Reuse the read-only 64KiB scratch page and directories
5c70619a3cfb drm/i915/gtt: Record the scratch pte
7c8c266ad618 drm/i915/selftests: Test vm isolation

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_10621/issues.html

* ✓ Fi.CI.IGT: success for series starting with [1/4] drm/i915/selftests: Test vm isolation
  2018-10-29 10:59 [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
                   ` (5 preceding siblings ...)
  2018-10-29 11:31 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2018-10-29 12:42 ` Patchwork
  2018-10-29 13:13 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Test vm isolation (rev2) Patchwork
                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2018-10-29 12:42 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/selftests: Test vm isolation
URL   : https://patchwork.freedesktop.org/series/51663/
State : success

== Summary ==

= CI Bug Log - changes from CI_DRM_5044_full -> Patchwork_10621_full =

== Summary - WARNING ==

  Minor unknown changes coming with Patchwork_10621_full need to be verified
  manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_10621_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

== Possible new issues ==

  Here are the unknown changes that may have been introduced in Patchwork_10621_full:

  === IGT changes ===

    ==== Warnings ====

    igt@pm_rc6_residency@rc6-accuracy:
      shard-snb:          SKIP -> PASS

    
== Known issues ==

  Here are the changes found in Patchwork_10621_full that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@drv_suspend@shrink:
      shard-skl:          NOTRUN -> INCOMPLETE (fdo#106886)

    igt@gem_ctx_isolation@bcs0-s3:
      shard-kbl:          PASS -> INCOMPLETE (fdo#103665)

    igt@gem_exec_schedule@pi-ringfull-bsd:
      shard-skl:          NOTRUN -> FAIL (fdo#103158) +2

    igt@gem_ppgtt@blt-vs-render-ctxn:
      shard-skl:          NOTRUN -> TIMEOUT (fdo#108039)

    igt@gem_userptr_blits@readonly-unsync:
      shard-skl:          NOTRUN -> INCOMPLETE (fdo#108074)

    igt@kms_busy@extended-modeset-hang-newfb-render-a:
      shard-skl:          NOTRUN -> DMESG-WARN (fdo#107956) +5

    igt@kms_cursor_crc@cursor-128x128-random:
      shard-apl:          PASS -> FAIL (fdo#103232) +1

    igt@kms_cursor_crc@cursor-64x21-offscreen:
      shard-skl:          NOTRUN -> FAIL (fdo#103232)

    igt@kms_cursor_crc@cursor-64x64-sliding:
      shard-glk:          PASS -> FAIL (fdo#103232) +3

    igt@kms_fbcon_fbt@psr-suspend:
      shard-skl:          NOTRUN -> FAIL (fdo#107882) +1

    igt@kms_flip@dpms-vs-vblank-race-interruptible:
      shard-apl:          PASS -> DMESG-WARN (fdo#108549) +11

    igt@kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-mmap-cpu:
      shard-glk:          PASS -> FAIL (fdo#103167) +8

    igt@kms_plane@pixel-format-pipe-c-planes:
      shard-kbl:          NOTRUN -> FAIL (fdo#103166)
      shard-skl:          NOTRUN -> DMESG-FAIL (fdo#106885, fdo#103166)

    igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes:
      shard-glk:          PASS -> INCOMPLETE (k.org#198133, fdo#103359)

    igt@kms_plane_alpha_blend@pipe-a-alpha-7efc:
      shard-skl:          NOTRUN -> FAIL (fdo#108145, fdo#107815) +2

    igt@kms_plane_alpha_blend@pipe-a-alpha-opaque-fb:
      shard-skl:          NOTRUN -> FAIL (fdo#108145) +8

    igt@kms_plane_multiple@atomic-pipe-b-tiling-x:
      shard-apl:          PASS -> FAIL (fdo#103166)

    igt@kms_plane_multiple@atomic-pipe-c-tiling-x:
      shard-glk:          PASS -> FAIL (fdo#103166) +1

    igt@kms_plane_multiple@atomic-pipe-c-tiling-y:
      shard-skl:          NOTRUN -> FAIL (fdo#107815, fdo#103166)

    igt@kms_vblank@pipe-c-ts-continuation-dpms-suspend:
      shard-apl:          PASS -> DMESG-FAIL (fdo#108549)

    
    ==== Possible fixes ====

    igt@drv_suspend@debugfs-reader:
      shard-skl:          INCOMPLETE (fdo#104108) -> PASS

    igt@gem_workarounds@suspend-resume-context:
      shard-kbl:          INCOMPLETE (fdo#103665) -> PASS

    igt@kms_busy@extended-pageflip-modeset-hang-oldfb-render-b:
      shard-glk:          DMESG-WARN (fdo#107956) -> PASS

    igt@kms_ccs@pipe-b-crc-sprite-planes-basic:
      shard-glk:          FAIL (fdo#108145) -> PASS

    igt@kms_color@pipe-b-ctm-max:
      shard-apl:          DMESG-WARN (fdo#108549) -> PASS +13

    igt@kms_color@pipe-b-legacy-gamma:
      shard-skl:          FAIL (fdo#104782) -> PASS

    igt@kms_cursor_crc@cursor-256x85-onscreen:
      shard-glk:          FAIL (fdo#103232) -> PASS

    igt@kms_cursor_crc@cursor-64x64-onscreen:
      shard-apl:          FAIL (fdo#103232) -> PASS +1

    igt@kms_cursor_crc@cursor-64x64-suspend:
      shard-apl:          FAIL (fdo#103191, fdo#103232) -> PASS

    igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-mmap-cpu:
      shard-glk:          DMESG-WARN (fdo#106538, fdo#105763) -> PASS +1

    igt@kms_frontbuffer_tracking@fbc-2p-primscrn-spr-indfb-draw-blt:
      shard-glk:          FAIL (fdo#103167) -> PASS

    igt@kms_plane@plane-panning-top-left-pipe-b-planes:
      shard-apl:          INCOMPLETE (fdo#103927) -> PASS

    igt@kms_plane@plane-position-covered-pipe-a-planes:
      shard-glk:          FAIL (fdo#103166) -> PASS +1

    igt@kms_plane_alpha_blend@pipe-b-coverage-7efc:
      shard-skl:          FAIL (fdo#107815) -> PASS

    igt@kms_plane_multiple@atomic-pipe-a-tiling-x:
      shard-apl:          FAIL (fdo#103166) -> PASS +1

    igt@kms_vblank@pipe-b-ts-continuation-dpms-suspend:
      shard-snb:          DMESG-WARN (fdo#102365) -> PASS

    igt@kms_vblank@pipe-c-ts-continuation-dpms-rpm:
      shard-apl:          DMESG-FAIL (fdo#108549) -> PASS

    igt@pm_rpm@dpms-lpsp:
      shard-skl:          INCOMPLETE (fdo#107807) -> PASS

    
    ==== Warnings ====

    igt@kms_cursor_crc@cursor-128x42-sliding:
      shard-apl:          DMESG-WARN (fdo#108549) -> FAIL (fdo#103232)

    igt@kms_cursor_crc@cursor-256x256-dpms:
      shard-apl:          DMESG-WARN (fdo#108549) -> DMESG-FAIL (fdo#103232, fdo#108549)

    igt@kms_cursor_crc@cursor-64x21-sliding:
      shard-apl:          FAIL (fdo#103232) -> DMESG-FAIL (fdo#103232, fdo#108549)

    igt@kms_cursor_crc@cursor-64x64-sliding:
      shard-apl:          DMESG-FAIL (fdo#103232, fdo#108549) -> DMESG-WARN (fdo#108549)

    
  fdo#102365 https://bugs.freedesktop.org/show_bug.cgi?id=102365
  fdo#103158 https://bugs.freedesktop.org/show_bug.cgi?id=103158
  fdo#103166 https://bugs.freedesktop.org/show_bug.cgi?id=103166
  fdo#103167 https://bugs.freedesktop.org/show_bug.cgi?id=103167
  fdo#103191 https://bugs.freedesktop.org/show_bug.cgi?id=103191
  fdo#103232 https://bugs.freedesktop.org/show_bug.cgi?id=103232
  fdo#103359 https://bugs.freedesktop.org/show_bug.cgi?id=103359
  fdo#103665 https://bugs.freedesktop.org/show_bug.cgi?id=103665
  fdo#103927 https://bugs.freedesktop.org/show_bug.cgi?id=103927
  fdo#104108 https://bugs.freedesktop.org/show_bug.cgi?id=104108
  fdo#104782 https://bugs.freedesktop.org/show_bug.cgi?id=104782
  fdo#105763 https://bugs.freedesktop.org/show_bug.cgi?id=105763
  fdo#106538 https://bugs.freedesktop.org/show_bug.cgi?id=106538
  fdo#106885 https://bugs.freedesktop.org/show_bug.cgi?id=106885
  fdo#106886 https://bugs.freedesktop.org/show_bug.cgi?id=106886
  fdo#107807 https://bugs.freedesktop.org/show_bug.cgi?id=107807
  fdo#107815 https://bugs.freedesktop.org/show_bug.cgi?id=107815
  fdo#107882 https://bugs.freedesktop.org/show_bug.cgi?id=107882
  fdo#107956 https://bugs.freedesktop.org/show_bug.cgi?id=107956
  fdo#108039 https://bugs.freedesktop.org/show_bug.cgi?id=108039
  fdo#108074 https://bugs.freedesktop.org/show_bug.cgi?id=108074
  fdo#108145 https://bugs.freedesktop.org/show_bug.cgi?id=108145
  fdo#108549 https://bugs.freedesktop.org/show_bug.cgi?id=108549
  k.org#198133 https://bugzilla.kernel.org/show_bug.cgi?id=198133


== Participating hosts (6 -> 6) ==

  No changes in participating hosts


== Build changes ==

    * Linux: CI_DRM_5044 -> Patchwork_10621

  CI_DRM_5044: c4487dca27970879bf67f331614142c749984d65 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4698: af57164fcb16950187ad402ed31f565e88c42a78 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_10621: 45836f8b6d00ba865ca054f5d7d2ffab00b6c97f @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_10621/shards.html

* Re: [PATCH 2/4] drm/i915/gtt: Record the scratch pte
  2018-10-29 10:59 ` [PATCH 2/4] drm/i915/gtt: Record the scratch pte Chris Wilson
@ 2018-10-29 12:50   ` Matthew Auld
  2018-10-29 12:57     ` Chris Wilson
  2018-10-29 13:06   ` [PATCH v2] " Chris Wilson
  1 sibling, 1 reply; 15+ messages in thread
From: Matthew Auld @ 2018-10-29 12:50 UTC
  To: Chris Wilson; +Cc: Intel Graphics Development, Matthew Auld

On Mon, 29 Oct 2018 at 10:59, Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> Record the scratch PTE encoding upon creation rather than recomputing
> the bits every time. This is important for the next patch, where we
> forgo having a valid scratch page from which to compute the bits and so
> must keep the PTE value instead.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Matthew Auld <matthew.auld@intel.com>

Can we also update the scrub_64K bit? I don't think it should be
accessing scratch_page.daddr after the next patch.

Reviewed-by: Matthew Auld <matthew.auld@intel.com>

* Re: [PATCH 3/4] drm/i915/gtt: Reuse the read-only 64KiB scratch page and directories
  2018-10-29 10:59 ` [PATCH 3/4] drm/i915/gtt: Reuse the read-only 64KiB scratch page and directories Chris Wilson
@ 2018-10-29 12:50   ` Matthew Auld
  0 siblings, 0 replies; 15+ messages in thread
From: Matthew Auld @ 2018-10-29 12:50 UTC
  To: Chris Wilson; +Cc: Intel Graphics Development, Matthew Auld

On Mon, 29 Oct 2018 at 11:00, Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> If we can prevent stray writes from landing in the scratch page, we can
> reuse the same page and same scratch PT for all contexts without fear of
> information leaks and side-channels.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>

* Re: [PATCH 2/4] drm/i915/gtt: Record the scratch pte
  2018-10-29 12:50   ` Matthew Auld
@ 2018-10-29 12:57     ` Chris Wilson
  0 siblings, 0 replies; 15+ messages in thread
From: Chris Wilson @ 2018-10-29 12:57 UTC (permalink / raw)
  To: Matthew Auld; +Cc: Intel Graphics Development, Matthew Auld

Quoting Matthew Auld (2018-10-29 12:50:12)
> On Mon, 29 Oct 2018 at 10:59, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> >
> > Record the scratch PTE encoding upon creation rather than recomputing
> > the bits every time. This is important for the next patch, where we
> > forgo having a valid scratch page from which to compute the bits and
> > so must keep the PTE value instead.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Matthew Auld <matthew.auld@intel.com>
> 
> Can we also update the scrub_64K bit? I don't think it should be
> accessing scratch_page.daddr after the next patch.

The I915_SELFTEST_ONLY(scrub_64K)? Yup, missed that one; it should now
be encode = vma->vm->scratch_pte.
-Chris
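
For reference, the corrected hunk looks roughly like this (a sketch
mirroring the scrub loop in gen8_ppgtt_insert_huge_entries(); the v2
patch below carries the actual change):

	if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
		u16 i;

		/* Reuse the recorded scratch PTE (which now carries the
		 * read-only bit) instead of re-encoding it from
		 * scratch_page.daddr.
		 */
		encode = vma->vm->scratch_pte;
		vaddr = kmap_atomic_px(pd->page_table[idx.pde]);

		for (i = 1; i < index; i += 16)
			memset64(vaddr + i, encode, 15);

		kunmap_atomic(vaddr);
	}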

^ permalink raw reply	[flat|nested] 15+ messages in thread

* [PATCH v2] drm/i915/gtt: Record the scratch pte
  2018-10-29 10:59 ` [PATCH 2/4] drm/i915/gtt: Record the scratch pte Chris Wilson
  2018-10-29 12:50   ` Matthew Auld
@ 2018-10-29 13:06   ` Chris Wilson
  1 sibling, 0 replies; 15+ messages in thread
From: Chris Wilson @ 2018-10-29 13:06 UTC (permalink / raw)
  To: intel-gfx; +Cc: Matthew Auld

Record the scratch PTE encoding upon creation rather than recomputing
the bits every time. This is important for the next patch, where we
forgo having a valid scratch page from which to compute the bits and
so must keep the PTE value instead.

v2: Fix up scrub_64K to use scratch_pte as well.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 85 +++++++++++++++--------------
 drivers/gpu/drm/i915/i915_gem_gtt.h | 11 ++--
 2 files changed, 50 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 19b2d991b5d8..afe45cbcd762 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -186,9 +186,9 @@ static void clear_pages(struct i915_vma *vma)
 	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
 }
 
-static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
-				  enum i915_cache_level level,
-				  u32 flags)
+static u64 gen8_pte_encode(dma_addr_t addr,
+			   enum i915_cache_level level,
+			   u32 flags)
 {
 	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
 
@@ -225,9 +225,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
 #define gen8_pdpe_encode gen8_pde_encode
 #define gen8_pml4e_encode gen8_pde_encode
 
-static gen6_pte_t snb_pte_encode(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 unused)
+static u64 snb_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -247,9 +247,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 unused)
+static u64 ivb_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -271,9 +271,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen6_pte_t byt_pte_encode(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 flags)
+static u64 byt_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -287,9 +287,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 unused)
+static u64 hsw_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -300,9 +300,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-static gen6_pte_t iris_pte_encode(dma_addr_t addr,
-				  enum i915_cache_level level,
-				  u32 unused)
+static u64 iris_pte_encode(dma_addr_t addr,
+			   enum i915_cache_level level,
+			   u32 flags)
 {
 	gen6_pte_t pte = GEN6_PTE_VALID;
 	pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -666,14 +666,13 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
 static void gen8_initialize_pt(struct i915_address_space *vm,
 			       struct i915_page_table *pt)
 {
-	fill_px(vm, pt,
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+	fill_px(vm, pt, vm->scratch_pte);
 }
 
-static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
+static void gen6_initialize_pt(struct i915_address_space *vm,
 			       struct i915_page_table *pt)
 {
-	fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
+	fill32_px(vm, pt, vm->scratch_pte);
 }
 
 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
@@ -807,15 +806,13 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
 /* Removes entries from a single page table, releasing it if it's empty.
  * Caller can use the return value to update higher-level entries.
  */
-static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
 				struct i915_page_table *pt,
 				u64 start, u64 length)
 {
 	unsigned int num_entries = gen8_pte_count(start, length);
 	unsigned int pte = gen8_pte_index(start);
 	unsigned int pte_end = pte + num_entries;
-	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
 	gen8_pte_t *vaddr;
 
 	GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -826,7 +823,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
 
 	vaddr = kmap_atomic_px(pt);
 	while (pte < pte_end)
-		vaddr[pte++] = scratch_pte;
+		vaddr[pte++] = vm->scratch_pte;
 	kunmap_atomic(vaddr);
 
 	return false;
@@ -1159,7 +1156,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
 				u16 i;
 
-				encode = pte_encode | vma->vm->scratch_page.daddr;
+				encode = vma->vm->scratch_pte;
 				vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
 
 				for (i = 1; i < index; i += 16)
@@ -1216,6 +1213,11 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 	if (ret)
 		return ret;
 
+	vm->scratch_pte =
+		gen8_pte_encode(vm->scratch_page.daddr,
+				I915_CACHE_LLC,
+				PTE_READ_ONLY);
+
 	vm->scratch_pt = alloc_pt(vm);
 	if (IS_ERR(vm->scratch_pt)) {
 		ret = PTR_ERR(vm->scratch_pt);
@@ -1524,8 +1526,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
 static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
 	struct i915_address_space *vm = &ppgtt->vm;
-	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+	const gen8_pte_t scratch_pte = vm->scratch_pte;
 	u64 start = 0, length = ppgtt->vm.total;
 
 	if (use_4lvl(vm)) {
@@ -1672,7 +1673,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
 {
 	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
-	const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+	const gen6_pte_t scratch_pte = base->vm.scratch_pte;
 	struct i915_page_table *pt;
 	u32 pte, pde;
 
@@ -1785,7 +1786,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 	unsigned int pde = first_entry / GEN6_PTES;
 	unsigned int pte = first_entry % GEN6_PTES;
 	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
-	const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+	const gen6_pte_t scratch_pte = vm->scratch_pte;
 
 	while (num_entries) {
 		struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
@@ -1876,7 +1877,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 			if (IS_ERR(pt))
 				goto unwind_out;
 
-			gen6_initialize_pt(ppgtt, pt);
+			gen6_initialize_pt(vm, pt);
 			ppgtt->base.pd.page_table[pde] = pt;
 
 			if (i915_vma_is_bound(ppgtt->vma,
@@ -1914,9 +1915,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
 	if (ret)
 		return ret;
 
-	ppgtt->scratch_pte =
-		vm->pte_encode(vm->scratch_page.daddr,
-			       I915_CACHE_NONE, PTE_READ_ONLY);
+	vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
+					 I915_CACHE_NONE,
+					 PTE_READ_ONLY);
 
 	vm->scratch_pt = alloc_pt(vm);
 	if (IS_ERR(vm->scratch_pt)) {
@@ -1924,7 +1925,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
 		return PTR_ERR(vm->scratch_pt);
 	}
 
-	gen6_initialize_pt(ppgtt, vm->scratch_pt);
+	gen6_initialize_pt(vm, vm->scratch_pt);
 	gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
 		ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
 
@@ -2469,8 +2470,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
 	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
-	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+	const gen8_pte_t scratch_pte = vm->scratch_pte;
 	gen8_pte_t __iomem *gtt_base =
 		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2595,8 +2595,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 		 first_entry, num_entries, max_entries))
 		num_entries = max_entries;
 
-	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
-				     I915_CACHE_LLC, 0);
+	scratch_pte = vm->scratch_pte;
 
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
@@ -3002,6 +3001,10 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
 		return ret;
 	}
 
+	ggtt->vm.scratch_pte =
+		ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
+				    I915_CACHE_NONE, 0);
+
 	return 0;
 }
 
@@ -3348,6 +3351,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
 	ggtt->vm.vma_ops.clear_pages = clear_pages;
 
+	ggtt->vm.pte_encode = gen8_pte_encode;
+
 	setup_private_pat(dev_priv);
 
 	return ggtt_probe_common(ggtt, size);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 5d2c5ba55ad8..a030299b79cd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -289,6 +289,7 @@ struct i915_address_space {
 
 	struct mutex mutex; /* protects vma and our lists */
 
+	u64 scratch_pte;
 	struct i915_page_dma scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
@@ -335,12 +336,11 @@ struct i915_address_space {
 	/* Some systems support read-only mappings for GGTT and/or PPGTT */
 	bool has_read_only:1;
 
-	/* FIXME: Need a more generic return type */
-	gen6_pte_t (*pte_encode)(dma_addr_t addr,
-				 enum i915_cache_level level,
-				 u32 flags); /* Create a valid PTE */
-	/* flags for pte_encode */
+	u64 (*pte_encode)(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags); /* Create a valid PTE */
 #define PTE_READ_ONLY	(1<<0)
+
 	int (*allocate_va_range)(struct i915_address_space *vm,
 				 u64 start, u64 length);
 	void (*clear_range)(struct i915_address_space *vm,
@@ -422,7 +422,6 @@ struct gen6_hw_ppgtt {
 
 	struct i915_vma *vma;
 	gen6_pte_t __iomem *pd_addr;
-	gen6_pte_t scratch_pte;
 
 	unsigned int pin_count;
 	bool scan_for_unused_pt;
-- 
2.19.1
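
In short, the patch replaces per-call encoding of the scratch PTE with
a single value recorded at init time; condensed from the hunks above
(illustrative only):

	/* before: each caller re-derived the encoding on demand */
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);

	/* after: encode once when the scratch page is set up ... */
	vm->scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
					  I915_CACHE_LLC,
					  PTE_READ_ONLY);

	/* ... and reuse the recorded value everywhere else */
	const gen8_pte_t scratch_pte = vm->scratch_pte;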


^ permalink raw reply related	[flat|nested] 15+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Test vm isolation (rev2)
  2018-10-29 10:59 [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
                   ` (6 preceding siblings ...)
  2018-10-29 12:42 ` ✓ Fi.CI.IGT: " Patchwork
@ 2018-10-29 13:13 ` Patchwork
  2018-10-29 13:31 ` ✗ Fi.CI.BAT: failure " Patchwork
  2018-10-29 16:13 ` [PATCH 1/4] drm/i915/selftests: Test vm isolation Matthew Auld
  9 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2018-10-29 13:13 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/selftests: Test vm isolation (rev2)
URL   : https://patchwork.freedesktop.org/series/51663/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
1b3dc084cdde drm/i915/selftests: Test vm isolation
-:241: WARNING:LINE_SPACING: Missing a blank line after declarations
#241: FILE: drivers/gpu/drm/i915/selftests/i915_gem_context.c:980:
+	struct drm_file *file;
+	I915_RND_STATE(prng);

total: 0 errors, 1 warnings, 0 checks, 330 lines checked
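
The LINE_SPACING warning asks for a blank line between the last
declaration and the first statement; I915_RND_STATE() expands to a
declaration, so checkpatch arguably misfires here. The shape it wants
(with hypothetical surrounding code) is:

	struct drm_file *file;
	I915_RND_STATE(prng);
	int err;

	/* blank line above separates declarations from statements */
	file = mock_file(i915);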
5b80cd2d2c3e drm/i915/gtt: Record the scratch pte
c55548f46915 drm/i915/gtt: Reuse the read-only 64KiB scratch page and directories
2882f16c3e0a drm/i915/userptr: Avoid struct_mutex recursion for mmu_invalidate_range_start
-:18: WARNING:COMMIT_LOG_LONG_LINE: Possible unwrapped commit description (prefer a maximum 75 chars per line)
#18: 
References: 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers")

-:18: ERROR:GIT_COMMIT_ID: Please use git commit description style 'commit <12+ chars of sha1> ("<title line>")' - ie: 'commit 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers")'
#18: 
References: 93065ac753e4 ("mm, oom: distinguish blockable mode for mmu notifiers")

-:218: ERROR:LOCKING: recursive locking is bad, do not use this ever.
#218: FILE: drivers/gpu/drm/i915/i915_gem_userptr.c:108:
+	switch (mutex_trylock_recursive(m)) {

total: 2 errors, 1 warnings, 0 checks, 423 lines checked
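
The LOCKING error fires on any use of mutex_trylock_recursive(), which
patch 4 uses deliberately to cope with re-entry from the mmu notifier.
The general pattern is roughly as follows (a sketch with an assumed
target lock; not the patch's exact code):

	struct mutex *m = &i915->drm.struct_mutex;	/* assumed lock */
	bool unlock = false;

	switch (mutex_trylock_recursive(m)) {
	case MUTEX_TRYLOCK_FAILED:
		return -EAGAIN;		/* contended by another task */
	case MUTEX_TRYLOCK_SUCCESS:
		unlock = true;		/* we own it; unlock when done */
		break;
	case MUTEX_TRYLOCK_RECURSIVE:
		break;			/* already held by this task */
	}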


^ permalink raw reply	[flat|nested] 15+ messages in thread

* ✗ Fi.CI.BAT: failure for series starting with [1/4] drm/i915/selftests: Test vm isolation (rev2)
  2018-10-29 10:59 [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
                   ` (7 preceding siblings ...)
  2018-10-29 13:13 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Test vm isolation (rev2) Patchwork
@ 2018-10-29 13:31 ` Patchwork
  2018-10-29 16:13 ` [PATCH 1/4] drm/i915/selftests: Test vm isolation Matthew Auld
  9 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2018-10-29 13:31 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/selftests: Test vm isolation (rev2)
URL   : https://patchwork.freedesktop.org/series/51663/
State : failure

== Summary ==

= CI Bug Log - changes from CI_DRM_5044 -> Patchwork_10622 =

== Summary - FAILURE ==

  Serious unknown changes introduced by Patchwork_10622 need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_10622, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/51663/revisions/2/mbox/

== Possible new issues ==

  Here are the unknown changes that may have been introduced in Patchwork_10622:

  === IGT changes ===

    ==== Possible regressions ====

    igt@drv_selftest@live_contexts:
      fi-icl-u:           PASS -> INCOMPLETE

    
    ==== Warnings ====

    igt@drv_selftest@live_guc:
      fi-icl-u:           PASS -> SKIP +2

    
== Known issues ==

  Here are the changes found in Patchwork_10622 that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@drv_module_reload@basic-reload-inject:
      fi-glk-j4005:       PASS -> DMESG-WARN (fdo#106000)

    igt@gem_exec_suspend@basic-s3:
      fi-cfl-8109u:       NOTRUN -> DMESG-WARN (fdo#107345) +1

    igt@kms_frontbuffer_tracking@basic:
      fi-byt-clapper:     PASS -> FAIL (fdo#103167)

    igt@kms_pipe_crc_basic@nonblocking-crc-pipe-b-frame-sequence:
      fi-byt-clapper:     PASS -> FAIL (fdo#107362, fdo#103191)

    igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b:
      fi-cfl-8109u:       NOTRUN -> INCOMPLETE (fdo#106070, fdo#108126)

    
    ==== Possible fixes ====

    igt@drv_module_reload@basic-reload:
      fi-glk-j4005:       DMESG-WARN (fdo#106725, fdo#106248) -> PASS

    igt@gem_exec_suspend@basic-s3:
      fi-blb-e6850:       INCOMPLETE (fdo#107718) -> PASS

    igt@gem_sync@basic-each:
      fi-glk-j4005:       DMESG-WARN (fdo#105719) -> PASS

    igt@kms_flip@basic-flip-vs-modeset:
      fi-glk-j4005:       DMESG-WARN (fdo#106000) -> PASS

    igt@kms_pipe_crc_basic@read-crc-pipe-b-frame-sequence:
      fi-byt-clapper:     FAIL (fdo#107362, fdo#103191) -> PASS

    igt@kms_pipe_crc_basic@read-crc-pipe-c-frame-sequence:
      fi-glk-j4005:       DMESG-FAIL (fdo#106000) -> PASS

    
  fdo#103167 https://bugs.freedesktop.org/show_bug.cgi?id=103167
  fdo#103191 https://bugs.freedesktop.org/show_bug.cgi?id=103191
  fdo#105719 https://bugs.freedesktop.org/show_bug.cgi?id=105719
  fdo#106000 https://bugs.freedesktop.org/show_bug.cgi?id=106000
  fdo#106070 https://bugs.freedesktop.org/show_bug.cgi?id=106070
  fdo#106248 https://bugs.freedesktop.org/show_bug.cgi?id=106248
  fdo#106725 https://bugs.freedesktop.org/show_bug.cgi?id=106725
  fdo#107345 https://bugs.freedesktop.org/show_bug.cgi?id=107345
  fdo#107362 https://bugs.freedesktop.org/show_bug.cgi?id=107362
  fdo#107718 https://bugs.freedesktop.org/show_bug.cgi?id=107718
  fdo#108126 https://bugs.freedesktop.org/show_bug.cgi?id=108126


== Participating hosts (47 -> 43) ==

  Additional (4): fi-cfl-8109u fi-gdg-551 fi-ivb-3770 fi-snb-2600 
  Missing    (8): fi-ilk-m540 fi-hsw-4200u fi-bsw-cyan fi-ilk-650 fi-ctg-p8600 fi-kbl-8809g fi-elk-e7500 fi-byt-n2820 


== Build changes ==

    * Linux: CI_DRM_5044 -> Patchwork_10622

  CI_DRM_5044: c4487dca27970879bf67f331614142c749984d65 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4698: af57164fcb16950187ad402ed31f565e88c42a78 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_10622: 2882f16c3e0a082c0106dcaa727076f8bd6ea79e @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

2882f16c3e0a drm/i915/userptr: Avoid struct_mutex recursion for mmu_invalidate_range_start
c55548f46915 drm/i915/gtt: Reuse the read-only 64KiB scratch page and directories
5b80cd2d2c3e drm/i915/gtt: Record the scratch pte
1b3dc084cdde drm/i915/selftests: Test vm isolation

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_10622/issues.html

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 1/4] drm/i915/selftests: Test vm isolation
  2018-10-29 10:59 [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
                   ` (8 preceding siblings ...)
  2018-10-29 13:31 ` ✗ Fi.CI.BAT: failure " Patchwork
@ 2018-10-29 16:13 ` Matthew Auld
  9 siblings, 0 replies; 15+ messages in thread
From: Matthew Auld @ 2018-10-29 16:13 UTC (permalink / raw)
  To: Chris Wilson; +Cc: Intel Graphics Development, Matthew Auld

On Mon, 29 Oct 2018 at 10:59, Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> The vm of two contexts are supposed to be independent, such that a stray
> write by one cannot be detected by another. Normally the GTT is filled
> explicitly by userspace, but the space in between objects is filled with
> a scratch page -- and that scratch page should be able to form an
> inter-context backchannel.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Matthew Auld <matthew.auld@intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2018-10-29 16:14 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-10-29 10:59 [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
2018-10-29 10:59 ` [PATCH 2/4] drm/i915/gtt: Record the scratch pte Chris Wilson
2018-10-29 12:50   ` Matthew Auld
2018-10-29 12:57     ` Chris Wilson
2018-10-29 13:06   ` [PATCH v2] " Chris Wilson
2018-10-29 10:59 ` [PATCH 3/4] drm/i915/gtt: Reuse the read-only 64KiB scratch page and directories Chris Wilson
2018-10-29 12:50   ` Matthew Auld
2018-10-29 10:59 ` [PATCH 4/4] drm/i915/userptr: Avoid struct_mutex recursion for mmu_invalidate_range_start Chris Wilson
2018-10-29 11:02 ` [PATCH 1/4] drm/i915/selftests: Test vm isolation Chris Wilson
2018-10-29 11:06 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] " Patchwork
2018-10-29 11:31 ` ✓ Fi.CI.BAT: success " Patchwork
2018-10-29 12:42 ` ✓ Fi.CI.IGT: " Patchwork
2018-10-29 13:13 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Test vm isolation (rev2) Patchwork
2018-10-29 13:31 ` ✗ Fi.CI.BAT: failure " Patchwork
2018-10-29 16:13 ` [PATCH 1/4] drm/i915/selftests: Test vm isolation Matthew Auld
