* [PATCH] drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()
@ 2016-09-09 19:48 Dave Gordon
  2016-09-09 20:49 ` ✗ Fi.CI.BAT: failure for " Patchwork
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Dave Gordon @ 2016-09-09 19:48 UTC (permalink / raw)
  To: intel-gfx

This just hides the existing obj->dirty flag inside a trivial inline
setter, to discourage non-GEM code from looking too closely. The
flag is renamed to emphasise that it is private to the GEM memory-
management code and ensure that no legacy code continues to use it
directly.

v2:
  Use Chris Wilson's preferred names for flag-related functions

Inspired-by: http://www.spinics.net/lists/intel-gfx/msg92390.html
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
 drivers/gpu/drm/i915/i915_drv.h            | 22 +++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem.c            | 25 ++++++++++++++-----------
 drivers/gpu/drm/i915/i915_gem_context.c    |  7 +++++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c    | 12 +++++++-----
 drivers/gpu/drm/i915/i915_gpu_error.c      |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 29 ++++++++++++++++-------------
 8 files changed, 66 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 02b627e..b77fc27 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -160,7 +160,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 		   i915_gem_active_get_seqno(&obj->last_write,
 					     &obj->base.dev->struct_mutex),
 		   i915_cache_level_str(dev_priv, obj->cache_level),
-		   obj->dirty ? " dirty" : "",
+		   i915_gem_object_is_dirty(obj) ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f39bede..333e21b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2209,7 +2209,7 @@ struct drm_i915_gem_object {
 	 * This is set if the object has been written to since last bound
 	 * to the GTT
 	 */
-	unsigned int dirty:1;
+	unsigned int __dirty:1;
 
 	/**
 	 * Advice: are the backing pages purgeable?
@@ -3156,6 +3156,26 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 	obj->pages_pin_count++;
 }
 
+/*
+ * Flag the object content as having changed since the last call to
+ * i915_gem_object_pin_pages() above, so that the new content is not
+ * lost after the next call to i915_gem_object_unpin_pages() below
+ */
+static inline void i915_gem_object_set_dirty(struct drm_i915_gem_object *obj)
+{
+	obj->__dirty = true;
+}
+
+static inline void i915_gem_object_clear_dirty(struct drm_i915_gem_object *obj)
+{
+	obj->__dirty = false;
+}
+
+static inline bool i915_gem_object_is_dirty(struct drm_i915_gem_object *obj)
+{
+	return obj->__dirty;
+}
+
 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
 	BUG_ON(obj->pages_pin_count == 0);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2401818..f571a02 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -234,9 +234,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 	}
 
 	if (obj->madv == I915_MADV_DONTNEED)
-		obj->dirty = 0;
-
-	if (obj->dirty) {
+		i915_gem_object_clear_dirty(obj);
+	else if (i915_gem_object_is_dirty(obj)) {
 		struct address_space *mapping = obj->base.filp->f_mapping;
 		char *vaddr = obj->phys_handle->vaddr;
 		int i;
@@ -260,7 +259,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 			put_page(page);
 			vaddr += PAGE_SIZE;
 		}
-		obj->dirty = 0;
+		i915_gem_object_clear_dirty(obj);
 	}
 
 	sg_free_table(obj->pages);
@@ -703,7 +702,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 		obj->cache_dirty = true;
 
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-	obj->dirty = 1;
+	i915_gem_object_set_dirty(obj);
 	/* return with the pages pinned */
 	return 0;
 
@@ -1156,7 +1155,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 		goto out_unpin;
 
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-	obj->dirty = true;
+	i915_gem_object_set_dirty(obj);
 
 	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = args->offset;
@@ -1327,6 +1326,8 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	offset = args->offset;
 	remain = args->size;
 
+	i915_gem_object_set_dirty(obj);
+
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
 			 offset >> PAGE_SHIFT) {
 		struct page *page = sg_page_iter_page(&sg_iter);
@@ -2133,6 +2134,7 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 {
 	struct sgt_iter sgt_iter;
 	struct page *page;
+	bool dirty;
 	int ret;
 
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2152,10 +2154,11 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 		i915_gem_object_save_bit_17_swizzle(obj);
 
 	if (obj->madv == I915_MADV_DONTNEED)
-		obj->dirty = 0;
+		i915_gem_object_clear_dirty(obj);
 
+	dirty = i915_gem_object_is_dirty(obj);
 	for_each_sgt_page(page, sgt_iter, obj->pages) {
-		if (obj->dirty)
+		if (dirty)
 			set_page_dirty(page);
 
 		if (obj->madv == I915_MADV_WILLNEED)
@@ -2163,7 +2166,7 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 
 		put_page(page);
 	}
-	obj->dirty = 0;
+	i915_gem_object_clear_dirty(obj);
 
 	sg_free_table(obj->pages);
 	kfree(obj->pages);
@@ -3321,7 +3324,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	if (write) {
 		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
 		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
-		obj->dirty = 1;
+		i915_gem_object_set_dirty(obj);
 	}
 
 	trace_i915_gem_object_change_domain(obj,
@@ -4789,7 +4792,7 @@ struct drm_i915_gem_object *
 	i915_gem_object_pin_pages(obj);
 	sg = obj->pages;
 	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
-	obj->dirty = 1;		/* Backing store is now out of date */
+	i915_gem_object_set_dirty(obj); /* Backing store is now out of date */
 	i915_gem_object_unpin_pages(obj);
 
 	if (WARN_ON(bytes != size)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 35950ee..aa99bc3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -834,6 +834,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
+		struct i915_vma *from_vma = from->engine[RCS].state;
+
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -841,9 +843,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		i915_vma_move_to_active(from->engine[RCS].state, req, 0);
+		i915_vma_move_to_active(from_vma, req, 0);
+		i915_gem_object_set_dirty(from_vma->obj);
 		/* state is kept alive until the next request */
-		i915_vma_unpin(from->engine[RCS].state);
+		i915_vma_unpin(from_vma);
 		i915_gem_context_put(from);
 	}
 	engine->last_context = i915_gem_context_get(to);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 9432d4c..5ca7ba3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1278,7 +1278,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-	obj->dirty = 1; /* be paranoid  */
+	i915_gem_object_set_dirty(obj); /* be paranoid  */
 
 	/* Add a reference if we're newly entering the active list.
 	 * The order in which we add operations to the retirement queue is
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index be54825..a78abe2 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -674,23 +674,25 @@ struct get_pages_work {
 {
 	struct sgt_iter sgt_iter;
 	struct page *page;
+	bool dirty;
 
 	BUG_ON(obj->userptr.work != NULL);
 	__i915_gem_userptr_set_active(obj, false);
 
-	if (obj->madv != I915_MADV_WILLNEED)
-		obj->dirty = 0;
-
 	i915_gem_gtt_finish_object(obj);
 
+	if (obj->madv != I915_MADV_WILLNEED)
+		i915_gem_object_clear_dirty(obj);
+
+	dirty = i915_gem_object_is_dirty(obj);
 	for_each_sgt_page(page, sgt_iter, obj->pages) {
-		if (obj->dirty)
+		if (dirty)
 			set_page_dirty(page);
 
 		mark_page_accessed(page);
 		put_page(page);
 	}
-	obj->dirty = 0;
+	i915_gem_object_clear_dirty(obj);
 
 	sg_free_table(obj->pages);
 	kfree(obj->pages);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 334f15d..257dde1 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -804,7 +804,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->write_domain = obj->base.write_domain;
 	err->fence_reg = vma->fence ? vma->fence->id : -1;
 	err->tiling = i915_gem_object_get_tiling(obj);
-	err->dirty = obj->dirty;
+	err->dirty = i915_gem_object_is_dirty(obj);
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
 	err->cache_level = obj->cache_level;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 92bfe47..1f48291 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -764,6 +764,8 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
 	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_ring *ring = ce->ring;
+	struct drm_i915_gem_object *ctx_obj;
 	void *vaddr;
 	u32 *lrc_reg_state;
 	int ret;
@@ -778,24 +780,24 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 	if (ret)
 		goto err;
 
-	vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+	ctx_obj = ce->state->obj;
+	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
 		goto unpin_vma;
 	}
 
-	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
-	ret = intel_ring_pin(ce->ring);
+	ret = intel_ring_pin(ring);
 	if (ret)
 		goto unpin_map;
 
-	intel_lr_context_descriptor_update(ctx, engine);
+	i915_gem_object_set_dirty(ctx_obj);
 
-	lrc_reg_state[CTX_RING_BUFFER_START+1] =
-		i915_ggtt_offset(ce->ring->vma);
+	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+	lrc_reg_state[CTX_RING_BUFFER_START+1] = i915_ggtt_offset(ring->vma);
 	ce->lrc_reg_state = lrc_reg_state;
-	ce->state->obj->dirty = true;
+
+	intel_lr_context_descriptor_update(ctx, engine);
 
 	/* Invalidate GuC TLB. */
 	if (i915.enable_guc_submission) {
@@ -1969,7 +1971,7 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
 		return ret;
 	}
-	ctx_obj->dirty = true;
+	i915_gem_object_set_dirty(ctx_obj);
 
 	/* The second page of the context object contains some fields which must
 	 * be set up prior to the first execution. */
@@ -2182,23 +2184,24 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 
 	for_each_engine(engine, dev_priv) {
 		struct intel_context *ce = &ctx->engine[engine->id];
+		struct drm_i915_gem_object *ctx_obj;
 		void *vaddr;
 		uint32_t *reg_state;
 
 		if (!ce->state)
 			continue;
 
-		vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+		ctx_obj = ce->state->obj;
+		vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
 		if (WARN_ON(IS_ERR(vaddr)))
 			continue;
 
 		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
 		reg_state[CTX_RING_HEAD+1] = 0;
 		reg_state[CTX_RING_TAIL+1] = 0;
 
-		ce->state->obj->dirty = true;
-		i915_gem_object_unpin_map(ce->state->obj);
+		i915_gem_object_set_dirty(ctx_obj);
+		i915_gem_object_unpin_map(ctx_obj);
 
 		ce->ring->head = 0;
 		ce->ring->tail = 0;
-- 
1.9.1


* ✗ Fi.CI.BAT: failure for drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()
  2016-09-09 19:48 [PATCH] drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty() Dave Gordon
@ 2016-09-09 20:49 ` Patchwork
  2016-09-12 15:48 ` [PATCH] " Tvrtko Ursulin
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2016-09-09 20:49 UTC (permalink / raw)
  To: Dave Gordon; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()
URL   : https://patchwork.freedesktop.org/series/12262/
State : failure

== Summary ==

Series 12262v1 drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()
http://patchwork.freedesktop.org/api/1.0/series/12262/revisions/1/mbox/

Test kms_cursor_legacy:
        Subgroup basic-cursor-vs-flip-legacy:
                fail       -> PASS       (fi-bsw-n3050)
        Subgroup basic-cursor-vs-flip-varying-size:
                pass       -> FAIL       (fi-ilk-650)
Test kms_pipe_crc_basic:
        Subgroup suspend-read-crc-pipe-c:
                pass       -> INCOMPLETE (fi-hsw-4770k)

fi-bdw-5557u     total:254  pass:239  dwarn:0   dfail:0   fail:0   skip:15 
fi-bsw-n3050     total:254  pass:208  dwarn:0   dfail:0   fail:0   skip:46 
fi-byt-n2820     total:254  pass:212  dwarn:0   dfail:0   fail:1   skip:41 
fi-hsw-4770k     total:218  pass:197  dwarn:0   dfail:0   fail:0   skip:20 
fi-hsw-4770r     total:254  pass:228  dwarn:0   dfail:0   fail:0   skip:26 
fi-ilk-650       total:254  pass:184  dwarn:0   dfail:0   fail:2   skip:68 
fi-ivb-3520m     total:254  pass:223  dwarn:0   dfail:0   fail:0   skip:31 
fi-ivb-3770      total:254  pass:223  dwarn:0   dfail:0   fail:0   skip:31 
fi-skl-6260u     total:254  pass:240  dwarn:0   dfail:0   fail:0   skip:14 
fi-skl-6700k     total:254  pass:225  dwarn:1   dfail:0   fail:0   skip:28 
fi-snb-2520m     total:254  pass:209  dwarn:0   dfail:0   fail:0   skip:45 
fi-snb-2600      total:254  pass:209  dwarn:0   dfail:0   fail:0   skip:45 
fi-skl-6700hq failed to collect. IGT log at Patchwork_2505/fi-skl-6700hq/igt.log

Results at /archive/results/CI_IGT_test/Patchwork_2505/

d0f480a8832b9839bb3dda33ad6615fd8cdf9a44 drm-intel-nightly: 2016y-09m-09d-19h-23m-11s UTC integration manifest
0fe1084 drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()


* Re: [PATCH] drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()
  2016-09-09 19:48 [PATCH] drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty() Dave Gordon
  2016-09-09 20:49 ` ✗ Fi.CI.BAT: failure for " Patchwork
@ 2016-09-12 15:48 ` Tvrtko Ursulin
  2016-09-12 19:44   ` Chris Wilson
  2016-09-14 14:41   ` Dave Gordon
  2016-09-14 15:20 ` ✗ Fi.CI.BAT: failure for drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty() (rev2) Patchwork
  2016-09-15  7:20 ` ✗ Fi.CI.BAT: warning " Patchwork
  3 siblings, 2 replies; 8+ messages in thread
From: Tvrtko Ursulin @ 2016-09-12 15:48 UTC (permalink / raw)
  To: Dave Gordon, intel-gfx


Hi,

On 09/09/16 20:48, Dave Gordon wrote:
> This just hides the existing obj->dirty flag inside a trivial inline
> setter, to discourage non-GEM code from looking too closely. The
> flag is renamed to emphasise that it is private to the GEM memory-
> management code and ensure that no legacy code continues to use it
> directly.
>
> v2:
>    Use Chris Wilson's preferred names for flag-related functions
>
> Inspired-by: http://www.spinics.net/lists/intel-gfx/msg92390.html
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
> ---
>   drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
>   drivers/gpu/drm/i915/i915_drv.h            | 22 +++++++++++++++++++++-
>   drivers/gpu/drm/i915/i915_gem.c            | 25 ++++++++++++++-----------
>   drivers/gpu/drm/i915/i915_gem_context.c    |  7 +++++--
>   drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
>   drivers/gpu/drm/i915/i915_gem_userptr.c    | 12 +++++++-----
>   drivers/gpu/drm/i915/i915_gpu_error.c      |  2 +-
>   drivers/gpu/drm/i915/intel_lrc.c           | 29 ++++++++++++++++-------------
>   8 files changed, 66 insertions(+), 35 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 02b627e..b77fc27 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -160,7 +160,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
>   		   i915_gem_active_get_seqno(&obj->last_write,
>   					     &obj->base.dev->struct_mutex),
>   		   i915_cache_level_str(dev_priv, obj->cache_level),
> -		   obj->dirty ? " dirty" : "",
> +		   i915_gem_object_is_dirty(obj) ? " dirty" : "",
>   		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
>   	if (obj->base.name)
>   		seq_printf(m, " (name: %d)", obj->base.name);
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index f39bede..333e21b 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2209,7 +2209,7 @@ struct drm_i915_gem_object {
>   	 * This is set if the object has been written to since last bound
>   	 * to the GTT
>   	 */
> -	unsigned int dirty:1;
> +	unsigned int __dirty:1;
>
>   	/**
>   	 * Advice: are the backing pages purgeable?
> @@ -3156,6 +3156,26 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
>   	obj->pages_pin_count++;
>   }
>
> +/*
> + * Flag the object content as having changed since the last call to
> + * i915_gem_object_pin_pages() above, so that the new content is not
> + * lost after the next call to i915_gem_object_unpin_pages() below
> + */
> +static inline void i915_gem_object_set_dirty(struct drm_i915_gem_object *obj)
> +{
> +	obj->__dirty = true;
> +}
> +
> +static inline void i915_gem_object_clear_dirty(struct drm_i915_gem_object *obj)
> +{
> +	obj->__dirty = false;
> +}
> +
> +static inline bool i915_gem_object_is_dirty(struct drm_i915_gem_object *obj)
> +{
> +	return obj->__dirty;
> +}
> +
>   static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
>   {
>   	BUG_ON(obj->pages_pin_count == 0);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 2401818..f571a02 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -234,9 +234,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
>   	}
>
>   	if (obj->madv == I915_MADV_DONTNEED)
> -		obj->dirty = 0;
> -
> -	if (obj->dirty) {
> +		i915_gem_object_clear_dirty(obj);
> +	else if (i915_gem_object_is_dirty(obj)) {
>   		struct address_space *mapping = obj->base.filp->f_mapping;
>   		char *vaddr = obj->phys_handle->vaddr;
>   		int i;
> @@ -260,7 +259,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
>   			put_page(page);
>   			vaddr += PAGE_SIZE;
>   		}
> -		obj->dirty = 0;
> +		i915_gem_object_clear_dirty(obj);
>   	}
>
>   	sg_free_table(obj->pages);
> @@ -703,7 +702,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
>   		obj->cache_dirty = true;
>
>   	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
> -	obj->dirty = 1;
> +	i915_gem_object_set_dirty(obj);
>   	/* return with the pages pinned */
>   	return 0;
>
> @@ -1156,7 +1155,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
>   		goto out_unpin;

I wonder why diff got so confused with this one, because this isn't 
i915_gem_obj_prepare_shmem_write any longer.

>
>   	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
> -	obj->dirty = true;
> +	i915_gem_object_set_dirty(obj);
>
>   	user_data = u64_to_user_ptr(args->data_ptr);
>   	offset = args->offset;
> @@ -1327,6 +1326,8 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
>   	offset = args->offset;
>   	remain = args->size;
>
> +	i915_gem_object_set_dirty(obj);
> +

This is in i915_gem_shmem_pwrite by the look of it. It will have dirtied 
the object already via i915_gem_obj_prepare_shmem_write. You added it 
here for some specific reason?

>   	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
>   			 offset >> PAGE_SHIFT) {
>   		struct page *page = sg_page_iter_page(&sg_iter);
> @@ -2133,6 +2134,7 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
>   {
>   	struct sgt_iter sgt_iter;
>   	struct page *page;
> +	bool dirty;
>   	int ret;
>
>   	BUG_ON(obj->madv == __I915_MADV_PURGED);
> @@ -2152,10 +2154,11 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
>   		i915_gem_object_save_bit_17_swizzle(obj);
>
>   	if (obj->madv == I915_MADV_DONTNEED)
> -		obj->dirty = 0;
> +		i915_gem_object_clear_dirty(obj);
>
> +	dirty = i915_gem_object_is_dirty(obj);
>   	for_each_sgt_page(page, sgt_iter, obj->pages) {
> -		if (obj->dirty)
> +		if (dirty)
>   			set_page_dirty(page);
>
>   		if (obj->madv == I915_MADV_WILLNEED)
> @@ -2163,7 +2166,7 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
>
>   		put_page(page);
>   	}
> -	obj->dirty = 0;
> +	i915_gem_object_clear_dirty(obj);
>
>   	sg_free_table(obj->pages);
>   	kfree(obj->pages);
> @@ -3321,7 +3324,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
>   	if (write) {
>   		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
>   		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
> -		obj->dirty = 1;
> +		i915_gem_object_set_dirty(obj);
>   	}
>
>   	trace_i915_gem_object_change_domain(obj,
> @@ -4789,7 +4792,7 @@ struct drm_i915_gem_object *
>   	i915_gem_object_pin_pages(obj);
>   	sg = obj->pages;
>   	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
> -	obj->dirty = 1;		/* Backing store is now out of date */
> +	i915_gem_object_set_dirty(obj); /* Backing store is now out of date */
>   	i915_gem_object_unpin_pages(obj);
>
>   	if (WARN_ON(bytes != size)) {
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 35950ee..aa99bc3 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -834,6 +834,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>   	 * MI_SET_CONTEXT instead of when the next seqno has completed.
>   	 */
>   	if (from != NULL) {
> +		struct i915_vma *from_vma = from->engine[RCS].state;
> +
>   		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
>   		 * whole damn pipeline, we don't need to explicitly mark the
>   		 * object dirty. The only exception is that the context must be
> @@ -841,9 +843,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
>   		 * able to defer doing this until we know the object would be
>   		 * swapped, but there is no way to do that yet.
>   		 */
> -		i915_vma_move_to_active(from->engine[RCS].state, req, 0);
> +		i915_vma_move_to_active(from_vma, req, 0);
> +		i915_gem_object_set_dirty(from_vma->obj);

Commit message should mention this.

>   		/* state is kept alive until the next request */
> -		i915_vma_unpin(from->engine[RCS].state);
> +		i915_vma_unpin(from_vma);
>   		i915_gem_context_put(from);
>   	}
>   	engine->last_context = i915_gem_context_get(to);
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 9432d4c..5ca7ba3 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1278,7 +1278,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
>
>   	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
>
> -	obj->dirty = 1; /* be paranoid  */
> +	i915_gem_object_set_dirty(obj); /* be paranoid  */
>
>   	/* Add a reference if we're newly entering the active list.
>   	 * The order in which we add operations to the retirement queue is
> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
> index be54825..a78abe2 100644
> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
> @@ -674,23 +674,25 @@ struct get_pages_work {
>   {
>   	struct sgt_iter sgt_iter;
>   	struct page *page;
> +	bool dirty;
>
>   	BUG_ON(obj->userptr.work != NULL);
>   	__i915_gem_userptr_set_active(obj, false);
>
> -	if (obj->madv != I915_MADV_WILLNEED)
> -		obj->dirty = 0;
> -
>   	i915_gem_gtt_finish_object(obj);
>
> +	if (obj->madv != I915_MADV_WILLNEED)
> +		i915_gem_object_clear_dirty(obj);
> +

Looks like harmless reordering.

> +	dirty = i915_gem_object_is_dirty(obj);
>   	for_each_sgt_page(page, sgt_iter, obj->pages) {
> -		if (obj->dirty)
> +		if (dirty)
>   			set_page_dirty(page);
>
>   		mark_page_accessed(page);
>   		put_page(page);
>   	}
> -	obj->dirty = 0;
> +	i915_gem_object_clear_dirty(obj);
>
>   	sg_free_table(obj->pages);
>   	kfree(obj->pages);
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 334f15d..257dde1 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -804,7 +804,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
>   	err->write_domain = obj->base.write_domain;
>   	err->fence_reg = vma->fence ? vma->fence->id : -1;
>   	err->tiling = i915_gem_object_get_tiling(obj);
> -	err->dirty = obj->dirty;
> +	err->dirty = i915_gem_object_is_dirty(obj);
>   	err->purgeable = obj->madv != I915_MADV_WILLNEED;
>   	err->userptr = obj->userptr.mm != NULL;
>   	err->cache_level = obj->cache_level;
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 92bfe47..1f48291 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -764,6 +764,8 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
>   				struct intel_engine_cs *engine)
>   {
>   	struct intel_context *ce = &ctx->engine[engine->id];
> +	struct intel_ring *ring = ce->ring;
> +	struct drm_i915_gem_object *ctx_obj;
>   	void *vaddr;
>   	u32 *lrc_reg_state;
>   	int ret;
> @@ -778,24 +780,24 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
>   	if (ret)
>   		goto err;
>
> -	vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
> +	ctx_obj = ce->state->obj;
> +	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
>   	if (IS_ERR(vaddr)) {
>   		ret = PTR_ERR(vaddr);
>   		goto unpin_vma;
>   	}
>
> -	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
> -
> -	ret = intel_ring_pin(ce->ring);
> +	ret = intel_ring_pin(ring);
>   	if (ret)
>   		goto unpin_map;
>
> -	intel_lr_context_descriptor_update(ctx, engine);
> +	i915_gem_object_set_dirty(ctx_obj);
>
> -	lrc_reg_state[CTX_RING_BUFFER_START+1] =
> -		i915_ggtt_offset(ce->ring->vma);
> +	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
> +	lrc_reg_state[CTX_RING_BUFFER_START+1] = i915_ggtt_offset(ring->vma);
>   	ce->lrc_reg_state = lrc_reg_state;
> -	ce->state->obj->dirty = true;
> +
> +	intel_lr_context_descriptor_update(ctx, engine);
>
>   	/* Invalidate GuC TLB. */
>   	if (i915.enable_guc_submission) {
> @@ -1969,7 +1971,7 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
>   		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
>   		return ret;
>   	}
> -	ctx_obj->dirty = true;
> +	i915_gem_object_set_dirty(ctx_obj);
>
>   	/* The second page of the context object contains some fields which must
>   	 * be set up prior to the first execution. */
> @@ -2182,23 +2184,24 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
>
>   	for_each_engine(engine, dev_priv) {
>   		struct intel_context *ce = &ctx->engine[engine->id];
> +		struct drm_i915_gem_object *ctx_obj;
>   		void *vaddr;
>   		uint32_t *reg_state;
>
>   		if (!ce->state)
>   			continue;
>
> -		vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
> +		ctx_obj = ce->state->obj;
> +		vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
>   		if (WARN_ON(IS_ERR(vaddr)))
>   			continue;
>
>   		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
> -
>   		reg_state[CTX_RING_HEAD+1] = 0;
>   		reg_state[CTX_RING_TAIL+1] = 0;
>
> -		ce->state->obj->dirty = true;
> -		i915_gem_object_unpin_map(ce->state->obj);
> +		i915_gem_object_set_dirty(ctx_obj);
> +		i915_gem_object_unpin_map(ctx_obj);
>
>   		ce->ring->head = 0;
>   		ce->ring->tail = 0;
>

Looks good to me. Just a small commit message extension required and 
clarification on added extra dirtying.

Regards,

Tvrtko


* Re: [PATCH] drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()
  2016-09-12 15:48 ` [PATCH] " Tvrtko Ursulin
@ 2016-09-12 19:44   ` Chris Wilson
  2016-09-14 14:41   ` Dave Gordon
  1 sibling, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2016-09-12 19:44 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx

On Mon, Sep 12, 2016 at 04:48:51PM +0100, Tvrtko Ursulin wrote:
> >  		 * able to defer doing this until we know the object would be
> >  		 * swapped, but there is no way to do that yet.
> >  		 */
> >-		i915_vma_move_to_active(from->engine[RCS].state, req, 0);
> >+		i915_vma_move_to_active(from_vma, req, 0);
> >+		i915_gem_object_set_dirty(from_vma->obj);
> 
> Commit message should mention this.

No. This should not be here as we removed it because we always mark the
object as dirty when active on the GPU.

I don't see any improvement in this patch over mine, so what's the
reason for it? Certainly the lack of debug checks is odd.
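
For illustration, one hypothetical form such a check could take (a sketch,
not code from either posted patch, assuming the flag is only ever touched
under struct_mutex) would be a locking assertion in the setter:

	static inline void
	i915_gem_object_set_dirty(struct drm_i915_gem_object *obj)
	{
		/* hypothetical: dirty tracking assumed guarded by struct_mutex */
		lockdep_assert_held(&obj->base.dev->struct_mutex);
		obj->__dirty = true;
	}
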
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

* Re: [PATCH] drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()
  2016-09-12 15:48 ` [PATCH] " Tvrtko Ursulin
  2016-09-12 19:44   ` Chris Wilson
@ 2016-09-14 14:41   ` Dave Gordon
  2016-09-14 14:48     ` [PATCH v3] " Dave Gordon
  1 sibling, 1 reply; 8+ messages in thread
From: Dave Gordon @ 2016-09-14 14:41 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

On 12/09/16 16:48, Tvrtko Ursulin wrote:
>
> Hi,
>
> On 09/09/16 20:48, Dave Gordon wrote:
>> This just hides the existing obj->dirty flag inside a trivial inline
>> setter, to discourage non-GEM code from looking too closely. The
>> flag is renamed to emphasise that it is private to the GEM memory-
>> management code and ensure that no legacy code continues to use it
>> directly.
>>
>> v2:
>>    Use Chris Wilson's preferred names for flag-related functions
>>
>> Inspired-by: http://www.spinics.net/lists/intel-gfx/msg92390.html
>> Cc: Chris Wilson <chris@chris-wilson.co.uk>
>> Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
>> ---
>>   drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
>>   drivers/gpu/drm/i915/i915_drv.h            | 22 +++++++++++++++++++++-
>>   drivers/gpu/drm/i915/i915_gem.c            | 25
>> ++++++++++++++-----------
>>   drivers/gpu/drm/i915/i915_gem_context.c    |  7 +++++--
>>   drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
>>   drivers/gpu/drm/i915/i915_gem_userptr.c    | 12 +++++++-----
>>   drivers/gpu/drm/i915/i915_gpu_error.c      |  2 +-
>>   drivers/gpu/drm/i915/intel_lrc.c           | 29
>> ++++++++++++++++-------------
>>   8 files changed, 66 insertions(+), 35 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c
>> b/drivers/gpu/drm/i915/i915_debugfs.c
>> index 02b627e..b77fc27 100644
>> --- a/drivers/gpu/drm/i915/i915_debugfs.c
>> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
>> @@ -160,7 +160,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct
>> drm_i915_gem_object *obj)
>>              i915_gem_active_get_seqno(&obj->last_write,
>>                            &obj->base.dev->struct_mutex),
>>              i915_cache_level_str(dev_priv, obj->cache_level),
>> -           obj->dirty ? " dirty" : "",
>> +           i915_gem_object_is_dirty(obj) ? " dirty" : "",
>>              obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
>>       if (obj->base.name)
>>           seq_printf(m, " (name: %d)", obj->base.name);
>> diff --git a/drivers/gpu/drm/i915/i915_drv.h
>> b/drivers/gpu/drm/i915/i915_drv.h
>> index f39bede..333e21b 100644
>> --- a/drivers/gpu/drm/i915/i915_drv.h
>> +++ b/drivers/gpu/drm/i915/i915_drv.h
>> @@ -2209,7 +2209,7 @@ struct drm_i915_gem_object {
>>        * This is set if the object has been written to since last bound
>>        * to the GTT
>>        */
>> -    unsigned int dirty:1;
>> +    unsigned int __dirty:1;
>>
>>       /**
>>        * Advice: are the backing pages purgeable?
>> @@ -3156,6 +3156,26 @@ static inline void
>> i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
>>       obj->pages_pin_count++;
>>   }
>>
>> +/*
>> + * Flag the object content as having changed since the last call to
>> + * i915_gem_object_pin_pages() above, so that the new content is not
>> + * lost after the next call to i915_gem_object_unpin_pages() below
>> + */
>> +static inline void i915_gem_object_set_dirty(struct
>> drm_i915_gem_object *obj)
>> +{
>> +    obj->__dirty = true;
>> +}
>> +
>> +static inline void i915_gem_object_clear_dirty(struct
>> drm_i915_gem_object *obj)
>> +{
>> +    obj->__dirty = false;
>> +}
>> +
>> +static inline bool i915_gem_object_is_dirty(struct
>> drm_i915_gem_object *obj)
>> +{
>> +    return obj->__dirty;
>> +}
>> +
>>   static inline void i915_gem_object_unpin_pages(struct
>> drm_i915_gem_object *obj)
>>   {
>>       BUG_ON(obj->pages_pin_count == 0);
>> diff --git a/drivers/gpu/drm/i915/i915_gem.c
>> b/drivers/gpu/drm/i915/i915_gem.c
>> index 2401818..f571a02 100644
>> --- a/drivers/gpu/drm/i915/i915_gem.c
>> +++ b/drivers/gpu/drm/i915/i915_gem.c
>> @@ -234,9 +234,8 @@ int i915_mutex_lock_interruptible(struct
>> drm_device *dev)
>>       }
>>
>>       if (obj->madv == I915_MADV_DONTNEED)
>> -        obj->dirty = 0;
>> -
>> -    if (obj->dirty) {
>> +        i915_gem_object_clear_dirty(obj);
>> +    else if (i915_gem_object_is_dirty(obj)) {
>>           struct address_space *mapping = obj->base.filp->f_mapping;
>>           char *vaddr = obj->phys_handle->vaddr;
>>           int i;
>> @@ -260,7 +259,7 @@ int i915_mutex_lock_interruptible(struct
>> drm_device *dev)
>>               put_page(page);
>>               vaddr += PAGE_SIZE;
>>           }
>> -        obj->dirty = 0;
>> +        i915_gem_object_clear_dirty(obj);
>>       }
>>
>>       sg_free_table(obj->pages);
>> @@ -703,7 +702,7 @@ int i915_gem_obj_prepare_shmem_write(struct
>> drm_i915_gem_object *obj,
>>           obj->cache_dirty = true;
>>
>>       intel_fb_obj_invalidate(obj, ORIGIN_CPU);
>> -    obj->dirty = 1;
>> +    i915_gem_object_set_dirty(obj);
>>       /* return with the pages pinned */
>>       return 0;
>>
>> @@ -1156,7 +1155,7 @@ int i915_gem_obj_prepare_shmem_write(struct
>> drm_i915_gem_object *obj,
>>           goto out_unpin;
>
> I wonder why diff got so confused with this one, because this isn't
> i915_gem_obj_prepare_shmem_write any longer.

It has to do with functions containing labels. A workaround that 
sometimes works is to tell git-diff that it's C++ code rather than C, as 
it then handles labels slightly differently, in a way that usually 
happens to fix the misidentification of which function the code is in.
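
Concretely, that means something like this in .gitattributes (or in
.git/info/attributes), a sketch assuming git's built-in "cpp" diff
driver, which covers both C and C++:

	drivers/gpu/drm/i915/*.c	diff=cpp

git then uses that driver's function-name pattern when picking the
"@@ ... @@ function()" hunk headers, which copes better with labels.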

>>       intel_fb_obj_invalidate(obj, ORIGIN_CPU);
>> -    obj->dirty = true;
>> +    i915_gem_object_set_dirty(obj);
>>
>>       user_data = u64_to_user_ptr(args->data_ptr);
>>       offset = args->offset;
>> @@ -1327,6 +1326,8 @@ int i915_gem_obj_prepare_shmem_write(struct
>> drm_i915_gem_object *obj,
>>       offset = args->offset;
>>       remain = args->size;
>>
>> +    i915_gem_object_set_dirty(obj);
>> +
>
> This is in i915_gem_shmem_pwrite by the look of it. It will have dirtied
> the object already via i915_gem_obj_prepare_shmem_write. You added it
> here for some specific reason?

Nope, I think it must have been left over from an earlier version in 
which not every path to here had already set it. I'll get rid of it.

>>       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
>>                offset >> PAGE_SHIFT) {
>>           struct page *page = sg_page_iter_page(&sg_iter);
>> @@ -2133,6 +2134,7 @@ static void
>> i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
>>   {
>>       struct sgt_iter sgt_iter;
>>       struct page *page;
>> +    bool dirty;
>>       int ret;
>>
>>       BUG_ON(obj->madv == __I915_MADV_PURGED);
>> @@ -2152,10 +2154,11 @@ static void
>> i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
>>           i915_gem_object_save_bit_17_swizzle(obj);
>>
>>       if (obj->madv == I915_MADV_DONTNEED)
>> -        obj->dirty = 0;
>> +        i915_gem_object_clear_dirty(obj);
>>
>> +    dirty = i915_gem_object_is_dirty(obj);
>>       for_each_sgt_page(page, sgt_iter, obj->pages) {
>> -        if (obj->dirty)
>> +        if (dirty)
>>               set_page_dirty(page);
>>
>>           if (obj->madv == I915_MADV_WILLNEED)
>> @@ -2163,7 +2166,7 @@ static void
>> i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
>>
>>           put_page(page);
>>       }
>> -    obj->dirty = 0;
>> +    i915_gem_object_clear_dirty(obj);
>>
>>       sg_free_table(obj->pages);
>>       kfree(obj->pages);
>> @@ -3321,7 +3324,7 @@ static void
>> i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
>>       if (write) {
>>           obj->base.read_domains = I915_GEM_DOMAIN_GTT;
>>           obj->base.write_domain = I915_GEM_DOMAIN_GTT;
>> -        obj->dirty = 1;
>> +        i915_gem_object_set_dirty(obj);
>>       }
>>
>>       trace_i915_gem_object_change_domain(obj,
>> @@ -4789,7 +4792,7 @@ struct drm_i915_gem_object *
>>       i915_gem_object_pin_pages(obj);
>>       sg = obj->pages;
>>       bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data,
>> size);
>> -    obj->dirty = 1;        /* Backing store is now out of date */
>> +    i915_gem_object_set_dirty(obj); /* Backing store is now out of
>> date */
>>       i915_gem_object_unpin_pages(obj);
>>
>>       if (WARN_ON(bytes != size)) {
>> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c
>> b/drivers/gpu/drm/i915/i915_gem_context.c
>> index 35950ee..aa99bc3 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_context.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
>> @@ -834,6 +834,8 @@ static int do_rcs_switch(struct
>> drm_i915_gem_request *req)
>>        * MI_SET_CONTEXT instead of when the next seqno has completed.
>>        */
>>       if (from != NULL) {
>> +        struct i915_vma *from_vma = from->engine[RCS].state;
>> +
>>           /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
>>            * whole damn pipeline, we don't need to explicitly mark the
>>            * object dirty. The only exception is that the context must be
>> @@ -841,9 +843,10 @@ static int do_rcs_switch(struct
>> drm_i915_gem_request *req)
>>            * able to defer doing this until we know the object would be
>>            * swapped, but there is no way to do that yet.
>>            */
>> -        i915_vma_move_to_active(from->engine[RCS].state, req, 0);
>> +        i915_vma_move_to_active(from_vma, req, 0);
>> +        i915_gem_object_set_dirty(from_vma->obj);
>
> Commit message should mention this.

Hmm .. that wasn't supposed to be there; I think that's also left over 
from a previous iteration. *This* patch was supposed to be just (nearly-) 
mechanical transformations.

>>           /* state is kept alive until the next request */
>> -        i915_vma_unpin(from->engine[RCS].state);
>> +        i915_vma_unpin(from_vma);
>>           i915_gem_context_put(from);
>>       }
>>       engine->last_context = i915_gem_context_get(to);
>> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> index 9432d4c..5ca7ba3 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>> @@ -1278,7 +1278,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
>>
>>       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
>>
>> -    obj->dirty = 1; /* be paranoid  */
>> +    i915_gem_object_set_dirty(obj); /* be paranoid  */
>>
>>       /* Add a reference if we're newly entering the active list.
>>        * The order in which we add operations to the retirement queue is
>> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c
>> b/drivers/gpu/drm/i915/i915_gem_userptr.c
>> index be54825..a78abe2 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
>> @@ -674,23 +674,25 @@ struct get_pages_work {
>>   {
>>       struct sgt_iter sgt_iter;
>>       struct page *page;
>> +    bool dirty;
>>
>>       BUG_ON(obj->userptr.work != NULL);
>>       __i915_gem_userptr_set_active(obj, false);
>>
>> -    if (obj->madv != I915_MADV_WILLNEED)
>> -        obj->dirty = 0;
>> -
>>       i915_gem_gtt_finish_object(obj);
>>
>> +    if (obj->madv != I915_MADV_WILLNEED)
>> +        i915_gem_object_clear_dirty(obj);
>> +
>
> Looks like harmless reordering.

Yes, it might help the compiler if we keep all the dirty-related code 
close together. Besides, I don't want to make any assumptions about what 
i915_gem_gtt_finish_object(obj) will do; e.g. it could change obj->madv!

.Dave.

>> +    dirty = i915_gem_object_is_dirty(obj);
>>       for_each_sgt_page(page, sgt_iter, obj->pages) {
>> -        if (obj->dirty)
>> +        if (dirty)
>>               set_page_dirty(page);
>>
>>           mark_page_accessed(page);
>>           put_page(page);
>>       }
>> -    obj->dirty = 0;
>> +    i915_gem_object_clear_dirty(obj);
>>
>>       sg_free_table(obj->pages);
>>       kfree(obj->pages);
>> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c
>> b/drivers/gpu/drm/i915/i915_gpu_error.c
>> index 334f15d..257dde1 100644
>> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
>> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
>> @@ -804,7 +804,7 @@ static void capture_bo(struct
>> drm_i915_error_buffer *err,
>>       err->write_domain = obj->base.write_domain;
>>       err->fence_reg = vma->fence ? vma->fence->id : -1;
>>       err->tiling = i915_gem_object_get_tiling(obj);
>> -    err->dirty = obj->dirty;
>> +    err->dirty = i915_gem_object_is_dirty(obj);
>>       err->purgeable = obj->madv != I915_MADV_WILLNEED;
>>       err->userptr = obj->userptr.mm != NULL;
>>       err->cache_level = obj->cache_level;
>> diff --git a/drivers/gpu/drm/i915/intel_lrc.c
>> b/drivers/gpu/drm/i915/intel_lrc.c
>> index 92bfe47..1f48291 100644
>> --- a/drivers/gpu/drm/i915/intel_lrc.c
>> +++ b/drivers/gpu/drm/i915/intel_lrc.c
>> @@ -764,6 +764,8 @@ static int intel_lr_context_pin(struct
>> i915_gem_context *ctx,
>>                   struct intel_engine_cs *engine)
>>   {
>>       struct intel_context *ce = &ctx->engine[engine->id];
>> +    struct intel_ring *ring = ce->ring;
>> +    struct drm_i915_gem_object *ctx_obj;
>>       void *vaddr;
>>       u32 *lrc_reg_state;
>>       int ret;
>> @@ -778,24 +780,24 @@ static int intel_lr_context_pin(struct
>> i915_gem_context *ctx,
>>       if (ret)
>>           goto err;
>>
>> -    vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
>> +    ctx_obj = ce->state->obj;
>> +    vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
>>       if (IS_ERR(vaddr)) {
>>           ret = PTR_ERR(vaddr);
>>           goto unpin_vma;
>>       }
>>
>> -    lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
>> -
>> -    ret = intel_ring_pin(ce->ring);
>> +    ret = intel_ring_pin(ring);
>>       if (ret)
>>           goto unpin_map;
>>
>> -    intel_lr_context_descriptor_update(ctx, engine);
>> +    i915_gem_object_set_dirty(ctx_obj);
>>
>> -    lrc_reg_state[CTX_RING_BUFFER_START+1] =
>> -        i915_ggtt_offset(ce->ring->vma);
>> +    lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
>> +    lrc_reg_state[CTX_RING_BUFFER_START+1] =
>> i915_ggtt_offset(ring->vma);
>>       ce->lrc_reg_state = lrc_reg_state;
>> -    ce->state->obj->dirty = true;
>> +
>> +    intel_lr_context_descriptor_update(ctx, engine);
>>
>>       /* Invalidate GuC TLB. */
>>       if (i915.enable_guc_submission) {
>> @@ -1969,7 +1971,7 @@ static u32 intel_lr_indirect_ctx_offset(struct
>> intel_engine_cs *engine)
>>           DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
>>           return ret;
>>       }
>> -    ctx_obj->dirty = true;
>> +    i915_gem_object_set_dirty(ctx_obj);
>>
>>       /* The second page of the context object contains some fields
>> which must
>>        * be set up prior to the first execution. */
>> @@ -2182,23 +2184,24 @@ void intel_lr_context_reset(struct
>> drm_i915_private *dev_priv,
>>
>>       for_each_engine(engine, dev_priv) {
>>           struct intel_context *ce = &ctx->engine[engine->id];
>> +        struct drm_i915_gem_object *ctx_obj;
>>           void *vaddr;
>>           uint32_t *reg_state;
>>
>>           if (!ce->state)
>>               continue;
>>
>> -        vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
>> +        ctx_obj = ce->state->obj;
>> +        vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
>>           if (WARN_ON(IS_ERR(vaddr)))
>>               continue;
>>
>>           reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
>> -
>>           reg_state[CTX_RING_HEAD+1] = 0;
>>           reg_state[CTX_RING_TAIL+1] = 0;
>>
>> -        ce->state->obj->dirty = true;
>> -        i915_gem_object_unpin_map(ce->state->obj);
>> +        i915_gem_object_set_dirty(ctx_obj);
>> +        i915_gem_object_unpin_map(ctx_obj);
>>
>>           ce->ring->head = 0;
>>           ce->ring->tail = 0;
>>
>
> Looks good to me. Just a small commit message extension required and
> clarification on added extra dirtying.
>
> Regards,
> Tvrtko


* [PATCH v3] drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()
  2016-09-14 14:41   ` Dave Gordon
@ 2016-09-14 14:48     ` Dave Gordon
  0 siblings, 0 replies; 8+ messages in thread
From: Dave Gordon @ 2016-09-14 14:48 UTC (permalink / raw)
  To: intel-gfx

This just hides the existing obj->dirty flag inside a trivial inline
setter, to discourage non-GEM code from looking too closely. The
flag is renamed to emphasise that it is private to the GEM memory-
management code and ensure that no legacy code continues to use it
directly.

v2:
Use Chris Wilson's preferred names for flag-related functions

v3:
Remove a couple of changes left over from a prototype version

Inspired-by: http://www.spinics.net/lists/intel-gfx/msg92390.html
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
 drivers/gpu/drm/i915/i915_drv.h            | 22 +++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem.c            | 23 ++++++++++++-----------
 drivers/gpu/drm/i915/i915_gem_context.c    |  6 ++++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c    | 12 +++++++-----
 drivers/gpu/drm/i915/i915_gpu_error.c      |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 29 ++++++++++++++++-------------
 8 files changed, 63 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 64702cc..8acf281 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -160,7 +160,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 		   i915_gem_active_get_seqno(&obj->last_write,
 					     &obj->base.dev->struct_mutex),
 		   i915_cache_level_str(dev_priv, obj->cache_level),
-		   obj->dirty ? " dirty" : "",
+		   i915_gem_object_is_dirty(obj) ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1e2dda8..3fed004 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2212,7 +2212,7 @@ struct drm_i915_gem_object {
 	 * This is set if the object has been written to since last bound
 	 * to the GTT
 	 */
-	unsigned int dirty:1;
+	unsigned int __dirty:1;
 
 	/**
 	 * Advice: are the backing pages purgeable?
@@ -3159,6 +3159,26 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 	obj->pages_pin_count++;
 }
 
+/*
+ * Flag the object content as having changed since the last call to
+ * i915_gem_object_pin_pages() above, so that the new content is not
+ * lost after the next call to i915_gem_object_unpin_pages() below
+ */
+static inline void i915_gem_object_set_dirty(struct drm_i915_gem_object *obj)
+{
+	obj->__dirty = true;
+}
+
+static inline void i915_gem_object_clear_dirty(struct drm_i915_gem_object *obj)
+{
+	obj->__dirty = false;
+}
+
+static inline bool i915_gem_object_is_dirty(struct drm_i915_gem_object *obj)
+{
+	return obj->__dirty;
+}
+
 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
 	BUG_ON(obj->pages_pin_count == 0);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c8bd022..08c8f6b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -234,9 +234,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 	}
 
 	if (obj->madv == I915_MADV_DONTNEED)
-		obj->dirty = 0;
-
-	if (obj->dirty) {
+		i915_gem_object_clear_dirty(obj);
+	else if (i915_gem_object_is_dirty(obj)) {
 		struct address_space *mapping = obj->base.filp->f_mapping;
 		char *vaddr = obj->phys_handle->vaddr;
 		int i;
@@ -260,7 +259,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 			put_page(page);
 			vaddr += PAGE_SIZE;
 		}
-		obj->dirty = 0;
+		i915_gem_object_clear_dirty(obj);
 	}
 
 	sg_free_table(obj->pages);
@@ -704,7 +703,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 		obj->cache_dirty = true;
 
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-	obj->dirty = 1;
+	i915_gem_object_set_dirty(obj);
 	/* return with the pages pinned */
 	return 0;
 
@@ -1157,7 +1156,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 		goto out_unpin;
 
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-	obj->dirty = true;
+	i915_gem_object_set_dirty(obj);
 
 	user_data = u64_to_user_ptr(args->data_ptr);
 	offset = args->offset;
@@ -2134,6 +2133,7 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 {
 	struct sgt_iter sgt_iter;
 	struct page *page;
+	bool dirty;
 	int ret;
 
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2153,10 +2153,11 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 		i915_gem_object_save_bit_17_swizzle(obj);
 
 	if (obj->madv == I915_MADV_DONTNEED)
-		obj->dirty = 0;
+		i915_gem_object_clear_dirty(obj);
 
+	dirty = i915_gem_object_is_dirty(obj);
 	for_each_sgt_page(page, sgt_iter, obj->pages) {
-		if (obj->dirty)
+		if (dirty)
 			set_page_dirty(page);
 
 		if (obj->madv == I915_MADV_WILLNEED)
@@ -2164,7 +2165,7 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 
 		put_page(page);
 	}
-	obj->dirty = 0;
+	i915_gem_object_clear_dirty(obj);
 
 	sg_free_table(obj->pages);
 	kfree(obj->pages);
@@ -3265,7 +3266,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	if (write) {
 		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
 		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
-		obj->dirty = 1;
+		i915_gem_object_set_dirty(obj);
 	}
 
 	trace_i915_gem_object_change_domain(obj,
@@ -4743,7 +4744,7 @@ struct drm_i915_gem_object *
 	i915_gem_object_pin_pages(obj);
 	sg = obj->pages;
 	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
-	obj->dirty = 1;		/* Backing store is now out of date */
+	i915_gem_object_set_dirty(obj); /* Backing store is now out of date */
 	i915_gem_object_unpin_pages(obj);
 
 	if (WARN_ON(bytes != size)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index df10f4e9..4b9c9ef 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -818,6 +818,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
+		struct i915_vma *from_vma = from->engine[RCS].state;
+
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -825,9 +827,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		i915_vma_move_to_active(from->engine[RCS].state, req, 0);
+		i915_vma_move_to_active(from_vma, req, 0);
 		/* state is kept alive until the next request */
-		i915_vma_unpin(from->engine[RCS].state);
+		i915_vma_unpin(from_vma);
 		i915_gem_context_put(from);
 	}
 	engine->last_context = i915_gem_context_get(to);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 33c8522..239b430 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1290,7 +1290,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-	obj->dirty = 1; /* be paranoid  */
+	i915_gem_object_set_dirty(obj); /* be paranoid  */
 
 	/* Add a reference if we're newly entering the active list.
 	 * The order in which we add operations to the retirement queue is
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e537930..1707aaf 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -674,23 +674,25 @@ struct get_pages_work {
 {
 	struct sgt_iter sgt_iter;
 	struct page *page;
+	bool dirty;
 
 	BUG_ON(obj->userptr.work != NULL);
 	__i915_gem_userptr_set_active(obj, false);
 
-	if (obj->madv != I915_MADV_WILLNEED)
-		obj->dirty = 0;
-
 	i915_gem_gtt_finish_object(obj);
 
+	if (obj->madv != I915_MADV_WILLNEED)
+		i915_gem_object_clear_dirty(obj);
+
+	dirty = i915_gem_object_is_dirty(obj);
 	for_each_sgt_page(page, sgt_iter, obj->pages) {
-		if (obj->dirty)
+		if (dirty)
 			set_page_dirty(page);
 
 		mark_page_accessed(page);
 		put_page(page);
 	}
-	obj->dirty = 0;
+	i915_gem_object_clear_dirty(obj);
 
 	sg_free_table(obj->pages);
 	kfree(obj->pages);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 334f15d..257dde1 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -804,7 +804,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->write_domain = obj->base.write_domain;
 	err->fence_reg = vma->fence ? vma->fence->id : -1;
 	err->tiling = i915_gem_object_get_tiling(obj);
-	err->dirty = obj->dirty;
+	err->dirty = i915_gem_object_is_dirty(obj);
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
 	err->cache_level = obj->cache_level;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 16d7cdd..cb915af 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -707,6 +707,8 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
 	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_ring *ring = ce->ring;
+	struct drm_i915_gem_object *ctx_obj;
 	void *vaddr;
 	u32 *lrc_reg_state;
 	int ret;
@@ -721,24 +723,24 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 	if (ret)
 		goto err;
 
-	vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+	ctx_obj = ce->state->obj;
+	vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
 		goto unpin_vma;
 	}
 
-	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
-	ret = intel_ring_pin(ce->ring);
+	ret = intel_ring_pin(ring);
 	if (ret)
 		goto unpin_map;
 
-	intel_lr_context_descriptor_update(ctx, engine);
+	i915_gem_object_set_dirty(ctx_obj);
 
-	lrc_reg_state[CTX_RING_BUFFER_START+1] =
-		i915_ggtt_offset(ce->ring->vma);
+	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+	lrc_reg_state[CTX_RING_BUFFER_START+1] = i915_ggtt_offset(ring->vma);
 	ce->lrc_reg_state = lrc_reg_state;
-	ce->state->obj->dirty = true;
+
+	intel_lr_context_descriptor_update(ctx, engine);
 
 	/* Invalidate GuC TLB. */
 	if (i915.enable_guc_submission) {
@@ -1921,7 +1923,7 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
 		return ret;
 	}
-	ctx_obj->dirty = true;
+	i915_gem_object_set_dirty(ctx_obj);
 
 	/* The second page of the context object contains some fields which must
 	 * be set up prior to the first execution. */
@@ -2134,23 +2136,24 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 
 	for_each_engine(engine, dev_priv) {
 		struct intel_context *ce = &ctx->engine[engine->id];
+		struct drm_i915_gem_object *ctx_obj;
 		void *vaddr;
 		uint32_t *reg_state;
 
 		if (!ce->state)
 			continue;
 
-		vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
+		ctx_obj = ce->state->obj;
+		vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
 		if (WARN_ON(IS_ERR(vaddr)))
 			continue;
 
 		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
 		reg_state[CTX_RING_HEAD+1] = 0;
 		reg_state[CTX_RING_TAIL+1] = 0;
 
-		ce->state->obj->dirty = true;
-		i915_gem_object_unpin_map(ce->state->obj);
+		i915_gem_object_set_dirty(ctx_obj);
+		i915_gem_object_unpin_map(ctx_obj);
 
 		ce->ring->head = 0;
 		ce->ring->tail = 0;
-- 
1.9.1
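
In the intel_lrc.c hunks the dirty marking moves from the tail of intel_lr_context_pin() to just after the context object has been pinned and mapped for writing, so the flag is already set before any register state is written into the mapping. A minimal, self-contained sketch of that ordering follows; the toy_* pin/map/unpin helpers are invented stand-ins, not the driver's real API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a pinned, CPU-mapped context object. */
struct toy_ctx_obj {
	bool dirty;
	bool mapped;
	unsigned int reg_state[8];
};

static unsigned int *toy_pin_map(struct toy_ctx_obj *obj)
{
	obj->mapped = true;
	return obj->reg_state;
}

static void toy_unpin_map(struct toy_ctx_obj *obj)
{
	obj->mapped = false;
}

int main(void)
{
	struct toy_ctx_obj ctx = { 0 };
	unsigned int *reg_state;

	/* Pin and map first ... */
	reg_state = toy_pin_map(&ctx);

	/* ... then mark the object dirty before any state is written,
	 * mirroring the reordered intel_lr_context_pin(): if the pages
	 * are later released, the written state is not thrown away. */
	ctx.dirty = true;

	reg_state[1] = 0xcafe;          /* stands in for the ring start */

	toy_unpin_map(&ctx);
	printf("dirty=%d mapped=%d reg[1]=%#x\n",
	       ctx.dirty, ctx.mapped, reg_state[1]);
	return 0;
}
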

* ✗ Fi.CI.BAT: failure for drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty() (rev2)
  2016-09-09 19:48 [PATCH] drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty() Dave Gordon
  2016-09-09 20:49 ` ✗ Fi.CI.BAT: failure for " Patchwork
  2016-09-12 15:48 ` [PATCH] " Tvrtko Ursulin
@ 2016-09-14 15:20 ` Patchwork
  2016-09-15  7:20 ` ✗ Fi.CI.BAT: warning " Patchwork
  3 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2016-09-14 15:20 UTC (permalink / raw)
  To: Dave Gordon; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty() (rev2)
URL   : https://patchwork.freedesktop.org/series/12262/
State : failure

== Summary ==

Series 12262v2 drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()
https://patchwork.freedesktop.org/api/1.0/series/12262/revisions/2/mbox/

Test drv_module_reload_basic:
                dmesg-warn -> PASS       (fi-skl-6770hq)
Test gem_exec_suspend:
        Subgroup basic-s3:
                incomplete -> PASS       (fi-hsw-4770k)
Test kms_busy:
        Subgroup basic-flip-default-a:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup basic-flip-default-b:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup basic-flip-default-c:
                skip       -> PASS       (fi-skl-6770hq)
Test kms_cursor_legacy:
        Subgroup basic-flip-after-cursor-legacy:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup basic-flip-after-cursor-varying-size:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup basic-flip-before-cursor-legacy:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup basic-flip-before-cursor-varying-size:
                skip       -> PASS       (fi-skl-6770hq)
Test kms_flip:
        Subgroup basic-flip-vs-dpms:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup basic-flip-vs-modeset:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup basic-flip-vs-wf_vblank:
                skip       -> DMESG-WARN (fi-skl-6770hq)
        Subgroup basic-plain-flip:
                skip       -> PASS       (fi-skl-6770hq)
Test kms_frontbuffer_tracking:
        Subgroup basic:
                skip       -> FAIL       (fi-skl-6770hq)
Test kms_pipe_crc_basic:
        Subgroup hang-read-crc-pipe-a:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup hang-read-crc-pipe-b:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup hang-read-crc-pipe-c:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup nonblocking-crc-pipe-a:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup nonblocking-crc-pipe-a-frame-sequence:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup nonblocking-crc-pipe-b:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup nonblocking-crc-pipe-b-frame-sequence:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup nonblocking-crc-pipe-c:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup nonblocking-crc-pipe-c-frame-sequence:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup read-crc-pipe-a:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup read-crc-pipe-a-frame-sequence:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup read-crc-pipe-b:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup read-crc-pipe-b-frame-sequence:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup read-crc-pipe-c:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup read-crc-pipe-c-frame-sequence:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup suspend-read-crc-pipe-a:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup suspend-read-crc-pipe-b:
                dmesg-warn -> PASS       (fi-byt-j1900)
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup suspend-read-crc-pipe-c:
                skip       -> PASS       (fi-skl-6770hq)
Test pm_rpm:
        Subgroup basic-pci-d3-state:
                skip       -> PASS       (fi-skl-6770hq)
        Subgroup basic-rte:
                skip       -> PASS       (fi-skl-6770hq)
Test prime_vgem:
        Subgroup basic-fence-flip:
                skip       -> PASS       (fi-skl-6770hq)

fi-bdw-5557u     total:244  pass:229  dwarn:0   dfail:0   fail:0   skip:15 
fi-bsw-n3050     total:244  pass:202  dwarn:0   dfail:0   fail:0   skip:42 
fi-byt-j1900     total:244  pass:211  dwarn:1   dfail:0   fail:1   skip:31 
fi-byt-n2820     total:244  pass:208  dwarn:0   dfail:0   fail:1   skip:35 
fi-hsw-4770k     total:244  pass:226  dwarn:0   dfail:0   fail:0   skip:18 
fi-hsw-4770r     total:244  pass:222  dwarn:0   dfail:0   fail:0   skip:22 
fi-ilk-650       total:244  pass:183  dwarn:0   dfail:0   fail:1   skip:60 
fi-ivb-3520m     total:244  pass:219  dwarn:0   dfail:0   fail:0   skip:25 
fi-ivb-3770      total:244  pass:207  dwarn:0   dfail:0   fail:0   skip:37 
fi-skl-6260u     total:244  pass:230  dwarn:0   dfail:0   fail:0   skip:14 
fi-skl-6700hq    total:244  pass:221  dwarn:0   dfail:0   fail:1   skip:22 
fi-skl-6700k     total:244  pass:219  dwarn:1   dfail:0   fail:0   skip:24 
fi-skl-6770hq    total:244  pass:227  dwarn:2   dfail:0   fail:1   skip:14 
fi-snb-2520m     total:244  pass:208  dwarn:0   dfail:0   fail:0   skip:36 
fi-snb-2600      total:244  pass:207  dwarn:0   dfail:0   fail:0   skip:37 

Results at /archive/results/CI_IGT_test/Patchwork_2534/

9aa8c0cdbc076bcc0486d7a31922a0f77c032fe7 drm-intel-nightly: 2016y-09m-14d-09h-19m-25s UTC integration manifest
4d942c8 drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()

* ✗ Fi.CI.BAT: warning for drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty() (rev2)
  2016-09-09 19:48 [PATCH] drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty() Dave Gordon
                   ` (2 preceding siblings ...)
  2016-09-14 15:20 ` ✗ Fi.CI.BAT: failure for drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty() (rev2) Patchwork
@ 2016-09-15  7:20 ` Patchwork
  3 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2016-09-15  7:20 UTC (permalink / raw)
  To: Dave Gordon; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty() (rev2)
URL   : https://patchwork.freedesktop.org/series/12262/
State : warning

== Summary ==

Series 12262v2 drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()
https://patchwork.freedesktop.org/api/1.0/series/12262/revisions/2/mbox/

Test gem_exec_suspend:
        Subgroup basic-s3:
                incomplete -> PASS       (fi-hsw-4770k)
Test kms_flip:
        Subgroup basic-flip-vs-wf_vblank:
                pass       -> DMESG-WARN (fi-skl-6770hq)
Test kms_pipe_crc_basic:
        Subgroup suspend-read-crc-pipe-b:
                dmesg-warn -> PASS       (fi-byt-j1900)

fi-bdw-5557u     total:244  pass:229  dwarn:0   dfail:0   fail:0   skip:15 
fi-bsw-n3050     total:244  pass:202  dwarn:0   dfail:0   fail:0   skip:42 
fi-byt-j1900     total:244  pass:211  dwarn:1   dfail:0   fail:1   skip:31 
fi-byt-n2820     total:244  pass:208  dwarn:0   dfail:0   fail:1   skip:35 
fi-hsw-4770k     total:244  pass:226  dwarn:0   dfail:0   fail:0   skip:18 
fi-hsw-4770r     total:244  pass:222  dwarn:0   dfail:0   fail:0   skip:22 
fi-ilk-650       total:244  pass:183  dwarn:0   dfail:0   fail:1   skip:60 
fi-ivb-3520m     total:244  pass:219  dwarn:0   dfail:0   fail:0   skip:25 
fi-ivb-3770      total:244  pass:207  dwarn:0   dfail:0   fail:0   skip:37 
fi-skl-6260u     total:244  pass:230  dwarn:0   dfail:0   fail:0   skip:14 
fi-skl-6700hq    total:244  pass:221  dwarn:0   dfail:0   fail:1   skip:22 
fi-skl-6700k     total:244  pass:219  dwarn:1   dfail:0   fail:0   skip:24 
fi-skl-6770hq    total:244  pass:227  dwarn:2   dfail:0   fail:1   skip:14 
fi-snb-2520m     total:244  pass:208  dwarn:0   dfail:0   fail:0   skip:36 
fi-snb-2600      total:244  pass:207  dwarn:0   dfail:0   fail:0   skip:37 

Results at /archive/results/CI_IGT_test/Patchwork_2534/

9aa8c0cdbc076bcc0486d7a31922a0f77c032fe7 drm-intel-nightly: 2016y-09m-14d-09h-19m-25s UTC integration manifest
4d942c8 drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty()

end of thread, other threads:[~2016-09-15  7:20 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-09-09 19:48 [PATCH] drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty() Dave Gordon
2016-09-09 20:49 ` ✗ Fi.CI.BAT: failure for " Patchwork
2016-09-12 15:48 ` [PATCH] " Tvrtko Ursulin
2016-09-12 19:44   ` Chris Wilson
2016-09-14 14:41   ` Dave Gordon
2016-09-14 14:48     ` [PATCH v3] " Dave Gordon
2016-09-14 15:20 ` ✗ Fi.CI.BAT: failure for drm/i915: introduce & use i915_gem_object_{set, clear, is}_dirty() (rev2) Patchwork
2016-09-15  7:20 ` ✗ Fi.CI.BAT: warning " Patchwork
