All of lore.kernel.org
 help / color / mirror / Atom feed
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
To: Chris Wilson <chris@chris-wilson.co.uk>, intel-gfx@lists.freedesktop.org
Subject: Re: [PATCH 50/64] drm/i915: Prepare i915_gem_active for annotations
Date: Wed, 13 Jul 2016 16:40:03 +0100	[thread overview]
Message-ID: <578660D3.1090904@linux.intel.com> (raw)
In-Reply-To: <1467880930-23082-50-git-send-email-chris@chris-wilson.co.uk>


On 07/07/16 09:41, Chris Wilson wrote:
> In the future, we will want to add annotations to the i915_gem_active
> struct. The API is thus expanded to hide direct access to the contents
> of i915_gem_active and mediated instead through a number of helpers.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_debugfs.c     |  13 ++--
>   drivers/gpu/drm/i915/i915_gem.c         |  91 +++++++++++++----------
>   drivers/gpu/drm/i915/i915_gem_dmabuf.c  |   4 +-
>   drivers/gpu/drm/i915/i915_gem_fence.c   |  11 ++-
>   drivers/gpu/drm/i915/i915_gem_request.h | 128 +++++++++++++++++++++++++++++++-
>   drivers/gpu/drm/i915/i915_gem_tiling.c  |   2 +-
>   drivers/gpu/drm/i915/i915_gem_userptr.c |   8 +-
>   drivers/gpu/drm/i915/i915_gpu_error.c   |   9 ++-
>   drivers/gpu/drm/i915/intel_display.c    |  15 ++--
>   9 files changed, 208 insertions(+), 73 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index dd832eace487..ae1c640fc1c8 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -155,10 +155,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>   		   obj->base.write_domain);
>   	for_each_engine_id(engine, dev_priv, id)
>   		seq_printf(m, "%x ",
> -			   i915_gem_request_get_seqno(obj->last_read[id].request));
> +			   i915_gem_active_get_seqno(&obj->last_read[id]));
>   	seq_printf(m, "] %x %x%s%s%s",
> -		   i915_gem_request_get_seqno(obj->last_write.request),
> -		   i915_gem_request_get_seqno(obj->last_fence.request),
> +		   i915_gem_active_get_seqno(&obj->last_write),
> +		   i915_gem_active_get_seqno(&obj->last_fence),
>   		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
>   		   obj->dirty ? " dirty" : "",
>   		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
> @@ -195,8 +195,11 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>   		*t = '\0';
>   		seq_printf(m, " (%s mappable)", s);
>   	}
> -	if (obj->last_write.request)
> -		seq_printf(m, " (%s)", obj->last_write.request->engine->name);
> +
> +	engine = i915_gem_active_get_engine(&obj->last_write);
> +	if (engine)
> +		seq_printf(m, " (%s)", engine->name);
> +
>   	if (obj->frontbuffer_bits)
>   		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
>   }
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 5f302faf86e7..9c371e84b1bb 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1349,27 +1349,30 @@ int
>   i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
>   			       bool readonly)
>   {
> +	struct drm_i915_gem_request *request;
>   	struct reservation_object *resv;
>   	int ret, i;
>
>   	if (readonly) {
> -		if (obj->last_write.request) {
> -			ret = i915_wait_request(obj->last_write.request);
> +		request = i915_gem_active_peek(&obj->last_write);

Why not get_request, since you have get_engine? Or will a later patch add 
get_request with different semantics?

> +		if (request) {
> +			ret = i915_wait_request(request);
>   			if (ret)
>   				return ret;
>
> -			i = obj->last_write.request->engine->id;
> -			if (obj->last_read[i].request == obj->last_write.request)
> +			i = request->engine->id;
> +			if (i915_gem_active_peek(&obj->last_read[i]) == request)
>   				i915_gem_object_retire__read(obj, i);
>   			else
>   				i915_gem_object_retire__write(obj);
>   		}
>   	} else {
>   		for (i = 0; i < I915_NUM_ENGINES; i++) {
> -			if (!obj->last_read[i].request)
> +			request = i915_gem_active_peek(&obj->last_read[i]);
> +			if (!request)
>   				continue;
>
> -			ret = i915_wait_request(obj->last_read[i].request);
> +			ret = i915_wait_request(request);
>   			if (ret)
>   				return ret;
>
> @@ -1397,9 +1400,9 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
>   {
>   	int ring = req->engine->id;
>
> -	if (obj->last_read[ring].request == req)
> +	if (i915_gem_active_peek(&obj->last_read[ring]) == req)
>   		i915_gem_object_retire__read(obj, ring);
> -	else if (obj->last_write.request == req)
> +	else if (i915_gem_active_peek(&obj->last_write) == req)
>   		i915_gem_object_retire__write(obj);
>
>   	if (!i915_reset_in_progress(&req->i915->gpu_error))
> @@ -1428,20 +1431,20 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
>   	if (readonly) {
>   		struct drm_i915_gem_request *req;
>
> -		req = obj->last_write.request;
> +		req = i915_gem_active_peek(&obj->last_write);
>   		if (req == NULL)
>   			return 0;
>
> -		requests[n++] = i915_gem_request_get(req);
> +		requests[n++] = req;

It used to take a reference and now it doesn't.

>   	} else {
>   		for (i = 0; i < I915_NUM_ENGINES; i++) {
>   			struct drm_i915_gem_request *req;
>
> -			req = obj->last_read[i].request;
> +			req = i915_gem_active_peek(&obj->last_read[i]);
>   			if (req == NULL)
>   				continue;
>
> -			requests[n++] = i915_gem_request_get(req);
> +			requests[n++] = req;
>   		}
>   	}
>
> @@ -2383,25 +2386,27 @@ void i915_vma_move_to_active(struct i915_vma *vma,
>   static void
>   i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
>   {
> -	GEM_BUG_ON(!obj->last_write.request);
> -	GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write.request->engine)));
> +	GEM_BUG_ON(!__i915_gem_active_is_busy(&obj->last_write));
> +	GEM_BUG_ON(!(obj->active & intel_engine_flag(i915_gem_active_get_engine(&obj->last_write))));
>
> -	i915_gem_request_assign(&obj->last_write.request, NULL);
> +	i915_gem_active_set(&obj->last_write, NULL);

Aha!

>   	intel_fb_obj_flush(obj, true, ORIGIN_CS);
>   }
>
>   static void
>   i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
>   {
> +	struct intel_engine_cs *engine;
>   	struct i915_vma *vma;
>
> -	GEM_BUG_ON(!obj->last_read[ring].request);
> +	GEM_BUG_ON(!__i915_gem_active_is_busy(&obj->last_read[ring]));
>   	GEM_BUG_ON(!(obj->active & (1 << ring)));
>
>   	list_del_init(&obj->engine_list[ring]);
> -	i915_gem_request_assign(&obj->last_read[ring].request, NULL);
> +	i915_gem_active_set(&obj->last_read[ring], NULL);
>
> -	if (obj->last_write.request && obj->last_write.request->engine->id == ring)
> +	engine = i915_gem_active_get_engine(&obj->last_write);
> +	if (engine && engine->id == ring)
>   		i915_gem_object_retire__write(obj);
>
>   	obj->active &= ~(1 << ring);
> @@ -2420,7 +2425,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
>   			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
>   	}
>
> -	i915_gem_request_assign(&obj->last_fence.request, NULL);
> +	i915_gem_active_set(&obj->last_fence, NULL);
>   	i915_gem_object_put(obj);
>   }
>
> @@ -2618,7 +2623,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
>   				       struct drm_i915_gem_object,
>   				       engine_list[engine->id]);
>
> -		if (!list_empty(&obj->last_read[engine->id].request->list))
> +		if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id])->list))
>   			break;
>
>   		i915_gem_object_retire__read(obj, engine->id);
> @@ -2746,7 +2751,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
>   	for (i = 0; i < I915_NUM_ENGINES; i++) {
>   		struct drm_i915_gem_request *req;
>
> -		req = obj->last_read[i].request;
> +		req = i915_gem_active_peek(&obj->last_read[i]);
>   		if (req == NULL)
>   			continue;
>
> @@ -2786,7 +2791,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>   {
>   	struct drm_i915_gem_wait *args = data;
>   	struct drm_i915_gem_object *obj;
> -	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
> +	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
>   	int i, n = 0;
>   	int ret;
>
> @@ -2822,20 +2827,21 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
>   	i915_gem_object_put(obj);
>
>   	for (i = 0; i < I915_NUM_ENGINES; i++) {
> -		if (!obj->last_read[i].request)
> -			continue;
> +		struct drm_i915_gem_request *req;
>
> -		req[n++] = i915_gem_request_get(obj->last_read[i].request);
> +		req = i915_gem_active_get(&obj->last_read[i]);

Oh right, there is a get_request one, this time preserving the 
reference-taking behaviour.

> +		if (req)
> +			requests[n++] = req;
>   	}
>
>   	mutex_unlock(&dev->struct_mutex);
>
>   	for (i = 0; i < n; i++) {
>   		if (ret == 0)
> -			ret = __i915_wait_request(req[i], true,
> +			ret = __i915_wait_request(requests[i], true,
>   						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
>   						  to_rps_client(file));
> -		i915_gem_request_put(req[i]);
> +		i915_gem_request_put(requests[i]);
>   	}
>   	return ret;
>
> @@ -2908,7 +2914,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
>   		     struct drm_i915_gem_request *to)
>   {
>   	const bool readonly = obj->base.pending_write_domain == 0;
> -	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
> +	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
>   	int ret, i, n;
>
>   	if (!obj->active)
> @@ -2916,15 +2922,22 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
>
>   	n = 0;
>   	if (readonly) {
> -		if (obj->last_write.request)
> -			req[n++] = obj->last_write.request;
> +		struct drm_i915_gem_request *req;
> +
> +		req = i915_gem_active_peek(&obj->last_write);
> +		if (req)
> +			requests[n++] = req;
>   	} else {
> -		for (i = 0; i < I915_NUM_ENGINES; i++)
> -			if (obj->last_read[i].request)
> -				req[n++] = obj->last_read[i].request;
> +		for (i = 0; i < I915_NUM_ENGINES; i++) {
> +			struct drm_i915_gem_request *req;
> +
> +			req = i915_gem_active_peek(&obj->last_read[i]);
> +			if (req)
> +				requests[n++] = req;
> +		}
>   	}
>   	for (i = 0; i < n; i++) {
> -		ret = __i915_gem_object_sync(obj, to, req[i]);
> +		ret = __i915_gem_object_sync(obj, to, requests[i]);
>   		if (ret)
>   			return ret;
>   	}
> @@ -4017,17 +4030,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
>
>   	args->busy = 0;
>   	if (obj->active) {
> +		struct drm_i915_gem_request *req;
>   		int i;
>
>   		for (i = 0; i < I915_NUM_ENGINES; i++) {
> -			struct drm_i915_gem_request *req;
> -
> -			req = obj->last_read[i].request;
> +			req = i915_gem_active_peek(&obj->last_read[i]);
>   			if (req)
>   				args->busy |= 1 << (16 + req->engine->exec_id);
>   		}
> -		if (obj->last_write.request)
> -			args->busy |= obj->last_write.request->engine->exec_id;
> +		req = i915_gem_active_peek(&obj->last_write);
> +		if (req)
> +			args->busy |= req->engine->exec_id;
>   	}
>
>   unref:
> diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> index aa767ca28532..38000c59d456 100644
> --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> @@ -235,7 +235,7 @@ static void export_fences(struct drm_i915_gem_object *obj,
>
>   	active = obj->active;
>   	for_each_active(active, idx) {
> -		req = obj->last_read[idx].request;
> +		req = i915_gem_active_peek(&obj->last_read[idx]);
>   		if (!req)
>   			continue;
>
> @@ -243,7 +243,7 @@ static void export_fences(struct drm_i915_gem_object *obj,
>   			reservation_object_add_shared_fence(resv, &req->fence);
>   	}
>
> -	req = obj->last_write.request;
> +	req = i915_gem_active_peek(&obj->last_write);
>   	if (req)
>   		reservation_object_add_excl_fence(resv, &req->fence);
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
> index 9838046801bd..9fdbd66128a6 100644
> --- a/drivers/gpu/drm/i915/i915_gem_fence.c
> +++ b/drivers/gpu/drm/i915/i915_gem_fence.c
> @@ -261,14 +261,13 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
>   static int
>   i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
>   {
> -	if (obj->last_fence.request) {
> -		int ret = i915_wait_request(obj->last_fence.request);
> -		if (ret)
> -			return ret;
> +	int ret;
>
> -		i915_gem_request_assign(&obj->last_fence.request, NULL);
> -	}
> +	ret = i915_gem_active_wait(&obj->last_fence);
> +	if (ret)
> +		return ret;
>
> +	i915_gem_active_set(&obj->last_fence, NULL);
>   	return 0;
>   }
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
> index ff8c54fa955f..46d9b00a28d7 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.h
> +++ b/drivers/gpu/drm/i915/i915_gem_request.h
> @@ -275,14 +275,138 @@ static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
>    * resource including itself.
>    */
>   struct i915_gem_active {
> -	struct drm_i915_gem_request *request;
> +	struct drm_i915_gem_request *__request;
>   };
>
> +/**
> + * i915_gem_active_set - updates the tracker to watch the current request
> + * @active - the active tracker
> + * @request - the request to watch
> + *
> + * i915_gem_active_set() watches the given @request for completion. Whilst
> + * that @request is busy, the @active reports busy. When that @request is
> + * retired, the @active tracker is updated to report idle.
> + */
>   static inline void
>   i915_gem_active_set(struct i915_gem_active *active,
>   		    struct drm_i915_gem_request *request)
>   {
> -	i915_gem_request_assign(&active->request, request);
> +	i915_gem_request_assign(&active->__request, request);
> +}
> +
> +/**
> + * i915_gem_active_peek - report the request being monitored
> + * @active - the active tracker
> + *
> + * i915_gem_active_peek() returns the current request being tracked, or NULL.
> + * It does not obtain a reference on the request for the caller, so the
> + * caller must hold struct_mutex.
> + */
> +static inline struct drm_i915_gem_request *
> +i915_gem_active_peek(const struct i915_gem_active *active)
> +{
> +	return active->__request;
> +}
> +
> +/**
> + * i915_gem_active_get - return a reference to the active request
> + * @active - the active tracker
> + *
> + * i915_gem_active_get() returns a reference to the active request, or NULL
> + * if the active tracker is idle. The caller must hold struct_mutex.
> + */
> +static inline struct drm_i915_gem_request *
> +i915_gem_active_get(const struct i915_gem_active *active)
> +{
> +	struct drm_i915_gem_request *request;
> +
> +	request = i915_gem_active_peek(active);
> +	if (!request || i915_gem_request_completed(request))

The check for request_completed feels like a hack - why is it needed?

> +		return NULL;
> +
> +	return i915_gem_request_get(request);
> +}
> +
> +/**
> + * __i915_gem_active_is_busy - report whether the active tracker is assigned
> + * @active - the active tracker
> + *
> + * __i915_gem_active_is_busy() returns true if the active tracker is currently
> + * assigned to a request. Due to the lazy retiring, that request may be idle
> + * and this may report stale information.
> + */
> +static inline bool
> +__i915_gem_active_is_busy(const struct i915_gem_active *active)
> +{
> +	return i915_gem_active_peek(active);
> +}
> +
> +/**
> + * i915_gem_active_is_idle - report whether the active tracker is idle
> + * @active - the active tracker
> + *
> + * i915_gem_active_is_idle() returns true if the active tracker is currently
> + * unassigned or if the request is complete (but not yet retired). Requires
> + * the caller to hold struct_mutex (but that can be relaxed if desired).
> + */
> +static inline bool
> +i915_gem_active_is_idle(const struct i915_gem_active *active)
> +{
> +	struct drm_i915_gem_request *request;
> +
> +	request = i915_gem_active_peek(active);
> +	if (!request || i915_gem_request_completed(request))
> +		return true;
> +
> +	return false;
> +}
> +
> +/**
> + * i915_gem_active_wait - waits until the request is completed
> + * @active - the active request on which to wait
> + *
> + * i915_gem_active_wait() waits until the request is completed before
> + * returning.
> + */
> +static inline int __must_check
> +i915_gem_active_wait(const struct i915_gem_active *active)
> +{
> +	struct drm_i915_gem_request *request;
> +
> +	request = i915_gem_active_peek(active);
> +	if (!request)
> +		return 0;
> +
> +	return i915_wait_request(request);
> +}
> +
> +/**
> + * i915_gem_active_retire - waits until the request is retired
> + * @active - the active request on which to wait
> + *
> + * Unlike i915_gem_active_eait(), this i915_gem_active_retire() will

s/eait/wait/

> + * make sure the request is retired before returning.
> + */
> +static inline int __must_check
> +i915_gem_active_retire(const struct i915_gem_active *active)
> +{
> +	return i915_gem_active_wait(active);
> +}

But how does it ensure anything different from i915_gem_active_wait when 
it simply calls that very function? Maybe a difference will appear in 
future patches?

> +
> +/* Convenience functions for peeking at state inside active's request whilst
> + * guarded by the struct_mutex.
> + */
> +
> +static inline uint32_t
> +i915_gem_active_get_seqno(const struct i915_gem_active *active)
> +{
> +	return i915_gem_request_get_seqno(i915_gem_active_peek(active));
> +}
> +
> +static inline struct intel_engine_cs *
> +i915_gem_active_get_engine(const struct i915_gem_active *active)
> +{
> +	return i915_gem_request_get_engine(i915_gem_active_peek(active));
>   }
>
>   #define for_each_active(mask, idx) \
> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> index 00d796da65fb..8cef2d6b291a 100644
> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> @@ -242,7 +242,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
>   			}
>
>   			obj->fence_dirty =
> -				obj->last_fence.request ||
> +				!i915_gem_active_is_idle(&obj->last_fence) ||
>   				obj->fence_reg != I915_FENCE_REG_NONE;
>
>   			obj->tiling_mode = args->tiling_mode;
> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
> index 32f50a70ea42..00ab5e9d2eb7 100644
> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
> @@ -74,11 +74,9 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
>   	for (i = 0; i < I915_NUM_ENGINES; i++) {
>   		struct drm_i915_gem_request *req;
>
> -		req = obj->last_read[i].request;
> -		if (req == NULL)
> -			continue;
> -
> -		requests[n++] = i915_gem_request_get(req);
> +		req = i915_gem_active_get(&obj->last_read[i]);
> +		if (req)
> +			requests[n++] = req;
>   	}
>
>   	mutex_unlock(&dev->struct_mutex);
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 5e12b8ee49d2..9e1949f2f4dd 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -744,13 +744,14 @@ static void capture_bo(struct drm_i915_error_buffer *err,
>   		       struct i915_vma *vma)
>   {
>   	struct drm_i915_gem_object *obj = vma->obj;
> +	struct intel_engine_cs *engine;
>   	int i;
>
>   	err->size = obj->base.size;
>   	err->name = obj->base.name;
>   	for (i = 0; i < I915_NUM_ENGINES; i++)
> -		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read[i].request);
> -	err->wseqno = i915_gem_request_get_seqno(obj->last_write.request);
> +		err->rseqno[i] = i915_gem_active_get_seqno(&obj->last_read[i]);
> +	err->wseqno = i915_gem_active_get_seqno(&obj->last_write);
>   	err->gtt_offset = vma->node.start;
>   	err->read_domains = obj->base.read_domains;
>   	err->write_domain = obj->base.write_domain;
> @@ -762,8 +763,10 @@ static void capture_bo(struct drm_i915_error_buffer *err,
>   	err->dirty = obj->dirty;
>   	err->purgeable = obj->madv != I915_MADV_WILLNEED;
>   	err->userptr = obj->userptr.mm != NULL;
> -	err->ring = obj->last_write.request ? obj->last_write.request->engine->id : -1;
>   	err->cache_level = obj->cache_level;
> +
> +	engine = i915_gem_active_get_engine(&obj->last_write);
> +	err->ring = engine ? engine->id : -1;
>   }
>
>   static u32 capture_active_bo(struct drm_i915_error_buffer *err,
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 27ac6db4e26a..a0abe5588d17 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -11465,7 +11465,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
>   	if (resv && !reservation_object_test_signaled_rcu(resv, false))
>   		return true;
>
> -	return engine != i915_gem_request_get_engine(obj->last_write.request);
> +	return engine != i915_gem_active_get_engine(&obj->last_write);
>   }
>
>   static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
> @@ -11768,7 +11768,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
>   	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
>   		engine = &dev_priv->engine[BCS];
>   	} else if (INTEL_INFO(dev)->gen >= 7) {
> -		engine = i915_gem_request_get_engine(obj->last_write.request);
> +		engine = i915_gem_active_get_engine(&obj->last_write);
>   		if (engine == NULL || engine->id != RCS)
>   			engine = &dev_priv->engine[BCS];
>   	} else {
> @@ -11789,9 +11789,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
>   	if (mmio_flip) {
>   		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
>
> -		i915_gem_request_assign(&work->flip_queued_req,
> -					obj->last_write.request);
> -
> +		work->flip_queued_req = i915_gem_active_get(&obj->last_write);
>   		schedule_work(&work->mmio_work);
>   	} else {
>   		request = i915_gem_request_alloc(engine, engine->last_context);
> @@ -14092,11 +14090,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
>   	}
>
>   	if (ret == 0) {
> -		struct intel_plane_state *plane_state =
> -			to_intel_plane_state(new_state);
> -
> -		i915_gem_request_assign(&plane_state->wait_req,
> -					obj->last_write.request);
> +		to_intel_plane_state(new_state)->wait_req =
> +			i915_gem_active_get(&obj->last_write);
>   	}
>
>   	return ret;
>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

  reply	other threads:[~2016-07-13 15:40 UTC|newest]

Thread overview: 93+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2016-07-07  8:41 [PATCH 01/64] drm/i915/breadcrumbs: Queue hangcheck before sleeping Chris Wilson
2016-07-07  8:41 ` [PATCH 02/64] drm/i915: Preserve current RPS frequency across init Chris Wilson
2016-07-07  8:41 ` [PATCH 03/64] drm/i915: Remove superfluous powersave work flushing Chris Wilson
2016-07-07  8:41 ` [PATCH 04/64] drm/i915: Defer enabling rc6 til after we submit the first batch/context Chris Wilson
2016-07-07  8:41 ` [PATCH 05/64] drm/i915: Remove temporary RPM wakeref assert disables Chris Wilson
2016-07-07  8:41 ` [PATCH 06/64] drm: Restore double clflush on the last partial cacheline Chris Wilson
2016-07-12 13:57   ` Daniel Vetter
2016-07-12 13:57     ` Daniel Vetter
2016-07-13  8:07     ` [Intel-gfx] " Mika Kuoppala
2016-07-13  8:07       ` Mika Kuoppala
2016-07-07  8:41 ` [PATCH 07/64] drm/i915: Move GEM request routines to i915_gem_request.c Chris Wilson
2016-07-07  8:41 ` [PATCH 08/64] drm/i915: Retire oldest completed request before allocating next Chris Wilson
2016-07-07  9:41   ` Tvrtko Ursulin
2016-07-07  9:45     ` Chris Wilson
2016-07-07 10:03       ` Tvrtko Ursulin
2016-07-07 10:10         ` Chris Wilson
2016-07-08  9:21   ` Mika Kuoppala
2016-07-08  9:28     ` Chris Wilson
2016-07-07  8:41 ` [PATCH 09/64] drm/i915: Mark all current requests as complete before resetting them Chris Wilson
2016-07-07 16:09   ` Mika Kuoppala
2016-07-07  8:41 ` [PATCH 10/64] drm/i915: Derive GEM requests from dma-fence Chris Wilson
2016-07-07  8:41 ` [PATCH 11/64] drm/i915: Disable waitboosting for fence_wait() Chris Wilson
2016-07-07  8:41 ` [PATCH 12/64] drm/i915: Disable waitboosting for mmioflips/semaphores Chris Wilson
2016-07-07  8:41 ` [PATCH 13/64] drm/i915: Export our request as a dma-buf fence on the reservation object Chris Wilson
2016-07-07  8:41 ` [PATCH 14/64] drm/i915: Wait on external rendering for GEM objects Chris Wilson
2016-07-07  8:41 ` [PATCH 15/64] drm/i915: Mark imported dma-buf objects as being coherent Chris Wilson
2016-07-07  8:41 ` [PATCH 16/64] drm/i915: Rename request reference/unreference to get/put Chris Wilson
2016-07-07  8:41 ` [PATCH 17/64] drm/i915: Rename i915_gem_context_reference/unreference() Chris Wilson
2016-07-07  8:41 ` [PATCH 18/64] drm/i915: Wrap drm_gem_object_lookup in i915_gem_object_lookup Chris Wilson
2016-07-07  8:41 ` [PATCH 19/64] drm/i915: Wrap drm_gem_object_reference in i915_gem_object_get Chris Wilson
2016-07-07  8:41 ` [PATCH 20/64] drm/i915: Rename drm_gem_object_unreference in preparation for lockless free Chris Wilson
2016-07-07  8:41 ` [PATCH 21/64] drm/i915: Rename drm_gem_object_unreference_unlocked " Chris Wilson
2016-07-07  8:41 ` [PATCH 22/64] drm/i915: Treat ringbuffer writes as write to normal memory Chris Wilson
2016-07-07  8:41 ` [PATCH 23/64] drm/i915: Rename ring->virtual_start as ring->vaddr Chris Wilson
2016-07-07  8:41 ` [PATCH 24/64] drm/i915: Convert i915_semaphores_is_enabled over to early sanitize Chris Wilson
2016-07-07  8:41 ` [PATCH 25/64] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Chris Wilson
2016-07-07  8:41 ` [PATCH 26/64] drm/i915: Rename request->ringbuf to request->ring Chris Wilson
2016-07-07  8:41 ` [PATCH 27/64] drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs Chris Wilson
2016-07-07  8:41 ` [PATCH 28/64] drm/i915: Rename intel_context[engine].ringbuf Chris Wilson
2016-07-07  8:41 ` [PATCH 29/64] drm/i915: Rename struct intel_ringbuffer to struct intel_ring Chris Wilson
2016-07-07  8:41 ` [PATCH 30/64] drm/i915: Rename residual ringbuf parameters Chris Wilson
2016-07-07  8:41 ` [PATCH 31/64] drm/i915: Rename intel_pin_and_map_ring() Chris Wilson
2016-07-07  8:41 ` [PATCH 32/64] drm/i915: Remove obsolete engine->gpu_caches_dirty Chris Wilson
2016-07-07  8:41 ` [PATCH 33/64] drm/i915: Simplify request_alloc by returning the allocated request Chris Wilson
2016-07-07  8:41 ` [PATCH 34/64] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START Chris Wilson
2016-07-07  8:41 ` [PATCH 35/64] drm/i915: Convert engine->write_tail to operate on a request Chris Wilson
2016-07-07  8:41 ` [PATCH 36/64] drm/i915: Unify request submission Chris Wilson
2016-07-07  8:41 ` [PATCH 37/64] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal() Chris Wilson
2016-07-07  8:41 ` [PATCH 38/64] drm/i915: Reuse legacy breadcrumbs + tail emission Chris Wilson
2016-07-07  8:41 ` [PATCH 39/64] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores Chris Wilson
2016-07-07  8:41 ` [PATCH 40/64] drm/i915: Remove duplicate golden render state init from execlists Chris Wilson
2016-07-07  8:41 ` [PATCH 41/64] drm/i915: Unify legacy/execlists submit_execbuf callbacks Chris Wilson
2016-07-07  8:41 ` [PATCH 42/64] drm/i915: Simplify calling engine->sync_to Chris Wilson
2016-07-07  8:41 ` [PATCH 43/64] drm/i915: Amalgamate GGTT/ppGTT vma debug list walkers Chris Wilson
2016-07-07  8:41 ` [PATCH 44/64] drm/i915: Split early global GTT initialisation Chris Wilson
2016-07-07  8:41 ` [PATCH 45/64] drm/i915: Store owning file on the i915_address_space Chris Wilson
2016-07-07  8:41 ` [PATCH 46/64] drm/i915: Count how many VMA are bound for an object Chris Wilson
2016-07-12 14:30   ` Tvrtko Ursulin
2016-07-12 14:38     ` Chris Wilson
2016-07-12 15:12       ` Tvrtko Ursulin
2016-07-12 16:08         ` Chris Wilson
2016-07-07  8:41 ` [PATCH 47/64] drm/i915: Be more careful when unbinding vma Chris Wilson
2016-07-12 15:04   ` Tvrtko Ursulin
2016-07-12 16:42     ` Chris Wilson
2016-07-13  8:53       ` Tvrtko Ursulin
2016-07-07  8:41 ` [PATCH 48/64] drm/i915: Kill drop_pages() Chris Wilson
2016-07-12 15:14   ` Tvrtko Ursulin
2016-07-07  8:41 ` [PATCH 49/64] drm/i915: Introduce i915_gem_active for request tracking Chris Wilson
2016-07-12 16:05   ` Tvrtko Ursulin
2016-07-12 16:30     ` Chris Wilson
2016-07-13  8:54       ` Tvrtko Ursulin
2016-07-07  8:41 ` [PATCH 50/64] drm/i915: Prepare i915_gem_active for annotations Chris Wilson
2016-07-13 15:40   ` Tvrtko Ursulin [this message]
2016-07-13 15:58     ` Chris Wilson
2016-07-14  9:32       ` Tvrtko Ursulin
2016-07-14 10:04         ` Chris Wilson
2016-07-07  8:41 ` [PATCH 51/64] drm/i915: Mark up i915_gem_active for locking annotation Chris Wilson
2016-07-07  8:41 ` [PATCH 52/64] drm/i915: Refactor blocking waits Chris Wilson
2016-07-07  8:41 ` [PATCH 53/64] drm/i915: Rename request->list to link for consistency Chris Wilson
2016-07-07  8:42 ` [PATCH 54/64] drm/i915: Remove obsolete i915_gem_object_flush_active() Chris Wilson
2016-07-07  8:42 ` [PATCH 55/64] drm/i915: Refactor activity tracking for requests Chris Wilson
2016-07-07  8:42 ` [PATCH 56/64] drm/i915: Convert intel_overlay to request tracking Chris Wilson
2016-07-07  8:42 ` [PATCH 57/64] drm/i915: Move the special case wait-request handling to its one caller Chris Wilson
2016-07-07  8:42 ` [PATCH 58/64] drm/i915: Double check activity before relocations Chris Wilson
2016-07-07  8:42 ` [PATCH 59/64] drm/i915: Move request list retirement to i915_gem_request.c Chris Wilson
2016-07-07  8:42 ` [PATCH 60/64] drm/i915: i915_vma_move_to_active prep patch Chris Wilson
2016-07-07  8:42 ` [PATCH 61/64] drm/i915: Track active vma requests Chris Wilson
2016-07-07  8:42 ` [PATCH 62/64] drm/i915: Release vma when the handle is closed Chris Wilson
2016-07-07  8:42 ` [PATCH 63/64] drm/i915: Mark the context and address space as closed Chris Wilson
2016-07-07  8:42 ` [PATCH 64/64] Revert "drm/i915: Clean up associated VMAs on context destruction" Chris Wilson
2016-07-07  9:12 ` ✗ Ro.CI.BAT: warning for series starting with [01/64] drm/i915/breadcrumbs: Queue hangcheck before sleeping Patchwork
2016-07-07  9:28   ` Chris Wilson
2016-07-08  8:44     ` Chris Wilson

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=578660D3.1090904@linux.intel.com \
    --to=tvrtko.ursulin@linux.intel.com \
    --cc=chris@chris-wilson.co.uk \
    --cc=intel-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.