From: Dave Gordon <david.s.gordon@intel.com>
To: yu.dai@intel.com, intel-gfx@lists.freedesktop.org
Subject: Re: [PATCH v3] drm/i915/guc: Move GuC wq_check_space to alloc_request_extras
Date: Tue, 15 Dec 2015 15:45:38 +0000	[thread overview]
Message-ID: <567035A2.9000200@intel.com> (raw)
In-Reply-To: <1449796655-21384-1-git-send-email-yu.dai@intel.com>

On 11/12/15 01:17, yu.dai@intel.com wrote:
> From: Alex Dai <yu.dai@intel.com>
>
> Split GuC work queue space checking from submission and move it to
> ring_alloc_request_extras. The reason is that a failure in the later
> i915_add_request() call won't be handled. If a timeout happens, the
> driver can return early in order to handle the error.
>
> v1: Move wq_reserve_space to ring_reserve_space
> v2: Move wq_reserve_space to alloc_request_extras (Chris Wilson)
> v3: The work queue head pointer is cached by driver now. So we can
>      quickly return if space is available.
>      s/reserve/check/g (Dave Gordon)
>
> Signed-off-by: Alex Dai <yu.dai@intel.com>
> ---
>   drivers/gpu/drm/i915/i915_guc_submission.c | 34 ++++++++++++++++++------------
>   drivers/gpu/drm/i915/intel_guc.h           |  2 ++
>   drivers/gpu/drm/i915/intel_lrc.c           |  6 ++++++
>   3 files changed, 28 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index 226e9c0..cb8e1f71 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -207,6 +207,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
>   	/* Update the tail so it is visible to GuC */
>   	desc->tail = gc->wq_tail;
>
> +	/* Cache the head where GuC is processing */
> +	gc->wq_head = desc->head;
> +

This could be at the end of this function, thus:

		/* update the cookie to newly read cookie from GuC */
		db_cmp.cookie = db_ret.cookie;
		db_exc.cookie = db_ret.cookie + 1;
		if (db_exc.cookie == 0)
			db_exc.cookie = 1;
	}

+	/* Finally, update the cached copy of the GuC's WQ head */
+	gc->wq_head = desc->head;
+
	kunmap_atomic(base);
	return ret;
}

which might increase the chance that we will get an updated value and 
thus avoid having to spinwait on the next check().
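
To make the cached-head fast path concrete, here's a minimal user-space
sketch (not driver code: wq_state, read_shared_head(), WQ_SIZE and
WQ_ITEM are made-up names; the CIRC_* macros are written to match the
kernel's include/linux/circ_buf.h). The point is that in the common
case we never touch the shared process descriptor at all:

#include <stdint.h>
#include <stdio.h>

/* Same definitions as the kernel's CIRC_CNT/CIRC_SPACE; size must be a
 * power of two. */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define WQ_SIZE   4096u                    /* hypothetical WQ size (bytes) */
#define WQ_ITEM   (4u * sizeof(uint32_t))  /* one 4-DW work queue item     */

struct wq_state {
	uint32_t tail;        /* driver-owned producer index        */
	uint32_t cached_head; /* last head value read from the GuC  */
};

/* Stand-in for reading desc->head out of the shared process descriptor. */
static uint32_t read_shared_head(void)
{
	return 0;
}

/* Fast path: trust the cached head; only re-read (and, in the driver,
 * spin-wait) when the cached value makes the queue look full. */
static int wq_check_space(struct wq_state *wq)
{
	if (CIRC_SPACE(wq->tail, wq->cached_head, WQ_SIZE) >= WQ_ITEM)
		return 0;

	wq->cached_head = read_shared_head();    /* slow path: refresh */
	if (CIRC_SPACE(wq->tail, wq->cached_head, WQ_SIZE) >= WQ_ITEM)
		return 0;

	return -1;    /* the driver would retry and eventually -ETIMEDOUT */
}

int main(void)
{
	struct wq_state wq = { .tail = 0, .cached_head = 0 };

	printf("space available: %s\n", wq_check_space(&wq) ? "no" : "yes");
	return 0;
}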

>   	/* current cookie */
>   	db_cmp.db_status = GUC_DOORBELL_ENABLED;
>   	db_cmp.cookie = gc->cookie;
> @@ -472,28 +475,30 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
>   			     sizeof(desc) * client->ctx_index);
>   }
>
> -/* Get valid workqueue item and return it back to offset */
> -static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
> +int i915_guc_wq_check_space(struct i915_guc_client *gc)
>   {
>   	struct guc_process_desc *desc;
>   	void *base;
>   	u32 size = sizeof(struct guc_wq_item);
>   	int ret = -ETIMEDOUT, timeout_counter = 200;
>
> +	if (!gc)
> +		return 0;
> +
> +	/* Quickly return if wq space is available since last time we cache the
> +	 * head position. */
> +	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
> +		return 0;
> +
>   	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
>   	desc = base + gc->proc_desc_offset;
>
>   	while (timeout_counter-- > 0) {
> -		if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
> -			*offset = gc->wq_tail;
> +		gc->wq_head = desc->head;
>
> -			/* advance the tail for next workqueue item */
> -			gc->wq_tail += size;
> -			gc->wq_tail &= gc->wq_size - 1;
> -
> -			/* this will break the loop */
> -			timeout_counter = 0;
> +		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
>   			ret = 0;
> +			break;
>   		}
>
>   		if (timeout_counter)
> @@ -512,11 +517,8 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
>   	struct guc_wq_item *wqi;
>   	void *base;
>   	u32 tail, wq_len, wq_off = 0;
> -	int ret;
>
> -	ret = guc_get_workqueue_space(gc, &wq_off);
> -	if (ret)
> -		return ret;
> +	wq_off = gc->wq_tail;

Conversely, I'd prefer the tail updates from below to be here instead, 
along with the check that I mentioned last time:

+	u32 space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+	if (WARN_ON(space < sizeof(struct guc_wq_item)))
+		return -ENOSPC;		/* shouldn't happen	*/
+
+	/* postincrement WQ tail for next time */
+	wq_off = gc->wq_tail;
+	gc->wq_tail += sizeof(struct guc_wq_item);
+	gc->wq_tail &= gc->wq_size - 1;

>
>   	/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
>   	 * should not have the case where structure wqi is across page, neither
> @@ -551,6 +553,10 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
>
>   	kunmap_atomic(base);
>
> +	/* advance the tail for next workqueue item */
> +	gc->wq_tail += sizeof(struct guc_wq_item);
> +	gc->wq_tail &= gc->wq_size - 1;

(moved to above)

> +
>   	return 0;
>   }
>
> diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
> index 0e048bf..5cf555d 100644
> --- a/drivers/gpu/drm/i915/intel_guc.h
> +++ b/drivers/gpu/drm/i915/intel_guc.h
> @@ -43,6 +43,7 @@ struct i915_guc_client {
>   	uint32_t wq_offset;
>   	uint32_t wq_size;
>   	uint32_t wq_tail;
> +	uint32_t wq_head;
>
>   	/* GuC submission statistics & status */
>   	uint64_t submissions[I915_NUM_RINGS];
> @@ -123,5 +124,6 @@ int i915_guc_submit(struct i915_guc_client *client,
>   		    struct drm_i915_gem_request *rq);
>   void i915_guc_submission_disable(struct drm_device *dev);
>   void i915_guc_submission_fini(struct drm_device *dev);
> +int i915_guc_wq_check_space(struct i915_guc_client *client);
>
>   #endif
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index f96fb51..9605ce9 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -667,6 +667,12 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
>   			return ret;
>   	}
>
> +	/* Reserve GuC WQ space here (one request needs one WQ item) because
> +	 * the later i915_add_request() call can't fail. */
> +	ret = i915_guc_wq_check_space(request->i915->guc.execbuf_client);
> +	if (ret)
> +		return ret;
> +
>   	return 0;
>   }

It looks odd to call a GuC-only function regardless of whether we're 
using the GuC, so:

+	if (i915.enable_guc_submission) {
+		/*
+		 * Check that the GuC has space for the request before
+		 * going any further, as the i915_add_request() call
+		 * later on mustn't fail ...
+		 */
+		struct intel_guc *guc = &request->i915->guc;
+
+		ret = i915_guc_wq_check_space(guc->execbuf_client);
+		if (ret)
+			return ret;
+	}
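
For what it's worth, the underlying pattern the patch is after -- check
for WQ space at a point where an error can still be propagated, and
consume the space at a point that must not fail -- can be shown with a
trivial stand-alone sketch. The names below are invented for
illustration and don't correspond to driver functions:

#include <errno.h>
#include <stdio.h>

static unsigned int wq_slots = 1;	/* pretend one WQ slot is free */

/* Runs at request-allocation time (the alloc_request_extras stage),
 * where an error can still be returned to the caller. */
static int reserve_wq_slot(void)
{
	return wq_slots ? 0 : -EAGAIN;
}

/* Runs at submission time (the add_request stage), which must not
 * fail: by now the space check has already passed. */
static void commit_wq_item(void)
{
	wq_slots--;
}

int main(void)
{
	int ret = reserve_wq_slot();
	if (ret) {
		fprintf(stderr, "no WQ space, failing early: %d\n", ret);
		return 1;
	}

	commit_wq_item();
	printf("request submitted, slots left: %u\n", wq_slots);
	return 0;
}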

.Dave.