From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
To: Chris Wilson <chris@chris-wilson.co.uk>, intel-gfx@lists.freedesktop.org
Subject: Re: [PATCH 2/8] drm/i915: Stop retiring along engine
Date: Fri, 14 Jun 2019 17:23:15 +0300
Message-ID: <87muik8cvg.fsf@gaia.fi.intel.com>
In-Reply-To: <20190612093111.11684-3-chris@chris-wilson.co.uk>

Chris Wilson <chris@chris-wilson.co.uk> writes:

> We no longer track the execution order along the engine and so no longer
> need to enforce ordering of retire along the engine.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_request.c | 128 +++++++++++-----------------
>  1 file changed, 52 insertions(+), 76 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 9eff9de7fa10..9c58ae6e4afb 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -183,72 +183,23 @@ static void free_capture_list(struct i915_request *request)
>  	}
>  }
>  
> -static void __retire_engine_request(struct intel_engine_cs *engine,
> -				    struct i915_request *rq)
> -{
> -	GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n",
> -		  __func__, engine->name,
> -		  rq->fence.context, rq->fence.seqno,
> -		  hwsp_seqno(rq));
> -
> -	GEM_BUG_ON(!i915_request_completed(rq));
> -
> -	local_irq_disable();
> -
> -	spin_lock(&engine->timeline.lock);
> -	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
> -	list_del_init(&rq->link);
> -	spin_unlock(&engine->timeline.lock);
> -
> -	spin_lock(&rq->lock);
> -	i915_request_mark_complete(rq);
> -	if (!i915_request_signaled(rq))
> -		dma_fence_signal_locked(&rq->fence);
> -	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
> -		i915_request_cancel_breadcrumb(rq);
> -	if (rq->waitboost) {
> -		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
> -		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
> -	}
> -	spin_unlock(&rq->lock);
> -
> -	local_irq_enable();
> -}
> -
> -static void __retire_engine_upto(struct intel_engine_cs *engine,
> -				 struct i915_request *rq)
> -{
> -	struct i915_request *tmp;
> -
> -	if (list_empty(&rq->link))
> -		return;
> -
> -	do {
> -		tmp = list_first_entry(&engine->timeline.requests,
> -				       typeof(*tmp), link);
> -
> -		GEM_BUG_ON(tmp->engine != engine);
> -		__retire_engine_request(engine, tmp);
> -	} while (tmp != rq);
> -}
> -
> -static void i915_request_retire(struct i915_request *request)
> +static bool i915_request_retire(struct i915_request *rq)
>  {
>  	struct i915_active_request *active, *next;
>  
> -	GEM_TRACE("%s fence %llx:%lld, current %d\n",
> -		  request->engine->name,
> -		  request->fence.context, request->fence.seqno,
> -		  hwsp_seqno(request));
> +	lockdep_assert_held(&rq->i915->drm.struct_mutex);
> +	if (!i915_request_completed(rq))
> +		return false;
>  
> -	lockdep_assert_held(&request->i915->drm.struct_mutex);
> -	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
> -	GEM_BUG_ON(!i915_request_completed(request));
> +	GEM_TRACE("%s fence %llx:%lld, current %d\n",
> +		  rq->engine->name,
> +		  rq->fence.context, rq->fence.seqno,
> +		  hwsp_seqno(rq));
>  
> -	trace_i915_request_retire(request);
> +	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
> +	trace_i915_request_retire(rq);
>  
> -	advance_ring(request);
> -	free_capture_list(request);
> +	advance_ring(rq);
>  
>  	/*
>  	 * Walk through the active list, calling retire on each. This allows
> @@ -260,7 +211,7 @@ static void i915_request_retire(struct i915_request *request)
>  	 * pass along the auxiliary information (to avoid dereferencing
>  	 * the node after the callback).
>  	 */
> -	list_for_each_entry_safe(active, next, &request->active_list, link) {
> +	list_for_each_entry_safe(active, next, &rq->active_list, link) {
>  		/*
>  		 * In microbenchmarks or focusing upon time inside the kernel,
>  		 * we may spend an inordinate amount of time simply handling
> @@ -276,18 +227,39 @@ static void i915_request_retire(struct i915_request *request)
>  		INIT_LIST_HEAD(&active->link);
>  		RCU_INIT_POINTER(active->request, NULL);
>  
> -		active->retire(active, request);
> +		active->retire(active, rq);
> +	}
> +
> +	local_irq_disable();
> +
> +	spin_lock(&rq->engine->timeline.lock);
> +	list_del(&rq->link);
> +	spin_unlock(&rq->engine->timeline.lock);
> +
> +	spin_lock(&rq->lock);
> +	i915_request_mark_complete(rq);
> +	if (!i915_request_signaled(rq))
> +		dma_fence_signal_locked(&rq->fence);
> +	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
> +		i915_request_cancel_breadcrumb(rq);
> +	if (rq->waitboost) {
> +		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
> +		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
>  	}
> +	spin_unlock(&rq->lock);
> +
> +	local_irq_enable();
>  
> -	i915_request_remove_from_client(request);
> +	intel_context_exit(rq->hw_context);
> +	intel_context_unpin(rq->hw_context);
>  
> -	__retire_engine_upto(request->engine, request);
> +	i915_request_remove_from_client(rq);
>  
> -	intel_context_exit(request->hw_context);
> -	intel_context_unpin(request->hw_context);
> +	free_capture_list(rq);
> +	i915_sched_node_fini(&rq->sched);
> +	i915_request_put(rq);
>  
> -	i915_sched_node_fini(&request->sched);
> -	i915_request_put(request);
> +	return true;
>  }
>  
>  void i915_request_retire_upto(struct i915_request *rq)
> @@ -309,9 +281,7 @@ void i915_request_retire_upto(struct i915_request *rq)
>  	do {
>  		tmp = list_first_entry(&ring->request_list,
>  				       typeof(*tmp), ring_link);
> -
> -		i915_request_retire(tmp);
> -	} while (tmp != rq);
> +	} while (i915_request_retire(tmp) && tmp != rq);

The semantics do change a little for this function, but looking at
the call sites it doesn't matter.
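
For my own notes, the old loop versus the new one side by side
(paraphrased from the removed code and the hunk above, not an exact
copy):

	/* old: retire unconditionally up to and including rq */
	do {
		tmp = list_first_entry(&ring->request_list,
				       typeof(*tmp), ring_link);
		i915_request_retire(tmp);
	} while (tmp != rq);

	/* new: stop as soon as the oldest request has not yet
	 * completed, leaving it and everything queued after it
	 * on the list
	 */
	do {
		tmp = list_first_entry(&ring->request_list,
				       typeof(*tmp), ring_link);
	} while (i915_request_retire(tmp) && tmp != rq);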

>  }
>  
>  static void irq_execute_cb(struct irq_work *wrk)
> @@ -600,12 +570,9 @@ static void ring_retire_requests(struct intel_ring *ring)
>  {
>  	struct i915_request *rq, *rn;
>  
> -	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
> -		if (!i915_request_completed(rq))
> +	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
> +		if (!i915_request_retire(rq))
>  			break;
> -
> -		i915_request_retire(rq);
> -	}
>  }
>  
>  static noinline struct i915_request *
> @@ -620,6 +587,15 @@ request_alloc_slow(struct intel_context *ce, gfp_t gfp)
>  	if (!gfpflags_allow_blocking(gfp))
>  		goto out;
>  
> +	/* Move our oldest request to the slab-cache (if not in use!) */
> +	rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
> +	i915_request_retire(rq);

Ok, this is just a kick to push the oldest request back to the slab
cache (if it is idle).
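
As I read it, retiring a completed, otherwise idle request drops its
reference (usually the last one), so the request goes back into
global.slab_requests and the allocation right below has a chance of
reusing that slot. Roughly (my reading, not part of the patch):

	/* give the oldest request back to the slab, if nothing else
	 * still holds a reference to it
	 */
	rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
	i915_request_retire(rq);

	/* ...and hope this retry now finds a free slot */
	rq = kmem_cache_alloc(global.slab_requests,
			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);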

> +
> +	rq = kmem_cache_alloc(global.slab_requests,
> +			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

There is only one call site for this, so you could make it a bit
cleaner by passing just gfp, if you so desire.
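
Something like this is what I had in mind, i.e. let the single caller
pick the flags (just a sketch, not insisting on it):

	rq = kmem_cache_alloc(global.slab_requests, gfp);
	if (rq)
		return rq;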

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> +	if (rq)
> +		return rq;
> +
>  	/* Ratelimit ourselves to prevent oom from malicious clients */
>  	rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
>  	cond_synchronize_rcu(rq->rcustate);
> -- 
> 2.20.1
>