Intel-GFX Archive on lore.kernel.org
 help / color / Atom feed
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
To: Chris Wilson <chris@chris-wilson.co.uk>, intel-gfx@lists.freedesktop.org
Subject: Re: [Intel-gfx] [PATCH 26/57] drm/i915: Move finding the current active request to the scheduler
Date: Thu, 4 Feb 2021 14:30:18 +0000
Message-ID: <79b782ae-dbe5-2d3c-7093-c1f9364eba55@linux.intel.com> (raw)
In-Reply-To: <20210201085715.27435-26-chris@chris-wilson.co.uk>


On 01/02/2021 08:56, Chris Wilson wrote:
> Since finding the currently active request starts by walking the
> scheduler lists under the scheduler lock, move the routine to the
> scheduler.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/gt/intel_engine.h        |  3 -
>   drivers/gpu/drm/i915/gt/intel_engine_cs.c     | 71 ++--------------
>   .../drm/i915/gt/intel_execlists_submission.c  | 83 ++++++++++++++++++-
>   drivers/gpu/drm/i915/i915_gpu_error.c         | 18 ++--
>   drivers/gpu/drm/i915/i915_gpu_error.h         |  4 +-
>   drivers/gpu/drm/i915/i915_request.c           | 71 +---------------
>   drivers/gpu/drm/i915/i915_request.h           |  8 ++
>   drivers/gpu/drm/i915/i915_scheduler.c         | 50 +++++++++++
>   drivers/gpu/drm/i915/i915_scheduler_types.h   |  4 +
>   9 files changed, 162 insertions(+), 150 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
> index 52bba16c62e8..c530839627bb 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine.h
> @@ -230,9 +230,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>   ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
>   				   ktime_t *now);
>   
> -struct i915_request *
> -intel_engine_find_active_request(struct intel_engine_cs *engine);
> -
>   u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
>   
>   void intel_engine_init_active(struct intel_engine_cs *engine,
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> index b5b957283f2c..5751a529b2df 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> @@ -1277,7 +1277,7 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
>   	}
>   }
>   
> -static struct intel_timeline *get_timeline(struct i915_request *rq)
> +static struct intel_timeline *get_timeline(const struct i915_request *rq)
>   {
>   	struct intel_timeline *tl;
>   
> @@ -1505,7 +1505,8 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
>   	}
>   }
>   
> -static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
> +static void
> +print_request_ring(struct drm_printer *m, const struct i915_request *rq)
>   {
>   	void *ring;
>   	int size;
> @@ -1590,7 +1591,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>   {
>   	struct i915_gpu_error * const error = &engine->i915->gpu_error;
>   	struct i915_sched *se = intel_engine_get_scheduler(engine);
> -	struct i915_request *rq;
> +	const struct i915_request *rq;
>   	intel_wakeref_t wakeref;
>   	unsigned long flags;
>   	ktime_t dummy;
> @@ -1631,8 +1632,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>   
>   	drm_printf(m, "\tRequests:\n");
>   
> +	rcu_read_lock();
>   	spin_lock_irqsave(&se->lock, flags);
> -	rq = intel_engine_find_active_request(engine);
> +	rq = se->active_request(se);

Wrap with i915_sched_find_active_request perhaps?

>   	if (rq) {
>   		struct intel_timeline *tl = get_timeline(rq);
>   
> @@ -1664,6 +1666,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>   	}
>   	drm_printf(m, "\tOn hold?: %lu\n", list_count(&se->hold));
>   	spin_unlock_irqrestore(&se->lock, flags);
> +	rcu_read_unlock();
>   
>   	drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
>   	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
> @@ -1712,66 +1715,6 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
>   	return ktime_add(total, start);
>   }
>   
> -static bool match_ring(struct i915_request *rq)
> -{
> -	u32 ring = ENGINE_READ(rq->engine, RING_START);
> -
> -	return ring == i915_ggtt_offset(rq->ring->vma);
> -}
> -
> -struct i915_request *
> -intel_engine_find_active_request(struct intel_engine_cs *engine)
> -{
> -	struct i915_sched *se = intel_engine_get_scheduler(engine);
> -	struct i915_request *request, *active = NULL;
> -
> -	/*
> -	 * We are called by the error capture, reset and to dump engine
> -	 * state at random points in time. In particular, note that neither is
> -	 * crucially ordered with an interrupt. After a hang, the GPU is dead
> -	 * and we assume that no more writes can happen (we waited long enough
> -	 * for all writes that were in transaction to be flushed) - adding an
> -	 * extra delay for a recent interrupt is pointless. Hence, we do
> -	 * not need an engine->irq_seqno_barrier() before the seqno reads.
> -	 * At all other times, we must assume the GPU is still running, but
> -	 * we only care about the snapshot of this moment.
> -	 */
> -	lockdep_assert_held(&se->lock);
> -
> -	rcu_read_lock();
> -	request = execlists_active(&engine->execlists);
> -	if (request) {
> -		struct intel_timeline *tl = request->context->timeline;
> -
> -		list_for_each_entry_from_reverse(request, &tl->requests, link) {
> -			if (__i915_request_is_complete(request))
> -				break;
> -
> -			active = request;
> -		}
> -	}
> -	rcu_read_unlock();
> -	if (active)
> -		return active;
> -
> -	list_for_each_entry(request, &se->requests, sched.link) {
> -		if (__i915_request_is_complete(request))
> -			continue;
> -
> -		if (!__i915_request_has_started(request))
> -			continue;
> -
> -		/* More than one preemptible request may match! */
> -		if (!match_ring(request))
> -			continue;
> -
> -		active = request;
> -		break;
> -	}
> -
> -	return active;
> -}
> -
>   #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
>   #include "mock_engine.c"
>   #include "selftest_engine.c"
> diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> index 33c1a833df20..8b848adb65b7 100644
> --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> @@ -2336,7 +2336,7 @@ static void sanitize_hwsp(struct intel_engine_cs *engine)
>   
>   static void execlists_sanitize(struct intel_engine_cs *engine)
>   {
> -	GEM_BUG_ON(execlists_active(&engine->execlists));
> +	GEM_BUG_ON(*engine->execlists.active);
>   
>   	/*
>   	 * Poison residual state on resume, in case the suspend didn't!
> @@ -2755,6 +2755,85 @@ static void execlists_park(struct intel_engine_cs *engine)
>   	cancel_timer(&engine->execlists.preempt);
>   }
>   
> +static const struct i915_request *
> +execlists_active_request(struct i915_sched *se)
> +{
> +	struct intel_engine_cs *engine =
> +		container_of(se, typeof(*engine), sched);
> +	struct i915_request *rq;
> +
> +	rq = execlists_active(&engine->execlists);
> +	if (rq)
> +		rq = active_request(rq->context->timeline, rq);

Now which active_request() is this? The one local to 
intel_execlists_submission.c, and not the common part extracted to 
i915_scheduler.c? What is the latter used for then?

> +
> +	return rq;
> +}
> +
> +static bool execlists_is_executing(const struct i915_request *rq)
> +{
> +	struct i915_sched *se = i915_request_get_scheduler(rq);
> +	struct intel_engine_execlists *el =
> +		&container_of(se, struct intel_engine_cs, sched)->execlists;
> +	struct i915_request * const *port, *p;
> +	bool inflight = false;
> +
> +	if (!i915_request_is_ready(rq))
> +		return false;
> +
> +	/*
> +	 * Even if we have unwound the request, it may still be on
> +	 * the GPU (preempt-to-busy). If that request is inside an
> +	 * unpreemptible critical section, it will not be removed. Some
> +	 * GPU functions may even be stuck waiting for the paired request
> +	 * (__await_execution) to be submitted and cannot be preempted
> +	 * until the bond is executing.
> +	 *
> +	 * As we know that there are always preemption points between
> +	 * requests, we know that only the currently executing request
> +	 * may be still active even though we have cleared the flag.
> +	 * However, we can't rely on our tracking of ELSP[0] to know
> +	 * which request is currently active and so maybe stuck, as
> +	 * the tracking maybe an event behind. Instead assume that
> +	 * if the context is still inflight, then it is still active
> +	 * even if the active flag has been cleared.
> +	 *
> +	 * To further complicate matters, if there a pending promotion, the HW
> +	 * may either perform a context switch to the second inflight execlists,
> +	 * or it may switch to the pending set of execlists. In the case of the
> +	 * latter, it may send the ACK and we process the event copying the
> +	 * pending[] over top of inflight[], _overwriting_ our *active. Since
> +	 * this implies the HW is arbitrating and not struck in *active, we do
> +	 * not worry about complete accuracy, but we do require no read/write
> +	 * tearing of the pointer [the read of the pointer must be valid, even
> +	 * as the array is being overwritten, for which we require the writes
> +	 * to avoid tearing.]
> +	 *
> +	 * Note that the read of *execlists->active may race with the promotion
> +	 * of execlists->pending[] to execlists->inflight[], overwriting
> +	 * the value at *execlists->active. This is fine. The promotion implies
> +	 * that we received an ACK from the HW, and so the context is not
> +	 * stuck -- if we do not see ourselves in *active, the inflight status
> +	 * is valid. If instead we see ourselves being copied into *active,
> +	 * we are inflight and may signal the callback.
> +	 */
> +	if (!intel_context_inflight(rq->context))
> +		return false;
> +
> +	rcu_read_lock();
> +	for (port = READ_ONCE(el->active);
> +	     (p = READ_ONCE(*port)); /* may race with promotion of pending[] */
> +	     port++) {
> +		if (p->context == rq->context) {
> +			inflight = i915_seqno_passed(p->fence.seqno,
> +						     rq->fence.seqno);
> +			break;
> +		}
> +	}
> +	rcu_read_unlock();
> +
> +	return inflight;
> +}
> +
>   static bool can_preempt(struct intel_engine_cs *engine)
>   {
>   	if (INTEL_GEN(engine->i915) > 8)
> @@ -2890,6 +2969,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
>   	struct intel_uncore *uncore = engine->uncore;
>   	u32 base = engine->mmio_base;
>   
> +	engine->sched.active_request = execlists_active_request;
> +	engine->sched.is_executing = execlists_is_executing;
>   	tasklet_setup(&engine->sched.tasklet, execlists_submission_tasklet);
>   	timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
>   	timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index f2e4f0232b87..0c5adca7994f 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -1262,15 +1262,11 @@ static bool record_context(struct i915_gem_context_coredump *e,
>   	struct i915_gem_context *ctx;
>   	bool simulated;
>   
> -	rcu_read_lock();
> -
>   	ctx = rcu_dereference(rq->context->gem_context);
>   	if (ctx && !kref_get_unless_zero(&ctx->ref))
>   		ctx = NULL;
> -	if (!ctx) {
> -		rcu_read_unlock();
> +	if (!ctx)
>   		return true;
> -	}
>   
>   	if (I915_SELFTEST_ONLY(!ctx->client)) {
>   		strcpy(e->comm, "[kernel]");
> @@ -1279,8 +1275,6 @@ static bool record_context(struct i915_gem_context_coredump *e,
>   		e->pid = pid_nr(i915_drm_client_pid(ctx->client));
>   	}
>   
> -	rcu_read_unlock();
> -
>   	e->sched_attr = ctx->sched;
>   	e->guilty = atomic_read(&ctx->guilty_count);
>   	e->active = atomic_read(&ctx->active_count);
> @@ -1368,12 +1362,14 @@ intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
>   
>   struct intel_engine_capture_vma *
>   intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
> -				  struct i915_request *rq,
> +				  const struct i915_request *rq,
>   				  gfp_t gfp)
>   {
>   	struct intel_engine_capture_vma *vma = NULL;
>   
> +	rcu_read_lock();
>   	ee->simulated |= record_context(&ee->context, rq);
> +	rcu_read_unlock();
>   	if (ee->simulated)
>   		return NULL;
>   
> @@ -1436,19 +1432,21 @@ capture_engine(struct intel_engine_cs *engine,
>   	struct i915_sched *se = intel_engine_get_scheduler(engine);
>   	struct intel_engine_capture_vma *capture = NULL;
>   	struct intel_engine_coredump *ee;
> -	struct i915_request *rq;
> +	const struct i915_request *rq;
>   	unsigned long flags;
>   
>   	ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
>   	if (!ee)
>   		return NULL;
>   
> +	rcu_read_lock();
>   	spin_lock_irqsave(&se->lock, flags);
> -	rq = intel_engine_find_active_request(engine);
> +	rq = se->active_request(se);
>   	if (rq)
>   		capture = intel_engine_coredump_add_request(ee, rq,
>   							    ATOMIC_MAYFAIL);
>   	spin_unlock_irqrestore(&se->lock, flags);
> +	rcu_read_unlock();
>   	if (!capture) {
>   		kfree(ee);
>   		return NULL;
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
> index 1764fd254df3..2d8debabfe28 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.h
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.h
> @@ -235,7 +235,7 @@ intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp);
>   
>   struct intel_engine_capture_vma *
>   intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
> -				  struct i915_request *rq,
> +				  const struct i915_request *rq,
>   				  gfp_t gfp);
>   
>   void intel_engine_coredump_add_vma(struct intel_engine_coredump *ee,
> @@ -299,7 +299,7 @@ intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp)
>   
>   static inline struct intel_engine_capture_vma *
>   intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
> -				  struct i915_request *rq,
> +				  const struct i915_request *rq,
>   				  gfp_t gfp)
>   {
>   	return NULL;
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index c03d3cedf497..792dd0bbea3b 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -349,74 +349,6 @@ void i915_request_retire_upto(struct i915_request *rq)
>   	} while (i915_request_retire(tmp) && tmp != rq);
>   }
>   
> -static struct i915_request * const *
> -__engine_active(struct intel_engine_cs *engine)
> -{
> -	return READ_ONCE(engine->execlists.active);
> -}
> -
> -static bool __request_in_flight(const struct i915_request *signal)
> -{
> -	struct i915_request * const *port, *rq;
> -	bool inflight = false;
> -
> -	if (!i915_request_is_ready(signal))
> -		return false;
> -
> -	/*
> -	 * Even if we have unwound the request, it may still be on
> -	 * the GPU (preempt-to-busy). If that request is inside an
> -	 * unpreemptible critical section, it will not be removed. Some
> -	 * GPU functions may even be stuck waiting for the paired request
> -	 * (__await_execution) to be submitted and cannot be preempted
> -	 * until the bond is executing.
> -	 *
> -	 * As we know that there are always preemption points between
> -	 * requests, we know that only the currently executing request
> -	 * may be still active even though we have cleared the flag.
> -	 * However, we can't rely on our tracking of ELSP[0] to know
> -	 * which request is currently active and so maybe stuck, as
> -	 * the tracking maybe an event behind. Instead assume that
> -	 * if the context is still inflight, then it is still active
> -	 * even if the active flag has been cleared.
> -	 *
> -	 * To further complicate matters, if there a pending promotion, the HW
> -	 * may either perform a context switch to the second inflight execlists,
> -	 * or it may switch to the pending set of execlists. In the case of the
> -	 * latter, it may send the ACK and we process the event copying the
> -	 * pending[] over top of inflight[], _overwriting_ our *active. Since
> -	 * this implies the HW is arbitrating and not struck in *active, we do
> -	 * not worry about complete accuracy, but we do require no read/write
> -	 * tearing of the pointer [the read of the pointer must be valid, even
> -	 * as the array is being overwritten, for which we require the writes
> -	 * to avoid tearing.]
> -	 *
> -	 * Note that the read of *execlists->active may race with the promotion
> -	 * of execlists->pending[] to execlists->inflight[], overwritting
> -	 * the value at *execlists->active. This is fine. The promotion implies
> -	 * that we received an ACK from the HW, and so the context is not
> -	 * stuck -- if we do not see ourselves in *active, the inflight status
> -	 * is valid. If instead we see ourselves being copied into *active,
> -	 * we are inflight and may signal the callback.
> -	 */
> -	if (!intel_context_inflight(signal->context))
> -		return false;
> -
> -	rcu_read_lock();
> -	for (port = __engine_active(signal->engine);
> -	     (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
> -	     port++) {
> -		if (rq->context == signal->context) {
> -			inflight = i915_seqno_passed(rq->fence.seqno,
> -						     signal->fence.seqno);
> -			break;
> -		}
> -	}
> -	rcu_read_unlock();
> -
> -	return inflight;
> -}
> -
>   static int
>   __await_execution(struct i915_request *rq,
>   		  struct i915_request *signal,
> @@ -460,8 +392,7 @@ __await_execution(struct i915_request *rq,
>   	 * the completed/retired request.
>   	 */
>   	if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
> -		if (i915_request_is_active(signal) ||
> -		    __request_in_flight(signal))
> +		if (i915_request_is_executing(signal))
>   			__notify_execute_cb_imm(signal);
>   	}
>   
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index c41582b96b46..8322f308b906 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -629,4 +629,12 @@ static inline bool i915_request_use_scheduler(const struct i915_request *rq)
>   	return intel_engine_has_scheduler(rq->engine);
>   }
>   
> +static inline bool i915_request_is_executing(const struct i915_request *rq)
> +{
> +	if (i915_request_is_active(rq))
> +		return true;
> +
> +	return i915_request_get_scheduler(rq)->is_executing(rq);

Isn't the active flag a superset of the "is executing" check, in which 
case how does this make sense? I could understand:

if !active
	return false;

return ->is_executing

> +}
> +
>   #endif /* I915_REQUEST_H */
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index 620db6430a10..cb27bcb7a1f6 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -91,6 +91,54 @@ static void i915_sched_init_ipi(struct i915_sched_ipi *ipi)
>   	ipi->list = NULL;
>   }
>   
> +static bool match_ring(struct i915_request *rq)
> +{
> +	const struct intel_engine_cs *engine = rq->engine;
> +	const struct intel_ring *ring = rq->ring;
> +
> +	return ENGINE_READ(engine, RING_START) == i915_ggtt_offset(ring->vma);
> +}

Ouchy ouch. I'll look the other way. :)

> +
> +static const struct i915_request *active_request(struct i915_sched *se)
> +{
> +	struct i915_request *request, *active = NULL;
> +
> +	/*
> +	 * We are called by the error capture, reset and to dump engine
> +	 * state at random points in time. In particular, note that neither is
> +	 * crucially ordered with an interrupt. After a hang, the GPU is dead
> +	 * and we assume that no more writes can happen (we waited long enough
> +	 * for all writes that were in transaction to be flushed) - adding an
> +	 * extra delay for a recent interrupt is pointless. Hence, we do
> +	 * not need an engine->irq_seqno_barrier() before the seqno reads.
> +	 * At all other times, we must assume the GPU is still running, but
> +	 * we only care about the snapshot of this moment.
> +	 */
> +	lockdep_assert_held(&se->lock);
> +
> +	list_for_each_entry(request, &se->requests, sched.link) {
> +		if (__i915_request_is_complete(request))
> +			continue;
> +
> +		if (!__i915_request_has_started(request))
> +			continue;
> +
> +		/* More than one preemptible request may match! */
> +		if (!match_ring(request))
> +			continue;
> +
> +		active = request;
> +		break;
> +	}
> +
> +	return active;
> +}
> +
> +static bool not_executing(const struct i915_request *rq)
> +{
> +	return false;
> +}
> +
>   void i915_sched_init(struct i915_sched *se,
>   		     struct device *dev,
>   		     const char *name,
> @@ -112,6 +160,8 @@ void i915_sched_init(struct i915_sched *se,
>   	i915_sched_init_ipi(&se->ipi);
>   
>   	se->submit_request = i915_request_enqueue;
> +	se->active_request = active_request;
> +	se->is_executing = not_executing;
>   
>   	/*
>   	 * Due to an interesting quirk in lockdep's internal debug tracking,
> diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
> index effd035dcb78..9a9b8e0d78ae 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler_types.h
> +++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
> @@ -37,6 +37,10 @@ struct i915_sched {
>   	 */
>   	void		(*submit_request)(struct i915_request *rq);
>   
> +	const struct i915_request *(*active_request)(struct i915_sched *se);
> +
> +	bool (*is_executing)(const struct i915_request *rq);
> +
>   	struct list_head requests; /* active request, on HW */
>   	struct list_head hold; /* ready requests, but on hold */
>   	/**
> 

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

  reply index

Thread overview: 103+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-02-01  8:56 [Intel-gfx] [PATCH 01/57] drm/i915/gt: Restrict the GT clock override to just Icelake Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 02/57] drm/i915/selftests: Exercise relative mmio paths to non-privileged registers Chris Wilson
2021-02-01 14:34   ` Mika Kuoppala
2021-02-01  8:56 ` [Intel-gfx] [PATCH 03/57] drm/i915/selftests: Exercise cross-process context isolation Chris Wilson
2021-02-01 16:37   ` Mika Kuoppala
2021-02-01  8:56 ` [Intel-gfx] [PATCH 04/57] drm/i915: Protect against request freeing during cancellation on wedging Chris Wilson
2021-02-02  9:55   ` Mika Kuoppala
2021-02-01  8:56 ` [Intel-gfx] [PATCH 05/57] drm/i915: Take rcu_read_lock for querying fence's driver/timeline names Chris Wilson
2021-02-02 18:33   ` Mika Kuoppala
2021-02-01  8:56 ` [Intel-gfx] [PATCH 06/57] drm/i915/gt: Always flush the submission queue on checking for idle Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 07/57] drm/i915/gt: Move engine setup out of set_default_submission Chris Wilson
2021-02-02 11:57   ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 08/57] drm/i915/gt: Move submission_method into intel_gt Chris Wilson
2021-02-02 12:03   ` Tvrtko Ursulin
2021-02-02 12:18     ` Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 09/57] drm/i915: Replace engine->schedule() with a known request operation Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 10/57] drm/i915: Restructure priority inheritance Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 11/57] drm/i915/selftests: Measure set-priority duration Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 12/57] drm/i915/selftests: Exercise priority inheritance around an engine loop Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 13/57] drm/i915/selftests: Force a rewind if at first we don't succeed Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 14/57] drm/i915: Improve DFS for priority inheritance Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 15/57] drm/i915: Extract request submission from execlists Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 16/57] drm/i915: Extract request rewinding " Chris Wilson
2021-02-02 13:08   ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 17/57] drm/i915: Extract request suspension from the execlists Chris Wilson
2021-02-02 13:15   ` Tvrtko Ursulin
2021-02-02 13:26     ` Chris Wilson
2021-02-02 13:32       ` Tvrtko Ursulin
2021-02-02 13:27     ` Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 18/57] drm/i915: Extract the ability to defer and rerun a request later Chris Wilson
2021-02-02 13:18   ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 19/57] drm/i915: Fix the iterative dfs for defering requests Chris Wilson
2021-02-02 14:10   ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 20/57] drm/i915: Wrap access to intel_engine.active Chris Wilson
2021-02-04 11:07   ` Tvrtko Ursulin
2021-02-04 11:18     ` Chris Wilson
2021-02-04 11:56       ` Chris Wilson
2021-02-04 12:08         ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 21/57] drm/i915: Move common active lists from engine to i915_scheduler Chris Wilson
2021-02-04 11:12   ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 22/57] drm/i915: Move scheduler queue Chris Wilson
2021-02-04 11:19   ` Tvrtko Ursulin
2021-02-04 11:32     ` Chris Wilson
2021-02-04 11:40     ` Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 23/57] drm/i915: Move tasklet from execlists to sched Chris Wilson
2021-02-04 14:06   ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 24/57] drm/i915/gt: Only kick the scheduler on timeslice/preemption change Chris Wilson
2021-02-04 14:09   ` Tvrtko Ursulin
2021-02-04 14:43     ` Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 25/57] drm/i915: Move submit_request to i915_sched_engine Chris Wilson
2021-02-04 14:13   ` Tvrtko Ursulin
2021-02-04 14:45     ` Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 26/57] drm/i915: Move finding the current active request to the scheduler Chris Wilson
2021-02-04 14:30   ` Tvrtko Ursulin [this message]
2021-02-04 14:59     ` Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 27/57] drm/i915: Show execlists queues when dumping state Chris Wilson
2021-02-04 15:04   ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 28/57] drm/i915: Wrap i915_request_use_semaphores() Chris Wilson
2021-02-04 15:05   ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 29/57] drm/i915: Move scheduler flags Chris Wilson
2021-02-04 15:14   ` Tvrtko Ursulin
2021-02-04 16:05     ` Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 30/57] drm/i915: Move timeslicing flag to scheduler Chris Wilson
2021-02-04 15:18   ` Tvrtko Ursulin
2021-02-04 16:11     ` Chris Wilson
2021-02-05  9:48       ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 31/57] drm/i915/gt: Declare when we enabled timeslicing Chris Wilson
2021-02-04 15:26   ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 32/57] drm/i915: Move needs-breadcrumb flags to scheduler Chris Wilson
2021-02-04 15:28   ` Tvrtko Ursulin
2021-02-04 16:12     ` Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 33/57] drm/i915: Move busywaiting control to the scheduler Chris Wilson
2021-02-04 15:32   ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 34/57] drm/i915: Move preempt-reset flag " Chris Wilson
2021-02-04 15:34   ` Tvrtko Ursulin
2021-02-01  8:56 ` [Intel-gfx] [PATCH 35/57] drm/i915: Replace priolist rbtree with a skiplist Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 36/57] drm/i915: Wrap cmpxchg64 with try_cmpxchg64() helper Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 37/57] drm/i915: Fair low-latency scheduling Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 38/57] drm/i915/gt: Specify a deadline for the heartbeat Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 39/57] drm/i915: Extend the priority boosting for the display with a deadline Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 40/57] drm/i915/gt: Support virtual engine queues Chris Wilson
2021-02-01  8:56 ` [Intel-gfx] [PATCH 41/57] drm/i915: Move saturated workload detection back to the context Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 42/57] drm/i915: Bump default timeslicing quantum to 5ms Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 43/57] drm/i915/gt: Delay taking irqoff for execlists submission Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 44/57] drm/i915/gt: Wrap intel_timeline.has_initial_breadcrumb Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 45/57] drm/i915/gt: Track timeline GGTT offset separately from subpage offset Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 46/57] drm/i915/gt: Add timeline "mode" Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 47/57] drm/i915/gt: Use indices for writing into relative timelines Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 48/57] drm/i915/selftests: Exercise relative timeline modes Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 49/57] drm/i915/gt: Use ppHWSP for unshared non-semaphore related timelines Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 50/57] Restore "drm/i915: drop engine_pin/unpin_breadcrumbs_irq" Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 51/57] drm/i915/gt: Couple tasklet scheduling for all CS interrupts Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 52/57] drm/i915/gt: Support creation of 'internal' rings Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 53/57] drm/i915/gt: Use client timeline address for seqno writes Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 54/57] drm/i915/gt: Infrastructure for ring scheduling Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 55/57] drm/i915/gt: Implement ring scheduler for gen4-7 Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 56/57] drm/i915/gt: Enable ring scheduling for gen5-7 Chris Wilson
2021-02-01  8:57 ` [Intel-gfx] [PATCH 57/57] drm/i915: Support secure dispatch on gen6/gen7 Chris Wilson
2021-02-01 14:13 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/57] drm/i915/gt: Restrict the GT clock override to just Icelake Patchwork
2021-02-01 14:15 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2021-02-01 14:15 ` [Intel-gfx] [PATCH 01/57] " Mika Kuoppala
2021-02-01 14:41 ` [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [01/57] " Patchwork
2021-02-01 19:33 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=79b782ae-dbe5-2d3c-7093-c1f9364eba55@linux.intel.com \
    --to=tvrtko.ursulin@linux.intel.com \
    --cc=chris@chris-wilson.co.uk \
    --cc=intel-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Intel-GFX Archive on lore.kernel.org

Archives are clonable:
	git clone --mirror https://lore.kernel.org/intel-gfx/0 intel-gfx/git/0.git
	git clone --mirror https://lore.kernel.org/intel-gfx/1 intel-gfx/git/1.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 intel-gfx intel-gfx/ https://lore.kernel.org/intel-gfx \
		intel-gfx@lists.freedesktop.org
	public-inbox-index intel-gfx

Example config snippet for mirrors

Newsgroup available over NNTP:
	nntp://nntp.lore.kernel.org/org.freedesktop.lists.intel-gfx


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git