* [PATCH] drm/i915: Pack params to engine->schedule() into a struct
@ 2018-04-11 17:35 Chris Wilson
  2018-04-11 18:23 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
                   ` (6 more replies)
  0 siblings, 7 replies; 12+ messages in thread
From: Chris Wilson @ 2018-04-11 17:35 UTC
  To: intel-gfx

Today we only want to pass along the priority to engine->schedule(), but
in the future we want much more control over the various aspects of the
GPU during a context's execution, for example controlling the allowed
frequency. As we will need an ever-growing number of parameters for
scheduling, move them into a struct for convenience.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
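The net effect on the interface, as a quick sketch (see the hunks below
for the real thing):

  /* before */
  void (*schedule)(struct i915_request *request, int priority);

  /* after */
  void (*schedule)(struct i915_request *request,
		   const struct i915_sched_attr *attr);

For now i915_sched_attr carries only the priority, but it gives us a
single place to grow, e.g. a hypothetical frequency cap later on.
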
 drivers/gpu/drm/i915/gvt/scheduler.c          |  2 +-
 drivers/gpu/drm/i915/i915_drv.h               |  3 +-
 drivers/gpu/drm/i915/i915_gem.c               | 18 +++++----
 drivers/gpu/drm/i915/i915_gem_context.c       |  8 ++--
 drivers/gpu/drm/i915/i915_gem_context.h       | 13 +------
 drivers/gpu/drm/i915/i915_gpu_error.c         |  8 ++--
 drivers/gpu/drm/i915/i915_gpu_error.h         |  5 ++-
 drivers/gpu/drm/i915/i915_request.c           |  4 +-
 drivers/gpu/drm/i915/i915_request.h           | 11 +-----
 drivers/gpu/drm/i915/i915_scheduler.h         | 39 +++++++++++++++++++
 drivers/gpu/drm/i915/intel_display.c          |  4 +-
 drivers/gpu/drm/i915/intel_engine_cs.c        | 14 +++++--
 drivers/gpu/drm/i915/intel_guc_submission.c   |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c              | 20 +++++-----
 drivers/gpu/drm/i915/intel_ringbuffer.h       |  4 +-
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |  4 +-
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |  8 ++--
 17 files changed, 104 insertions(+), 63 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_scheduler.h

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 638abe84857c..f3d21849b0cb 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1135,7 +1135,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		return PTR_ERR(s->shadow_ctx);
 
 	if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
-		s->shadow_ctx->priority = INT_MAX;
+		s->shadow_ctx->sched.priority = INT_MAX;
 
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 649c0f2f3bae..fad860f54386 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -75,6 +75,7 @@
 #include "i915_gem_timeline.h"
 #include "i915_gpu_error.h"
 #include "i915_request.h"
+#include "i915_scheduler.h"
 #include "i915_vma.h"
 
 #include "intel_gvt.h"
@@ -3159,7 +3160,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 struct intel_rps_client *rps);
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
-				  int priority);
+				  const struct i915_sched_attr *attr);
 #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
 
 int __must_check
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 60cf7cfc24ee..61e6be84362f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -563,7 +563,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 	return timeout;
 }
 
-static void __fence_set_priority(struct dma_fence *fence, int prio)
+static void __fence_set_priority(struct dma_fence *fence,
+				 const struct i915_sched_attr *attr)
 {
 	struct i915_request *rq;
 	struct intel_engine_cs *engine;
@@ -576,11 +577,12 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
 
 	rcu_read_lock();
 	if (engine->schedule)
-		engine->schedule(rq, prio);
+		engine->schedule(rq, attr);
 	rcu_read_unlock();
 }
 
-static void fence_set_priority(struct dma_fence *fence, int prio)
+static void fence_set_priority(struct dma_fence *fence,
+			       const struct i915_sched_attr *attr)
 {
 	/* Recurse once into a fence-array */
 	if (dma_fence_is_array(fence)) {
@@ -588,16 +590,16 @@ static void fence_set_priority(struct dma_fence *fence, int prio)
 		int i;
 
 		for (i = 0; i < array->num_fences; i++)
-			__fence_set_priority(array->fences[i], prio);
+			__fence_set_priority(array->fences[i], attr);
 	} else {
-		__fence_set_priority(fence, prio);
+		__fence_set_priority(fence, attr);
 	}
 }
 
 int
 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			      unsigned int flags,
-			      int prio)
+			      const struct i915_sched_attr *attr)
 {
 	struct dma_fence *excl;
 
@@ -612,7 +614,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			return ret;
 
 		for (i = 0; i < count; i++) {
-			fence_set_priority(shared[i], prio);
+			fence_set_priority(shared[i], attr);
 			dma_fence_put(shared[i]);
 		}
 
@@ -622,7 +624,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 	}
 
 	if (excl) {
-		fence_set_priority(excl, prio);
+		fence_set_priority(excl, attr);
 		dma_fence_put(excl);
 	}
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5cfac0255758..143e6bf0d545 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -280,7 +280,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->contexts.list);
 	ctx->i915 = dev_priv;
-	ctx->priority = I915_PRIORITY_NORMAL;
+	ctx->sched.priority = I915_PRIORITY_NORMAL;
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
@@ -430,7 +430,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 		return ctx;
 
 	i915_gem_context_clear_bannable(ctx);
-	ctx->priority = prio;
+	ctx->sched.priority = prio;
 	ctx->ring_size = PAGE_SIZE;
 
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -747,7 +747,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		args->value = i915_gem_context_is_bannable(ctx);
 		break;
 	case I915_CONTEXT_PARAM_PRIORITY:
-		args->value = ctx->priority;
+		args->value = ctx->sched.priority;
 		break;
 	default:
 		ret = -EINVAL;
@@ -820,7 +820,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 				 !capable(CAP_SYS_NICE))
 				ret = -EPERM;
 			else
-				ctx->priority = priority;
+				ctx->sched.priority = priority;
 		}
 		break;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 7854262ddfd9..b12a8a8c5af9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -137,18 +137,7 @@ struct i915_gem_context {
 	 */
 	u32 user_handle;
 
-	/**
-	 * @priority: execution and service priority
-	 *
-	 * All clients are equal, but some are more equal than others!
-	 *
-	 * Requests from a context with a greater (more positive) value of
-	 * @priority will be executed before those with a lower @priority
-	 * value, forming a simple QoS.
-	 *
-	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
-	 */
-	int priority;
+	struct i915_sched_attr sched;
 
 	/** ggtt_offset_bias: placement restriction for context objects */
 	u32 ggtt_offset_bias;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index effaf982b19b..35cf001ef26b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -411,7 +411,7 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
 
 	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
 		   prefix, erq->pid, erq->ban_score,
-		   erq->context, erq->seqno, erq->priority,
+		   erq->context, erq->seqno, erq->sched.priority,
 		   jiffies_to_msecs(jiffies - erq->jiffies),
 		   erq->head, erq->tail);
 }
@@ -422,7 +422,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
 {
 	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
 		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
-		   ctx->priority, ctx->ban_score, bannable(ctx),
+		   ctx->sched.priority, ctx->ban_score, bannable(ctx),
 		   ctx->guilty, ctx->active);
 }
 
@@ -1278,7 +1278,7 @@ static void record_request(struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
 	erq->context = request->ctx->hw_id;
-	erq->priority = request->priotree.priority;
+	erq->sched = request->priotree.attr;
 	erq->ban_score = atomic_read(&request->ctx->ban_score);
 	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
@@ -1372,7 +1372,7 @@ static void record_context(struct drm_i915_error_context *e,
 
 	e->handle = ctx->user_handle;
 	e->hw_id = ctx->hw_id;
-	e->priority = ctx->priority;
+	e->sched = ctx->sched;
 	e->ban_score = atomic_read(&ctx->ban_score);
 	e->bannable = i915_gem_context_is_bannable(ctx);
 	e->guilty = atomic_read(&ctx->guilty_count);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index c05b6034d718..cbf33aa59b22 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -20,6 +20,7 @@
 #include "i915_gem.h"
 #include "i915_gem_gtt.h"
 #include "i915_params.h"
+#include "i915_scheduler.h"
 
 struct drm_i915_private;
 struct intel_overlay_error_state;
@@ -122,11 +123,11 @@ struct i915_gpu_state {
 			pid_t pid;
 			u32 handle;
 			u32 hw_id;
-			int priority;
 			int ban_score;
 			int active;
 			int guilty;
 			bool bannable;
+			struct i915_sched_attr sched;
 		} context;
 
 		struct drm_i915_error_object {
@@ -147,11 +148,11 @@ struct i915_gpu_state {
 			long jiffies;
 			pid_t pid;
 			u32 context;
-			int priority;
 			int ban_score;
 			u32 seqno;
 			u32 head;
 			u32 tail;
+			struct i915_sched_attr sched;
 		} *requests, execlist[EXECLIST_MAX_PORTS];
 		unsigned int num_ports;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 9ca9c24b4421..e1675d44ac92 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -191,7 +191,7 @@ i915_priotree_init(struct i915_priotree *pt)
 	INIT_LIST_HEAD(&pt->signalers_list);
 	INIT_LIST_HEAD(&pt->waiters_list);
 	INIT_LIST_HEAD(&pt->link);
-	pt->priority = I915_PRIORITY_INVALID;
+	pt->attr.priority = I915_PRIORITY_INVALID;
 }
 
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
@@ -1062,7 +1062,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	 */
 	rcu_read_lock();
 	if (engine->schedule)
-		engine->schedule(request, request->ctx->priority);
+		engine->schedule(request, &request->ctx->sched);
 	rcu_read_unlock();
 
 	local_bh_disable();
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7d6eb82eeb91..1271cb169e76 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -29,6 +29,7 @@
 
 #include "i915_gem.h"
 #include "i915_sw_fence.h"
+#include "i915_scheduler.h"
 
 #include <uapi/drm/i915_drm.h>
 
@@ -75,15 +76,7 @@ struct i915_priotree {
 	struct list_head signalers_list; /* those before us, we depend upon */
 	struct list_head waiters_list; /* those after us, they depend upon us */
 	struct list_head link;
-	int priority;
-};
-
-enum {
-	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
-	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
-	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
-	I915_PRIORITY_INVALID = INT_MIN
+	struct i915_sched_attr attr;
 };
 
 struct i915_capture_list {
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
new file mode 100644
index 000000000000..19672f6790a2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_SCHEDULER_H_
+#define _I915_SCHEDULER_H_
+
+#include <uapi/drm/i915_drm.h>
+
+enum {
+	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+	I915_PRIORITY_INVALID = INT_MIN
+};
+
+struct i915_sched_attr {
+	/**
+	 * @priority: execution and service priority
+	 *
+	 * All clients are equal, but some are more equal than others!
+	 *
+	 * Requests from a context with a greater (more positive) value of
+	 * @priority will be executed before those with a lower @priority
+	 * value, forming a simple QoS.
+	 *
+	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
+	 */
+	int priority;
+};
+
+#define I915_SCHED_ATTR_INITIALIZER { \
+	.priority = I915_PRIORITY_NORMAL, \
+}
+
+#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 020900e08d42..7c34b8c854be 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12839,7 +12839,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 
 	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
 
-	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
+	i915_gem_object_wait_priority(obj, 0, &(struct i915_sched_attr){
+				      .priority = I915_PRIORITY_DISPLAY,
+				      });
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	i915_gem_object_unpin_pages(obj);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a217b3fe5f0b..93ca83d47d0e 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1744,17 +1744,25 @@ unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
 	return which;
 }
 
+static void print_sched(struct drm_printer *m,
+			const struct i915_sched_attr *sched)
+{
+	drm_printf(m, "prio=%d", sched->priority);
+}
+
 static void print_request(struct drm_printer *m,
 			  struct i915_request *rq,
 			  const char *prefix)
 {
 	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
 
-	drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
+	drm_printf(m, "%s%x%s [%llx:%x] ",
+		   prefix,
 		   rq->global_seqno,
 		   i915_request_completed(rq) ? "!" : "",
-		   rq->fence.context, rq->fence.seqno,
-		   rq->priotree.priority,
+		   rq->fence.context, rq->fence.seqno);
+	print_sched(m, &rq->priotree.attr);
+	drm_printf(m, " @ %dms: %s\n",
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   name);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 97121230656c..accbbb57b0fe 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -659,7 +659,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->priotree.priority;
+	return rq->priotree.attr.priority;
 }
 
 static inline int port_prio(const struct execlist_port *port)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 665d9e82e954..632d0a630a83 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -176,7 +176,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->priotree.priority;
+	return rq->priotree.attr.priority;
 }
 
 static inline bool need_preempt(const struct intel_engine_cs *engine,
@@ -1170,11 +1170,13 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 	return engine;
 }
 
-static void execlists_schedule(struct i915_request *request, int prio)
+static void execlists_schedule(struct i915_request *request,
+			       const struct i915_sched_attr *attr)
 {
 	struct intel_engine_cs *engine;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
+	const int prio = attr->priority;
 	LIST_HEAD(dfs);
 
 	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
@@ -1182,7 +1184,7 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	if (i915_request_completed(request))
 		return;
 
-	if (prio <= READ_ONCE(request->priotree.priority))
+	if (prio <= READ_ONCE(request->priotree.attr.priority))
 		return;
 
 	/* Need BKL in order to use the temporary link inside i915_dependency */
@@ -1224,8 +1226,8 @@ static void execlists_schedule(struct i915_request *request, int prio)
 			if (i915_priotree_signaled(p->signaler))
 				continue;
 
-			GEM_BUG_ON(p->signaler->priority < pt->priority);
-			if (prio > READ_ONCE(p->signaler->priority))
+			GEM_BUG_ON(p->signaler->attr.priority < pt->attr.priority);
+			if (prio > READ_ONCE(p->signaler->attr.priority))
 				list_move_tail(&p->dfs_link, &dfs);
 		}
 	}
@@ -1236,9 +1238,9 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	 * execlists_submit_request()), we can set our own priority and skip
 	 * acquiring the engine locks.
 	 */
-	if (request->priotree.priority == I915_PRIORITY_INVALID) {
+	if (request->priotree.attr.priority == I915_PRIORITY_INVALID) {
 		GEM_BUG_ON(!list_empty(&request->priotree.link));
-		request->priotree.priority = prio;
+		request->priotree.attr = *attr;
 		if (stack.dfs_link.next == stack.dfs_link.prev)
 			return;
 		__list_del_entry(&stack.dfs_link);
@@ -1255,10 +1257,10 @@ static void execlists_schedule(struct i915_request *request, int prio)
 
 		engine = pt_lock_engine(pt, engine);
 
-		if (prio <= pt->priority)
+		if (prio <= pt->attr.priority)
 			continue;
 
-		pt->priority = prio;
+		pt->attr.priority = prio;
 		if (!list_empty(&pt->link)) {
 			__list_del_entry(&pt->link);
 			queue_request(engine, pt, prio);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 256d58487559..5bd0815037a0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,6 +14,7 @@
 #include "intel_gpu_commands.h"
 
 struct drm_printer;
+struct i915_sched_attr;
 
 #define I915_CMD_HASH_ORDER 9
 
@@ -460,7 +461,8 @@ struct intel_engine_cs {
 	 *
 	 * Called under the struct_mutex.
 	 */
-	void		(*schedule)(struct i915_request *request, int priority);
+	void		(*schedule)(struct i915_request *request,
+				    const struct i915_sched_attr *attr);
 
 	/*
 	 * Cancel all requests on the hardware, or queued for execution.
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 24f913f26a7b..f7ee54e109ae 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -628,7 +628,7 @@ static int active_engine(void *data)
 		}
 
 		if (arg->flags & TEST_PRIORITY)
-			ctx[idx]->priority =
+			ctx[idx]->sched.priority =
 				i915_prandom_u32_max_state(512, &prng);
 
 		rq[idx] = i915_request_get(new);
@@ -683,7 +683,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 			return err;
 
 		if (flags & TEST_PRIORITY)
-			h.ctx->priority = 1024;
+			h.ctx->sched.priority = 1024;
 	}
 
 	for_each_engine(engine, i915, id) {
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 0481e2e01146..7967a7309614 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -335,12 +335,12 @@ static int live_preempt(void *arg)
 	ctx_hi = kernel_context(i915);
 	if (!ctx_hi)
 		goto err_spin_lo;
-	ctx_hi->priority = I915_CONTEXT_MAX_USER_PRIORITY;
+	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
 	ctx_lo = kernel_context(i915);
 	if (!ctx_lo)
 		goto err_ctx_hi;
-	ctx_lo->priority = I915_CONTEXT_MIN_USER_PRIORITY;
+	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
 
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
@@ -407,6 +407,7 @@ static int live_late_preempt(void *arg)
 	struct i915_gem_context *ctx_hi, *ctx_lo;
 	struct spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
+	struct i915_sched_attr attr = I915_SCHED_ATTR_INITIALIZER;
 	enum intel_engine_id id;
 	int err = -ENOMEM;
 
@@ -458,7 +459,8 @@ static int live_late_preempt(void *arg)
 			goto err_wedged;
 		}
 
-		engine->schedule(rq, I915_PRIORITY_MAX);
+		attr.priority = I915_PRIORITY_MAX;
+		engine->schedule(rq, &attr);
 
 		if (!wait_for_spinner(&spin_hi, rq)) {
 			pr_err("High priority context failed to preempt the low priority context\n");
-- 
2.17.0


* ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Pack params to engine->schedule() into a struct
  2018-04-11 17:35 [PATCH] drm/i915: Pack params to engine->schedule() into a struct Chris Wilson
@ 2018-04-11 18:23 ` Patchwork
  2018-04-11 18:39 ` ✓ Fi.CI.BAT: success " Patchwork
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2018-04-11 18:23 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Pack params to engine->schedule() into a struct
URL   : https://patchwork.freedesktop.org/series/41567/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
203e77ff5941 drm/i915: Pack params to engine->schedule() into a struct
-:309: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#309: 
new file mode 100644

total: 0 errors, 1 warnings, 0 checks, 444 lines checked


* ✓ Fi.CI.BAT: success for drm/i915: Pack params to engine->schedule() into a struct
  2018-04-11 17:35 [PATCH] drm/i915: Pack params to engine->schedule() into a struct Chris Wilson
  2018-04-11 18:23 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
@ 2018-04-11 18:39 ` Patchwork
  2018-04-11 21:51 ` ✗ Fi.CI.IGT: failure " Patchwork
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2018-04-11 18:39 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Pack params to engine->schedule() into a struct
URL   : https://patchwork.freedesktop.org/series/41567/
State : success

== Summary ==

= CI Bug Log - changes from CI_DRM_4047 -> Patchwork_8670 =

== Summary - SUCCESS ==

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_8670/

== Known issues ==

  Here are the changes found in Patchwork_8670 that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@kms_chamelium@common-hpd-after-suspend:
      fi-skl-6700k2:      PASS -> INCOMPLETE (fdo#104108)

    
  fdo#104108 https://bugs.freedesktop.org/show_bug.cgi?id=104108


== Participating hosts (35 -> 32) ==

  Missing    (3): fi-ctg-p8600 fi-ilk-m540 fi-skl-6700hq 


== Build changes ==

    * Linux: CI_DRM_4047 -> Patchwork_8670

  CI_DRM_4047: b21f4ebec3a255026a780ee4a35ec5f8d9ede6cd @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4423: d502f055ac4500cada758876a512ac4f14b34851 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_8670: 203e77ff5941ac63cbb46b8f4df2f582fa4fce29 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4423: 45e115f293fd6acc0c9647cf2d3b76be78819ba5 @ git://anongit.freedesktop.org/piglit


== Linux commits ==

203e77ff5941 drm/i915: Pack params to engine->schedule() into a struct

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_8670/issues.html

* ✗ Fi.CI.IGT: failure for drm/i915: Pack params to engine->schedule() into a struct
  2018-04-11 17:35 [PATCH] drm/i915: Pack params to engine->schedule() into a struct Chris Wilson
  2018-04-11 18:23 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
  2018-04-11 18:39 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2018-04-11 21:51 ` Patchwork
  2018-04-11 23:00 ` [PATCH v2] " Chris Wilson
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2018-04-11 21:51 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Pack params to engine->schedule() into a struct
URL   : https://patchwork.freedesktop.org/series/41567/
State : failure

== Summary ==

= CI Bug Log - changes from CI_DRM_4047_full -> Patchwork_8670_full =

== Summary - FAILURE ==

  Serious unknown changes coming with Patchwork_8670_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_8670_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_8670/

== Possible new issues ==

  Here are the unknown changes that may have been introduced in Patchwork_8670_full:

  === IGT changes ===

    ==== Possible regressions ====

    igt@perf@short-reads:
      shard-kbl:          PASS -> FAIL

    
== Known issues ==

  Here are the changes found in Patchwork_8670_full that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@kms_flip@flip-vs-expired-vblank-interruptible:
      shard-kbl:          PASS -> FAIL (fdo#102887)

    igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-mmap-cpu:
      shard-kbl:          PASS -> INCOMPLETE (fdo#103665)

    
    ==== Possible fixes ====

    igt@kms_flip@2x-plain-flip-fb-recreate-interruptible:
      shard-hsw:          FAIL (fdo#100368) -> PASS

    igt@perf_pmu@rc6-runtime-pm-long:
      shard-kbl:          FAIL (fdo#105010) -> PASS

    
  fdo#100368 https://bugs.freedesktop.org/show_bug.cgi?id=100368
  fdo#102887 https://bugs.freedesktop.org/show_bug.cgi?id=102887
  fdo#103665 https://bugs.freedesktop.org/show_bug.cgi?id=103665
  fdo#105010 https://bugs.freedesktop.org/show_bug.cgi?id=105010


== Participating hosts (6 -> 4) ==

  Missing    (2): shard-glk shard-glkb 


== Build changes ==

    * Linux: CI_DRM_4047 -> Patchwork_8670

  CI_DRM_4047: b21f4ebec3a255026a780ee4a35ec5f8d9ede6cd @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4423: d502f055ac4500cada758876a512ac4f14b34851 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_8670: 203e77ff5941ac63cbb46b8f4df2f582fa4fce29 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4423: 45e115f293fd6acc0c9647cf2d3b76be78819ba5 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_8670/shards.html

* [PATCH v2] drm/i915: Pack params to engine->schedule() into a struct
  2018-04-11 17:35 [PATCH] drm/i915: Pack params to engine->schedule() into a struct Chris Wilson
                   ` (2 preceding siblings ...)
  2018-04-11 21:51 ` ✗ Fi.CI.IGT: failure " Patchwork
@ 2018-04-11 23:00 ` Chris Wilson
  2018-04-17  9:12   ` Tvrtko Ursulin
  2018-04-11 23:27 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Pack params to engine->schedule() into a struct (rev2) Patchwork
                   ` (2 subsequent siblings)
  6 siblings, 1 reply; 12+ messages in thread
From: Chris Wilson @ 2018-04-11 23:00 UTC
  To: intel-gfx

Today we only want to pass along the priority to engine->schedule(), but
in the future we want much more control over the various aspects of the
GPU during a context's execution, for example controlling the allowed
frequency. As we will need an ever-growing number of parameters for
scheduling, move them into a struct for convenience.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
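Call sites now pass a pointer rather than an int. The three patterns
used in the hunks below are: taking the context's attr directly,
filling an on-stack struct, and using a compound literal:

  engine->schedule(request, &request->ctx->sched);

  struct i915_sched_attr attr = {};
  attr.priority = I915_PRIORITY_MAX;
  engine->schedule(rq, &attr);

  i915_gem_object_wait_priority(obj, 0, &(struct i915_sched_attr){
				.priority = I915_PRIORITY_DISPLAY,
				});

Note that a zero-initialised attr starts out at I915_PRIORITY_NORMAL,
assuming I915_CONTEXT_DEFAULT_PRIORITY is 0 as in the uapi header.
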
 drivers/gpu/drm/i915/gvt/scheduler.c          |  2 +-
 drivers/gpu/drm/i915/i915_drv.h               |  3 +-
 drivers/gpu/drm/i915/i915_gem.c               | 18 ++---
 drivers/gpu/drm/i915/i915_gem_context.c       |  8 +--
 drivers/gpu/drm/i915/i915_gem_context.h       | 13 +---
 drivers/gpu/drm/i915/i915_gpu_error.c         |  8 +--
 drivers/gpu/drm/i915/i915_gpu_error.h         |  5 +-
 drivers/gpu/drm/i915/i915_request.c           |  4 +-
 drivers/gpu/drm/i915/i915_request.h           | 39 +----------
 drivers/gpu/drm/i915/i915_scheduler.h         | 65 +++++++++++++++++++
 drivers/gpu/drm/i915/intel_display.c          |  4 +-
 drivers/gpu/drm/i915/intel_engine_cs.c        | 18 ++++-
 drivers/gpu/drm/i915/intel_guc_submission.c   |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c              | 20 +++---
 drivers/gpu/drm/i915/intel_ringbuffer.h       |  4 +-
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |  4 +-
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |  8 ++-
 17 files changed, 133 insertions(+), 92 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_scheduler.h

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 638abe84857c..f3d21849b0cb 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1135,7 +1135,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		return PTR_ERR(s->shadow_ctx);
 
 	if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
-		s->shadow_ctx->priority = INT_MAX;
+		s->shadow_ctx->sched.priority = INT_MAX;
 
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 649c0f2f3bae..fad860f54386 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -75,6 +75,7 @@
 #include "i915_gem_timeline.h"
 #include "i915_gpu_error.h"
 #include "i915_request.h"
+#include "i915_scheduler.h"
 #include "i915_vma.h"
 
 #include "intel_gvt.h"
@@ -3159,7 +3160,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 struct intel_rps_client *rps);
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
-				  int priority);
+				  const struct i915_sched_attr *attr);
 #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
 
 int __must_check
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 60cf7cfc24ee..61e6be84362f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -563,7 +563,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 	return timeout;
 }
 
-static void __fence_set_priority(struct dma_fence *fence, int prio)
+static void __fence_set_priority(struct dma_fence *fence,
+				 const struct i915_sched_attr *attr)
 {
 	struct i915_request *rq;
 	struct intel_engine_cs *engine;
@@ -576,11 +577,12 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
 
 	rcu_read_lock();
 	if (engine->schedule)
-		engine->schedule(rq, prio);
+		engine->schedule(rq, attr);
 	rcu_read_unlock();
 }
 
-static void fence_set_priority(struct dma_fence *fence, int prio)
+static void fence_set_priority(struct dma_fence *fence,
+			       const struct i915_sched_attr *attr)
 {
 	/* Recurse once into a fence-array */
 	if (dma_fence_is_array(fence)) {
@@ -588,16 +590,16 @@ static void fence_set_priority(struct dma_fence *fence, int prio)
 		int i;
 
 		for (i = 0; i < array->num_fences; i++)
-			__fence_set_priority(array->fences[i], prio);
+			__fence_set_priority(array->fences[i], attr);
 	} else {
-		__fence_set_priority(fence, prio);
+		__fence_set_priority(fence, attr);
 	}
 }
 
 int
 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			      unsigned int flags,
-			      int prio)
+			      const struct i915_sched_attr *attr)
 {
 	struct dma_fence *excl;
 
@@ -612,7 +614,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			return ret;
 
 		for (i = 0; i < count; i++) {
-			fence_set_priority(shared[i], prio);
+			fence_set_priority(shared[i], attr);
 			dma_fence_put(shared[i]);
 		}
 
@@ -622,7 +624,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 	}
 
 	if (excl) {
-		fence_set_priority(excl, prio);
+		fence_set_priority(excl, attr);
 		dma_fence_put(excl);
 	}
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5cfac0255758..143e6bf0d545 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -280,7 +280,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->contexts.list);
 	ctx->i915 = dev_priv;
-	ctx->priority = I915_PRIORITY_NORMAL;
+	ctx->sched.priority = I915_PRIORITY_NORMAL;
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
@@ -430,7 +430,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 		return ctx;
 
 	i915_gem_context_clear_bannable(ctx);
-	ctx->priority = prio;
+	ctx->sched.priority = prio;
 	ctx->ring_size = PAGE_SIZE;
 
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -747,7 +747,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		args->value = i915_gem_context_is_bannable(ctx);
 		break;
 	case I915_CONTEXT_PARAM_PRIORITY:
-		args->value = ctx->priority;
+		args->value = ctx->sched.priority;
 		break;
 	default:
 		ret = -EINVAL;
@@ -820,7 +820,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 				 !capable(CAP_SYS_NICE))
 				ret = -EPERM;
 			else
-				ctx->priority = priority;
+				ctx->sched.priority = priority;
 		}
 		break;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 7854262ddfd9..b12a8a8c5af9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -137,18 +137,7 @@ struct i915_gem_context {
 	 */
 	u32 user_handle;
 
-	/**
-	 * @priority: execution and service priority
-	 *
-	 * All clients are equal, but some are more equal than others!
-	 *
-	 * Requests from a context with a greater (more positive) value of
-	 * @priority will be executed before those with a lower @priority
-	 * value, forming a simple QoS.
-	 *
-	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
-	 */
-	int priority;
+	struct i915_sched_attr sched;
 
 	/** ggtt_offset_bias: placement restriction for context objects */
 	u32 ggtt_offset_bias;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index effaf982b19b..35cf001ef26b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -411,7 +411,7 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
 
 	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
 		   prefix, erq->pid, erq->ban_score,
-		   erq->context, erq->seqno, erq->priority,
+		   erq->context, erq->seqno, erq->sched.priority,
 		   jiffies_to_msecs(jiffies - erq->jiffies),
 		   erq->head, erq->tail);
 }
@@ -422,7 +422,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
 {
 	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
 		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
-		   ctx->priority, ctx->ban_score, bannable(ctx),
+		   ctx->sched.priority, ctx->ban_score, bannable(ctx),
 		   ctx->guilty, ctx->active);
 }
 
@@ -1278,7 +1278,7 @@ static void record_request(struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
 	erq->context = request->ctx->hw_id;
-	erq->priority = request->priotree.priority;
+	erq->sched = request->priotree.attr;
 	erq->ban_score = atomic_read(&request->ctx->ban_score);
 	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
@@ -1372,7 +1372,7 @@ static void record_context(struct drm_i915_error_context *e,
 
 	e->handle = ctx->user_handle;
 	e->hw_id = ctx->hw_id;
-	e->priority = ctx->priority;
+	e->sched = ctx->sched;
 	e->ban_score = atomic_read(&ctx->ban_score);
 	e->bannable = i915_gem_context_is_bannable(ctx);
 	e->guilty = atomic_read(&ctx->guilty_count);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index c05b6034d718..cbf33aa59b22 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -20,6 +20,7 @@
 #include "i915_gem.h"
 #include "i915_gem_gtt.h"
 #include "i915_params.h"
+#include "i915_scheduler.h"
 
 struct drm_i915_private;
 struct intel_overlay_error_state;
@@ -122,11 +123,11 @@ struct i915_gpu_state {
 			pid_t pid;
 			u32 handle;
 			u32 hw_id;
-			int priority;
 			int ban_score;
 			int active;
 			int guilty;
 			bool bannable;
+			struct i915_sched_attr sched;
 		} context;
 
 		struct drm_i915_error_object {
@@ -147,11 +148,11 @@ struct i915_gpu_state {
 			long jiffies;
 			pid_t pid;
 			u32 context;
-			int priority;
 			int ban_score;
 			u32 seqno;
 			u32 head;
 			u32 tail;
+			struct i915_sched_attr sched;
 		} *requests, execlist[EXECLIST_MAX_PORTS];
 		unsigned int num_ports;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 9ca9c24b4421..e1675d44ac92 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -191,7 +191,7 @@ i915_priotree_init(struct i915_priotree *pt)
 	INIT_LIST_HEAD(&pt->signalers_list);
 	INIT_LIST_HEAD(&pt->waiters_list);
 	INIT_LIST_HEAD(&pt->link);
-	pt->priority = I915_PRIORITY_INVALID;
+	pt->attr.priority = I915_PRIORITY_INVALID;
 }
 
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
@@ -1062,7 +1062,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	 */
 	rcu_read_lock();
 	if (engine->schedule)
-		engine->schedule(request, request->ctx->priority);
+		engine->schedule(request, &request->ctx->sched);
 	rcu_read_unlock();
 
 	local_bh_disable();
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7d6eb82eeb91..5435878addd6 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -29,6 +29,7 @@
 
 #include "i915_gem.h"
 #include "i915_sw_fence.h"
+#include "i915_scheduler.h"
 
 #include <uapi/drm/i915_drm.h>
 
@@ -48,44 +49,6 @@ struct intel_signal_node {
 	struct list_head link;
 };
 
-struct i915_dependency {
-	struct i915_priotree *signaler;
-	struct list_head signal_link;
-	struct list_head wait_link;
-	struct list_head dfs_link;
-	unsigned long flags;
-#define I915_DEPENDENCY_ALLOC BIT(0)
-};
-
-/*
- * "People assume that time is a strict progression of cause to effect, but
- * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
- * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
- *
- * Requests exist in a complex web of interdependencies. Each request
- * has to wait for some other request to complete before it is ready to be run
- * (e.g. we have to wait until the pixels have been rendering into a texture
- * before we can copy from it). We track the readiness of a request in terms
- * of fences, but we also need to keep the dependency tree for the lifetime
- * of the request (beyond the life of an individual fence). We use the tree
- * at various points to reorder the requests whilst keeping the requests
- * in order with respect to their various dependencies.
- */
-struct i915_priotree {
-	struct list_head signalers_list; /* those before us, we depend upon */
-	struct list_head waiters_list; /* those after us, they depend upon us */
-	struct list_head link;
-	int priority;
-};
-
-enum {
-	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
-	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
-	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
-	I915_PRIORITY_INVALID = INT_MIN
-};
-
 struct i915_capture_list {
 	struct i915_capture_list *next;
 	struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
new file mode 100644
index 000000000000..0664caae314b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -0,0 +1,65 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_SCHEDULER_H_
+#define _I915_SCHEDULER_H_
+
+#include <uapi/drm/i915_drm.h>
+
+enum {
+	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+	I915_PRIORITY_INVALID = INT_MIN
+};
+
+struct i915_sched_attr {
+	/**
+	 * @priority: execution and service priority
+	 *
+	 * All clients are equal, but some are more equal than others!
+	 *
+	 * Requests from a context with a greater (more positive) value of
+	 * @priority will be executed before those with a lower @priority
+	 * value, forming a simple QoS.
+	 *
+	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
+	 */
+	int priority;
+};
+
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendering into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ */
+struct i915_priotree {
+	struct list_head signalers_list; /* those before us, we depend upon */
+	struct list_head waiters_list; /* those after us, they depend upon us */
+	struct list_head link;
+	struct i915_sched_attr attr;
+};
+
+struct i915_dependency {
+	struct i915_priotree *signaler;
+	struct list_head signal_link;
+	struct list_head wait_link;
+	struct list_head dfs_link;
+	unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 020900e08d42..7c34b8c854be 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12839,7 +12839,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 
 	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
 
-	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
+	i915_gem_object_wait_priority(obj, 0, &(struct i915_sched_attr){
+				      .priority = I915_PRIORITY_DISPLAY,
+				      });
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	i915_gem_object_unpin_pages(obj);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a217b3fe5f0b..7cc2892bf93b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1744,17 +1744,29 @@ unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
 	return which;
 }
 
+static void print_sched(struct drm_printer *m,
+			const struct drm_i915_private *i915,
+			const struct i915_sched_attr *sched)
+{
+	if (sched->priority == I915_PRIORITY_INVALID)
+		return;
+
+	drm_printf(m, "prio=%d", sched->priority);
+}
+
 static void print_request(struct drm_printer *m,
 			  struct i915_request *rq,
 			  const char *prefix)
 {
 	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
 
-	drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
+	drm_printf(m, "%s%x%s [%llx:%x] ",
+		   prefix,
 		   rq->global_seqno,
 		   i915_request_completed(rq) ? "!" : "",
-		   rq->fence.context, rq->fence.seqno,
-		   rq->priotree.priority,
+		   rq->fence.context, rq->fence.seqno);
+	print_sched(m, rq->i915, &rq->priotree.attr);
+	drm_printf(m, " @ %dms: %s\n",
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   name);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 97121230656c..accbbb57b0fe 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -659,7 +659,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->priotree.priority;
+	return rq->priotree.attr.priority;
 }
 
 static inline int port_prio(const struct execlist_port *port)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 665d9e82e954..632d0a630a83 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -176,7 +176,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->priotree.priority;
+	return rq->priotree.attr.priority;
 }
 
 static inline bool need_preempt(const struct intel_engine_cs *engine,
@@ -1170,11 +1170,13 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 	return engine;
 }
 
-static void execlists_schedule(struct i915_request *request, int prio)
+static void execlists_schedule(struct i915_request *request,
+			       const struct i915_sched_attr *attr)
 {
 	struct intel_engine_cs *engine;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
+	const int prio = attr->priority;
 	LIST_HEAD(dfs);
 
 	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
@@ -1182,7 +1184,7 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	if (i915_request_completed(request))
 		return;
 
-	if (prio <= READ_ONCE(request->priotree.priority))
+	if (prio <= READ_ONCE(request->priotree.attr.priority))
 		return;
 
 	/* Need BKL in order to use the temporary link inside i915_dependency */
@@ -1224,8 +1226,8 @@ static void execlists_schedule(struct i915_request *request, int prio)
 			if (i915_priotree_signaled(p->signaler))
 				continue;
 
-			GEM_BUG_ON(p->signaler->priority < pt->priority);
-			if (prio > READ_ONCE(p->signaler->priority))
+			GEM_BUG_ON(p->signaler->attr.priority < pt->attr.priority);
+			if (prio > READ_ONCE(p->signaler->attr.priority))
 				list_move_tail(&p->dfs_link, &dfs);
 		}
 	}
@@ -1236,9 +1238,9 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	 * execlists_submit_request()), we can set our own priority and skip
 	 * acquiring the engine locks.
 	 */
-	if (request->priotree.priority == I915_PRIORITY_INVALID) {
+	if (request->priotree.attr.priority == I915_PRIORITY_INVALID) {
 		GEM_BUG_ON(!list_empty(&request->priotree.link));
-		request->priotree.priority = prio;
+		request->priotree.attr = *attr;
 		if (stack.dfs_link.next == stack.dfs_link.prev)
 			return;
 		__list_del_entry(&stack.dfs_link);
@@ -1255,10 +1257,10 @@ static void execlists_schedule(struct i915_request *request, int prio)
 
 		engine = pt_lock_engine(pt, engine);
 
-		if (prio <= pt->priority)
+		if (prio <= pt->attr.priority)
 			continue;
 
-		pt->priority = prio;
+		pt->attr.priority = prio;
 		if (!list_empty(&pt->link)) {
 			__list_del_entry(&pt->link);
 			queue_request(engine, pt, prio);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 256d58487559..5bd0815037a0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,6 +14,7 @@
 #include "intel_gpu_commands.h"
 
 struct drm_printer;
+struct i915_sched_attr;
 
 #define I915_CMD_HASH_ORDER 9
 
@@ -460,7 +461,8 @@ struct intel_engine_cs {
 	 *
 	 * Called under the struct_mutex.
 	 */
-	void		(*schedule)(struct i915_request *request, int priority);
+	void		(*schedule)(struct i915_request *request,
+				    const struct i915_sched_attr *attr);
 
 	/*
 	 * Cancel all requests on the hardware, or queued for execution.
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 24f913f26a7b..f7ee54e109ae 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -628,7 +628,7 @@ static int active_engine(void *data)
 		}
 
 		if (arg->flags & TEST_PRIORITY)
-			ctx[idx]->priority =
+			ctx[idx]->sched.priority =
 				i915_prandom_u32_max_state(512, &prng);
 
 		rq[idx] = i915_request_get(new);
@@ -683,7 +683,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 			return err;
 
 		if (flags & TEST_PRIORITY)
-			h.ctx->priority = 1024;
+			h.ctx->sched.priority = 1024;
 	}
 
 	for_each_engine(engine, i915, id) {
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 0481e2e01146..ee7e22d18ff8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -335,12 +335,12 @@ static int live_preempt(void *arg)
 	ctx_hi = kernel_context(i915);
 	if (!ctx_hi)
 		goto err_spin_lo;
-	ctx_hi->priority = I915_CONTEXT_MAX_USER_PRIORITY;
+	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
 	ctx_lo = kernel_context(i915);
 	if (!ctx_lo)
 		goto err_ctx_hi;
-	ctx_lo->priority = I915_CONTEXT_MIN_USER_PRIORITY;
+	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
 
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
@@ -407,6 +407,7 @@ static int live_late_preempt(void *arg)
 	struct i915_gem_context *ctx_hi, *ctx_lo;
 	struct spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
+	struct i915_sched_attr attr = {};
 	enum intel_engine_id id;
 	int err = -ENOMEM;
 
@@ -458,7 +459,8 @@ static int live_late_preempt(void *arg)
 			goto err_wedged;
 		}
 
-		engine->schedule(rq, I915_PRIORITY_MAX);
+		attr.priority = I915_PRIORITY_MAX;
+		engine->schedule(rq, &attr);
 
 		if (!wait_for_spinner(&spin_hi, rq)) {
 			pr_err("High priority context failed to preempt the low priority context\n");
-- 
2.17.0


* ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Pack params to engine->schedule() into a struct (rev2)
  2018-04-11 17:35 [PATCH] drm/i915: Pack params to engine->schedule() into a struct Chris Wilson
                   ` (3 preceding siblings ...)
  2018-04-11 23:00 ` [PATCH v2] " Chris Wilson
@ 2018-04-11 23:27 ` Patchwork
  2018-04-11 23:43 ` ✓ Fi.CI.BAT: success " Patchwork
  2018-04-12  3:18 ` ✓ Fi.CI.IGT: " Patchwork
  6 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2018-04-11 23:27 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Pack params to engine->schedule() into a struct (rev2)
URL   : https://patchwork.freedesktop.org/series/41567/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
748e33a64fe4 drm/i915: Pack params to engine->schedule() into a struct
-:337: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#337: 
new file mode 100644

total: 0 errors, 1 warnings, 0 checks, 502 lines checked


* ✓ Fi.CI.BAT: success for drm/i915: Pack params to engine->schedule() into a struct (rev2)
  2018-04-11 17:35 [PATCH] drm/i915: Pack params to engine->schedule() into a struct Chris Wilson
                   ` (4 preceding siblings ...)
  2018-04-11 23:27 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Pack params to engine->schedule() into a struct (rev2) Patchwork
@ 2018-04-11 23:43 ` Patchwork
  2018-04-12  3:18 ` ✓ Fi.CI.IGT: " Patchwork
  6 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2018-04-11 23:43 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Pack params to engine->schedule() into a struct (rev2)
URL   : https://patchwork.freedesktop.org/series/41567/
State : success

== Summary ==

= CI Bug Log - changes from CI_DRM_4047 -> Patchwork_8675 =

== Summary - SUCCESS ==

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_8675/

== Known issues ==

  Here are the changes found in Patchwork_8675 that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@prime_vgem@basic-fence-flip:
      fi-ilk-650:         PASS -> FAIL (fdo#104008)

    
    ==== Possible fixes ====

    igt@gem_mmap_gtt@basic-small-bo-tiledx:
      fi-gdg-551:         FAIL (fdo#102575) -> PASS

    
  fdo#102575 https://bugs.freedesktop.org/show_bug.cgi?id=102575
  fdo#104008 https://bugs.freedesktop.org/show_bug.cgi?id=104008


== Participating hosts (35 -> 31) ==

  Missing    (4): fi-ctg-p8600 fi-ilk-m540 fi-skl-6700hq fi-pnv-d510 


== Build changes ==

    * Linux: CI_DRM_4047 -> Patchwork_8675

  CI_DRM_4047: b21f4ebec3a255026a780ee4a35ec5f8d9ede6cd @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4423: d502f055ac4500cada758876a512ac4f14b34851 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_8675: 748e33a64fe482e3c732a3d4ec77a828c1b997ab @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4423: 45e115f293fd6acc0c9647cf2d3b76be78819ba5 @ git://anongit.freedesktop.org/piglit


== Linux commits ==

748e33a64fe4 drm/i915: Pack params to engine->schedule() into a struct

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_8675/issues.html

* ✓ Fi.CI.IGT: success for drm/i915: Pack params to engine->schedule() into a struct (rev2)
  2018-04-11 17:35 [PATCH] drm/i915: Pack params to engine->schedule() into a struct Chris Wilson
                   ` (5 preceding siblings ...)
  2018-04-11 23:43 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2018-04-12  3:18 ` Patchwork
  6 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2018-04-12  3:18 UTC
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Pack params to engine->schedule() into a struct (rev2)
URL   : https://patchwork.freedesktop.org/series/41567/
State : success

== Summary ==

= CI Bug Log - changes from CI_DRM_4047_full -> Patchwork_8675_full =

== Summary - WARNING ==

  Minor unknown changes coming with Patchwork_8675_full need to be verified
  manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_8675_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_8675/

== Possible new issues ==

  Here are the unknown changes that may have been introduced in Patchwork_8675_full:

  === IGT changes ===

    ==== Warnings ====

    igt@gem_mocs_settings@mocs-rc6-blt:
      shard-kbl:          SKIP -> PASS

    
== Known issues ==

  Here are the changes found in Patchwork_8675_full that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@gem_wait@wait-default:
      shard-kbl:          PASS -> DMESG-WARN (fdo#103558) +1

    igt@kms_cursor_crc@cursor-64x21-onscreen:
      shard-kbl:          PASS -> DMESG-WARN (fdo#103558, fdo#103313) +7

    igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-pwrite:
      shard-apl:          PASS -> FAIL (fdo#103167)

    
    ==== Possible fixes ====

    igt@kms_flip@2x-plain-flip-fb-recreate-interruptible:
      shard-hsw:          FAIL (fdo#100368) -> PASS

    igt@perf_pmu@rc6-runtime-pm-long:
      shard-kbl:          FAIL (fdo#105010) -> PASS

    
  fdo#100368 https://bugs.freedesktop.org/show_bug.cgi?id=100368
  fdo#103167 https://bugs.freedesktop.org/show_bug.cgi?id=103167
  fdo#103313 https://bugs.freedesktop.org/show_bug.cgi?id=103313
  fdo#103558 https://bugs.freedesktop.org/show_bug.cgi?id=103558
  fdo#105010 https://bugs.freedesktop.org/show_bug.cgi?id=105010


== Participating hosts (6 -> 4) ==

  Missing    (2): shard-glk shard-glkb 


== Build changes ==

    * Linux: CI_DRM_4047 -> Patchwork_8675

  CI_DRM_4047: b21f4ebec3a255026a780ee4a35ec5f8d9ede6cd @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4423: d502f055ac4500cada758876a512ac4f14b34851 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_8675: 748e33a64fe482e3c732a3d4ec77a828c1b997ab @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4423: 45e115f293fd6acc0c9647cf2d3b76be78819ba5 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_8675/shards.html

* Re: [PATCH v2] drm/i915: Pack params to engine->schedule() into a struct
  2018-04-11 23:00 ` [PATCH v2] " Chris Wilson
@ 2018-04-17  9:12   ` Tvrtko Ursulin
  2018-04-17  9:49     ` Chris Wilson
  0 siblings, 1 reply; 12+ messages in thread
From: Tvrtko Ursulin @ 2018-04-17  9:12 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 12/04/2018 00:00, Chris Wilson wrote:
> Today we only want to pass along the priority to engine->schedule(), but
> in the future we want to have much more control over the various aspects
> of the GPU during a context's execution, for example controlling the
> frequency allowed. As we need an ever growing number of parameters for
> scheduling, move those into a struct for convenience.

Sounds sensible in principle. Do you already have a tree implementing
ctx freq on top of this, so I can get a better idea of how exactly it
will be used?

For instance, I am at the moment unsure whether it would not be enough
to store the frequencies in the context, which is available via the
request at all scheduling sites.

> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/gvt/scheduler.c          |  2 +-
>   drivers/gpu/drm/i915/i915_drv.h               |  3 +-
>   drivers/gpu/drm/i915/i915_gem.c               | 18 ++---
>   drivers/gpu/drm/i915/i915_gem_context.c       |  8 +--
>   drivers/gpu/drm/i915/i915_gem_context.h       | 13 +---
>   drivers/gpu/drm/i915/i915_gpu_error.c         |  8 +--
>   drivers/gpu/drm/i915/i915_gpu_error.h         |  5 +-
>   drivers/gpu/drm/i915/i915_request.c           |  4 +-
>   drivers/gpu/drm/i915/i915_request.h           | 39 +----------
>   drivers/gpu/drm/i915/i915_scheduler.h         | 65 +++++++++++++++++++

Separating code movement from functional changes is our usual best
practice, so if it is not too hard it would be good to split it up like that.

Regards,

Tvrtko

>   drivers/gpu/drm/i915/intel_display.c          |  4 +-
>   drivers/gpu/drm/i915/intel_engine_cs.c        | 18 ++++-
>   drivers/gpu/drm/i915/intel_guc_submission.c   |  2 +-
>   drivers/gpu/drm/i915/intel_lrc.c              | 20 +++---
>   drivers/gpu/drm/i915/intel_ringbuffer.h       |  4 +-
>   .../gpu/drm/i915/selftests/intel_hangcheck.c  |  4 +-
>   drivers/gpu/drm/i915/selftests/intel_lrc.c    |  8 ++-
>   17 files changed, 133 insertions(+), 92 deletions(-)
>   create mode 100644 drivers/gpu/drm/i915/i915_scheduler.h
> 
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 638abe84857c..f3d21849b0cb 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -1135,7 +1135,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
>   		return PTR_ERR(s->shadow_ctx);
>   
>   	if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
> -		s->shadow_ctx->priority = INT_MAX;
> +		s->shadow_ctx->sched.priority = INT_MAX;
>   
>   	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
>   
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 649c0f2f3bae..fad860f54386 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -75,6 +75,7 @@
>   #include "i915_gem_timeline.h"
>   #include "i915_gpu_error.h"
>   #include "i915_request.h"
> +#include "i915_scheduler.h"
>   #include "i915_vma.h"
>   
>   #include "intel_gvt.h"
> @@ -3159,7 +3160,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
>   			 struct intel_rps_client *rps);
>   int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>   				  unsigned int flags,
> -				  int priority);
> +				  const struct i915_sched_attr *attr);
>   #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
>   
>   int __must_check
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 60cf7cfc24ee..61e6be84362f 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -563,7 +563,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
>   	return timeout;
>   }
>   
> -static void __fence_set_priority(struct dma_fence *fence, int prio)
> +static void __fence_set_priority(struct dma_fence *fence,
> +				 const struct i915_sched_attr *attr)
>   {
>   	struct i915_request *rq;
>   	struct intel_engine_cs *engine;
> @@ -576,11 +577,12 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
>   
>   	rcu_read_lock();
>   	if (engine->schedule)
> -		engine->schedule(rq, prio);
> +		engine->schedule(rq, attr);
>   	rcu_read_unlock();
>   }
>   
> -static void fence_set_priority(struct dma_fence *fence, int prio)
> +static void fence_set_priority(struct dma_fence *fence,
> +			       const struct i915_sched_attr *attr)
>   {
>   	/* Recurse once into a fence-array */
>   	if (dma_fence_is_array(fence)) {
> @@ -588,16 +590,16 @@ static void fence_set_priority(struct dma_fence *fence, int prio)
>   		int i;
>   
>   		for (i = 0; i < array->num_fences; i++)
> -			__fence_set_priority(array->fences[i], prio);
> +			__fence_set_priority(array->fences[i], attr);
>   	} else {
> -		__fence_set_priority(fence, prio);
> +		__fence_set_priority(fence, attr);
>   	}
>   }
>   
>   int
>   i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>   			      unsigned int flags,
> -			      int prio)
> +			      const struct i915_sched_attr *attr)
>   {
>   	struct dma_fence *excl;
>   
> @@ -612,7 +614,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>   			return ret;
>   
>   		for (i = 0; i < count; i++) {
> -			fence_set_priority(shared[i], prio);
> +			fence_set_priority(shared[i], attr);
>   			dma_fence_put(shared[i]);
>   		}
>   
> @@ -622,7 +624,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>   	}
>   
>   	if (excl) {
> -		fence_set_priority(excl, prio);
> +		fence_set_priority(excl, attr);
>   		dma_fence_put(excl);
>   	}
>   	return 0;
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 5cfac0255758..143e6bf0d545 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -280,7 +280,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
>   	kref_init(&ctx->ref);
>   	list_add_tail(&ctx->link, &dev_priv->contexts.list);
>   	ctx->i915 = dev_priv;
> -	ctx->priority = I915_PRIORITY_NORMAL;
> +	ctx->sched.priority = I915_PRIORITY_NORMAL;
>   
>   	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
>   	INIT_LIST_HEAD(&ctx->handles_list);
> @@ -430,7 +430,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
>   		return ctx;
>   
>   	i915_gem_context_clear_bannable(ctx);
> -	ctx->priority = prio;
> +	ctx->sched.priority = prio;
>   	ctx->ring_size = PAGE_SIZE;
>   
>   	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
> @@ -747,7 +747,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
>   		args->value = i915_gem_context_is_bannable(ctx);
>   		break;
>   	case I915_CONTEXT_PARAM_PRIORITY:
> -		args->value = ctx->priority;
> +		args->value = ctx->sched.priority;
>   		break;
>   	default:
>   		ret = -EINVAL;
> @@ -820,7 +820,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
>   				 !capable(CAP_SYS_NICE))
>   				ret = -EPERM;
>   			else
> -				ctx->priority = priority;
> +				ctx->sched.priority = priority;
>   		}
>   		break;
>   
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
> index 7854262ddfd9..b12a8a8c5af9 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.h
> +++ b/drivers/gpu/drm/i915/i915_gem_context.h
> @@ -137,18 +137,7 @@ struct i915_gem_context {
>   	 */
>   	u32 user_handle;
>   
> -	/**
> -	 * @priority: execution and service priority
> -	 *
> -	 * All clients are equal, but some are more equal than others!
> -	 *
> -	 * Requests from a context with a greater (more positive) value of
> -	 * @priority will be executed before those with a lower @priority
> -	 * value, forming a simple QoS.
> -	 *
> -	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
> -	 */
> -	int priority;
> +	struct i915_sched_attr sched;
>   
>   	/** ggtt_offset_bias: placement restriction for context objects */
>   	u32 ggtt_offset_bias;
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index effaf982b19b..35cf001ef26b 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -411,7 +411,7 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
>   
>   	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
>   		   prefix, erq->pid, erq->ban_score,
> -		   erq->context, erq->seqno, erq->priority,
> +		   erq->context, erq->seqno, erq->sched.priority,
>   		   jiffies_to_msecs(jiffies - erq->jiffies),
>   		   erq->head, erq->tail);
>   }
> @@ -422,7 +422,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
>   {
>   	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
>   		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
> -		   ctx->priority, ctx->ban_score, bannable(ctx),
> +		   ctx->sched.priority, ctx->ban_score, bannable(ctx),
>   		   ctx->guilty, ctx->active);
>   }
>   
> @@ -1278,7 +1278,7 @@ static void record_request(struct i915_request *request,
>   			   struct drm_i915_error_request *erq)
>   {
>   	erq->context = request->ctx->hw_id;
> -	erq->priority = request->priotree.priority;
> +	erq->sched = request->priotree.attr;
>   	erq->ban_score = atomic_read(&request->ctx->ban_score);
>   	erq->seqno = request->global_seqno;
>   	erq->jiffies = request->emitted_jiffies;
> @@ -1372,7 +1372,7 @@ static void record_context(struct drm_i915_error_context *e,
>   
>   	e->handle = ctx->user_handle;
>   	e->hw_id = ctx->hw_id;
> -	e->priority = ctx->priority;
> +	e->sched = ctx->sched;
>   	e->ban_score = atomic_read(&ctx->ban_score);
>   	e->bannable = i915_gem_context_is_bannable(ctx);
>   	e->guilty = atomic_read(&ctx->guilty_count);
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
> index c05b6034d718..cbf33aa59b22 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.h
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.h
> @@ -20,6 +20,7 @@
>   #include "i915_gem.h"
>   #include "i915_gem_gtt.h"
>   #include "i915_params.h"
> +#include "i915_scheduler.h"
>   
>   struct drm_i915_private;
>   struct intel_overlay_error_state;
> @@ -122,11 +123,11 @@ struct i915_gpu_state {
>   			pid_t pid;
>   			u32 handle;
>   			u32 hw_id;
> -			int priority;
>   			int ban_score;
>   			int active;
>   			int guilty;
>   			bool bannable;
> +			struct i915_sched_attr sched;
>   		} context;
>   
>   		struct drm_i915_error_object {
> @@ -147,11 +148,11 @@ struct i915_gpu_state {
>   			long jiffies;
>   			pid_t pid;
>   			u32 context;
> -			int priority;
>   			int ban_score;
>   			u32 seqno;
>   			u32 head;
>   			u32 tail;
> +			struct i915_sched_attr sched;
>   		} *requests, execlist[EXECLIST_MAX_PORTS];
>   		unsigned int num_ports;
>   
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 9ca9c24b4421..e1675d44ac92 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -191,7 +191,7 @@ i915_priotree_init(struct i915_priotree *pt)
>   	INIT_LIST_HEAD(&pt->signalers_list);
>   	INIT_LIST_HEAD(&pt->waiters_list);
>   	INIT_LIST_HEAD(&pt->link);
> -	pt->priority = I915_PRIORITY_INVALID;
> +	pt->attr.priority = I915_PRIORITY_INVALID;
>   }
>   
>   static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
> @@ -1062,7 +1062,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
>   	 */
>   	rcu_read_lock();
>   	if (engine->schedule)
> -		engine->schedule(request, request->ctx->priority);
> +		engine->schedule(request, &request->ctx->sched);
>   	rcu_read_unlock();
>   
>   	local_bh_disable();
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index 7d6eb82eeb91..5435878addd6 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -29,6 +29,7 @@
>   
>   #include "i915_gem.h"
>   #include "i915_sw_fence.h"
> +#include "i915_scheduler.h"
>   
>   #include <uapi/drm/i915_drm.h>
>   
> @@ -48,44 +49,6 @@ struct intel_signal_node {
>   	struct list_head link;
>   };
>   
> -struct i915_dependency {
> -	struct i915_priotree *signaler;
> -	struct list_head signal_link;
> -	struct list_head wait_link;
> -	struct list_head dfs_link;
> -	unsigned long flags;
> -#define I915_DEPENDENCY_ALLOC BIT(0)
> -};
> -
> -/*
> - * "People assume that time is a strict progression of cause to effect, but
> - * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
> - * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
> - *
> - * Requests exist in a complex web of interdependencies. Each request
> - * has to wait for some other request to complete before it is ready to be run
> - * (e.g. we have to wait until the pixels have been rendering into a texture
> - * before we can copy from it). We track the readiness of a request in terms
> - * of fences, but we also need to keep the dependency tree for the lifetime
> - * of the request (beyond the life of an individual fence). We use the tree
> - * at various points to reorder the requests whilst keeping the requests
> - * in order with respect to their various dependencies.
> - */
> -struct i915_priotree {
> -	struct list_head signalers_list; /* those before us, we depend upon */
> -	struct list_head waiters_list; /* those after us, they depend upon us */
> -	struct list_head link;
> -	int priority;
> -};
> -
> -enum {
> -	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
> -	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
> -	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
> -
> -	I915_PRIORITY_INVALID = INT_MIN
> -};
> -
>   struct i915_capture_list {
>   	struct i915_capture_list *next;
>   	struct i915_vma *vma;
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
> new file mode 100644
> index 000000000000..0664caae314b
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/i915_scheduler.h
> @@ -0,0 +1,65 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2018 Intel Corporation
> + */
> +
> +#ifndef _I915_SCHEDULER_H_
> +#define _I915_SCHEDULER_H_
> +
> +#include <uapi/drm/i915_drm.h>
> +
> +enum {
> +	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
> +	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
> +	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
> +
> +	I915_PRIORITY_INVALID = INT_MIN
> +};
> +
> +struct i915_sched_attr {
> +	/**
> +	 * @priority: execution and service priority
> +	 *
> +	 * All clients are equal, but some are more equal than others!
> +	 *
> +	 * Requests from a context with a greater (more positive) value of
> +	 * @priority will be executed before those with a lower @priority
> +	 * value, forming a simple QoS.
> +	 *
> +	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
> +	 */
> +	int priority;
> +};
> +
> +/*
> + * "People assume that time is a strict progression of cause to effect, but
> + * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
> + * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
> + *
> + * Requests exist in a complex web of interdependencies. Each request
> + * has to wait for some other request to complete before it is ready to be run
> + * (e.g. we have to wait until the pixels have been rendering into a texture
> + * before we can copy from it). We track the readiness of a request in terms
> + * of fences, but we also need to keep the dependency tree for the lifetime
> + * of the request (beyond the life of an individual fence). We use the tree
> + * at various points to reorder the requests whilst keeping the requests
> + * in order with respect to their various dependencies.
> + */
> +struct i915_priotree {
> +	struct list_head signalers_list; /* those before us, we depend upon */
> +	struct list_head waiters_list; /* those after us, they depend upon us */
> +	struct list_head link;
> +	struct i915_sched_attr attr;
> +};
> +
> +struct i915_dependency {
> +	struct i915_priotree *signaler;
> +	struct list_head signal_link;
> +	struct list_head wait_link;
> +	struct list_head dfs_link;
> +	unsigned long flags;
> +#define I915_DEPENDENCY_ALLOC BIT(0)
> +};
> +
> +#endif /* _I915_SCHEDULER_H_ */
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 020900e08d42..7c34b8c854be 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -12839,7 +12839,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
>   
>   	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
>   
> -	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
> +	i915_gem_object_wait_priority(obj, 0, &(struct i915_sched_attr){
> +				      .priority = I915_PRIORITY_DISPLAY,
> +				      });
>   
>   	mutex_unlock(&dev_priv->drm.struct_mutex);
>   	i915_gem_object_unpin_pages(obj);
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index a217b3fe5f0b..7cc2892bf93b 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -1744,17 +1744,29 @@ unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
>   	return which;
>   }
>   
> +static void print_sched(struct drm_printer *m,
> +			const struct drm_i915_private *i915,
> +			const struct i915_sched_attr *sched)
> +{
> +	if (sched->priority == I915_PRIORITY_INVALID)
> +		return;
> +
> +	drm_printf(m, "prio=%d", sched->priority);
> +}
> +
>   static void print_request(struct drm_printer *m,
>   			  struct i915_request *rq,
>   			  const char *prefix)
>   {
>   	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
>   
> -	drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
> +	drm_printf(m, "%s%x%s [%llx:%x] ",
> +		   prefix,
>   		   rq->global_seqno,
>   		   i915_request_completed(rq) ? "!" : "",
> -		   rq->fence.context, rq->fence.seqno,
> -		   rq->priotree.priority,
> +		   rq->fence.context, rq->fence.seqno);
> +	print_sched(m, rq->i915, &rq->priotree.attr);
> +	drm_printf(m, " @ %dms: %s\n",
>   		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
>   		   name);
>   }
> diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
> index 97121230656c..accbbb57b0fe 100644
> --- a/drivers/gpu/drm/i915/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/intel_guc_submission.c
> @@ -659,7 +659,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
>   
>   static inline int rq_prio(const struct i915_request *rq)
>   {
> -	return rq->priotree.priority;
> +	return rq->priotree.attr.priority;
>   }
>   
>   static inline int port_prio(const struct execlist_port *port)
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 665d9e82e954..632d0a630a83 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -176,7 +176,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
>   
>   static inline int rq_prio(const struct i915_request *rq)
>   {
> -	return rq->priotree.priority;
> +	return rq->priotree.attr.priority;
>   }
>   
>   static inline bool need_preempt(const struct intel_engine_cs *engine,
> @@ -1170,11 +1170,13 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
>   	return engine;
>   }
>   
> -static void execlists_schedule(struct i915_request *request, int prio)
> +static void execlists_schedule(struct i915_request *request,
> +			       const struct i915_sched_attr *attr)
>   {
>   	struct intel_engine_cs *engine;
>   	struct i915_dependency *dep, *p;
>   	struct i915_dependency stack;
> +	const int prio = attr->priority;
>   	LIST_HEAD(dfs);
>   
>   	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
> @@ -1182,7 +1184,7 @@ static void execlists_schedule(struct i915_request *request, int prio)
>   	if (i915_request_completed(request))
>   		return;
>   
> -	if (prio <= READ_ONCE(request->priotree.priority))
> +	if (prio <= READ_ONCE(request->priotree.attr.priority))
>   		return;
>   
>   	/* Need BKL in order to use the temporary link inside i915_dependency */
> @@ -1224,8 +1226,8 @@ static void execlists_schedule(struct i915_request *request, int prio)
>   			if (i915_priotree_signaled(p->signaler))
>   				continue;
>   
> -			GEM_BUG_ON(p->signaler->priority < pt->priority);
> -			if (prio > READ_ONCE(p->signaler->priority))
> +			GEM_BUG_ON(p->signaler->attr.priority < pt->attr.priority);
> +			if (prio > READ_ONCE(p->signaler->attr.priority))
>   				list_move_tail(&p->dfs_link, &dfs);
>   		}
>   	}
> @@ -1236,9 +1238,9 @@ static void execlists_schedule(struct i915_request *request, int prio)
>   	 * execlists_submit_request()), we can set our own priority and skip
>   	 * acquiring the engine locks.
>   	 */
> -	if (request->priotree.priority == I915_PRIORITY_INVALID) {
> +	if (request->priotree.attr.priority == I915_PRIORITY_INVALID) {
>   		GEM_BUG_ON(!list_empty(&request->priotree.link));
> -		request->priotree.priority = prio;
> +		request->priotree.attr = *attr;
>   		if (stack.dfs_link.next == stack.dfs_link.prev)
>   			return;
>   		__list_del_entry(&stack.dfs_link);
> @@ -1255,10 +1257,10 @@ static void execlists_schedule(struct i915_request *request, int prio)
>   
>   		engine = pt_lock_engine(pt, engine);
>   
> -		if (prio <= pt->priority)
> +		if (prio <= pt->attr.priority)
>   			continue;
>   
> -		pt->priority = prio;
> +		pt->attr.priority = prio;
>   		if (!list_empty(&pt->link)) {
>   			__list_del_entry(&pt->link);
>   			queue_request(engine, pt, prio);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 256d58487559..5bd0815037a0 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -14,6 +14,7 @@
>   #include "intel_gpu_commands.h"
>   
>   struct drm_printer;
> +struct i915_sched_attr;
>   
>   #define I915_CMD_HASH_ORDER 9
>   
> @@ -460,7 +461,8 @@ struct intel_engine_cs {
>   	 *
>   	 * Called under the struct_mutex.
>   	 */
> -	void		(*schedule)(struct i915_request *request, int priority);
> +	void		(*schedule)(struct i915_request *request,
> +				    const struct i915_sched_attr *attr);
>   
>   	/*
>   	 * Cancel all requests on the hardware, or queued for execution.
> diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
> index 24f913f26a7b..f7ee54e109ae 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
> @@ -628,7 +628,7 @@ static int active_engine(void *data)
>   		}
>   
>   		if (arg->flags & TEST_PRIORITY)
> -			ctx[idx]->priority =
> +			ctx[idx]->sched.priority =
>   				i915_prandom_u32_max_state(512, &prng);
>   
>   		rq[idx] = i915_request_get(new);
> @@ -683,7 +683,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
>   			return err;
>   
>   		if (flags & TEST_PRIORITY)
> -			h.ctx->priority = 1024;
> +			h.ctx->sched.priority = 1024;
>   	}
>   
>   	for_each_engine(engine, i915, id) {
> diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> index 0481e2e01146..ee7e22d18ff8 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> @@ -335,12 +335,12 @@ static int live_preempt(void *arg)
>   	ctx_hi = kernel_context(i915);
>   	if (!ctx_hi)
>   		goto err_spin_lo;
> -	ctx_hi->priority = I915_CONTEXT_MAX_USER_PRIORITY;
> +	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
>   
>   	ctx_lo = kernel_context(i915);
>   	if (!ctx_lo)
>   		goto err_ctx_hi;
> -	ctx_lo->priority = I915_CONTEXT_MIN_USER_PRIORITY;
> +	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
>   
>   	for_each_engine(engine, i915, id) {
>   		struct i915_request *rq;
> @@ -407,6 +407,7 @@ static int live_late_preempt(void *arg)
>   	struct i915_gem_context *ctx_hi, *ctx_lo;
>   	struct spinner spin_hi, spin_lo;
>   	struct intel_engine_cs *engine;
> +	struct i915_sched_attr attr = {};
>   	enum intel_engine_id id;
>   	int err = -ENOMEM;
>   
> @@ -458,7 +459,8 @@ static int live_late_preempt(void *arg)
>   			goto err_wedged;
>   		}
>   
> -		engine->schedule(rq, I915_PRIORITY_MAX);
> +		attr.priority = I915_PRIORITY_MAX;
> +		engine->schedule(rq, &attr);
>   
>   		if (!wait_for_spinner(&spin_hi, rq)) {
>   			pr_err("High priority context failed to preempt the low priority context\n");
> 
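
Condensed, the new calling convention from the patch looks as follows.
This is a standalone sketch rather than code from the series: the driver
types are stubbed out so the example compiles on its own, and the
priority value mirrors the selftest hunk quoted above.

    #include <stdio.h>

    /* Stubs standing in for the driver types touched by the patch. */
    struct i915_sched_attr { int priority; };
    enum { I915_PRIORITY_NORMAL = 0, I915_PRIORITY_MAX = 1024 };

    struct i915_request;
    struct intel_engine_cs {
            /* After the patch, schedule() takes the attr struct, not an int. */
            void (*schedule)(struct i915_request *rq,
                             const struct i915_sched_attr *attr);
    };

    static void example_schedule(struct i915_request *rq,
                                 const struct i915_sched_attr *attr)
    {
            (void)rq;
            printf("scheduling with prio=%d\n", attr->priority);
    }

    int main(void)
    {
            struct intel_engine_cs engine = { .schedule = example_schedule };
            struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };

            /* As in the selftest: build the attr, then call schedule(). */
            if (engine.schedule)
                    engine.schedule(NULL, &attr);
            return 0;
    }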

* Re: [PATCH v2] drm/i915: Pack params to engine->schedule() into a struct
  2018-04-17  9:12   ` Tvrtko Ursulin
@ 2018-04-17  9:49     ` Chris Wilson
  0 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2018-04-17  9:49 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2018-04-17 10:12:05)
> 
> On 12/04/2018 00:00, Chris Wilson wrote:
> > Today we only want to pass along the priority to engine->schedule(), but
> > in the future we want to have much more control over the various aspects
> > of the GPU during a context's execution, for example controlling the
> > frequency allowed. As we need an ever growing number of parameters for
> > scheduling, move those into a struct for convenience.
> 
> Sounds sensible in principle. Do you already have a tree implementing 
> ctx freq on top of this so I can get a better idea how exactly it will 
> be used?

It's because I want to pass in frequency adjustments from the caller and
percolate those through the dependencies, rather than pull them from the
context at that later time. It also gets around the thorny issue of what
to do when the context is changed at runtime. What I'm playing around with is
https://cgit.freedesktop.org/~ickle/linux-2.6/commit/?h=prescheduler&id=e734f100cd74b340ff9675811268a30cbe9a7959
-Chris
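
To make the direction concrete, here is a hypothetical kernel-style
sketch (not standalone, and not code from the patch or the linked tree):
the frequency fields are invented purely for illustration, and only
.priority exists in the series.

    /* Hypothetical extension of i915_sched_attr. The min/max frequency
     * fields below are invented and do not exist in the series. */
    struct i915_sched_attr {
            int priority;
            unsigned int min_freq_mhz;      /* invented */
            unsigned int max_freq_mhz;      /* invented */
    };

    /* Caller-side sketch: the submitter fixes the attributes up front and
     * the scheduler percolates them through the dependency tree, so a
     * later change to the context cannot retroactively alter requests
     * that were already queued. */
    static void submit_with_freq_hint(struct i915_request *rq,
                                      struct intel_engine_cs *engine)
    {
            struct i915_sched_attr attr = {
                    .priority = I915_PRIORITY_NORMAL,
                    .min_freq_mhz = 300,    /* invented values */
                    .max_freq_mhz = 1150,
            };

            if (engine->schedule)
                    engine->schedule(rq, &attr);
    }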

* Re: [PATCH v2] drm/i915: Pack params to engine->schedule() into a struct
  2018-04-18  9:41 ` [PATCH v2] " Chris Wilson
@ 2018-04-18 10:32   ` Joonas Lahtinen
  0 siblings, 0 replies; 12+ messages in thread
From: Joonas Lahtinen @ 2018-04-18 10:32 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Quoting Chris Wilson (2018-04-18 12:41:49)
> Today we only want to pass along the priority to engine->schedule(), but
> in the future we want to have much more control over the various aspects
> of the GPU during a context's execution, for example controlling the
> frequency allowed. As we need an ever growing number of parameters for
> scheduling, move those into a struct for convenience.
> 
> v2: Move the anonymous struct into its own function for legibility and
> ye olde gcc.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas

* [PATCH v2] drm/i915: Pack params to engine->schedule() into a struct
  2018-04-17 14:31 [PATCH v2 3/3] drm/i915: Pack params to engine->schedule() into a struct Chris Wilson
@ 2018-04-18  9:41 ` Chris Wilson
  2018-04-18 10:32   ` Joonas Lahtinen
  0 siblings, 1 reply; 12+ messages in thread
From: Chris Wilson @ 2018-04-18  9:41 UTC (permalink / raw)
  To: intel-gfx

Today we only want to pass along the priority to engine->schedule(), but
in the future we want to have much more control over the various aspects
of the GPU during a context's execution, for example controlling the
frequency allowed. As we need an ever growing number of parameters for
scheduling, move those into a struct for convenience.

v2: Move the anonymous struct into its own function for legibility and
ye olde gcc.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gvt/scheduler.c          |  2 +-
 drivers/gpu/drm/i915/i915_drv.h               |  3 ++-
 drivers/gpu/drm/i915/i915_gem.c               | 18 +++++++++--------
 drivers/gpu/drm/i915/i915_gem_context.c       |  8 ++++----
 drivers/gpu/drm/i915/i915_gem_context.h       | 13 +-----------
 drivers/gpu/drm/i915/i915_gpu_error.c         |  8 ++++----
 drivers/gpu/drm/i915/i915_gpu_error.h         |  5 +++--
 drivers/gpu/drm/i915/i915_request.c           |  4 ++--
 drivers/gpu/drm/i915/i915_request.h           |  1 +
 drivers/gpu/drm/i915/i915_scheduler.h         | 17 +++++++++++++++-
 drivers/gpu/drm/i915/intel_display.c          | 11 +++++++++-
 drivers/gpu/drm/i915/intel_engine_cs.c        | 18 ++++++++++++++---
 drivers/gpu/drm/i915/intel_guc_submission.c   |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c              | 20 ++++++++++---------
 drivers/gpu/drm/i915/intel_ringbuffer.h       |  4 +++-
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |  4 ++--
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |  8 +++++---
 17 files changed, 91 insertions(+), 55 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 638abe84857c..f3d21849b0cb 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1135,7 +1135,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		return PTR_ERR(s->shadow_ctx);
 
 	if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
-		s->shadow_ctx->priority = INT_MAX;
+		s->shadow_ctx->sched.priority = INT_MAX;
 
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8e8667d9b084..028691108125 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -75,6 +75,7 @@
 #include "i915_gem_timeline.h"
 #include "i915_gpu_error.h"
 #include "i915_request.h"
+#include "i915_scheduler.h"
 #include "i915_vma.h"
 
 #include "intel_gvt.h"
@@ -3158,7 +3159,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 struct intel_rps_client *rps);
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
-				  int priority);
+				  const struct i915_sched_attr *attr);
 #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
 
 int __must_check
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4c9d2a6f7d28..795ca83aed7a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -564,7 +564,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 	return timeout;
 }
 
-static void __fence_set_priority(struct dma_fence *fence, int prio)
+static void __fence_set_priority(struct dma_fence *fence,
+				 const struct i915_sched_attr *attr)
 {
 	struct i915_request *rq;
 	struct intel_engine_cs *engine;
@@ -577,11 +578,12 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
 
 	rcu_read_lock();
 	if (engine->schedule)
-		engine->schedule(rq, prio);
+		engine->schedule(rq, attr);
 	rcu_read_unlock();
 }
 
-static void fence_set_priority(struct dma_fence *fence, int prio)
+static void fence_set_priority(struct dma_fence *fence,
+			       const struct i915_sched_attr *attr)
 {
 	/* Recurse once into a fence-array */
 	if (dma_fence_is_array(fence)) {
@@ -589,16 +591,16 @@ static void fence_set_priority(struct dma_fence *fence, int prio)
 		int i;
 
 		for (i = 0; i < array->num_fences; i++)
-			__fence_set_priority(array->fences[i], prio);
+			__fence_set_priority(array->fences[i], attr);
 	} else {
-		__fence_set_priority(fence, prio);
+		__fence_set_priority(fence, attr);
 	}
 }
 
 int
 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			      unsigned int flags,
-			      int prio)
+			      const struct i915_sched_attr *attr)
 {
 	struct dma_fence *excl;
 
@@ -613,7 +615,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			return ret;
 
 		for (i = 0; i < count; i++) {
-			fence_set_priority(shared[i], prio);
+			fence_set_priority(shared[i], attr);
 			dma_fence_put(shared[i]);
 		}
 
@@ -623,7 +625,7 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 	}
 
 	if (excl) {
-		fence_set_priority(excl, prio);
+		fence_set_priority(excl, attr);
 		dma_fence_put(excl);
 	}
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 9b3834a846e8..74435affe23f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -281,7 +281,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->contexts.list);
 	ctx->i915 = dev_priv;
-	ctx->priority = I915_PRIORITY_NORMAL;
+	ctx->sched.priority = I915_PRIORITY_NORMAL;
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
@@ -431,7 +431,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 		return ctx;
 
 	i915_gem_context_clear_bannable(ctx);
-	ctx->priority = prio;
+	ctx->sched.priority = prio;
 	ctx->ring_size = PAGE_SIZE;
 
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -753,7 +753,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		args->value = i915_gem_context_is_bannable(ctx);
 		break;
 	case I915_CONTEXT_PARAM_PRIORITY:
-		args->value = ctx->priority;
+		args->value = ctx->sched.priority;
 		break;
 	default:
 		ret = -EINVAL;
@@ -826,7 +826,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 				 !capable(CAP_SYS_NICE))
 				ret = -EPERM;
 			else
-				ctx->priority = priority;
+				ctx->sched.priority = priority;
 		}
 		break;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 7854262ddfd9..b12a8a8c5af9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -137,18 +137,7 @@ struct i915_gem_context {
 	 */
 	u32 user_handle;
 
-	/**
-	 * @priority: execution and service priority
-	 *
-	 * All clients are equal, but some are more equal than others!
-	 *
-	 * Requests from a context with a greater (more positive) value of
-	 * @priority will be executed before those with a lower @priority
-	 * value, forming a simple QoS.
-	 *
-	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
-	 */
-	int priority;
+	struct i915_sched_attr sched;
 
 	/** ggtt_offset_bias: placement restriction for context objects */
 	u32 ggtt_offset_bias;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 6b5b9b3ded02..671ffa37614e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -411,7 +411,7 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
 
 	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
 		   prefix, erq->pid, erq->ban_score,
-		   erq->context, erq->seqno, erq->priority,
+		   erq->context, erq->seqno, erq->sched_attr.priority,
 		   jiffies_to_msecs(jiffies - erq->jiffies),
 		   erq->head, erq->tail);
 }
@@ -422,7 +422,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
 {
 	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
 		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
-		   ctx->priority, ctx->ban_score, bannable(ctx),
+		   ctx->sched_attr.priority, ctx->ban_score, bannable(ctx),
 		   ctx->guilty, ctx->active);
 }
 
@@ -1278,7 +1278,7 @@ static void record_request(struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
 	erq->context = request->ctx->hw_id;
-	erq->priority = request->sched.priority;
+	erq->sched_attr = request->sched.attr;
 	erq->ban_score = atomic_read(&request->ctx->ban_score);
 	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
@@ -1372,7 +1372,7 @@ static void record_context(struct drm_i915_error_context *e,
 
 	e->handle = ctx->user_handle;
 	e->hw_id = ctx->hw_id;
-	e->priority = ctx->priority;
+	e->sched_attr = ctx->sched;
 	e->ban_score = atomic_read(&ctx->ban_score);
 	e->bannable = i915_gem_context_is_bannable(ctx);
 	e->guilty = atomic_read(&ctx->guilty_count);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index c05b6034d718..5d6fdcbc092c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -20,6 +20,7 @@
 #include "i915_gem.h"
 #include "i915_gem_gtt.h"
 #include "i915_params.h"
+#include "i915_scheduler.h"
 
 struct drm_i915_private;
 struct intel_overlay_error_state;
@@ -122,11 +123,11 @@ struct i915_gpu_state {
 			pid_t pid;
 			u32 handle;
 			u32 hw_id;
-			int priority;
 			int ban_score;
 			int active;
 			int guilty;
 			bool bannable;
+			struct i915_sched_attr sched_attr;
 		} context;
 
 		struct drm_i915_error_object {
@@ -147,11 +148,11 @@ struct i915_gpu_state {
 			long jiffies;
 			pid_t pid;
 			u32 context;
-			int priority;
 			int ban_score;
 			u32 seqno;
 			u32 head;
 			u32 tail;
+			struct i915_sched_attr sched_attr;
 		} *requests, execlist[EXECLIST_MAX_PORTS];
 		unsigned int num_ports;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 0939c120b82c..c3a908436510 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -191,7 +191,7 @@ i915_sched_init(struct i915_sched *pt)
 	INIT_LIST_HEAD(&pt->signalers_list);
 	INIT_LIST_HEAD(&pt->waiters_list);
 	INIT_LIST_HEAD(&pt->link);
-	pt->priority = I915_PRIORITY_INVALID;
+	pt->attr.priority = I915_PRIORITY_INVALID;
 }
 
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
@@ -1062,7 +1062,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	 */
 	rcu_read_lock();
 	if (engine->schedule)
-		engine->schedule(request, request->ctx->priority);
+		engine->schedule(request, &request->ctx->sched);
 	rcu_read_unlock();
 
 	local_bh_disable();
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 5d6619a245ba..701ee8c7325c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -30,6 +30,7 @@
 #include "i915_gem.h"
 #include "i915_scheduler.h"
 #include "i915_sw_fence.h"
+#include "i915_scheduler.h"
 
 #include <uapi/drm/i915_drm.h>
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index b34fca3ba17f..4cae1edeb40d 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -19,6 +19,21 @@ enum {
 	I915_PRIORITY_INVALID = INT_MIN
 };
 
+struct i915_sched_attr {
+	/**
+	 * @priority: execution and service priority
+	 *
+	 * All clients are equal, but some are more equal than others!
+	 *
+	 * Requests from a context with a greater (more positive) value of
+	 * @priority will be executed before those with a lower @priority
+	 * value, forming a simple QoS.
+	 *
+	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
+	 */
+	int priority;
+};
+
 /*
  * "People assume that time is a strict progression of cause to effect, but
  * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
@@ -37,7 +52,7 @@ struct i915_sched {
 	struct list_head signalers_list; /* those before us, we depend upon */
 	struct list_head waiters_list; /* those after us, they depend upon us */
 	struct list_head link;
-	int priority;
+	struct i915_sched_attr attr;
 };
 
 struct i915_dependency {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 020900e08d42..5fb00f1fa2a1 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12763,6 +12763,15 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
 		intel_unpin_fb_vma(vma, old_plane_state->flags);
 }
 
+static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
+{
+	struct i915_sched_attr attr = {
+		.priority = I915_PRIORITY_DISPLAY,
+	};
+
+	i915_gem_object_wait_priority(obj, 0, &attr);
+}
+
 /**
  * intel_prepare_plane_fb - Prepare fb for usage on plane
  * @plane: drm plane to prepare for
@@ -12839,7 +12848,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 
 	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
 
-	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
+	fb_obj_bump_render_priority(obj);
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	i915_gem_object_unpin_pages(obj);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index b542b1a4dddc..be608f7111f5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1113,17 +1113,29 @@ unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
 	return which;
 }
 
+static void print_sched_attr(struct drm_printer *m,
+			     const struct drm_i915_private *i915,
+			     const struct i915_sched_attr *attr)
+{
+	if (attr->priority == I915_PRIORITY_INVALID)
+		return;
+
+	drm_printf(m, "prio=%d", attr->priority);
+}
+
 static void print_request(struct drm_printer *m,
 			  struct i915_request *rq,
 			  const char *prefix)
 {
 	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
 
-	drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
+	drm_printf(m, "%s%x%s [%llx:%x] ",
+		   prefix,
 		   rq->global_seqno,
 		   i915_request_completed(rq) ? "!" : "",
-		   rq->fence.context, rq->fence.seqno,
-		   rq->sched.priority,
+		   rq->fence.context, rq->fence.seqno);
+	print_sched_attr(m, rq->i915, &rq->sched.attr);
+	drm_printf(m, " @ %dms: %s\n",
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   name);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 0755f5cae950..02da05875aa7 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -659,7 +659,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->sched.priority;
+	return rq->sched.attr.priority;
 }
 
 static inline int port_prio(const struct execlist_port *port)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 01f356cb3e25..6be8ccd18b72 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -177,7 +177,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->sched.priority;
+	return rq->sched.attr.priority;
 }
 
 static inline bool need_preempt(const struct intel_engine_cs *engine,
@@ -1172,11 +1172,13 @@ sched_lock_engine(struct i915_sched *sched, struct intel_engine_cs *locked)
 	return engine;
 }
 
-static void execlists_schedule(struct i915_request *request, int prio)
+static void execlists_schedule(struct i915_request *request,
+			       const struct i915_sched_attr *attr)
 {
 	struct intel_engine_cs *engine;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
+	const int prio = attr->priority;
 	LIST_HEAD(dfs);
 
 	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
@@ -1184,7 +1186,7 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	if (i915_request_completed(request))
 		return;
 
-	if (prio <= READ_ONCE(request->sched.priority))
+	if (prio <= READ_ONCE(request->sched.attr.priority))
 		return;
 
 	/* Need BKL in order to use the temporary link inside i915_dependency */
@@ -1226,8 +1228,8 @@ static void execlists_schedule(struct i915_request *request, int prio)
 			if (i915_sched_signaled(p->signaler))
 				continue;
 
-			GEM_BUG_ON(p->signaler->priority < sched->priority);
-			if (prio > READ_ONCE(p->signaler->priority))
+			GEM_BUG_ON(p->signaler->attr.priority < sched->attr.priority);
+			if (prio > READ_ONCE(p->signaler->attr.priority))
 				list_move_tail(&p->dfs_link, &dfs);
 		}
 	}
@@ -1238,9 +1240,9 @@ static void execlists_schedule(struct i915_request *request, int prio)
 	 * execlists_submit_request()), we can set our own priority and skip
 	 * acquiring the engine locks.
 	 */
-	if (request->sched.priority == I915_PRIORITY_INVALID) {
+	if (request->sched.attr.priority == I915_PRIORITY_INVALID) {
 		GEM_BUG_ON(!list_empty(&request->sched.link));
-		request->sched.priority = prio;
+		request->sched.attr = *attr;
 		if (stack.dfs_link.next == stack.dfs_link.prev)
 			return;
 		__list_del_entry(&stack.dfs_link);
@@ -1257,10 +1259,10 @@ static void execlists_schedule(struct i915_request *request, int prio)
 
 		engine = sched_lock_engine(sched, engine);
 
-		if (prio <= sched->priority)
+		if (prio <= sched->attr.priority)
 			continue;
 
-		sched->priority = prio;
+		sched->attr.priority = prio;
 		if (!list_empty(&sched->link)) {
 			__list_del_entry(&sched->link);
 			queue_request(engine, sched, prio);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 717041640135..c5e27905b0e1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,6 +14,7 @@
 #include "intel_gpu_commands.h"
 
 struct drm_printer;
+struct i915_sched_attr;
 
 #define I915_CMD_HASH_ORDER 9
 
@@ -460,7 +461,8 @@ struct intel_engine_cs {
 	 *
 	 * Called under the struct_mutex.
 	 */
-	void		(*schedule)(struct i915_request *request, int priority);
+	void		(*schedule)(struct i915_request *request,
+				    const struct i915_sched_attr *attr);
 
 	/*
 	 * Cancel all requests on the hardware, or queued for execution.
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 24f913f26a7b..f7ee54e109ae 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -628,7 +628,7 @@ static int active_engine(void *data)
 		}
 
 		if (arg->flags & TEST_PRIORITY)
-			ctx[idx]->priority =
+			ctx[idx]->sched.priority =
 				i915_prandom_u32_max_state(512, &prng);
 
 		rq[idx] = i915_request_get(new);
@@ -683,7 +683,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 			return err;
 
 		if (flags & TEST_PRIORITY)
-			h.ctx->priority = 1024;
+			h.ctx->sched.priority = 1024;
 	}
 
 	for_each_engine(engine, i915, id) {
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 0481e2e01146..ee7e22d18ff8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -335,12 +335,12 @@ static int live_preempt(void *arg)
 	ctx_hi = kernel_context(i915);
 	if (!ctx_hi)
 		goto err_spin_lo;
-	ctx_hi->priority = I915_CONTEXT_MAX_USER_PRIORITY;
+	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
 	ctx_lo = kernel_context(i915);
 	if (!ctx_lo)
 		goto err_ctx_hi;
-	ctx_lo->priority = I915_CONTEXT_MIN_USER_PRIORITY;
+	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
 
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
@@ -407,6 +407,7 @@ static int live_late_preempt(void *arg)
 	struct i915_gem_context *ctx_hi, *ctx_lo;
 	struct spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
+	struct i915_sched_attr attr = {};
 	enum intel_engine_id id;
 	int err = -ENOMEM;
 
@@ -458,7 +459,8 @@ static int live_late_preempt(void *arg)
 			goto err_wedged;
 		}
 
-		engine->schedule(rq, I915_PRIORITY_MAX);
+		attr.priority = I915_PRIORITY_MAX;
+		engine->schedule(rq, &attr);
 
 		if (!wait_for_spinner(&spin_hi, rq)) {
 			pr_err("High priority context failed to preempt the low priority context\n");
-- 
2.17.0
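
The v2 respin is visible above in intel_display.c: the anonymous compound
literal that v1 passed straight to i915_gem_object_wait_priority() becomes
a named attribute inside the new fb_obj_bump_render_priority() helper. A
standalone contrast of the two forms (the wait function is stubbed here so
the example compiles on its own):

    #include <stdio.h>

    struct i915_sched_attr { int priority; };
    enum { I915_PRIORITY_DISPLAY = 1024 };

    /* Stub standing in for the driver function. */
    static void i915_gem_object_wait_priority(void *obj, unsigned int flags,
                                              const struct i915_sched_attr *attr)
    {
            (void)obj;
            (void)flags;
            printf("bump to prio=%d\n", attr->priority);
    }

    /* v1: compound literal at the call site. Valid C99, but less legible
     * and, per the commit message, a problem for "ye olde gcc". */
    static void bump_v1(void *obj)
    {
            i915_gem_object_wait_priority(obj, 0, &(struct i915_sched_attr){
                                          .priority = I915_PRIORITY_DISPLAY,
                                          });
    }

    /* v2: a named attribute in a small helper, matching
     * fb_obj_bump_render_priority() above. */
    static void bump_v2(void *obj)
    {
            struct i915_sched_attr attr = {
                    .priority = I915_PRIORITY_DISPLAY,
            };

            i915_gem_object_wait_priority(obj, 0, &attr);
    }

    int main(void)
    {
            bump_v1(NULL);
            bump_v2(NULL);
            return 0;
    }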


end of thread (newest message: 2018-04-18 10:32 UTC)

Thread overview: 12+ messages
2018-04-11 17:35 [PATCH] drm/i915: Pack params to engine->schedule() into a struct Chris Wilson
2018-04-11 18:23 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
2018-04-11 18:39 ` ✓ Fi.CI.BAT: success " Patchwork
2018-04-11 21:51 ` ✗ Fi.CI.IGT: failure " Patchwork
2018-04-11 23:00 ` [PATCH v2] " Chris Wilson
2018-04-17  9:12   ` Tvrtko Ursulin
2018-04-17  9:49     ` Chris Wilson
2018-04-11 23:27 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Pack params to engine->schedule() into a struct (rev2) Patchwork
2018-04-11 23:43 ` ✓ Fi.CI.BAT: success " Patchwork
2018-04-12  3:18 ` ✓ Fi.CI.IGT: " Patchwork
2018-04-17 14:31 [PATCH v2 3/3] drm/i915: Pack params to engine->schedule() into a struct Chris Wilson
2018-04-18  9:41 ` [PATCH v2] " Chris Wilson
2018-04-18 10:32   ` Joonas Lahtinen
