* [RFC PATCH] drm/i915: make context status notifier head be per engine
@ 2017-03-02 11:23 changbin.du
  2017-03-02 11:47 ` ✗ Fi.CI.BAT: warning for " Patchwork
                   ` (4 more replies)
  0 siblings, 5 replies; 13+ messages in thread
From: changbin.du @ 2017-03-02 11:23 UTC (permalink / raw)
  To: intel-gfx, daniel.vetter, chris

From: Changbin Du <changbin.du@intel.com>

Hi Daniel, Chris and all,
As you know, GVTg introduced the context status notifier to schedule
the GVTg workload. At that time, the notifier was bound to the GVTg
context only, so GVTg was not aware of host workloads.

Now we are going to improve GVTg's guest workload scheduler policy
and add GuC emulation support for new Gen graphics. Both of these
features require awareness of all contexts running on the hardware
(but will not alter host workloads). So this patch makes the
following change.

The change is simple:
  1. Move the context status notifier head from i915_gem_context to
     intel_engine_cs, which means there is a notifier head per engine
     instead of per context. The execlist driver still calls the
     notifier for each context sched-in/out event on the current
     engine.
  2. On the GVTg side, bind a notifier_block for each physical engine
     during GVTg initialization. Then GVTg can see all context status
     events (a minimal sketch of this pattern follows the list).
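
As a rough illustration of that pattern, here is a minimal kernel-style
sketch of a per-engine notifier head being initialized, a block being
registered on it, and the callback shape. It is illustrative only, not
the actual i915 hunks (those follow below); the struct and the vGPU
guard are simplified stand-ins.

#include <linux/notifier.h>

/* Sketch: each engine carries its own notifier head. */
struct engine {
        struct atomic_notifier_head context_status_notifier;
};

static int status_change(struct notifier_block *nb,
                         unsigned long action, void *data)
{
        /* Hypothetical guard: stay a no-op while no vGPU is active. */
        return NOTIFY_OK;
}

static struct notifier_block gvt_nb = {
        .notifier_call = status_change,
};

static void engine_setup(struct engine *engine)
{
        /* The head is initialized once per engine at setup time... */
        ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
        /* ...then GVTg registers one block per physical engine. */
        atomic_notifier_chain_register(&engine->context_status_notifier,
                                       &gvt_nb);
}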

In this RFC patch, GVTg does nothing for host context events, but a
function will be added there later. In any case, the notifier
callback is a no-op if there is no active vGPU.

Since intel_gvt_init() is called at an early initialization stage and
requires the status notifier head to be initialized, I initialize it
in intel_engine_setup(). I am not sure if this is the right place;
please correct me if it is wrong. Thank you! Looking forward to your
feedback. :)

Signed-off-by: Changbin Du <changbin.du@intel.com>
---
 drivers/gpu/drm/i915/gvt/gvt.h          |  2 +-
 drivers/gpu/drm/i915/gvt/scheduler.c    | 46 ++++++++++++++-------------------
 drivers/gpu/drm/i915/i915_gem_context.c |  1 -
 drivers/gpu/drm/i915/i915_gem_context.h |  3 ---
 drivers/gpu/drm/i915/intel_engine_cs.c  |  2 ++
 drivers/gpu/drm/i915/intel_lrc.c        |  3 ++-
 drivers/gpu/drm/i915/intel_ringbuffer.h |  3 +++
 7 files changed, 28 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index a41b322..6c1ec7d6 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -162,7 +162,6 @@ struct intel_vgpu {
 	atomic_t running_workload_num;
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	struct i915_gem_context *shadow_ctx;
-	struct notifier_block shadow_ctx_notifier_block;
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
 	struct {
@@ -233,6 +232,7 @@ struct intel_gvt {
 	struct intel_gvt_gtt gtt;
 	struct intel_gvt_opregion opregion;
 	struct intel_gvt_workload_scheduler scheduler;
+	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
 	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
 	struct intel_vgpu_type *types;
 	unsigned int num_types;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index e355a82..1cb90b8 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -127,15 +127,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
+
 static int shadow_context_status_change(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
-	struct intel_vgpu *vgpu = container_of(nb,
-			struct intel_vgpu, shadow_ctx_notifier_block);
-	struct drm_i915_gem_request *req =
-		(struct drm_i915_gem_request *)data;
-	struct intel_gvt_workload_scheduler *scheduler =
-		&vgpu->gvt->scheduler;
+	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+				shadow_ctx_notifier_block[req->engine->id]);
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload =
 		scheduler->current_workload[req->engine->id];
 
@@ -496,15 +495,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	int i;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id i;
 
 	gvt_dbg_core("clean workload scheduler\n");
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (scheduler->thread[i]) {
-			kthread_stop(scheduler->thread[i]);
-			scheduler->thread[i] = NULL;
-		}
+	for_each_engine(engine, gvt->dev_priv, i) {
+		atomic_notifier_chain_unregister(
+					&engine->context_status_notifier,
+					&gvt->shadow_ctx_notifier_block[i]);
+		kthread_stop(scheduler->thread[i]);
 	}
 }
 
@@ -512,18 +512,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct workload_thread_param *param = NULL;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id i;
 	int ret;
-	int i;
 
 	gvt_dbg_core("init workload scheduler\n");
 
 	init_waitqueue_head(&scheduler->workload_complete_wq);
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		/* check ring mask at init time */
-		if (!HAS_ENGINE(gvt->dev_priv, i))
-			continue;
-
+	for_each_engine(engine, gvt->dev_priv, i) {
 		init_waitqueue_head(&scheduler->waitq[i]);
 
 		param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -542,6 +539,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 			ret = PTR_ERR(scheduler->thread[i]);
 			goto err;
 		}
+
+		gvt->shadow_ctx_notifier_block[i].notifier_call =
+					shadow_context_status_change;
+		atomic_notifier_chain_register(&engine->context_status_notifier,
+					&gvt->shadow_ctx_notifier_block[i]);
 	}
 	return 0;
 err:
@@ -553,9 +555,6 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
 {
-	atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
-			&vgpu->shadow_ctx_notifier_block);
-
 	i915_gem_context_put_unlocked(vgpu->shadow_ctx);
 }
 
@@ -570,10 +569,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
 
 	vgpu->shadow_ctx->engine[RCS].initialised = true;
 
-	vgpu->shadow_ctx_notifier_block.notifier_call =
-		shadow_context_status_change;
-
-	atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
-				       &vgpu->shadow_ctx_notifier_block);
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 2624520..6e202fb 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -335,7 +335,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	ctx->ring_size = 4 * PAGE_SIZE;
 	ctx->desc_template =
 		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
-	ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
 
 	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
 	 * present or not in use we still need a small bias as ring wraparound
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 0ac750b..e9c008f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -160,9 +160,6 @@ struct i915_gem_context {
 	/** desc_template: invariant fields for the HW context descriptor */
 	u32 desc_template;
 
-	/** status_notifier: list of callbacks for context-switch changes */
-	struct atomic_notifier_head status_notifier;
-
 	/** guilty_count: How many times this context has caused a GPU hang. */
 	unsigned int guilty_count;
 	/**
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 46c9474..9695daf 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	/* Nothing to do here, execute in order of dependencies */
 	engine->schedule = NULL;
 
+	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+
 	dev_priv->engine[id] = engine;
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 39329d4..b0daadf 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -306,7 +306,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
 	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
 		return;
 
-	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+	atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+				   status, rq);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 4091c97..a40f8a7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -408,6 +408,9 @@ struct intel_engine_cs {
 	 */
 	struct i915_gem_context *legacy_active_context;
 
+	/* status_notifier: list of callbacks for context-switch changes */
+	struct atomic_notifier_head context_status_notifier;
+
 	struct intel_engine_hangcheck hangcheck;
 
 	bool needs_cmd_parser;
-- 
2.7.4
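
A side note on the container_of() usage in shadow_context_status_change()
above: the callback recovers the owning intel_gvt from a pointer to one
element of the embedded shadow_ctx_notifier_block[] array. Below is a
minimal, self-contained userspace sketch of the same trick; container_of
is hand-rolled and the struct names are invented for illustration (the
kernel version indexes the array member with a runtime value,
req->engine->id, which gcc accepts as an extension).

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Invented stand-ins for intel_gvt and its embedded notifier array. */
enum { ENGINE_ID = 2 };

struct gvt {
        int id;
        int nb[4];
};

int main(void)
{
        struct gvt g = { .id = 42 };
        int *block = &g.nb[ENGINE_ID];  /* what the callback receives */

        /* offsetof() takes an array-element designator, so the owning
         * struct can be recovered from a pointer to any slot. */
        struct gvt *owner = container_of(block, struct gvt, nb[ENGINE_ID]);

        printf("recovered id = %d\n", owner->id);       /* prints 42 */
        return 0;
}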


* ✗ Fi.CI.BAT: warning for drm/i915: make context status notifier head be per engine
  2017-03-02 11:23 [RFC PATCH] drm/i915: make context status notifier head be per engine changbin.du
@ 2017-03-02 11:47 ` Patchwork
  2017-03-09  8:44 ` [RFC PATCH] " Du, Changbin
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2017-03-02 11:47 UTC (permalink / raw)
  To: changbin.du; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: make context status notifier head be per engine
URL   : https://patchwork.freedesktop.org/series/20552/
State : warning

== Summary ==

Series 20552v1 drm/i915: make context status notifier head be per engine
https://patchwork.freedesktop.org/api/1.0/series/20552/revisions/1/mbox/

Test gem_exec_parallel:
        Subgroup basic:
                pass       -> DMESG-WARN (fi-skl-6700hq)
Test pm_rpm:
        Subgroup basic-pci-d3-state:
                incomplete -> PASS       (fi-byt-n2820) fdo#99740

fdo#99740 https://bugs.freedesktop.org/show_bug.cgi?id=99740

fi-bdw-5557u     total:278  pass:267  dwarn:0   dfail:0   fail:0   skip:11 
fi-bsw-n3050     total:278  pass:239  dwarn:0   dfail:0   fail:0   skip:39 
fi-bxt-j4205     total:278  pass:259  dwarn:0   dfail:0   fail:0   skip:19 
fi-bxt-t5700     total:278  pass:258  dwarn:0   dfail:0   fail:0   skip:20 
fi-byt-j1900     total:278  pass:251  dwarn:0   dfail:0   fail:0   skip:27 
fi-byt-n2820     total:278  pass:247  dwarn:0   dfail:0   fail:0   skip:31 
fi-hsw-4770      total:278  pass:262  dwarn:0   dfail:0   fail:0   skip:16 
fi-hsw-4770r     total:278  pass:262  dwarn:0   dfail:0   fail:0   skip:16 
fi-ilk-650       total:278  pass:228  dwarn:0   dfail:0   fail:0   skip:50 
fi-ivb-3520m     total:278  pass:260  dwarn:0   dfail:0   fail:0   skip:18 
fi-ivb-3770      total:278  pass:260  dwarn:0   dfail:0   fail:0   skip:18 
fi-kbl-7500u     total:278  pass:259  dwarn:1   dfail:0   fail:0   skip:18 
fi-skl-6260u     total:278  pass:268  dwarn:0   dfail:0   fail:0   skip:10 
fi-skl-6700hq    total:278  pass:260  dwarn:1   dfail:0   fail:0   skip:17 
fi-skl-6700k     total:278  pass:256  dwarn:4   dfail:0   fail:0   skip:18 
fi-skl-6770hq    total:278  pass:268  dwarn:0   dfail:0   fail:0   skip:10 
fi-snb-2520m     total:278  pass:250  dwarn:0   dfail:0   fail:0   skip:28 
fi-snb-2600      total:278  pass:249  dwarn:0   dfail:0   fail:0   skip:29 

b3c4a574937b2256a716b23037c8b94ffebd7c8a drm-tip: 2017y-03m-02d-08h-59m-08s UTC integration manifest
6fcfeda drm/i915: make context status notifier head be per engine

== Logs ==

For more details see: https://intel-gfx-ci.01.org/CI/Patchwork_4031/

* Re: [RFC PATCH] drm/i915: make context status notifier head be per engine
  2017-03-02 11:23 [RFC PATCH] drm/i915: make context status notifier head be per engine changbin.du
  2017-03-02 11:47 ` ✗ Fi.CI.BAT: warning for " Patchwork
@ 2017-03-09  8:44 ` Du, Changbin
  2017-03-09 11:27 ` [PATCH] " changbin.du
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 13+ messages in thread
From: Du, Changbin @ 2017-03-09  8:44 UTC (permalink / raw)
  To: changbin.du; +Cc: daniel.vetter, intel-gfx


Hi, are there any comments on this? I'd like to hear Chris's and
Daniel's comments. :)

Thanks,
Changbin

On Thu, Mar 02, 2017 at 07:23:05PM +0800, changbin.du@intel.com wrote:
> From: Changbin Du <changbin.du@intel.com>
> 
> Hi Daniel, Chris and all,
> As you know, GVTg introduced the context status notifier to schedule
> the GVTg workload. At that time, the notifier was bound to the GVTg
> context only, so GVTg was not aware of host workloads.
> 
> Now we are going to improve GVTg's guest workload scheduler policy
> and add GuC emulation support for new Gen graphics. Both of these
> features require awareness of all contexts running on the hardware
> (but will not alter host workloads). So this patch makes the
> following change.
> 
> The change is simple:
>   1. Move the context status notifier head from i915_gem_context to
>      intel_engine_cs, which means there is a notifier head per engine
>      instead of per context. The execlist driver still calls the
>      notifier for each context sched-in/out event on the current
>      engine.
>   2. On the GVTg side, bind a notifier_block for each physical engine
>      during GVTg initialization. Then GVTg can see all context status
>      events.
> 
> In this RFC patch, GVTg does nothing for host context events, but a
> function will be added there later. In any case, the notifier
> callback is a no-op if there is no active vGPU.
> 
> Since intel_gvt_init() is called at an early initialization stage and
> requires the status notifier head to be initialized, I initialize it
> in intel_engine_setup(). I am not sure if this is the right place;
> please correct me if it is wrong. Thank you! Looking forward to your
> feedback. :)
> 
> Signed-off-by: Changbin Du <changbin.du@intel.com>


* [PATCH] drm/i915: make context status notifier head be per engine
  2017-03-02 11:23 [RFC PATCH] drm/i915: make context status notifier head be per engine changbin.du
  2017-03-02 11:47 ` ✗ Fi.CI.BAT: warning for " Patchwork
  2017-03-09  8:44 ` [RFC PATCH] " Du, Changbin
@ 2017-03-09 11:27 ` changbin.du
  2017-03-10 17:17   ` Chris Wilson
  2017-03-13  2:47   ` [PATCH v2] " changbin.du
  2017-03-09 15:22 ` ✓ Fi.CI.BAT: success for drm/i915: make context status notifier head be per engine (rev2) Patchwork
  2017-03-13  4:25 ` ✗ Fi.CI.BAT: failure for drm/i915: make context status notifier head be per engine (rev3) Patchwork
  4 siblings, 2 replies; 13+ messages in thread
From: changbin.du @ 2017-03-09 11:27 UTC (permalink / raw)
  To: intel-gfx, chris, daniel.vetter

From: Changbin Du <changbin.du@intel.com>

GVTg introduced the context status notifier to schedule the GVTg
workload. At that time, the notifier was bound to the GVTg context
only, so GVTg was not aware of host workloads.

Now we are going to improve GVTg's guest workload scheduler policy
and add GuC emulation support for new Gen graphics. Both of these
features require awareness of all contexts running on the hardware
(but will not alter host workloads). So this patch makes the
following change.

The change is simple:
  1. Move the context status notifier head from i915_gem_context to
     intel_engine_cs, which means there is a notifier head per engine
     instead of per context. The execlist driver still calls the
     notifier for each context sched-in/out event on the current
     engine (a short sketch of this delivery path follows the list).
  2. On the GVTg side, bind a notifier_block for each physical engine
     during GVTg initialization. Then GVTg can see all context status
     events.
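
A hedged sketch of that delivery path, condensing the intel_lrc.c hunk
below; the wrapper name is invented, and the status codes are the
INTEL_CONTEXT_SCHEDULE_IN/OUT values that the execlist code passes as
the notifier action:

/* Every sched-in/out event fans out over the engine's chain, with the
 * request riding along as the notifier data. */
static void notify_context_status(struct intel_engine_cs *engine,
                                  struct drm_i915_gem_request *rq,
                                  unsigned long status)
{
        atomic_notifier_call_chain(&engine->context_status_notifier,
                                   status, rq);
}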

In this patch, GVTg does nothing for host context events, but a
function will be added there later. In any case, the notifier
callback is a no-op if there is no active vGPU.

Since intel_gvt_init() is called at an early initialization stage and
requires the status notifier head to be initialized, I initialize it
in intel_engine_setup().

Signed-off-by: Changbin Du <changbin.du@intel.com>
---
 drivers/gpu/drm/i915/gvt/gvt.h          |  2 +-
 drivers/gpu/drm/i915/gvt/scheduler.c    | 46 ++++++++++++++-------------------
 drivers/gpu/drm/i915/i915_gem_context.c |  1 -
 drivers/gpu/drm/i915/i915_gem_context.h |  3 ---
 drivers/gpu/drm/i915/intel_engine_cs.c  |  2 ++
 drivers/gpu/drm/i915/intel_lrc.c        |  3 ++-
 drivers/gpu/drm/i915/intel_ringbuffer.h |  3 +++
 7 files changed, 28 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 2379192..6dfc48b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -162,7 +162,6 @@ struct intel_vgpu {
 	atomic_t running_workload_num;
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	struct i915_gem_context *shadow_ctx;
-	struct notifier_block shadow_ctx_notifier_block;
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
 	struct {
@@ -233,6 +232,7 @@ struct intel_gvt {
 	struct intel_gvt_gtt gtt;
 	struct intel_gvt_opregion opregion;
 	struct intel_gvt_workload_scheduler scheduler;
+	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
 	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
 	struct intel_vgpu_type *types;
 	unsigned int num_types;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d3a56c9..64875ec 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -127,15 +127,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
+
 static int shadow_context_status_change(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
-	struct intel_vgpu *vgpu = container_of(nb,
-			struct intel_vgpu, shadow_ctx_notifier_block);
-	struct drm_i915_gem_request *req =
-		(struct drm_i915_gem_request *)data;
-	struct intel_gvt_workload_scheduler *scheduler =
-		&vgpu->gvt->scheduler;
+	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+				shadow_ctx_notifier_block[req->engine->id]);
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload =
 		scheduler->current_workload[req->engine->id];
 
@@ -513,15 +512,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	int i;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id i;
 
 	gvt_dbg_core("clean workload scheduler\n");
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (scheduler->thread[i]) {
-			kthread_stop(scheduler->thread[i]);
-			scheduler->thread[i] = NULL;
-		}
+	for_each_engine(engine, gvt->dev_priv, i) {
+		atomic_notifier_chain_unregister(
+					&engine->context_status_notifier,
+					&gvt->shadow_ctx_notifier_block[i]);
+		kthread_stop(scheduler->thread[i]);
 	}
 }
 
@@ -529,18 +529,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct workload_thread_param *param = NULL;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id i;
 	int ret;
-	int i;
 
 	gvt_dbg_core("init workload scheduler\n");
 
 	init_waitqueue_head(&scheduler->workload_complete_wq);
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		/* check ring mask at init time */
-		if (!HAS_ENGINE(gvt->dev_priv, i))
-			continue;
-
+	for_each_engine(engine, gvt->dev_priv, i) {
 		init_waitqueue_head(&scheduler->waitq[i]);
 
 		param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -559,6 +556,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 			ret = PTR_ERR(scheduler->thread[i]);
 			goto err;
 		}
+
+		gvt->shadow_ctx_notifier_block[i].notifier_call =
+					shadow_context_status_change;
+		atomic_notifier_chain_register(&engine->context_status_notifier,
+					&gvt->shadow_ctx_notifier_block[i]);
 	}
 	return 0;
 err:
@@ -570,9 +572,6 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
 {
-	atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
-			&vgpu->shadow_ctx_notifier_block);
-
 	i915_gem_context_put_unlocked(vgpu->shadow_ctx);
 }
 
@@ -587,10 +586,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
 
 	vgpu->shadow_ctx->engine[RCS].initialised = true;
 
-	vgpu->shadow_ctx_notifier_block.notifier_call =
-		shadow_context_status_change;
-
-	atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
-				       &vgpu->shadow_ctx_notifier_block);
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index baceca1..0005085 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -318,7 +318,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	ctx->ring_size = 4 * PAGE_SIZE;
 	ctx->desc_template =
 		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
-	ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
 
 	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
 	 * present or not in use we still need a small bias as ring wraparound
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 81268c9..4af2ab94 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -158,9 +158,6 @@ struct i915_gem_context {
 	/** desc_template: invariant fields for the HW context descriptor */
 	u32 desc_template;
 
-	/** status_notifier: list of callbacks for context-switch changes */
-	struct atomic_notifier_head status_notifier;
-
 	/** guilty_count: How many times this context has caused a GPU hang. */
 	unsigned int guilty_count;
 	/**
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 73fe718..695693a 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	/* Nothing to do here, execute in order of dependencies */
 	engine->schedule = NULL;
 
+	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+
 	dev_priv->engine[id] = engine;
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 89f38e7..808ad46 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -306,7 +306,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
 	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
 		return;
 
-	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+	atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+				   status, rq);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0ef491d..b7d42a5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -408,6 +408,9 @@ struct intel_engine_cs {
 	 */
 	struct i915_gem_context *legacy_active_context;
 
+	/* status_notifier: list of callbacks for context-switch changes */
+	struct atomic_notifier_head context_status_notifier;
+
 	struct intel_engine_hangcheck hangcheck;
 
 	bool needs_cmd_parser;
-- 
2.7.4


* ✓ Fi.CI.BAT: success for drm/i915: make context status notifier head be per engine (rev2)
  2017-03-02 11:23 [RFC PATCH] drm/i915: make context status notifier head be per engine changbin.du
                   ` (2 preceding siblings ...)
  2017-03-09 11:27 ` [PATCH] " changbin.du
@ 2017-03-09 15:22 ` Patchwork
  2017-03-13  4:25 ` ✗ Fi.CI.BAT: failure for drm/i915: make context status notifier head be per engine (rev3) Patchwork
  4 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2017-03-09 15:22 UTC (permalink / raw)
  To: changbin.du; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: make context status notifier head be per engine (rev2)
URL   : https://patchwork.freedesktop.org/series/20552/
State : success

== Summary ==

Series 20552v2 drm/i915: make context status notifier head be per engine
https://patchwork.freedesktop.org/api/1.0/series/20552/revisions/2/mbox/

Test gem_ctx_create:
        Subgroup basic-files:
                incomplete -> PASS       (fi-skl-6700k) fdo#100130
Test gem_exec_flush:
        Subgroup basic-uc-ro-default:
                incomplete -> PASS       (fi-kbl-7500u) fdo#100130

fdo#100130 https://bugs.freedesktop.org/show_bug.cgi?id=100130

fi-bdw-5557u     total:278  pass:267  dwarn:0   dfail:0   fail:0   skip:11  time: 465s
fi-bsw-n3050     total:278  pass:239  dwarn:0   dfail:0   fail:0   skip:39  time: 613s
fi-bxt-j4205     total:278  pass:259  dwarn:0   dfail:0   fail:0   skip:19  time: 532s
fi-bxt-t5700     total:278  pass:258  dwarn:0   dfail:0   fail:0   skip:20  time: 612s
fi-byt-j1900     total:278  pass:251  dwarn:0   dfail:0   fail:0   skip:27  time: 506s
fi-byt-n2820     total:278  pass:247  dwarn:0   dfail:0   fail:0   skip:31  time: 500s
fi-hsw-4770      total:278  pass:262  dwarn:0   dfail:0   fail:0   skip:16  time: 443s
fi-hsw-4770r     total:278  pass:262  dwarn:0   dfail:0   fail:0   skip:16  time: 432s
fi-ilk-650       total:278  pass:228  dwarn:0   dfail:0   fail:0   skip:50  time: 447s
fi-ivb-3520m     total:278  pass:260  dwarn:0   dfail:0   fail:0   skip:18  time: 505s
fi-ivb-3770      total:278  pass:260  dwarn:0   dfail:0   fail:0   skip:18  time: 491s
fi-kbl-7500u     total:278  pass:260  dwarn:0   dfail:0   fail:0   skip:18  time: 470s
fi-skl-6260u     total:278  pass:268  dwarn:0   dfail:0   fail:0   skip:10  time: 509s
fi-skl-6700hq    total:278  pass:261  dwarn:0   dfail:0   fail:0   skip:17  time: 597s
fi-skl-6700k     total:53   pass:45   dwarn:0   dfail:0   fail:0   skip:7   time: 0s
fi-skl-6770hq    total:278  pass:268  dwarn:0   dfail:0   fail:0   skip:10  time: 547s
fi-snb-2520m     total:278  pass:250  dwarn:0   dfail:0   fail:0   skip:28  time: 558s
fi-snb-2600      total:278  pass:249  dwarn:0   dfail:0   fail:0   skip:29  time: 419s

789c80247ee92848a46d66a32ca723e16a261913 drm-tip: 2017y-03m-09d-12h-32m-54s UTC integration manifest
f793ac6 drm/i915: make context status notifier head be per engine

== Logs ==

For more details see: https://intel-gfx-ci.01.org/CI/Patchwork_4115/

* Re: [PATCH] drm/i915: make context status notifier head be per engine
  2017-03-09 11:27 ` [PATCH] " changbin.du
@ 2017-03-10 17:17   ` Chris Wilson
  2017-03-13  2:14     ` Du, Changbin
  2017-03-13  2:47   ` [PATCH v2] " changbin.du
  1 sibling, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2017-03-10 17:17 UTC (permalink / raw)
  To: changbin.du; +Cc: daniel.vetter, intel-gfx

On Thu, Mar 09, 2017 at 07:27:24PM +0800, changbin.du@intel.com wrote:
> From: Changbin Du <changbin.du@intel.com>
> 
> GVTg introduced the context status notifier to schedule the GVTg
> workload. At that time, the notifier was bound to the GVTg context
> only, so GVTg was not aware of host workloads.
> 
> Now we are going to improve GVTg's guest workload scheduler policy
> and add GuC emulation support for new Gen graphics. Both of these
> features require awareness of all contexts running on the hardware
> (but will not alter host workloads). So this patch makes the
> following change.
> 
> The change is simple:
>   1. Move the context status notifier head from i915_gem_context to
>      intel_engine_cs, which means there is a notifier head per engine
>      instead of per context. The execlist driver still calls the
>      notifier for each context sched-in/out event on the current
>      engine.
>   2. On the GVTg side, bind a notifier_block for each physical engine
>      during GVTg initialization. Then GVTg can see all context status
>      events.
> 
> In this patch, GVTg does nothing for host context events, but a
> function will be added there later. In any case, the notifier
> callback is a no-op if there is no active vGPU.
> 
> Since intel_gvt_init() is called at an early initialization stage and
> requires the status notifier head to be initialized, I initialize it
> in intel_engine_setup().
> 
> Signed-off-by: Changbin Du <changbin.du@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>

I presume you will apply this via gvt?

> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index d3a56c9..64875ec 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -127,15 +127,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
>  	return 0;
>  }
>  
> +

Bonus newline

>  static int shadow_context_status_change(struct notifier_block *nb,
>  		unsigned long action, void *data)
>  {
> -	struct intel_vgpu *vgpu = container_of(nb,
> -			struct intel_vgpu, shadow_ctx_notifier_block);
> -	struct drm_i915_gem_request *req =
> -		(struct drm_i915_gem_request *)data;
> -	struct intel_gvt_workload_scheduler *scheduler =
> -		&vgpu->gvt->scheduler;
> +	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
> +	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
> +				shadow_ctx_notifier_block[req->engine->id]);
> +	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
>  	struct intel_vgpu_workload *workload =
>  		scheduler->current_workload[req->engine->id];
>  

-- 
Chris Wilson, Intel Open Source Technology Centre

* Re: [PATCH] drm/i915: make context status notifier head be per engine
  2017-03-10 17:17   ` Chris Wilson
@ 2017-03-13  2:14     ` Du, Changbin
  0 siblings, 0 replies; 13+ messages in thread
From: Du, Changbin @ 2017-03-13  2:14 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx, daniel.vetter


Hi Chris,

On Fri, Mar 10, 2017 at 05:17:17PM +0000, Chris Wilson wrote:
> On Thu, Mar 09, 2017 at 07:27:24PM +0800, changbin.du@intel.com wrote:
> > From: Changbin Du <changbin.du@intel.com>
> > 
> > GVTg introduced the context status notifier to schedule the GVTg
> > workload. At that time, the notifier was bound to the GVTg context
> > only, so GVTg was not aware of host workloads.
> > 
> > Now we are going to improve GVTg's guest workload scheduler policy
> > and add GuC emulation support for new Gen graphics. Both of these
> > features require awareness of all contexts running on the hardware
> > (but will not alter host workloads). So this patch makes the
> > following change.
> > 
> > The change is simple:
> >   1. Move the context status notifier head from i915_gem_context to
> >      intel_engine_cs, which means there is a notifier head per engine
> >      instead of per context. The execlist driver still calls the
> >      notifier for each context sched-in/out event on the current
> >      engine.
> >   2. On the GVTg side, bind a notifier_block for each physical engine
> >      during GVTg initialization. Then GVTg can see all context status
> >      events.
> > 
> > In this patch, GVTg does nothing for host context events, but a
> > function will be added there later. In any case, the notifier
> > callback is a no-op if there is no active vGPU.
> > 
> > Since intel_gvt_init() is called at an early initialization stage and
> > requires the status notifier head to be initialized, I initialize it
> > in intel_engine_setup().
> > 
> > Signed-off-by: Changbin Du <changbin.du@intel.com>
> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
> 
> I presume you will apply this via gvt?
>
Sure, I'll sync with Zhenyu about this, then update the patch with the
'bonus newline' below fixed. Thanks.

> > diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> > index d3a56c9..64875ec 100644
> > --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> > +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> > @@ -127,15 +127,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
> >  	return 0;
> >  }
> >  
> > +
> 
> Bonus newline
> 
> >  static int shadow_context_status_change(struct notifier_block *nb,
> >  		unsigned long action, void *data)
> >  {
> > -	struct intel_vgpu *vgpu = container_of(nb,
> > -			struct intel_vgpu, shadow_ctx_notifier_block);
> > -	struct drm_i915_gem_request *req =
> > -		(struct drm_i915_gem_request *)data;
> > -	struct intel_gvt_workload_scheduler *scheduler =
> > -		&vgpu->gvt->scheduler;
> > +	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
> > +	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
> > +				shadow_ctx_notifier_block[req->engine->id]);
> > +	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
> >  	struct intel_vgpu_workload *workload =
> >  		scheduler->current_workload[req->engine->id];
> >  
> 
> -- 
> Chris Wilson, Intel Open Source Technology Centre

-- 
Thanks,
Changbin Du


* [PATCH v2] drm/i915: make context status notifier head be per engine
  2017-03-09 11:27 ` [PATCH] " changbin.du
  2017-03-10 17:17   ` Chris Wilson
@ 2017-03-13  2:47   ` changbin.du
  2017-03-13  4:03     ` Zhenyu Wang
  1 sibling, 1 reply; 13+ messages in thread
From: changbin.du @ 2017-03-13  2:47 UTC (permalink / raw)
  To: intel-gvt-dev; +Cc: daniel.vetter, intel-gfx

From: Changbin Du <changbin.du@intel.com>

GVTg introduced the context status notifier to schedule the GVTg
workload. At that time, the notifier was bound to the GVTg context
only, so GVTg was not aware of host workloads.

Now we are going to improve GVTg's guest workload scheduler policy
and add GuC emulation support for new Gen graphics. Both of these
features require awareness of all contexts running on the hardware
(but will not alter host workloads). So this patch makes the
following change.

The change is simple:
  1. Move the context status notifier head from i915_gem_context to
     intel_engine_cs, which means there is a notifier head per engine
     instead of per context. The execlist driver still calls the
     notifier for each context sched-in/out event on the current
     engine.
  2. On the GVTg side, bind a notifier_block for each physical engine
     during GVTg initialization (see the loop sketch after this list).
     Then GVTg can see all context status events.
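
On that per-engine binding, note that the scheduler.c hunks below also
switch the init/cleanup loops from a manual ring-mask check to the
driver's for_each_engine() iterator, which visits only the engines
present on the device. A sketch of the shape of that change, with the
loop bodies elided:

/* Before: walk every possible id and skip absent engines. */
for (i = 0; i < I915_NUM_ENGINES; i++) {
        if (!HAS_ENGINE(gvt->dev_priv, i))
                continue;
        /* ... per-engine setup ... */
}

/* After: visit only the engines that actually exist. */
for_each_engine(engine, gvt->dev_priv, i) {
        /* ... per-engine setup; engine is valid here ... */
}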

In this patch, GVTg does nothing for host context events, but a
function will be added there later. In any case, the notifier
callback is a no-op if there is no active vGPU.

Since intel_gvt_init() is called at an early initialization stage and
requires the status notifier head to be initialized, I initialize it
in intel_engine_setup().

v2: remove a redundant newline. (chris)

Signed-off-by: Changbin Du <changbin.du@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gvt/gvt.h          |  2 +-
 drivers/gpu/drm/i915/gvt/scheduler.c    | 45 ++++++++++++++-------------------
 drivers/gpu/drm/i915/i915_gem_context.c |  1 -
 drivers/gpu/drm/i915/i915_gem_context.h |  3 ---
 drivers/gpu/drm/i915/intel_engine_cs.c  |  2 ++
 drivers/gpu/drm/i915/intel_lrc.c        |  3 ++-
 drivers/gpu/drm/i915/intel_ringbuffer.h |  3 +++
 7 files changed, 27 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 2379192..6dfc48b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -162,7 +162,6 @@ struct intel_vgpu {
 	atomic_t running_workload_num;
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	struct i915_gem_context *shadow_ctx;
-	struct notifier_block shadow_ctx_notifier_block;
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
 	struct {
@@ -233,6 +232,7 @@ struct intel_gvt {
 	struct intel_gvt_gtt gtt;
 	struct intel_gvt_opregion opregion;
 	struct intel_gvt_workload_scheduler scheduler;
+	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
 	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
 	struct intel_vgpu_type *types;
 	unsigned int num_types;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d3a56c9..48e5088 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -130,12 +130,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 static int shadow_context_status_change(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
-	struct intel_vgpu *vgpu = container_of(nb,
-			struct intel_vgpu, shadow_ctx_notifier_block);
-	struct drm_i915_gem_request *req =
-		(struct drm_i915_gem_request *)data;
-	struct intel_gvt_workload_scheduler *scheduler =
-		&vgpu->gvt->scheduler;
+	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+				shadow_ctx_notifier_block[req->engine->id]);
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload =
 		scheduler->current_workload[req->engine->id];
 
@@ -513,15 +511,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	int i;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id i;
 
 	gvt_dbg_core("clean workload scheduler\n");
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (scheduler->thread[i]) {
-			kthread_stop(scheduler->thread[i]);
-			scheduler->thread[i] = NULL;
-		}
+	for_each_engine(engine, gvt->dev_priv, i) {
+		atomic_notifier_chain_unregister(
+					&engine->context_status_notifier,
+					&gvt->shadow_ctx_notifier_block[i]);
+		kthread_stop(scheduler->thread[i]);
 	}
 }
 
@@ -529,18 +528,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct workload_thread_param *param = NULL;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id i;
 	int ret;
-	int i;
 
 	gvt_dbg_core("init workload scheduler\n");
 
 	init_waitqueue_head(&scheduler->workload_complete_wq);
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		/* check ring mask at init time */
-		if (!HAS_ENGINE(gvt->dev_priv, i))
-			continue;
-
+	for_each_engine(engine, gvt->dev_priv, i) {
 		init_waitqueue_head(&scheduler->waitq[i]);
 
 		param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -559,6 +555,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 			ret = PTR_ERR(scheduler->thread[i]);
 			goto err;
 		}
+
+		gvt->shadow_ctx_notifier_block[i].notifier_call =
+					shadow_context_status_change;
+		atomic_notifier_chain_register(&engine->context_status_notifier,
+					&gvt->shadow_ctx_notifier_block[i]);
 	}
 	return 0;
 err:
@@ -570,9 +571,6 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
 {
-	atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
-			&vgpu->shadow_ctx_notifier_block);
-
 	i915_gem_context_put_unlocked(vgpu->shadow_ctx);
 }
 
@@ -587,10 +585,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
 
 	vgpu->shadow_ctx->engine[RCS].initialised = true;
 
-	vgpu->shadow_ctx_notifier_block.notifier_call =
-		shadow_context_status_change;
-
-	atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
-				       &vgpu->shadow_ctx_notifier_block);
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index baceca1..0005085 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -318,7 +318,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	ctx->ring_size = 4 * PAGE_SIZE;
 	ctx->desc_template =
 		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
-	ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
 
 	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
 	 * present or not in use we still need a small bias as ring wraparound
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 81268c9..4af2ab94 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -158,9 +158,6 @@ struct i915_gem_context {
 	/** desc_template: invariant fields for the HW context descriptor */
 	u32 desc_template;
 
-	/** status_notifier: list of callbacks for context-switch changes */
-	struct atomic_notifier_head status_notifier;
-
 	/** guilty_count: How many times this context has caused a GPU hang. */
 	unsigned int guilty_count;
 	/**
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 73fe718..695693a 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	/* Nothing to do here, execute in order of dependencies */
 	engine->schedule = NULL;
 
+	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+
 	dev_priv->engine[id] = engine;
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 89f38e7..808ad46 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -306,7 +306,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
 	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
 		return;
 
-	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+	atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+				   status, rq);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0ef491d..b7d42a5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -408,6 +408,9 @@ struct intel_engine_cs {
 	 */
 	struct i915_gem_context *legacy_active_context;
 
+	/* status_notifier: list of callbacks for context-switch changes */
+	struct atomic_notifier_head context_status_notifier;
+
 	struct intel_engine_hangcheck hangcheck;
 
 	bool needs_cmd_parser;
-- 
2.7.4


* Re: [PATCH v2] drm/i915: make context status notifier head be per engine
  2017-03-13  2:47   ` [PATCH v2] " changbin.du
@ 2017-03-13  4:03     ` Zhenyu Wang
  2017-03-16 16:28       ` Chris Wilson
  0 siblings, 1 reply; 13+ messages in thread
From: Zhenyu Wang @ 2017-03-13  4:03 UTC (permalink / raw)
  To: changbin.du; +Cc: daniel.vetter, intel-gfx, intel-gvt-dev


On 2017.03.13 10:47:11 +0800, changbin.du@intel.com wrote:
> From: Changbin Du <changbin.du@intel.com>
> 
> GVTg introduced the context status notifier to schedule the GVTg
> workload. At that time, the notifier was bound to the GVTg context
> only, so GVTg was not aware of host workloads.
> 
> Now we are going to improve GVTg's guest workload scheduler policy
> and add GuC emulation support for new Gen graphics. Both of these
> features require awareness of all contexts running on the hardware
> (but will not alter host workloads). So this patch makes the
> following change.
> 
> The change is simple:
>   1. Move the context status notifier head from i915_gem_context to
>      intel_engine_cs, which means there is a notifier head per engine
>      instead of per context. The execlist driver still calls the
>      notifier for each context sched-in/out event on the current
>      engine.
>   2. On the GVTg side, bind a notifier_block for each physical engine
>      during GVTg initialization. Then GVTg can see all context status
>      events.
> 
> In this patch, GVTg does nothing for host context events, but a
> function will be added there later. In any case, the notifier
> callback is a no-op if there is no active vGPU.
> 
> Since intel_gvt_init() is called at an early initialization stage and
> requires the status notifier head to be initialized, I initialize it
> in intel_engine_setup().
> 
> v2: remove a redundant newline. (chris)
> 
> Signed-off-by: Changbin Du <changbin.du@intel.com>
> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>

Applied to gvt-next, thanks!

-- 
Open Source Technology Center, Intel ltd.

$gpg --keyserver wwwkeys.pgp.net --recv-keys 4D781827


* ✗ Fi.CI.BAT: failure for drm/i915: make context status notifier head be per engine (rev3)
  2017-03-02 11:23 [RFC PATCH] drm/i915: make context status notifier head be per engine changbin.du
                   ` (3 preceding siblings ...)
  2017-03-09 15:22 ` ✓ Fi.CI.BAT: success for drm/i915: make context status notifier head be per engine (rev2) Patchwork
@ 2017-03-13  4:25 ` Patchwork
  4 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2017-03-13  4:25 UTC (permalink / raw)
  To: changbin.du; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: make context status notifier head be per engine (rev3)
URL   : https://patchwork.freedesktop.org/series/20552/
State : failure

== Summary ==

Series 20552v3 drm/i915: make context status notifier head be per engine
https://patchwork.freedesktop.org/api/1.0/series/20552/revisions/3/mbox/

Test drv_module_reload:
        Subgroup basic-reload-inject:
                pass       -> INCOMPLETE (fi-hsw-4770r)
Test gem_exec_flush:
        Subgroup basic-batch-kernel-default-uc:
                pass       -> FAIL       (fi-snb-2600) fdo#100007
Test kms_force_connector_basic:
        Subgroup prune-stale-modes:
                skip       -> PASS       (fi-snb-2520m)

fdo#100007 https://bugs.freedesktop.org/show_bug.cgi?id=100007

fi-bdw-5557u     total:278  pass:267  dwarn:0   dfail:0   fail:0   skip:11  time: 454s
fi-bsw-n3050     total:278  pass:239  dwarn:0   dfail:0   fail:0   skip:39  time: 624s
fi-bxt-j4205     total:278  pass:259  dwarn:0   dfail:0   fail:0   skip:19  time: 535s
fi-bxt-t5700     total:278  pass:258  dwarn:0   dfail:0   fail:0   skip:20  time: 579s
fi-byt-j1900     total:278  pass:251  dwarn:0   dfail:0   fail:0   skip:27  time: 505s
fi-byt-n2820     total:278  pass:247  dwarn:0   dfail:0   fail:0   skip:31  time: 504s
fi-hsw-4770      total:278  pass:262  dwarn:0   dfail:0   fail:0   skip:16  time: 441s
fi-hsw-4770r     total:276  pass:260  dwarn:0   dfail:0   fail:0   skip:15  time: 0s
fi-ilk-650       total:278  pass:228  dwarn:0   dfail:0   fail:0   skip:50  time: 445s
fi-ivb-3520m     total:278  pass:260  dwarn:0   dfail:0   fail:0   skip:18  time: 495s
fi-ivb-3770      total:278  pass:260  dwarn:0   dfail:0   fail:0   skip:18  time: 491s
fi-kbl-7500u     total:278  pass:259  dwarn:1   dfail:0   fail:0   skip:18  time: 474s
fi-skl-6260u     total:278  pass:268  dwarn:0   dfail:0   fail:0   skip:10  time: 502s
fi-skl-6700hq    total:278  pass:261  dwarn:0   dfail:0   fail:0   skip:17  time: 585s
fi-skl-6700k     total:278  pass:256  dwarn:4   dfail:0   fail:0   skip:18  time: 496s
fi-skl-6770hq    total:278  pass:268  dwarn:0   dfail:0   fail:0   skip:10  time: 543s
fi-snb-2520m     total:278  pass:250  dwarn:0   dfail:0   fail:0   skip:28  time: 567s
fi-snb-2600      total:278  pass:248  dwarn:0   dfail:0   fail:1   skip:29  time: 419s

2cb12884655eab41d4992b33ccb36c609c4537d3 drm-tip: 2017y-03m-12d-13h-00m-05s UTC integration manifest
2bd4f8a drm/i915: make context status notifier head be per engine

== Logs ==

For more details see: https://intel-gfx-ci.01.org/CI/Patchwork_4147/

* Re: [PATCH v2] drm/i915: make context status notifier head be per engine
  2017-03-13  4:03     ` Zhenyu Wang
@ 2017-03-16 16:28       ` Chris Wilson
  2017-03-17  2:28         ` Du, Changbin
  0 siblings, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2017-03-16 16:28 UTC (permalink / raw)
  To: Zhenyu Wang; +Cc: intel-gfx, intel-gvt-dev, daniel.vetter

On Mon, Mar 13, 2017 at 12:03:30PM +0800, Zhenyu Wang wrote:
> On 2017.03.13 10:47:11 +0800, changbin.du@intel.com wrote:
> > From: Changbin Du <changbin.du@intel.com>
> > 
> > GVTg introduced the context status notifier to schedule the GVTg
> > workload. At that time, the notifier was bound to the GVTg context
> > only, so GVTg was not aware of host workloads.
> > 
> > Now we are going to improve GVTg's guest workload scheduler policy
> > and add GuC emulation support for new Gen graphics. Both of these
> > features require awareness of all contexts running on the hardware
> > (but will not alter host workloads). So this patch makes the
> > following change.
> > 
> > The change is simple:
> >   1. Move the context status notifier head from i915_gem_context to
> >      intel_engine_cs, which means there is a notifier head per engine
> >      instead of per context. The execlist driver still calls the
> >      notifier for each context sched-in/out event on the current
> >      engine.
> >   2. On the GVTg side, bind a notifier_block for each physical engine
> >      during GVTg initialization. Then GVTg can see all context status
> >      events.
> > 
> > In this patch, GVTg does nothing for host context events, but a
> > function will be added there later. In any case, the notifier
> > callback is a no-op if there is no active vGPU.
> > 
> > Since intel_gvt_init() is called at an early initialization stage and
> > requires the status notifier head to be initialized, I initialize it
> > in intel_engine_setup().
> > 
> > v2: remove a redundant newline. (chris)
> > 
> > Signed-off-by: Changbin Du <changbin.du@intel.com>
> > Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
> 
> Applied to gvt-next, thanks!

It turns out this fixes a use-after-free bug that CI was falling over.
Applied to dinq.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=100232
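
The lifetime issue is worth spelling out: before this change the
notifier head lived inside i915_gem_context, so a context could be
freed while a notifier_block was still registered on its chain; with
the head in intel_engine_cs, registrations share the engine's
lifetime, which outlives any context. A minimal userspace model of the
pre-patch hazard (assumed shape for illustration, not the actual i915
code or crash path):

#include <stdlib.h>

struct block { int (*call)(void); struct block *next; };
struct head { struct block *first; };

/* Pre-patch shape: the chain head is embedded in the context itself. */
struct context { struct head status_notifier; };

int main(void)
{
        struct context *ctx = malloc(sizeof(*ctx));
        static struct block listener;           /* long-lived listener */

        if (!ctx)
                return 1;
        ctx->status_notifier.first = &listener; /* "register" */
        free(ctx);                              /* context is destroyed */
        /* Any later walk of ctx->status_notifier.first would be a
         * use-after-free; a per-engine head removes the hazard because
         * the head outlives every context. */
        return 0;
}
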
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

* Re: [PATCH v2] drm/i915: make context status notifier head be per engine
  2017-03-16 16:28       ` Chris Wilson
@ 2017-03-17  2:28         ` Du, Changbin
  0 siblings, 0 replies; 13+ messages in thread
From: Du, Changbin @ 2017-03-17  2:28 UTC (permalink / raw)
  To: Chris Wilson, Zhenyu Wang, changbin.du, intel-gvt-dev,
	daniel.vetter, intel-gfx


> > > Since intel_gvt_init() is called at an early initialization stage and
> > > requires the status notifier head to be initialized, I initialize it
> > > in intel_engine_setup().
> > > 
> > > v2: remove a redundant newline. (chris)
> > > 
> > > Signed-off-by: Changbin Du <changbin.du@intel.com>
> > > Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
> > 
> > Applied to gvt-next, thanks!
> 
> It turns out this fixes a use-after-free bug that CI was falling over.
> Applied to dinq.
> 
Wow, that's beyond my expectations. Lucky!

> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=100232
> -Chris
> 
> -- 
> Chris Wilson, Intel Open Source Technology Centre

-- 
Thanks,
Changbin Du


* [PATCH] drm/i915: make context status notifier head be per engine
  2017-03-21 14:23 Fixes that failed to backport to v4.11-rc1 Jani Nikula
@ 2017-03-21 14:47 ` Chris Wilson
  0 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2017-03-21 14:47 UTC (permalink / raw)
  To: intel-gfx; +Cc: jani.nikula

From: Changbin Du <changbin.du@intel.com>

GVTg introduced the context status notifier to schedule the GVTg
workload. At that time, the notifier was bound to the GVTg context
only, so GVTg was not aware of host workloads.

Now we are going to improve GVTg's guest workload scheduler policy
and add GuC emulation support for new Gen graphics. Both of these
features require awareness of all contexts running on the hardware
(but will not alter host workloads). So this patch makes the
following change.

The change is simple:
  1. Move the context status notifier head from i915_gem_context to
     intel_engine_cs, which means there is a notifier head per engine
     instead of per context. The execlist driver still calls the
     notifier for each context sched-in/out event on the current
     engine.
  2. On the GVTg side, bind a notifier_block for each physical engine
     during GVTg initialization. Then GVTg can see all context status
     events.

In this patch, GVTg does nothing for host context events, but a
function will be added there later. In any case, the notifier
callback is a no-op if there is no active vGPU.

Since intel_gvt_init() is called at an early initialization stage and
requires the status notifier head to be initialized, I initialize it
in intel_engine_setup().

v2: remove a redundant newline. (chris)

Fixes: 3c7ba6359d70 ("drm/i915: Introduce execlist context status change notification")
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=100232
Signed-off-by: Changbin Du <changbin.du@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/20170313024711.28591-1-changbin.du@intel.com
Acked-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
(cherry picked from commit 3fc03069bc6e6c316f19bb526e3c8ce784677477)
---
 drivers/gpu/drm/i915/gvt/gvt.h          |  2 +-
 drivers/gpu/drm/i915/gvt/scheduler.c    | 45 ++++++++++++++-------------------
 drivers/gpu/drm/i915/i915_gem_context.c |  1 -
 drivers/gpu/drm/i915/i915_gem_context.h |  3 ---
 drivers/gpu/drm/i915/intel_engine_cs.c  |  2 ++
 drivers/gpu/drm/i915/intel_lrc.c        |  3 ++-
 drivers/gpu/drm/i915/intel_ringbuffer.h |  3 +++
 7 files changed, 27 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 23791920ced1..6dfc48b63b71 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -162,7 +162,6 @@ struct intel_vgpu {
 	atomic_t running_workload_num;
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	struct i915_gem_context *shadow_ctx;
-	struct notifier_block shadow_ctx_notifier_block;
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
 	struct {
@@ -233,6 +232,7 @@ struct intel_gvt {
 	struct intel_gvt_gtt gtt;
 	struct intel_gvt_opregion opregion;
 	struct intel_gvt_workload_scheduler scheduler;
+	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
 	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
 	struct intel_vgpu_type *types;
 	unsigned int num_types;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 39a83eb7aecc..c4353ed86d4b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -130,12 +130,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 static int shadow_context_status_change(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
-	struct intel_vgpu *vgpu = container_of(nb,
-			struct intel_vgpu, shadow_ctx_notifier_block);
-	struct drm_i915_gem_request *req =
-		(struct drm_i915_gem_request *)data;
-	struct intel_gvt_workload_scheduler *scheduler =
-		&vgpu->gvt->scheduler;
+	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+				shadow_ctx_notifier_block[req->engine->id]);
+	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct intel_vgpu_workload *workload =
 		scheduler->current_workload[req->engine->id];
 
@@ -534,15 +532,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	int i;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id i;
 
 	gvt_dbg_core("clean workload scheduler\n");
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (scheduler->thread[i]) {
-			kthread_stop(scheduler->thread[i]);
-			scheduler->thread[i] = NULL;
-		}
+	for_each_engine(engine, gvt->dev_priv, i) {
+		atomic_notifier_chain_unregister(
+					&engine->context_status_notifier,
+					&gvt->shadow_ctx_notifier_block[i]);
+		kthread_stop(scheduler->thread[i]);
 	}
 }
 
@@ -550,18 +549,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct workload_thread_param *param = NULL;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id i;
 	int ret;
-	int i;
 
 	gvt_dbg_core("init workload scheduler\n");
 
 	init_waitqueue_head(&scheduler->workload_complete_wq);
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		/* check ring mask at init time */
-		if (!HAS_ENGINE(gvt->dev_priv, i))
-			continue;
-
+	for_each_engine(engine, gvt->dev_priv, i) {
 		init_waitqueue_head(&scheduler->waitq[i]);
 
 		param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -580,6 +576,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 			ret = PTR_ERR(scheduler->thread[i]);
 			goto err;
 		}
+
+		gvt->shadow_ctx_notifier_block[i].notifier_call =
+					shadow_context_status_change;
+		atomic_notifier_chain_register(&engine->context_status_notifier,
+					&gvt->shadow_ctx_notifier_block[i]);
 	}
 	return 0;
 err:
@@ -591,9 +592,6 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
 {
-	atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
-			&vgpu->shadow_ctx_notifier_block);
-
 	i915_gem_context_put_unlocked(vgpu->shadow_ctx);
 }
 
@@ -608,10 +606,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
 
 	vgpu->shadow_ctx->engine[RCS].initialised = true;
 
-	vgpu->shadow_ctx_notifier_block.notifier_call =
-		shadow_context_status_change;
-
-	atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
-				       &vgpu->shadow_ctx_notifier_block);
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 17f90c618208..e2d83b6d376b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -311,7 +311,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	ctx->ring_size = 4 * PAGE_SIZE;
 	ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
 			     GEN8_CTX_ADDRESSING_MODE_SHIFT;
-	ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
 
 	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
 	 * present or not in use we still need a small bias as ring wraparound
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 0ac750b90f3d..e9c008fe14b1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -160,9 +160,6 @@ struct i915_gem_context {
 	/** desc_template: invariant fields for the HW context descriptor */
 	u32 desc_template;
 
-	/** status_notifier: list of callbacks for context-switch changes */
-	struct atomic_notifier_head status_notifier;
-
 	/** guilty_count: How many times this context has caused a GPU hang. */
 	unsigned int guilty_count;
 	/**
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 371acf109e34..ab1be5c80ea5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
 	/* Nothing to do here, execute in order of dependencies */
 	engine->schedule = NULL;
 
+	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+
 	dev_priv->engine[id] = engine;
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ebf8023d21e6..471af3b480ad 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -345,7 +345,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
 	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
 		return;
 
-	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+	atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+				   status, rq);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 79c2b8d72322..13dccb18cd43 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -403,6 +403,9 @@ struct intel_engine_cs {
 	 */
 	struct i915_gem_context *legacy_active_context;
 
+	/* status_notifier: list of callbacks for context-switch changes */
+	struct atomic_notifier_head context_status_notifier;
+
 	struct intel_engine_hangcheck hangcheck;
 
 	bool needs_cmd_parser;
-- 
2.11.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx


end of thread, other threads:[~2017-03-21 14:47 UTC | newest]

Thread overview: 13+ messages
2017-03-02 11:23 [RFC PATCH] drm/i915: make context status notifier head be per engine changbin.du
2017-03-02 11:47 ` ✗ Fi.CI.BAT: warning for " Patchwork
2017-03-09  8:44 ` [RFC PATCH] " Du, Changbin
2017-03-09 11:27 ` [PATCH] " changbin.du
2017-03-10 17:17   ` Chris Wilson
2017-03-13  2:14     ` Du, Changbin
2017-03-13  2:47   ` [PATCH v2] " changbin.du
2017-03-13  4:03     ` Zhenyu Wang
2017-03-16 16:28       ` Chris Wilson
2017-03-17  2:28         ` Du, Changbin
2017-03-09 15:22 ` ✓ Fi.CI.BAT: success for drm/i915: make context status notifier head be per engine (rev2) Patchwork
2017-03-13  4:25 ` ✗ Fi.CI.BAT: failure for drm/i915: make context status notifier head be per engine (rev3) Patchwork
2017-03-21 14:23 Fixes that failed to backport to v4.11-rc1 Jani Nikula
2017-03-21 14:47 ` [PATCH] drm/i915: make context status notifier head be per engine Chris Wilson
