* [PATCH 01/11] drm/i915: Split detaching and removing the vma
@ 2019-10-30 19:21 ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2019-10-30 19:21 UTC (permalink / raw)
  To: intel-gfx; +Cc: Matthew Auld

In order to keep assert_bind_count() valid, we need to hold the vma
page reference until after we drop the bind count. However, we must also
keep drm_mm_remove_node() as the last action of i915_vma_unbind() so
that it serialises with the unlocked check inside i915_vma_destroy(). So
split up i915_vma_remove() so that, during unbind, we can order the
detach, the dropping of the pages and the node removal as required.
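
In outline, the ordering that unbind must now follow is (a condensed,
illustrative sketch of the change below, not the literal code):

	/* sketch: required ordering in __i915_vma_unbind() */
	i915_vma_detach(vma);           /* drop the bind count while the
					 * pages are still held */
	vma_unbind_pages(vma);          /* then release the page reference */
	drm_mm_remove_node(&vma->node); /* last, so it serialises with the
					 * unlocked check in i915_vma_destroy() */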

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=112067
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/i915_vma.c | 37 +++++++++++++++------------------
 1 file changed, 17 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index d733bcf262f0..e5512f26e20a 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -700,41 +700,35 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
 
-	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
-
 	if (vma->obj) {
-		atomic_inc(&vma->obj->bind_count);
-		assert_bind_count(vma->obj);
+		struct drm_i915_gem_object *obj = vma->obj;
+
+		atomic_inc(&obj->bind_count);
+		assert_bind_count(obj);
 	}
+	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
 
 	return 0;
 }
 
 static void
-i915_vma_remove(struct i915_vma *vma)
+i915_vma_detach(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 
-	list_del(&vma->vm_link);
-
 	/*
-	 * Since the unbound list is global, only move to that list if
-	 * no more VMAs exist.
+	 * And finally now the object is completely decoupled from this
+	 * vma, we can drop its hold on the backing storage and allow
+	 * it to be reaped by the shrinker.
 	 */
+	list_del(&vma->vm_link);
 	if (vma->obj) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
-		/*
-		 * And finally now the object is completely decoupled from this
-		 * vma, we can drop its hold on the backing storage and allow
-		 * it to be reaped by the shrinker.
-		 */
-		atomic_dec(&obj->bind_count);
 		assert_bind_count(obj);
+		atomic_dec(&obj->bind_count);
 	}
-
-	drm_mm_remove_node(&vma->node);
 }
 
 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
@@ -929,8 +923,10 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
 
 err_remove:
-	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
-		i915_vma_remove(vma);
+	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
+		i915_vma_detach(vma);
+		drm_mm_remove_node(&vma->node);
+	}
 err_active:
 	i915_active_release(&vma->active);
 err_unlock:
@@ -1187,9 +1183,10 @@ int __i915_vma_unbind(struct i915_vma *vma)
 	}
 	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
 
+	i915_vma_detach(vma);
 	vma_unbind_pages(vma);
-	i915_vma_remove(vma);
 
+	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */
 	return 0;
 }
 
-- 
2.24.0.rc1

* [PATCH 02/11] drm/i915/gt: Call intel_gt_sanitize() directly
@ 2019-10-30 19:21   ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2019-10-30 19:21 UTC (permalink / raw)
  To: intel-gfx

Make intel_gt_sanitize() itself assume all responsibility for operating
on the HW to sanitize the GT state upon load/resume.
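
The shape intel_gt_sanitize() takes after this change, condensed from the
diff below into an illustrative sketch (uc handling and the engine reset
loops elided):

	void intel_gt_sanitize(struct intel_gt *gt, bool force)
	{
		intel_wakeref_t wakeref;

		/* raw wakeref: too early for intel_display_power_get() */
		wakeref = intel_runtime_pm_get(gt->uncore->rpm);
		intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

		if (intel_gt_is_wedged(gt))
			intel_gt_unset_wedged(gt);

		/* ... intel_uc_sanitize(), engine->reset.prepare(),
		 * reset_engines(gt) || force, engine->reset.finish() ... */

		intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
		intel_runtime_pm_put(gt->uncore->rpm, wakeref);
	}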

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |  5 ---
 drivers/gpu/drm/i915/gt/intel_gt.c            |  6 ++-
 drivers/gpu/drm/i915/gt/intel_gt.h            |  2 +-
 drivers/gpu/drm/i915/gt/intel_gt_pm.c         | 36 +++++++++++++++++-
 drivers/gpu/drm/i915/i915_drv.c               |  8 +---
 drivers/gpu/drm/i915/i915_drv.h               |  2 -
 drivers/gpu/drm/i915/i915_gem.c               | 37 -------------------
 drivers/gpu/drm/i915/selftests/i915_gem.c     |  1 -
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  1 -
 drivers/gpu/drm/i915/selftests/mock_gtt.c     |  3 +-
 10 files changed, 43 insertions(+), 58 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index c99bb94fe41e..e2ee9c04ece8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -74,7 +74,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	 * not rely on its state.
 	 */
 	intel_gt_suspend(&i915->gt);
-	intel_uc_suspend(&i915->gt.uc);
 
 	i915_gem_drain_freed_objects(i915);
 }
@@ -140,8 +139,6 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 		list_splice_tail(&keep, *phase);
 	}
 	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-
-	i915_gem_sanitize(i915);
 }
 
 void i915_gem_resume(struct drm_i915_private *i915)
@@ -161,8 +158,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	if (intel_gt_resume(&i915->gt))
 		goto err_wedged;
 
-	intel_uc_resume(&i915->gt.uc);
-
 	/* Always reload a context for powersaving. */
 	if (!switch_to_kernel_context_sync(&i915->gt))
 		goto err_wedged;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 898662c158ad..4c26daf7ee46 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -31,9 +31,11 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 	intel_uc_init_early(&gt->uc);
 }
 
-void intel_gt_init_hw_early(struct drm_i915_private *i915)
+void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
 {
-	i915->gt.ggtt = &i915->ggtt;
+	gt->ggtt = ggtt;
+
+	intel_gt_sanitize(gt, false);
 }
 
 static void init_unused_ring(struct intel_gt *gt, u32 base)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 5b6effed3713..5436f8c30708 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -28,7 +28,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
 }
 
 void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
-void intel_gt_init_hw_early(struct drm_i915_private *i915);
+void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt);
 int __must_check intel_gt_init_hw(struct intel_gt *gt);
 int intel_gt_init(struct intel_gt *gt);
 void intel_gt_driver_register(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 32becf15d4e0..1679f4f78d65 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -118,8 +118,22 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
 
-	GEM_TRACE("\n");
+	GEM_TRACE("force:%s\n", yesno(force));
+
+	/* Use a raw wakeref to avoid calling intel_display_power_get early */
+	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+
+	/*
+	 * As we have just resumed the machine and woken the device up from
+	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
+	 * back to defaults, recovering from whatever wedged state we left it
+	 * in and so worth trying to use the device once more.
+	 */
+	if (intel_gt_is_wedged(gt))
+		intel_gt_unset_wedged(gt);
 
 	intel_uc_sanitize(&gt->uc);
 
@@ -127,6 +141,8 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
 		if (engine->reset.prepare)
 			engine->reset.prepare(engine);
 
+	intel_uc_reset_prepare(&gt->uc);
+
 	if (reset_engines(gt) || force) {
 		for_each_engine(engine, gt, id)
 			__intel_engine_reset(engine, false);
@@ -135,6 +151,9 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
 	for_each_engine(engine, gt, id)
 		if (engine->reset.finish)
 			engine->reset.finish(engine);
+
+	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
+	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 }
 
 void intel_gt_pm_fini(struct intel_gt *gt)
@@ -148,6 +167,8 @@ int intel_gt_resume(struct intel_gt *gt)
 	enum intel_engine_id id;
 	int err = 0;
 
+	GEM_TRACE("\n");
+
 	/*
 	 * After resume, we may need to poke into the pinned kernel
 	 * contexts to paper over any damage caused by the sudden suspend.
@@ -186,6 +207,9 @@ int intel_gt_resume(struct intel_gt *gt)
 	}
 
 	intel_rc6_enable(&gt->rc6);
+
+	intel_uc_resume(&gt->uc);
+
 	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
 	intel_gt_pm_put(gt);
 
@@ -212,20 +236,30 @@ void intel_gt_suspend(struct intel_gt *gt)
 	/* We expect to be idle already; but also want to be independent */
 	wait_for_idle(gt);
 
+	intel_uc_suspend(&gt->uc);
+
 	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
 		intel_rps_disable(&gt->rps);
 		intel_rc6_disable(&gt->rc6);
 		intel_llc_disable(&gt->llc);
 	}
+
+	intel_gt_sanitize(gt, false);
+
+	GEM_TRACE("\n");
 }
 
 void intel_gt_runtime_suspend(struct intel_gt *gt)
 {
 	intel_uc_runtime_suspend(&gt->uc);
+
+	GEM_TRACE("\n");
 }
 
 int intel_gt_runtime_resume(struct intel_gt *gt)
 {
+	GEM_TRACE("\n");
+
 	intel_gt_init_swizzling(gt);
 
 	return intel_uc_runtime_resume(&gt->uc);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 21273b516dbe..7e0c0e31955e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -603,8 +603,6 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
 	if (ret)
 		goto err_uncore;
 
-	i915_gem_init_mmio(dev_priv);
-
 	return 0;
 
 err_uncore:
@@ -1177,7 +1175,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 	if (ret)
 		goto err_ggtt;
 
-	intel_gt_init_hw_early(dev_priv);
+	intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
 
 	ret = i915_ggtt_enable_hw(dev_priv);
 	if (ret) {
@@ -1804,7 +1802,7 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
-	i915_gem_sanitize(dev_priv);
+	intel_gt_sanitize(&dev_priv->gt, true);
 
 	ret = i915_ggtt_enable_hw(dev_priv);
 	if (ret)
@@ -1935,8 +1933,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	intel_power_domains_resume(dev_priv);
 
-	intel_gt_sanitize(&dev_priv->gt, true);
-
 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a22d969cb352..b5de8f928320 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1779,7 +1779,6 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
 /* i915_gem.c */
 int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
-void i915_gem_sanitize(struct drm_i915_private *i915);
 void i915_gem_init_early(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
 int i915_gem_freeze(struct drm_i915_private *dev_priv);
@@ -1863,7 +1862,6 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
 	return atomic_read(&error->reset_engine_count[engine->uabi_class]);
 }
 
-void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
 void i915_gem_driver_register(struct drm_i915_private *i915);
 void i915_gem_driver_unregister(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b1574ab104d7..ac23322fb382 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1039,38 +1039,6 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	return err;
 }
 
-void i915_gem_sanitize(struct drm_i915_private *i915)
-{
-	intel_wakeref_t wakeref;
-
-	GEM_TRACE("\n");
-
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
-
-	/*
-	 * As we have just resumed the machine and woken the device up from
-	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
-	 * back to defaults, recovering from whatever wedged state we left it
-	 * in and so worth trying to use the device once more.
-	 */
-	if (intel_gt_is_wedged(&i915->gt))
-		intel_gt_unset_wedged(&i915->gt);
-
-	/*
-	 * If we inherit context state from the BIOS or earlier occupants
-	 * of the GPU, the GPU may be in an inconsistent state when we
-	 * try to take over. The only way to remove the earlier state
-	 * is by resetting. However, resetting on earlier gen is tricky as
-	 * it may impact the display and we are uncertain about the stability
-	 * of the reset, so this could be applied to even earlier gen.
-	 */
-	intel_gt_sanitize(&i915->gt, false);
-
-	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-}
-
 static int __intel_engines_record_defaults(struct intel_gt *gt)
 {
 	struct i915_request *requests[I915_NUM_ENGINES] = {};
@@ -1413,11 +1381,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 	WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
 }
 
-void i915_gem_init_mmio(struct drm_i915_private *i915)
-{
-	i915_gem_sanitize(i915);
-}
-
 static void i915_gem_init__mm(struct drm_i915_private *i915)
 {
 	spin_lock_init(&i915->mm.obj_lock);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index e378543ed453..d83f6bf6d9d4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -124,7 +124,6 @@ static void pm_resume(struct drm_i915_private *i915)
 	 */
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
 		intel_gt_sanitize(&i915->gt, false);
-		i915_gem_sanitize(i915);
 
 		i915_gem_restore_gtt_mappings(i915);
 		i915_gem_restore_fences(&i915->ggtt);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index a0da5944dd33..27ed3cee6a9b 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -183,7 +183,6 @@ struct drm_i915_private *mock_gem_device(void)
 	intel_timelines_init(i915);
 
 	mock_init_ggtt(i915, &i915->ggtt);
-	i915->gt.ggtt = &i915->ggtt;
 
 	mkwrite_device_info(i915)->engine_mask = BIT(0);
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 9ec93dc27fb5..20ac3844edec 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -118,8 +118,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
 	ggtt->vm.vma_ops.clear_pages = clear_pages;
 
 	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
-
-	intel_gt_init_hw_early(i915);
+	i915->gt.ggtt = ggtt;
 }
 
 void mock_fini_ggtt(struct i915_ggtt *ggtt)
-- 
2.24.0.rc1

* [PATCH 03/11] drm/i915/gem: Leave reloading kernel context on resume to GT
@ 2019-10-30 19:21   ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2019-10-30 19:21 UTC (permalink / raw)
  To: intel-gfx

As we already reload the kernel context in intel_gt_resume(), repeating
that action inside i915_gem_resume() is redundant.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_pm.c | 30 --------------------------
 1 file changed, 30 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index e2ee9c04ece8..64dd04ae3d57 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -11,32 +11,6 @@
 
 #include "i915_drv.h"
 
-static bool switch_to_kernel_context_sync(struct intel_gt *gt)
-{
-	bool result = !intel_gt_is_wedged(gt);
-
-	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
-		/* XXX hide warning from gem_eio */
-		if (i915_modparams.reset) {
-			dev_err(gt->i915->drm.dev,
-				"Failed to idle engines, declaring wedged!\n");
-			GEM_TRACE_DUMP();
-		}
-
-		/*
-		 * Forcibly cancel outstanding work and leave
-		 * the gpu quiet.
-		 */
-		intel_gt_set_wedged(gt);
-		result = false;
-	}
-
-	if (intel_gt_pm_wait_for_idle(gt))
-		result = false;
-
-	return result;
-}
-
 static void user_forcewake(struct intel_gt *gt, bool suspend)
 {
 	int count = atomic_read(&gt->user_wakeref);
@@ -158,10 +132,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	if (intel_gt_resume(&i915->gt))
 		goto err_wedged;
 
-	/* Always reload a context for powersaving. */
-	if (!switch_to_kernel_context_sync(&i915->gt))
-		goto err_wedged;
-
 	user_forcewake(&i915->gt, false);
 
 out_unlock:
-- 
2.24.0.rc1

* [PATCH 04/11] drm/i915/gt: Move user_forcewake application to GT
@ 2019-10-30 19:21   ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2019-10-30 19:21 UTC (permalink / raw)
  To: intel-gfx

We already track the debugfs user_forcewake on the GT, so it is natural
to pull the suspend/resume handling under gt/ as well.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_pm.c | 22 ----------------------
 drivers/gpu/drm/i915/gt/intel_gt_pm.c  | 22 ++++++++++++++++++++++
 2 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 64dd04ae3d57..6779ab34101b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -11,24 +11,6 @@
 
 #include "i915_drv.h"
 
-static void user_forcewake(struct intel_gt *gt, bool suspend)
-{
-	int count = atomic_read(&gt->user_wakeref);
-
-	/* Inside suspend/resume so single threaded, no races to worry about. */
-	if (likely(!count))
-		return;
-
-	intel_gt_pm_get(gt);
-	if (suspend) {
-		GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
-		atomic_sub(count, &gt->wakeref.count);
-	} else {
-		atomic_add(count, &gt->wakeref.count);
-	}
-	intel_gt_pm_put(gt);
-}
-
 void i915_gem_suspend(struct drm_i915_private *i915)
 {
 	GEM_TRACE("\n");
@@ -36,8 +18,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
 	flush_workqueue(i915->wq);
 
-	user_forcewake(&i915->gt, true);
-
 	/*
 	 * We have to flush all the executing contexts to main memory so
 	 * that they can saved in the hibernation image. To ensure the last
@@ -132,8 +112,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	if (intel_gt_resume(&i915->gt))
 		goto err_wedged;
 
-	user_forcewake(&i915->gt, false);
-
 out_unlock:
 	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
 	return;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 1679f4f78d65..11661de8c40b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -18,6 +18,24 @@
 #include "intel_rps.h"
 #include "intel_wakeref.h"
 
+static void user_forcewake(struct intel_gt *gt, bool suspend)
+{
+	int count = atomic_read(&gt->user_wakeref);
+
+	/* Inside suspend/resume so single threaded, no races to worry about. */
+	if (likely(!count))
+		return;
+
+	intel_gt_pm_get(gt);
+	if (suspend) {
+		GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
+		atomic_sub(count, &gt->wakeref.count);
+	} else {
+		atomic_add(count, &gt->wakeref.count);
+	}
+	intel_gt_pm_put(gt);
+}
+
 static int __gt_unpark(struct intel_wakeref *wf)
 {
 	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
@@ -210,6 +228,8 @@ int intel_gt_resume(struct intel_gt *gt)
 
 	intel_uc_resume(&gt->uc);
 
+	user_forcewake(gt, false);
+
 	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
 	intel_gt_pm_put(gt);
 
@@ -233,6 +253,8 @@ void intel_gt_suspend(struct intel_gt *gt)
 {
 	intel_wakeref_t wakeref;
 
+	user_forcewake(gt, true);
+
 	/* We expect to be idle already; but also want to be independent */
 	wait_for_idle(gt);
 
-- 
2.24.0.rc1

* [PATCH 05/11] drm/i915: Defer rc6 shutdown to suspend_late
@ 2019-10-30 19:21   ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2019-10-30 19:21 UTC (permalink / raw)
  To: intel-gfx

Currently we shut down rc6 during i915_gem_suspend(), but this is called
during the preparation phase (i915_drm_prepare) for all suspend paths,
whereas we only want to shut down rc6 for S3+. Move the actual shutdown
to i915_gem_suspend_late().

We then need to differentiate between suspend targets, distinguishing S0
(s2idle), where the device is kept awake but needs to be in a low power
mode (the same as runtime suspend), from the deeper suspend levels where
we lose control of the HW and so must disable any HW access to dangling
memory.
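
That distinction becomes an early return in intel_gt_suspend_late(),
roughly as below (a condensed, illustrative sketch of the diff that
follows):

	void intel_gt_suspend_late(struct intel_gt *gt)
	{
		intel_gt_suspend_prepare(gt); /* idle, park and suspend uc */

		/*
		 * S0 (s2idle) keeps the device powered, so treat it like
		 * runtime suspend and leave power management enabled.
		 */
		if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
			return;

		/* S3+: we lose control of the HW, so disable rps/rc6/llc
		 * here under a runtime-pm wakeref. */
	}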

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111909
Fixes: c113236718e8 ("drm/i915: Extract GT render sleep (rc6) management")
Testcase: igt/gem_exec_suspend/power-S0
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Andi Shyti <andi.shyti@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_pm.c   |  4 ++-
 drivers/gpu/drm/i915/gt/intel_gt_pm.c    | 39 ++++++++++++++++++++----
 drivers/gpu/drm/i915/gt/intel_gt_pm.h    |  3 +-
 drivers/gpu/drm/i915/gt/intel_rc6.c      |  5 +++
 drivers/gpu/drm/i915/gt/selftest_gt_pm.c |  2 +-
 5 files changed, 44 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 6779ab34101b..f88ee1317bb4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -27,7 +27,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	 * state. Fortunately, the kernel_context is disposable and we do
 	 * not rely on its state.
 	 */
-	intel_gt_suspend(&i915->gt);
+	intel_gt_suspend_prepare(&i915->gt);
 
 	i915_gem_drain_freed_objects(i915);
 }
@@ -69,6 +69,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 	 * machine in an unusable condition.
 	 */
 
+	intel_gt_suspend_late(&i915->gt);
+
 	spin_lock_irqsave(&i915->mm.obj_lock, flags);
 	for (phase = phases; *phase; phase++) {
 		LIST_HEAD(keep);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 11661de8c40b..b60b2dfb4fea 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -4,6 +4,8 @@
  * Copyright © 2019 Intel Corporation
  */
 
+#include <linux/suspend.h>
+
 #include "i915_drv.h"
 #include "i915_globals.h"
 #include "i915_params.h"
@@ -236,8 +238,13 @@ int intel_gt_resume(struct intel_gt *gt)
 	return err;
 }
 
-static void wait_for_idle(struct intel_gt *gt)
+void intel_gt_suspend_prepare(struct intel_gt *gt)
 {
+	if (!intel_gt_pm_is_awake(gt))
+		return;
+
+	user_forcewake(gt, true);
+
 	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
 		/*
 		 * Forcibly cancel outstanding work and leave
@@ -247,18 +254,38 @@ static void wait_for_idle(struct intel_gt *gt)
 	}
 
 	intel_gt_pm_wait_for_idle(gt);
+
+	intel_uc_suspend(&gt->uc);
 }
 
-void intel_gt_suspend(struct intel_gt *gt)
+static suspend_state_t pm_suspend_target(void)
 {
-	intel_wakeref_t wakeref;
+#if IS_ENABLED(CONFIG_PM_SLEEP)
+	return pm_suspend_target_state;
+#else
+	return PM_SUSPEND_TO_IDLE;
+#endif
+}
 
-	user_forcewake(gt, true);
+void intel_gt_suspend_late(struct intel_gt *gt)
+{
+	intel_wakeref_t wakeref;
 
 	/* We expect to be idle already; but also want to be independent */
-	wait_for_idle(gt);
+	intel_gt_suspend_prepare(gt);
 
-	intel_uc_suspend(&gt->uc);
+	/*
+	 * On disabling the device, we want to turn off HW access to memory
+	 * that we no longer own.
+	 *
+	 * However, not all suspend-states disable the device. S0 (s2idle)
+	 * is effectively runtime-suspend, the device is left powered on
+	 * but needs to be put into a low power state. We need to keep
+	 * powermanagement enabled, but we also retain system state and so
+	 * it remains safe to keep on using our allocated memory.
+	 */
+	if (pm_suspend_target() == PM_SUSPEND_TO_IDLE)
+		return;
 
 	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
 		intel_rps_disable(&gt->rps);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index d924c984c74d..b3e17399be9b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -43,8 +43,9 @@ void intel_gt_pm_fini(struct intel_gt *gt);
 
 void intel_gt_sanitize(struct intel_gt *gt, bool force);
 
+void intel_gt_suspend_prepare(struct intel_gt *gt);
+void intel_gt_suspend_late(struct intel_gt *gt);
 int intel_gt_resume(struct intel_gt *gt);
-void intel_gt_suspend(struct intel_gt *gt);
 
 void intel_gt_runtime_suspend(struct intel_gt *gt);
 int intel_gt_runtime_resume(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 70f0e01a38b9..5ad4a92a9582 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -525,6 +525,11 @@ void intel_rc6_init(struct intel_rc6 *rc6)
 
 void intel_rc6_sanitize(struct intel_rc6 *rc6)
 {
+	if (rc6->enabled) { /* unbalanced suspend/resume */
+		rpm_get(rc6);
+		rc6->enabled = false;
+	}
+
 	if (rc6->supported)
 		__intel_rc6_disable(rc6);
 }
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 5d429037cdad..3d4e6a008af8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -15,7 +15,7 @@ static int live_gt_resume(void *arg)
 
 	/* Do several suspend/resume cycles to check we don't explode! */
 	do {
-		intel_gt_suspend(gt);
+		intel_gt_suspend_late(gt);
 
 		if (gt->rc6.enabled) {
 			pr_err("rc6 still enabled after suspend!\n");
-- 
2.24.0.rc1

* [PATCH 06/11] drm/i915: Drop GEM context as a direct link from i915_request
@ 2019-10-30 19:21   ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2019-10-30 19:21 UTC (permalink / raw)
  To: intel-gfx

Keep the intel_context as the primary state for i915_request, with the
GEM context reachable as a backpointer from the low-level state for the
rarer cases where we need client information. Our goal is to remove such
references to clients from the backend, leaving the HW submission
self-contained and agnostic to client interfaces.
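
Concretely, the backend now reaches everything it needs through
rq->context (formerly rq->hw_context); an illustrative snippet condensed
from the diff below:

	/* emit_ppgtt_update() no longer dereferences the GEM context */
	struct i915_address_space *vm = rq->context->vm;

	/* and per-context state such as banning moves with it, e.g.
	 * kill_context() now uses intel_context_set_banned(ce) rather than
	 * i915_gem_context_set_banned(ctx). */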

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   | 15 +++----
 drivers/gpu/drm/i915/gem/i915_gem_context.h   | 38 ----------------
 .../gpu/drm/i915/gem/i915_gem_context_types.h |  7 +--
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  8 ++--
 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c   |  4 +-
 drivers/gpu/drm/i915/gt/intel_context.c       |  2 +-
 drivers/gpu/drm/i915/gt/intel_context.h       | 42 ++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_context_types.h |  5 ++-
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  6 +--
 drivers/gpu/drm/i915/gt/intel_lrc.c           | 44 +++++++++----------
 drivers/gpu/drm/i915/gt/intel_reset.c         | 38 +++++++++-------
 .../gpu/drm/i915/gt/intel_ring_submission.c   | 10 ++---
 drivers/gpu/drm/i915/gt/selftest_lrc.c        | 20 ++++-----
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |  6 +--
 drivers/gpu/drm/i915/gvt/scheduler.c          | 27 +++++-------
 drivers/gpu/drm/i915/i915_gem.c               |  6 +--
 drivers/gpu/drm/i915/i915_gpu_error.c         | 11 +++--
 drivers/gpu/drm/i915/i915_perf.c              |  4 +-
 drivers/gpu/drm/i915/i915_request.c           | 18 ++++----
 drivers/gpu/drm/i915/i915_request.h           |  3 +-
 drivers/gpu/drm/i915/i915_scheduler.c         |  2 +-
 21 files changed, 160 insertions(+), 156 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index cbdf2fb32636..412d6c7949b7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -69,6 +69,7 @@
 
 #include <drm/i915_drm.h>
 
+#include "gt/intel_context.h"
 #include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_lrc_reg.h"
@@ -365,15 +366,6 @@ static void kill_context(struct i915_gem_context *ctx)
 	struct i915_gem_engines_iter it;
 	struct intel_context *ce;
 
-	/*
-	 * If we are already banned, it was due to a guilty request causing
-	 * a reset and the entire context being evicted from the GPU.
-	 */
-	if (i915_gem_context_is_banned(ctx))
-		return;
-
-	i915_gem_context_set_banned(ctx);
-
 	/*
 	 * Map the user's engine back to the actual engines; one virtual
 	 * engine will be mapped to multiple engines, and using ctx->engine[]
@@ -385,6 +377,9 @@ static void kill_context(struct i915_gem_context *ctx)
 		struct intel_engine_cs *engine;
 		struct dma_fence *fence;
 
+		if (!intel_context_set_banned(ce))
+			continue;
+
 		if (!ce->timeline)
 			continue;
 
@@ -1026,7 +1021,7 @@ static void set_ppgtt_barrier(void *data)
 
 static int emit_ppgtt_update(struct i915_request *rq, void *data)
 {
-	struct i915_address_space *vm = rq->hw_context->vm;
+	struct i915_address_space *vm = rq->context->vm;
 	struct intel_engine_cs *engine = rq->engine;
 	u32 base = engine->mmio_base;
 	u32 *cs;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 18e50a769a6e..69932899803e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -91,26 +91,6 @@ static inline void i915_gem_context_clear_persistence(struct i915_gem_context *c
 	clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
 }
 
-static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
-{
-	return test_bit(CONTEXT_BANNED, &ctx->flags);
-}
-
-static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
-{
-	set_bit(CONTEXT_BANNED, &ctx->flags);
-}
-
-static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
-{
-	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
-}
-
-static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
-{
-	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
-}
-
 static inline bool
 i915_gem_context_user_engines(const struct i915_gem_context *ctx)
 {
@@ -129,24 +109,6 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
 	clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
 }
 
-static inline bool
-i915_gem_context_nopreempt(const struct i915_gem_context *ctx)
-{
-	return test_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline void
-i915_gem_context_set_nopreempt(struct i915_gem_context *ctx)
-{
-	set_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
-static inline void
-i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx)
-{
-	clear_bit(CONTEXT_NOPREEMPT, &ctx->flags);
-}
-
 static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 {
 	return !ctx->file_priv;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 861d7d92fe9f..39628cff8f06 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -143,11 +143,8 @@ struct i915_gem_context {
 	 * @flags: small set of booleans
 	 */
 	unsigned long flags;
-#define CONTEXT_BANNED			0
-#define CONTEXT_CLOSED			1
-#define CONTEXT_FORCE_SINGLE_SUBMISSION	2
-#define CONTEXT_USER_ENGINES		3
-#define CONTEXT_NOPREEMPT		4
+#define CONTEXT_CLOSED			0
+#define CONTEXT_USER_ENGINES		1
 
 	struct mutex mutex;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index e4f5c269150a..6dc86c129eee 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -746,9 +746,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	unsigned int i, batch;
 	int err;
 
-	if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
-		return -EIO;
-
 	INIT_LIST_HEAD(&eb->relocs);
 	INIT_LIST_HEAD(&eb->unbound);
 
@@ -2078,7 +2075,7 @@ static int eb_submit(struct i915_execbuffer *eb)
 	if (err)
 		return err;
 
-	if (i915_gem_context_nopreempt(eb->gem_context))
+	if (intel_context_nopreempt(eb->context))
 		eb->request->flags |= I915_REQUEST_NOPREEMPT;
 
 	return 0;
@@ -2164,6 +2161,9 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
 	if (err)
 		return err;
 
+	if (unlikely(intel_context_is_banned(ce)))
+		return -EIO;
+
 	/*
 	 * Pinning the contexts may generate requests in order to acquire
 	 * GGTT space, so do this first before we reserve a seqno for
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 55317081d48b..2f11bee638d0 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -271,7 +271,7 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
 
 	if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
 		struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
-		struct intel_context *ce = rq->hw_context;
+		struct intel_context *ce = rq->context;
 		struct list_head *pos;
 
 		spin_lock(&b->irq_lock);
@@ -326,7 +326,7 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
 	 */
 	spin_lock(&b->irq_lock);
 	if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
-		struct intel_context *ce = rq->hw_context;
+		struct intel_context *ce = rq->context;
 
 		list_del(&rq->signal_link);
 		if (list_empty(&ce->signals))
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index ee9d2bcd2c13..625f75f7825d 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -307,7 +307,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 	int err;
 
 	/* Only suitable for use in remotely modifying this context */
-	GEM_BUG_ON(rq->hw_context == ce);
+	GEM_BUG_ON(rq->context == ce);
 
 	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
 		err = mutex_lock_interruptible_nested(&tl->mutex,
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 68b3d317d959..1e607343d256 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -7,7 +7,9 @@
 #ifndef __INTEL_CONTEXT_H__
 #define __INTEL_CONTEXT_H__
 
+#include <linux/bitops.h>
 #include <linux/lockdep.h>
+#include <linux/types.h>
 
 #include "i915_active.h"
 #include "intel_context_types.h"
@@ -153,4 +155,44 @@ static inline struct intel_ring *__intel_context_ring_size(u64 sz)
 	return u64_to_ptr(struct intel_ring, sz);
 }
 
+static inline bool intel_context_is_banned(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool intel_context_set_banned(struct intel_context *ce)
+{
+	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
+}
+
+static inline bool
+intel_context_force_single_submission(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline void
+intel_context_set_single_submission(struct intel_context *ce)
+{
+	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
+}
+
+static inline bool
+intel_context_nopreempt(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_set_nopreempt(struct intel_context *ce)
+{
+	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
+static inline void
+intel_context_clear_nopreempt(struct intel_context *ce)
+{
+	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
+}
+
 #endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 6959b05ae5f8..0251edc8f568 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -54,7 +54,10 @@ struct intel_context {
 	struct intel_timeline *timeline;
 
 	unsigned long flags;
-#define CONTEXT_ALLOC_BIT 0
+#define CONTEXT_ALLOC_BIT		0
+#define CONTEXT_BANNED			1
+#define CONTEXT_FORCE_SINGLE_SUBMISSION	2
+#define CONTEXT_NOPREEMPT		3
 
 	u32 *lrc_reg_state;
 	u64 lrc_desc;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index f8113bc756c6..efde3f8e0559 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1513,9 +1513,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
 		print_request_ring(m, rq);
 
-		if (rq->hw_context->lrc_reg_state) {
+		if (rq->context->lrc_reg_state) {
 			drm_printf(m, "Logical Ring Context:\n");
-			hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE);
+			hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
 		}
 	}
 	spin_unlock_irqrestore(&engine->active.lock, flags);
@@ -1576,7 +1576,7 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 
 		for (port = execlists->pending; (rq = *port); port++) {
 			/* Exclude any contexts already counted in active */
-			if (!intel_context_inflight_count(rq->hw_context))
+			if (!intel_context_inflight_count(rq->context))
 				engine->stats.active++;
 		}
 
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 6fb3def5ba16..5b23aeb50385 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -888,7 +888,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 			list_move(&rq->sched.link, pl);
 			active = rq;
 		} else {
-			struct intel_engine_cs *owner = rq->hw_context->engine;
+			struct intel_engine_cs *owner = rq->context->engine;
 
 			/*
 			 * Decouple the virtual breadcrumb before moving it
@@ -995,7 +995,7 @@ static inline struct intel_engine_cs *
 __execlists_schedule_in(struct i915_request *rq)
 {
 	struct intel_engine_cs * const engine = rq->engine;
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 
 	intel_context_get(ce);
 
@@ -1021,7 +1021,7 @@ __execlists_schedule_in(struct i915_request *rq)
 static inline struct i915_request *
 execlists_schedule_in(struct i915_request *rq, int idx)
 {
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 	struct intel_engine_cs *old;
 
 	GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
@@ -1064,7 +1064,7 @@ static void restore_default_state(struct intel_context *ce,
 static void reset_active(struct i915_request *rq,
 			 struct intel_engine_cs *engine)
 {
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 	u32 head;
 
 	/*
@@ -1105,13 +1105,13 @@ static inline void
 __execlists_schedule_out(struct i915_request *rq,
 			 struct intel_engine_cs * const engine)
 {
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 
 	intel_engine_context_out(engine);
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
 	intel_gt_pm_put(engine->gt);
 
-	if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+	if (unlikely(intel_context_is_banned(ce)))
 		reset_active(rq, engine);
 
 	/*
@@ -1132,7 +1132,7 @@ __execlists_schedule_out(struct i915_request *rq,
 static inline void
 execlists_schedule_out(struct i915_request *rq)
 {
-	struct intel_context * const ce = rq->hw_context;
+	struct intel_context * const ce = rq->context;
 	struct intel_engine_cs *cur, *old;
 
 	trace_i915_request_out(rq);
@@ -1149,7 +1149,7 @@ execlists_schedule_out(struct i915_request *rq)
 
 static u64 execlists_update_context(const struct i915_request *rq)
 {
-	struct intel_context *ce = rq->hw_context;
+	struct intel_context *ce = rq->context;
 	u64 desc;
 
 	ce->lrc_reg_state[CTX_RING_TAIL] =
@@ -1236,13 +1236,13 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
 	}
 
 	for (port = execlists->pending; (rq = *port); port++) {
-		if (ce == rq->hw_context) {
+		if (ce == rq->context) {
 			GEM_TRACE_ERR("Duplicate context in pending[%zd]\n",
 				      port - execlists->pending);
 			return false;
 		}
 
-		ce = rq->hw_context;
+		ce = rq->context;
 		if (i915_request_completed(rq))
 			continue;
 
@@ -1307,7 +1307,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 static bool ctx_single_port_submission(const struct intel_context *ce)
 {
 	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
-		i915_gem_context_force_single_submission(ce->gem_context));
+		intel_context_force_single_submission(ce));
 }
 
 static bool can_merge_ctx(const struct intel_context *prev,
@@ -1343,7 +1343,7 @@ static bool can_merge_rq(const struct i915_request *prev,
 		     (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL)))
 		return false;
 
-	if (!can_merge_ctx(prev->hw_context, next->hw_context))
+	if (!can_merge_ctx(prev->context, next->context))
 		return false;
 
 	return true;
@@ -1530,7 +1530,7 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
 		return 0;
 
 	/* Force a fast reset for terminated contexts (ignoring sysfs!) */
-	if (unlikely(i915_gem_context_is_banned(rq->gem_context)))
+	if (unlikely(intel_context_is_banned(rq->context)))
 		return 1;
 
 	return READ_ONCE(engine->props.preempt_timeout_ms);
@@ -1638,7 +1638,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * tendency to ignore us rewinding the TAIL to the
 			 * end of an earlier request.
 			 */
-			last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+			last->context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 			last = NULL;
 		} else if (need_timeslice(engine, last) &&
 			   timer_expired(&engine->execlists.timer)) {
@@ -1720,7 +1720,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 		GEM_BUG_ON(rq != ve->request);
 		GEM_BUG_ON(rq->engine != &ve->base);
-		GEM_BUG_ON(rq->hw_context != &ve->context);
+		GEM_BUG_ON(rq->context != &ve->context);
 
 		if (rq_prio(rq) >= queue_prio(execlists)) {
 			if (!virtual_matches(ve, rq, engine)) {
@@ -1839,7 +1839,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * same LRCA, i.e. we must submit 2 different
 				 * contexts if we submit 2 ELSP.
 				 */
-				if (last->hw_context == rq->hw_context)
+				if (last->context == rq->context)
 					goto done;
 
 				if (i915_request_has_sentinel(last))
@@ -1852,8 +1852,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * the same context (even though a different
 				 * request) to the second port.
 				 */
-				if (ctx_single_port_submission(last->hw_context) ||
-				    ctx_single_port_submission(rq->hw_context))
+				if (ctx_single_port_submission(last->context) ||
+				    ctx_single_port_submission(rq->context))
 					goto done;
 
 				merge = false;
@@ -1867,8 +1867,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				}
 
 				GEM_BUG_ON(last &&
-					   !can_merge_ctx(last->hw_context,
-							  rq->hw_context));
+					   !can_merge_ctx(last->context,
+							  rq->context));
 
 				submit = true;
 				last = rq;
@@ -2484,7 +2484,7 @@ static int execlists_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
+	GEM_BUG_ON(!intel_context_is_pinned(request->context));
 
 	/*
 	 * Flush enough space to reduce the likelihood of waiting after
@@ -2995,7 +2995,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	/* We still have requests in-flight; the engine should be active */
 	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
 
-	ce = rq->hw_context;
+	ce = rq->context;
 	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
 
 	if (i915_request_completed(rq)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index f03e000051c1..b4c0ad4a6081 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -40,27 +40,31 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
 static void engine_skip_context(struct i915_request *rq)
 {
 	struct intel_engine_cs *engine = rq->engine;
-	struct i915_gem_context *hung_ctx = rq->gem_context;
+	struct intel_context *hung_ctx = rq->context;
 
 	if (!i915_request_is_active(rq))
-		return;
+		rq = list_first_entry(&engine->active.requests,
+				      typeof(*rq), sched.link);
 
 	lockdep_assert_held(&engine->active.lock);
 	list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
-		if (rq->gem_context == hung_ctx)
+		if (rq->context == hung_ctx)
 			i915_request_skip(rq, -EIO);
 }
 
-static void client_mark_guilty(struct drm_i915_file_private *file_priv,
-			       const struct i915_gem_context *ctx)
+static void client_mark_guilty(struct i915_request *rq, bool banned)
 {
-	unsigned int score;
+	struct i915_gem_context *ctx = rq->context->gem_context;
+	struct drm_i915_file_private *file_priv = ctx->file_priv;
 	unsigned long prev_hang;
+	unsigned int score;
 
-	if (i915_gem_context_is_banned(ctx))
+	if (IS_ERR_OR_NULL(file_priv))
+		return;
+
+	score = 0;
+	if (banned)
 		score = I915_CLIENT_SCORE_CONTEXT_BAN;
-	else
-		score = 0;
 
 	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
 	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
@@ -75,8 +79,9 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv,
 	}
 }
 
-static bool context_mark_guilty(struct i915_gem_context *ctx)
+static bool mark_guilty(struct i915_request *rq)
 {
+	struct i915_gem_context *ctx = rq->context->gem_context;
 	unsigned long prev_hang;
 	bool banned;
 	int i;
@@ -100,18 +105,17 @@ static bool context_mark_guilty(struct i915_gem_context *ctx)
 	if (banned) {
 		DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
 				 ctx->name, atomic_read(&ctx->guilty_count));
-		i915_gem_context_set_banned(ctx);
+		intel_context_set_banned(rq->context);
 	}
 
-	if (!IS_ERR_OR_NULL(ctx->file_priv))
-		client_mark_guilty(ctx->file_priv, ctx);
+	client_mark_guilty(rq, banned);
 
 	return banned;
 }
 
-static void context_mark_innocent(struct i915_gem_context *ctx)
+static void mark_innocent(struct i915_request *rq)
 {
-	atomic_inc(&ctx->active_count);
+	atomic_inc(&rq->context->gem_context->active_count);
 }
 
 void __i915_request_reset(struct i915_request *rq, bool guilty)
@@ -126,11 +130,11 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
 
 	if (guilty) {
 		i915_request_skip(rq, -EIO);
-		if (context_mark_guilty(rq->gem_context))
+		if (mark_guilty(rq))
 			engine_skip_context(rq);
 	} else {
 		dma_fence_set_error(&rq->fence, -EAGAIN);
-		context_mark_innocent(rq->gem_context);
+		mark_innocent(rq);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index a47d5a7c32c9..1c2f3c3a8840 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1485,7 +1485,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 
 	*cs++ = MI_NOOP;
 	*cs++ = MI_SET_CONTEXT;
-	*cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
+	*cs++ = i915_ggtt_offset(rq->context->state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1555,7 +1555,7 @@ static int remap_l3_slice(struct i915_request *rq, int slice)
 
 static int remap_l3(struct i915_request *rq)
 {
-	struct i915_gem_context *ctx = rq->gem_context;
+	struct i915_gem_context *ctx = rq->context->gem_context;
 	int i, err;
 
 	if (!ctx->remap_slice)
@@ -1576,7 +1576,7 @@ static int remap_l3(struct i915_request *rq)
 
 static int switch_context(struct i915_request *rq)
 {
-	struct intel_context *ce = rq->hw_context;
+	struct intel_context *ce = rq->context;
 	struct i915_address_space *vm = vm_alias(ce);
 	int ret;
 
@@ -1601,7 +1601,7 @@ static int switch_context(struct i915_request *rq)
 		 * is purely used for flushing user contexts.
 		 */
 		hw_flags = 0;
-		if (i915_gem_context_is_kernel(rq->gem_context))
+		if (i915_gem_context_is_kernel(rq->context->gem_context))
 			hw_flags = MI_RESTORE_INHIBIT;
 
 		ret = mi_set_context(rq, hw_flags);
@@ -1648,7 +1648,7 @@ static int ring_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
+	GEM_BUG_ON(!intel_context_is_pinned(request->context));
 	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
 
 	/*
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 5e8c365548f0..95504c48cd99 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1187,13 +1187,13 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
 				__func__, arg->engine->name))
 		return -EIO;
 
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq = spinner_create_request(&arg->a.spin,
 				    arg->a.ctx, arg->engine,
 				    MI_ARB_CHECK);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
+	clear_bit(CONTEXT_BANNED, &rq->context->flags);
 	i915_request_get(rq);
 	i915_request_add(rq);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1201,7 +1201,7 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	i915_gem_context_set_banned(arg->a.ctx);
+	intel_context_set_banned(rq->context);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -1236,13 +1236,13 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 				__func__, arg->engine->name))
 		return -EIO;
 
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq[0] = spinner_create_request(&arg->a.spin,
 				       arg->a.ctx, arg->engine,
 				       MI_NOOP); /* no preemption */
 	if (IS_ERR(rq[0]))
 		return PTR_ERR(rq[0]);
 
+	clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
 	i915_request_get(rq[0]);
 	i915_request_add(rq[0]);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1250,7 +1250,6 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
 	rq[1] = spinner_create_request(&arg->b.spin,
 				       arg->b.ctx, arg->engine,
 				       MI_ARB_CHECK);
@@ -1259,13 +1258,14 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
+	clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
 	i915_request_get(rq[1]);
 	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
 	i915_request_add(rq[1]);
 	if (err)
 		goto out;
 
-	i915_gem_context_set_banned(arg->b.ctx);
+	intel_context_set_banned(rq[1]->context);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -1308,13 +1308,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 				__func__, arg->engine->name))
 		return -EIO;
 
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq[0] = spinner_create_request(&arg->a.spin,
 				       arg->a.ctx, arg->engine,
 				       MI_ARB_CHECK);
 	if (IS_ERR(rq[0]))
 		return PTR_ERR(rq[0]);
 
+	clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
 	i915_request_get(rq[0]);
 	i915_request_add(rq[0]);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1322,13 +1322,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
 	rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
 	if (IS_ERR(rq[1])) {
 		err = PTR_ERR(rq[1]);
 		goto out;
 	}
 
+	clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
 	i915_request_get(rq[1]);
 	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
 	i915_request_add(rq[1]);
@@ -1349,7 +1349,7 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 	if (err)
 		goto out;
 
-	i915_gem_context_set_banned(arg->a.ctx);
+	intel_context_set_banned(rq[2]->context);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -1396,13 +1396,13 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 		return 0;
 
 	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq = spinner_create_request(&arg->a.spin,
 				    arg->a.ctx, arg->engine,
 				    MI_NOOP); /* preemption disabled */
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
+	clear_bit(CONTEXT_BANNED, &rq->context->flags);
 	i915_request_get(rq);
 	i915_request_add(rq);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1410,7 +1410,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	i915_gem_context_set_banned(arg->a.ctx);
+	intel_context_set_banned(rq->context);
 	err = intel_engine_pulse(arg->engine); /* force reset */
 	if (err)
 		goto out;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 2498c55e0ea5..08267d2c3cfc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -471,7 +471,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
 	struct intel_guc_client *client = guc->execbuf_client;
 	struct intel_engine_cs *engine = rq->engine;
-	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
+	u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
 	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 
 	guc_wq_item_append(client, engine->guc_id, ctx_desc,
@@ -572,7 +572,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
-			if (last && rq->hw_context != last->hw_context) {
+			if (last && rq->context != last->context) {
 				if (port == last_port)
 					goto done;
 
@@ -677,7 +677,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
 		stalled = false;
 
 	__i915_request_reset(rq, stalled);
-	intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
+	intel_lr_context_reset(engine, rq->context, rq->head, stalled);
 
 out_unlock:
 	spin_unlock_irqrestore(&engine->active.lock, flags);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 377811f8853f..976d6950218e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -59,7 +59,7 @@ static void set_context_pdp_root_pointer(
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
 	struct drm_i915_gem_object *ctx_obj =
-		workload->req->hw_context->state->obj;
+		workload->req->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 
@@ -130,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	struct intel_gvt *gvt = vgpu->gvt;
 	int ring_id = workload->ring_id;
 	struct drm_i915_gem_object *ctx_obj =
-		workload->req->hw_context->state->obj;
+		workload->req->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *dst;
@@ -205,9 +205,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
-static inline bool is_gvt_request(struct i915_request *req)
+static inline bool is_gvt_request(struct i915_request *rq)
 {
-	return i915_gem_context_force_single_submission(req->gem_context);
+	return intel_context_force_single_submission(rq->context);
 }
 
 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -307,7 +307,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	u32 *cs;
 	int err;
 
-	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
+	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/*
@@ -363,11 +363,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 }
 
 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
-					  struct i915_gem_context *ctx)
+					  struct intel_context *ce)
 {
 	struct intel_vgpu_mm *mm = workload->shadow_mm;
-	struct i915_ppgtt *ppgtt =
-		i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
+	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
 	int i = 0;
 
 	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -380,8 +379,6 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 			px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
 		}
 	}
-
-	i915_vm_put(&ppgtt->vm);
 }
 
 static int
@@ -529,7 +526,7 @@ static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
 	struct i915_request *rq = workload->req;
 	struct execlist_ring_context *shadow_ring_context =
-		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
+		(struct execlist_ring_context *)rq->context->lrc_reg_state;
 
 	shadow_ring_context->bb_per_ctx_ptr.val =
 		(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -628,7 +625,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 
 	update_shadow_pdps(workload);
 
-	set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+	set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
 
 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 	if (ret) {
@@ -787,7 +784,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	struct i915_request *rq = workload->req;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
+	struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *src;
@@ -1232,8 +1229,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	i915_gem_context_set_force_single_submission(ctx);
-
 	ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
 	i915_context_ppgtt_root_save(s, ppgtt);
 
@@ -1249,6 +1244,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 			goto out_shadow_ctx;
 		}
 
+		intel_context_set_single_submission(ce);
+
 		if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
 			const unsigned int ring_size = 512 * SZ_4K;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ac23322fb382..fe348ff7db4f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1108,9 +1108,9 @@ static int __intel_engines_record_defaults(struct intel_gt *gt)
 			continue;
 
 		/* We want to be able to unbind the state from the GGTT */
-		GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
+		GEM_BUG_ON(intel_context_is_pinned(rq->context));
 
-		state = rq->hw_context->state;
+		state = rq->context->state;
 		if (!state)
 			continue;
 
@@ -1162,7 +1162,7 @@ static int __intel_engines_record_defaults(struct intel_gt *gt)
 		if (!rq)
 			continue;
 
-		ce = rq->hw_context;
+		ce = rq->context;
 		i915_request_put(rq);
 		intel_context_put(ce);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index e8b67f5e521d..b40157cfae14 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1220,7 +1220,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 static void record_request(const struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
-	const struct i915_gem_context *ctx = request->gem_context;
+	const struct i915_gem_context *ctx = request->context->gem_context;
 
 	erq->flags = request->fence.flags;
 	erq->context = request->fence.context;
@@ -1232,7 +1232,7 @@ static void record_request(const struct i915_request *request,
 	erq->tail = request->tail;
 
 	rcu_read_lock();
-	erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
+	erq->pid = ctx && ctx->pid ? pid_nr(ctx->pid) : 0;
 	rcu_read_unlock();
 }
 
@@ -1300,7 +1300,10 @@ static void error_record_engine_execlists(const struct intel_engine_cs *engine,
 static bool record_context(struct drm_i915_error_context *e,
 			   const struct i915_request *rq)
 {
-	const struct i915_gem_context *ctx = rq->gem_context;
+	const struct i915_gem_context *ctx = rq->context->gem_context;
+
+	if (!ctx)
+		return false;
 
 	if (ctx->pid) {
 		struct task_struct *task;
@@ -1454,7 +1457,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
 		capture = request_record_user_bo(request, ee, capture);
 
 		capture = capture_vma(capture,
-				      request->hw_context->state,
+				      request->context->state,
 				      &ee->ctx);
 
 		capture = capture_vma(capture,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index a8c2318d3d5e..6e35b9255882 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3070,7 +3070,7 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream)
 		stream->ops->enable(stream);
 
 	if (stream->hold_preemption)
-		i915_gem_context_set_nopreempt(stream->ctx);
+		intel_context_set_nopreempt(stream->pinned_ctx);
 }
 
 /**
@@ -3096,7 +3096,7 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream)
 	stream->enabled = false;
 
 	if (stream->hold_preemption)
-		i915_gem_context_clear_nopreempt(stream->ctx);
+		intel_context_clear_nopreempt(stream->pinned_ctx);
 
 	if (stream->ops->disable)
 		stream->ops->disable(stream);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 00011f9533b6..60f6179c2e5e 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -62,6 +62,8 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
+	const struct i915_gem_context *ctx;
+
 	/*
 	 * The timeline struct (as part of the ppgtt underneath a context)
 	 * may be freed when the request is no longer in use by the GPU.
@@ -74,7 +76,8 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return "signaled";
 
-	return to_request(fence)->gem_context->name ?: "[i915]";
+	ctx = to_request(fence)->context->gem_context;
+	return ctx && ctx->name ? ctx->name : "[i915]";
 }
 
 static bool i915_fence_signaled(struct dma_fence *fence)
@@ -272,8 +275,8 @@ bool i915_request_retire(struct i915_request *rq)
 	remove_from_client(rq);
 	list_del(&rq->link);
 
-	intel_context_exit(rq->hw_context);
-	intel_context_unpin(rq->hw_context);
+	intel_context_exit(rq->context);
+	intel_context_unpin(rq->context);
 
 	free_capture_list(rq);
 	i915_sched_node_fini(&rq->sched);
@@ -376,7 +379,7 @@ bool __i915_request_submit(struct i915_request *request)
 	if (i915_request_completed(request))
 		goto xfer;
 
-	if (i915_gem_context_is_banned(request->gem_context))
+	if (intel_context_is_banned(request->context))
 		i915_request_skip(request, -EIO);
 
 	/*
@@ -643,8 +646,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 		goto err_free;
 
 	rq->i915 = ce->engine->i915;
-	rq->hw_context = ce;
-	rq->gem_context = ce->gem_context;
+	rq->context = ce;
 	rq->engine = ce->engine;
 	rq->ring = ce->ring;
 	rq->execution_mask = ce->engine->mask;
@@ -899,7 +901,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 						       &from->submit,
 						       I915_FENCE_GFP);
 	} else if (intel_engine_has_semaphores(to->engine) &&
-		   to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+		   to->context->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
 		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
 	} else {
 		ret = i915_sw_fence_await_dma_fence(&to->submit,
@@ -1227,7 +1229,7 @@ void __i915_request_queue(struct i915_request *rq,
 
 void i915_request_add(struct i915_request *rq)
 {
-	struct i915_sched_attr attr = rq->gem_context->sched;
+	struct i915_sched_attr attr = rq->context->gem_context->sched;
 	struct intel_timeline * const tl = i915_request_timeline(rq);
 	struct i915_request *prev;
 
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 96991d64759c..b3b6534ae798 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -109,9 +109,8 @@ struct i915_request {
 	 * i915_request_free() will then decrement the refcount on the
 	 * context.
 	 */
-	struct i915_gem_context *gem_context;
 	struct intel_engine_cs *engine;
-	struct intel_context *hw_context;
+	struct intel_context *context;
 	struct intel_ring *ring;
 	struct intel_timeline __rcu *timeline;
 	struct list_head signal_link;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index d2edb527dcb8..bf023b9c06ff 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -211,7 +211,7 @@ static void kick_submission(struct intel_engine_cs *engine,
 	 * If we are already the currently executing context, don't
 	 * bother evaluating if we should preempt ourselves.
 	 */
-	if (inflight->hw_context == rq->hw_context)
+	if (inflight->context == rq->context)
 		return;
 
 	engine->execlists.queue_priority_hint = prio;
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 28+ messages in thread

--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1187,13 +1187,13 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
 				__func__, arg->engine->name))
 		return -EIO;
 
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq = spinner_create_request(&arg->a.spin,
 				    arg->a.ctx, arg->engine,
 				    MI_ARB_CHECK);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
+	clear_bit(CONTEXT_BANNED, &rq->context->flags);
 	i915_request_get(rq);
 	i915_request_add(rq);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1201,7 +1201,7 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	i915_gem_context_set_banned(arg->a.ctx);
+	intel_context_set_banned(rq->context);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -1236,13 +1236,13 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 				__func__, arg->engine->name))
 		return -EIO;
 
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq[0] = spinner_create_request(&arg->a.spin,
 				       arg->a.ctx, arg->engine,
 				       MI_NOOP); /* no preemption */
 	if (IS_ERR(rq[0]))
 		return PTR_ERR(rq[0]);
 
+	clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
 	i915_request_get(rq[0]);
 	i915_request_add(rq[0]);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1250,7 +1250,6 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
 	rq[1] = spinner_create_request(&arg->b.spin,
 				       arg->b.ctx, arg->engine,
 				       MI_ARB_CHECK);
@@ -1259,13 +1258,14 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
+	clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
 	i915_request_get(rq[1]);
 	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
 	i915_request_add(rq[1]);
 	if (err)
 		goto out;
 
-	i915_gem_context_set_banned(arg->b.ctx);
+	intel_context_set_banned(rq[1]->context);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -1308,13 +1308,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 				__func__, arg->engine->name))
 		return -EIO;
 
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq[0] = spinner_create_request(&arg->a.spin,
 				       arg->a.ctx, arg->engine,
 				       MI_ARB_CHECK);
 	if (IS_ERR(rq[0]))
 		return PTR_ERR(rq[0]);
 
+	clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
 	i915_request_get(rq[0]);
 	i915_request_add(rq[0]);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1322,13 +1322,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
 	rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
 	if (IS_ERR(rq[1])) {
 		err = PTR_ERR(rq[1]);
 		goto out;
 	}
 
+	clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
 	i915_request_get(rq[1]);
 	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
 	i915_request_add(rq[1]);
@@ -1349,7 +1349,7 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
 	if (err)
 		goto out;
 
-	i915_gem_context_set_banned(arg->a.ctx);
+	intel_context_set_banned(rq[2]->context);
 	err = intel_engine_pulse(arg->engine);
 	if (err)
 		goto out;
@@ -1396,13 +1396,13 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 		return 0;
 
 	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
-	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
 	rq = spinner_create_request(&arg->a.spin,
 				    arg->a.ctx, arg->engine,
 				    MI_NOOP); /* preemption disabled */
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
+	clear_bit(CONTEXT_BANNED, &rq->context->flags);
 	i915_request_get(rq);
 	i915_request_add(rq);
 	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1410,7 +1410,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
 		goto out;
 	}
 
-	i915_gem_context_set_banned(arg->a.ctx);
+	intel_context_set_banned(rq->context);
 	err = intel_engine_pulse(arg->engine); /* force reset */
 	if (err)
 		goto out;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 2498c55e0ea5..08267d2c3cfc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -471,7 +471,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
 	struct intel_guc_client *client = guc->execbuf_client;
 	struct intel_engine_cs *engine = rq->engine;
-	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
+	u32 ctx_desc = lower_32_bits(rq->context->lrc_desc);
 	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 
 	guc_wq_item_append(client, engine->guc_id, ctx_desc,
@@ -572,7 +572,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
-			if (last && rq->hw_context != last->hw_context) {
+			if (last && rq->context != last->context) {
 				if (port == last_port)
 					goto done;
 
@@ -677,7 +677,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
 		stalled = false;
 
 	__i915_request_reset(rq, stalled);
-	intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
+	intel_lr_context_reset(engine, rq->context, rq->head, stalled);
 
 out_unlock:
 	spin_unlock_irqrestore(&engine->active.lock, flags);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 377811f8853f..976d6950218e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -59,7 +59,7 @@ static void set_context_pdp_root_pointer(
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
 	struct drm_i915_gem_object *ctx_obj =
-		workload->req->hw_context->state->obj;
+		workload->req->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 
@@ -130,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	struct intel_gvt *gvt = vgpu->gvt;
 	int ring_id = workload->ring_id;
 	struct drm_i915_gem_object *ctx_obj =
-		workload->req->hw_context->state->obj;
+		workload->req->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *dst;
@@ -205,9 +205,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
-static inline bool is_gvt_request(struct i915_request *req)
+static inline bool is_gvt_request(struct i915_request *rq)
 {
-	return i915_gem_context_force_single_submission(req->gem_context);
+	return intel_context_force_single_submission(rq->context);
 }
 
 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -307,7 +307,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	u32 *cs;
 	int err;
 
-	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
+	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/*
@@ -363,11 +363,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 }
 
 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
-					  struct i915_gem_context *ctx)
+					  struct intel_context *ce)
 {
 	struct intel_vgpu_mm *mm = workload->shadow_mm;
-	struct i915_ppgtt *ppgtt =
-		i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
+	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
 	int i = 0;
 
 	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -380,8 +379,6 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
 			px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
 		}
 	}
-
-	i915_vm_put(&ppgtt->vm);
 }
 
 static int
@@ -529,7 +526,7 @@ static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
 	struct i915_request *rq = workload->req;
 	struct execlist_ring_context *shadow_ring_context =
-		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
+		(struct execlist_ring_context *)rq->context->lrc_reg_state;
 
 	shadow_ring_context->bb_per_ctx_ptr.val =
 		(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -628,7 +625,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 
 	update_shadow_pdps(workload);
 
-	set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+	set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
 
 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 	if (ret) {
@@ -787,7 +784,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	struct i915_request *rq = workload->req;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
+	struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *src;
@@ -1232,8 +1229,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	i915_gem_context_set_force_single_submission(ctx);
-
 	ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
 	i915_context_ppgtt_root_save(s, ppgtt);
 
@@ -1249,6 +1244,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 			goto out_shadow_ctx;
 		}
 
+		intel_context_set_single_submission(ce);
+
 		if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
 			const unsigned int ring_size = 512 * SZ_4K;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ac23322fb382..fe348ff7db4f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1108,9 +1108,9 @@ static int __intel_engines_record_defaults(struct intel_gt *gt)
 			continue;
 
 		/* We want to be able to unbind the state from the GGTT */
-		GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
+		GEM_BUG_ON(intel_context_is_pinned(rq->context));
 
-		state = rq->hw_context->state;
+		state = rq->context->state;
 		if (!state)
 			continue;
 
@@ -1162,7 +1162,7 @@ static int __intel_engines_record_defaults(struct intel_gt *gt)
 		if (!rq)
 			continue;
 
-		ce = rq->hw_context;
+		ce = rq->context;
 		i915_request_put(rq);
 		intel_context_put(ce);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index e8b67f5e521d..b40157cfae14 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1220,7 +1220,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 static void record_request(const struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
-	const struct i915_gem_context *ctx = request->gem_context;
+	const struct i915_gem_context *ctx = request->context->gem_context;
 
 	erq->flags = request->fence.flags;
 	erq->context = request->fence.context;
@@ -1232,7 +1232,7 @@ static void record_request(const struct i915_request *request,
 	erq->tail = request->tail;
 
 	rcu_read_lock();
-	erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
+	erq->pid = ctx && ctx->pid ? pid_nr(ctx->pid) : 0;
 	rcu_read_unlock();
 }
 
@@ -1300,7 +1300,10 @@ static void error_record_engine_execlists(const struct intel_engine_cs *engine,
 static bool record_context(struct drm_i915_error_context *e,
 			   const struct i915_request *rq)
 {
-	const struct i915_gem_context *ctx = rq->gem_context;
+	const struct i915_gem_context *ctx = rq->context->gem_context;
+
+	if (!ctx)
+		return false;
 
 	if (ctx->pid) {
 		struct task_struct *task;
@@ -1454,7 +1457,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
 		capture = request_record_user_bo(request, ee, capture);
 
 		capture = capture_vma(capture,
-				      request->hw_context->state,
+				      request->context->state,
 				      &ee->ctx);
 
 		capture = capture_vma(capture,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index a8c2318d3d5e..6e35b9255882 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3070,7 +3070,7 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream)
 		stream->ops->enable(stream);
 
 	if (stream->hold_preemption)
-		i915_gem_context_set_nopreempt(stream->ctx);
+		intel_context_set_nopreempt(stream->pinned_ctx);
 }
 
 /**
@@ -3096,7 +3096,7 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream)
 	stream->enabled = false;
 
 	if (stream->hold_preemption)
-		i915_gem_context_clear_nopreempt(stream->ctx);
+		intel_context_clear_nopreempt(stream->pinned_ctx);
 
 	if (stream->ops->disable)
 		stream->ops->disable(stream);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 00011f9533b6..60f6179c2e5e 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -62,6 +62,8 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
+	const struct i915_gem_context *ctx;
+
 	/*
 	 * The timeline struct (as part of the ppgtt underneath a context)
 	 * may be freed when the request is no longer in use by the GPU.
@@ -74,7 +76,8 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 		return "signaled";
 
-	return to_request(fence)->gem_context->name ?: "[i915]";
+	ctx = to_request(fence)->context->gem_context;
+	return ctx && ctx->name ? ctx->name : "[i915]";
 }
 
 static bool i915_fence_signaled(struct dma_fence *fence)
@@ -272,8 +275,8 @@ bool i915_request_retire(struct i915_request *rq)
 	remove_from_client(rq);
 	list_del(&rq->link);
 
-	intel_context_exit(rq->hw_context);
-	intel_context_unpin(rq->hw_context);
+	intel_context_exit(rq->context);
+	intel_context_unpin(rq->context);
 
 	free_capture_list(rq);
 	i915_sched_node_fini(&rq->sched);
@@ -376,7 +379,7 @@ bool __i915_request_submit(struct i915_request *request)
 	if (i915_request_completed(request))
 		goto xfer;
 
-	if (i915_gem_context_is_banned(request->gem_context))
+	if (intel_context_is_banned(request->context))
 		i915_request_skip(request, -EIO);
 
 	/*
@@ -643,8 +646,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 		goto err_free;
 
 	rq->i915 = ce->engine->i915;
-	rq->hw_context = ce;
-	rq->gem_context = ce->gem_context;
+	rq->context = ce;
 	rq->engine = ce->engine;
 	rq->ring = ce->ring;
 	rq->execution_mask = ce->engine->mask;
@@ -899,7 +901,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 						       &from->submit,
 						       I915_FENCE_GFP);
 	} else if (intel_engine_has_semaphores(to->engine) &&
-		   to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+		   to->context->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
 		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
 	} else {
 		ret = i915_sw_fence_await_dma_fence(&to->submit,
@@ -1227,7 +1229,7 @@ void __i915_request_queue(struct i915_request *rq,
 
 void i915_request_add(struct i915_request *rq)
 {
-	struct i915_sched_attr attr = rq->gem_context->sched;
+	struct i915_sched_attr attr = rq->context->gem_context->sched;
 	struct intel_timeline * const tl = i915_request_timeline(rq);
 	struct i915_request *prev;
 
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 96991d64759c..b3b6534ae798 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -109,9 +109,8 @@ struct i915_request {
 	 * i915_request_free() will then decrement the refcount on the
 	 * context.
 	 */
-	struct i915_gem_context *gem_context;
 	struct intel_engine_cs *engine;
-	struct intel_context *hw_context;
+	struct intel_context *context;
 	struct intel_ring *ring;
 	struct intel_timeline __rcu *timeline;
 	struct list_head signal_link;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index d2edb527dcb8..bf023b9c06ff 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -211,7 +211,7 @@ static void kick_submission(struct intel_engine_cs *engine,
 	 * If we are already the currently executing context, don't
 	 * bother evaluating if we should preempt ourselves.
 	 */
-	if (inflight->hw_context == rq->hw_context)
+	if (inflight->context == rq->context)
 		return;
 
 	engine->execlists.queue_priority_hint = prio;
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 28+ messages in thread

* [PATCH 07/11] drm/i915: Push the use-semaphore marker onto the intel_context
@ 2019-10-30 19:21   ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2019-10-30 19:21 UTC (permalink / raw)
  To: intel-gfx

Instead of rummaging through the intel_context to peek at the GEM
context in the middle of request submission to decide whether to use
semaphores, store that information on the intel_context itself.
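
As a rough illustration (a stand-alone user-space model, not i915 code;
the names below mirror the patch but every type and helper is a
simplified stand-in), the decision is made once when the context is
created, so the request-await path only has to test a per-context flag:

  #include <stdbool.h>
  #include <stdio.h>

  #define CONTEXT_USE_SEMAPHORES (1UL << 0)

  struct model_context {
          unsigned long flags;    /* decided once at context creation */
          int priority;
  };

  static void model_context_init(struct model_context *ce, int prio,
                                 bool engine_has_semaphores)
  {
          ce->flags = 0;
          ce->priority = prio;
          if (prio >= 0 /* I915_PRIORITY_NORMAL */ && engine_has_semaphores)
                  ce->flags |= CONTEXT_USE_SEMAPHORES;
  }

  /* The await path no longer chases ce->gem_context->sched.priority. */
  static const char *pick_wait(const struct model_context *ce)
  {
          return ce->flags & CONTEXT_USE_SEMAPHORES ?
                 "emit_semaphore_wait" : "i915_sw_fence_await_dma_fence";
  }

  int main(void)
  {
          struct model_context normal, low;

          model_context_init(&normal, 0, true);
          model_context_init(&low, -1023, true);

          printf("normal priority -> %s\n", pick_wait(&normal));
          printf("low priority    -> %s\n", pick_wait(&low));
          return 0;
  }

The real patch does the same thing with set_bit()/test_bit() on
ce->flags: the priority check moves from every
i915_request_await_request() call into intel_context_init(), with
set_priority() clearing the flag again if userspace later drops the
context below normal priority.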

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   | 52 +++++++++++++------
 drivers/gpu/drm/i915/gt/intel_context.c       |  3 ++
 drivers/gpu/drm/i915/gt/intel_context.h       | 15 ++++++
 drivers/gpu/drm/i915/gt/intel_context_types.h |  7 +--
 drivers/gpu/drm/i915/i915_request.c           |  8 ++-
 5 files changed, 60 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 412d6c7949b7..455690c80d42 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1778,6 +1778,40 @@ set_persistence(struct i915_gem_context *ctx,
 	return __context_set_persistence(ctx, args->value);
 }
 
+static void __apply_priority(struct intel_context *ce, void *arg)
+{
+	struct i915_gem_context *ctx = arg;
+
+	if (intel_context_use_semaphores(ce) &&
+	    ctx->sched.priority < I915_PRIORITY_NORMAL)
+		intel_context_clear_use_semaphores(ce);
+}
+
+static int set_priority(struct i915_gem_context *ctx,
+			const struct drm_i915_gem_context_param *args)
+{
+	s64 priority = args->value;
+
+	if (args->size)
+		return -EINVAL;
+
+	if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+		return -ENODEV;
+
+	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
+	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
+		return -EINVAL;
+
+	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
+	    !capable(CAP_SYS_NICE))
+		return -EPERM;
+
+	ctx->sched.priority = I915_USER_PRIORITY(priority);
+	context_apply_all(ctx, __apply_priority, ctx);
+
+	return 0;
+}
+
 static int ctx_setparam(struct drm_i915_file_private *fpriv,
 			struct i915_gem_context *ctx,
 			struct drm_i915_gem_context_param *args)
@@ -1824,23 +1858,7 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
 		break;
 
 	case I915_CONTEXT_PARAM_PRIORITY:
-		{
-			s64 priority = args->value;
-
-			if (args->size)
-				ret = -EINVAL;
-			else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
-				ret = -ENODEV;
-			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
-				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
-				ret = -EINVAL;
-			else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
-				 !capable(CAP_SYS_NICE))
-				ret = -EPERM;
-			else
-				ctx->sched.priority =
-					I915_USER_PRIORITY(priority);
-		}
+		ret = set_priority(ctx, args);
 		break;
 
 	case I915_CONTEXT_PARAM_SSEU:
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 625f75f7825d..7075d03f508f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -238,6 +238,9 @@ intel_context_init(struct intel_context *ce,
 	rcu_read_unlock();
 	if (ctx->timeline)
 		ce->timeline = intel_timeline_get(ctx->timeline);
+	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+	    intel_engine_has_semaphores(engine))
+		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
 
 	ce->engine = engine;
 	ce->ops = engine->cops;
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 1e607343d256..d7b667a26e08 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -155,6 +155,21 @@ static inline struct intel_ring *__intel_context_ring_size(u64 sz)
 	return u64_to_ptr(struct intel_ring, sz);
 }
 
+static inline bool intel_context_use_semaphores(const struct intel_context *ce)
+{
+	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline void intel_context_set_use_semaphores(struct intel_context *ce)
+{
+	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
+static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
+{
+	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
 static inline bool intel_context_is_banned(const struct intel_context *ce)
 {
 	return test_bit(CONTEXT_BANNED, &ce->flags);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 0251edc8f568..264c1efde772 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -55,9 +55,10 @@ struct intel_context {
 
 	unsigned long flags;
 #define CONTEXT_ALLOC_BIT		0
-#define CONTEXT_BANNED			1
-#define CONTEXT_FORCE_SINGLE_SUBMISSION	2
-#define CONTEXT_NOPREEMPT		3
+#define CONTEXT_USE_SEMAPHORES		1
+#define CONTEXT_BANNED			2
+#define CONTEXT_FORCE_SINGLE_SUBMISSION	3
+#define CONTEXT_NOPREEMPT		4
 
 	u32 *lrc_reg_state;
 	u64 lrc_desc;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 60f6179c2e5e..b9bccdb9ee6c 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -896,18 +896,16 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 			return ret;
 	}
 
-	if (to->engine == from->engine) {
+	if (to->engine == from->engine)
 		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
 						       &from->submit,
 						       I915_FENCE_GFP);
-	} else if (intel_engine_has_semaphores(to->engine) &&
-		   to->context->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+	else if (intel_context_use_semaphores(to->context))
 		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
-	} else {
+	else
 		ret = i915_sw_fence_await_dma_fence(&to->submit,
 						    &from->fence, 0,
 						    I915_FENCE_GFP);
-	}
 	if (ret < 0)
 		return ret;
 
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 28+ messages in thread

* [PATCH 08/11] drm/i915: Remove i915->kernel_context
@ 2019-10-30 19:21   ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2019-10-30 19:21 UTC (permalink / raw)
  To: intel-gfx

Allocate only an internal intel_context for the kernel_context,
forgoing a global GEM context for internal use, as all we require is a
separate address space (for our own protection).

Now that the GT has been weaned off ce->gem_context, we can stop
referencing it entirely. This also means we no longer have to create
unnecessary GEM contexts purely for internal use.

GEM contexts are now used solely for tracking GEM clients, while the
intel_context describes the execution environment on the GPU.
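
A rough user-space sketch of the resulting split (all of the structs
below are simplified stand-ins, not driver code): the GT owns a default
address space, a kernel-internal intel_context borrows that vm
directly, and ce->gem_context is simply left NULL, which is why this
patch adds NULL checks to paths such as mark_guilty(), mark_innocent()
and remap_l3().

  #include <stdio.h>
  #include <stdlib.h>

  struct vm { int refcount; };
  struct gem_context { const char *client_name; };  /* GEM clients only */
  struct gt { struct vm *vm; };                     /* gt->vm, added here */

  struct intel_context {
          struct gt *gt;
          struct vm *vm;                    /* defaults to engine->gt->vm */
          struct gem_context *gem_context;  /* NULL for kernel contexts */
  };

  static struct intel_context *context_create(struct gt *gt)
  {
          struct intel_context *ce = calloc(1, sizeof(*ce));

          if (!ce)
                  return NULL;
          ce->gt = gt;
          ce->vm = gt->vm;          /* no GEM context needed for a vm */
          gt->vm->refcount++;
          return ce;
  }

  int main(void)
  {
          struct vm ppgtt = { .refcount = 1 };
          struct gt gt = { .vm = &ppgtt };
          struct intel_context *kernel_ce = context_create(&gt);

          if (!kernel_ce)
                  return 1;

          printf("kernel context has a GEM context? %s\n",
                 kernel_ce->gem_context ? "yes" : "no");
          printf("vm refcount held by the kernel context: %d\n",
                 ppgtt.refcount);

          free(kernel_ce);
          return 0;
  }

Nothing above is the real allocation path; it only illustrates why
i915->kernel_context (and the GEM context behind it) can go away once
the default address space hangs off the GT itself.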

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   |  91 +++++-------
 drivers/gpu/drm/i915/gem/i915_gem_context.h   |   6 +-
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |   3 +-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c   |   6 +-
 .../drm/i915/gem/selftests/i915_gem_context.c |   5 +-
 .../gpu/drm/i915/gem/selftests/mock_context.c |  11 +-
 drivers/gpu/drm/i915/gt/intel_context.c       |  32 +---
 drivers/gpu/drm/i915/gt/intel_context.h       |   4 +-
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  21 +--
 drivers/gpu/drm/i915/gt/intel_gt.c            |  26 +++-
 drivers/gpu/drm/i915/gt/intel_gt_types.h      |   7 +
 drivers/gpu/drm/i915/gt/intel_lrc.c           |  18 +--
 drivers/gpu/drm/i915/gt/intel_lrc.h           |   6 +-
 drivers/gpu/drm/i915/gt/intel_reset.c         |   6 +-
 .../gpu/drm/i915/gt/intel_ring_submission.c   |   4 +-
 drivers/gpu/drm/i915/gt/selftest_context.c    |  59 +++-----
 .../drm/i915/gt/selftest_engine_heartbeat.c   |   3 +-
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c  |  29 ++--
 drivers/gpu/drm/i915/gt/selftest_lrc.c        | 137 ++++++------------
 drivers/gpu/drm/i915/gvt/scheduler.c          |  16 +-
 drivers/gpu/drm/i915/i915_drv.h               |   3 -
 drivers/gpu/drm/i915/i915_gem.c               |  16 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c           |   8 +-
 drivers/gpu/drm/i915/i915_gpu_error.c         |   8 +-
 drivers/gpu/drm/i915/i915_perf.c              |   3 -
 drivers/gpu/drm/i915/i915_request.c           |   5 +-
 drivers/gpu/drm/i915/selftests/i915_request.c |   6 +-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   8 +-
 28 files changed, 216 insertions(+), 331 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 455690c80d42..ff9e64c513e7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -170,6 +170,26 @@ lookup_user_engine(struct i915_gem_context *ctx,
 	return i915_gem_context_get_engine(ctx, idx);
 }
 
+static void intel_context_set_gem(struct intel_context *ce,
+				  struct i915_gem_context *ctx)
+{
+	ce->gem_context = ctx;
+
+	ce->ring = __intel_context_ring_size(SZ_16K);
+
+	if (ctx->vm) {
+		i915_vm_put(ce->vm);
+		ce->vm = i915_vm_get(ctx->vm);
+	}
+
+	if (ctx->timeline)
+		ce->timeline = intel_timeline_get(ctx->timeline);
+
+	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+	    intel_engine_has_semaphores(ce->engine))
+		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
 {
 	while (count--) {
@@ -212,12 +232,14 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
 		GEM_BUG_ON(e->engines[engine->legacy_idx]);
 
-		ce = intel_context_create(ctx, engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			__free_engines(e, e->num_engines + 1);
 			return ERR_CAST(ce);
 		}
 
+		intel_context_set_gem(ce, ctx);
+
 		e->engines[engine->legacy_idx] = ce;
 		e->num_engines = max(e->num_engines, engine->legacy_idx);
 	}
@@ -634,37 +656,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 	return ctx;
 }
 
-static void
-destroy_kernel_context(struct i915_gem_context **ctxp)
-{
-	struct i915_gem_context *ctx;
-
-	/* Keep the context ref so that we can free it immediately ourselves */
-	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
-	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
-	context_close(ctx);
-	i915_gem_context_free(ctx);
-}
-
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
-{
-	struct i915_gem_context *ctx;
-
-	ctx = i915_gem_create_context(i915, 0);
-	if (IS_ERR(ctx))
-		return ctx;
-
-	i915_gem_context_clear_bannable(ctx);
-	i915_gem_context_set_persistence(ctx);
-	ctx->sched.priority = I915_USER_PRIORITY(prio);
-
-	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
-	return ctx;
-}
-
 static void init_contexts(struct i915_gem_contexts *gc)
 {
 	spin_lock_init(&gc->lock);
@@ -674,32 +665,12 @@ static void init_contexts(struct i915_gem_contexts *gc)
 	init_llist_head(&gc->free_list);
 }
 
-int i915_gem_init_contexts(struct drm_i915_private *i915)
+void i915_gem_init_contexts(struct drm_i915_private *i915)
 {
-	struct i915_gem_context *ctx;
-
-	/* Reassure ourselves we are only called once */
-	GEM_BUG_ON(i915->kernel_context);
-
 	init_contexts(&i915->gem.contexts);
-
-	/* lowest priority; idle task */
-	ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
-	if (IS_ERR(ctx)) {
-		DRM_ERROR("Failed to create default global context\n");
-		return PTR_ERR(ctx);
-	}
-	i915->kernel_context = ctx;
-
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
 			 DRIVER_CAPS(i915)->has_logical_contexts ?
 			 "logical" : "fake");
-	return 0;
-}
-
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
-{
-	destroy_kernel_context(&i915->kernel_context);
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
@@ -1457,12 +1428,14 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
 		}
 	}
 
-	ce = intel_execlists_create_virtual(set->ctx, siblings, n);
+	ce = intel_execlists_create_virtual(siblings, n);
 	if (IS_ERR(ce)) {
 		err = PTR_ERR(ce);
 		goto out_siblings;
 	}
 
+	intel_context_set_gem(ce, set->ctx);
+
 	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
 		intel_context_put(ce);
 		err = -EEXIST;
@@ -1632,12 +1605,14 @@ set_engines(struct i915_gem_context *ctx,
 			return -ENOENT;
 		}
 
-		ce = intel_context_create(ctx, engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			__free_engines(set.engines, n);
 			return PTR_ERR(ce);
 		}
 
+		intel_context_set_gem(ce, ctx);
+
 		set.engines->engines[n] = ce;
 	}
 	set.engines->num_engines = num_engines;
@@ -1938,13 +1913,15 @@ static int clone_engines(struct i915_gem_context *dst,
 		 */
 		if (intel_engine_is_virtual(engine))
 			clone->engines[n] =
-				intel_execlists_clone_virtual(dst, engine);
+				intel_execlists_clone_virtual(engine);
 		else
-			clone->engines[n] = intel_context_create(dst, engine);
+			clone->engines[n] = intel_context_create(engine);
 		if (IS_ERR_OR_NULL(clone->engines[n])) {
 			__free_engines(clone, n);
 			goto err_unlock;
 		}
+
+		intel_context_set_gem(clone->engines[n], dst);
 	}
 	clone->num_engines = n;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 69932899803e..d99fff481d59 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -115,8 +115,7 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 }
 
 /* i915_gem_context.c */
-int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
+void i915_gem_init_contexts(struct drm_i915_private *i915);
 
 int i915_gem_context_open(struct drm_i915_private *i915,
 			  struct drm_file *file);
@@ -140,9 +139,6 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
 				       struct drm_file *file);
 
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
-
 static inline struct i915_gem_context *
 i915_gem_context_get(struct i915_gem_context *ctx)
 {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 6dc86c129eee..4f6b847ea011 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1261,8 +1261,7 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
 			 * memory location.
 			 */
 			if (cache->gen >= 12)
-				ce = intel_context_create(eb->context->gem_context,
-							  eb->engine);
+				ce = intel_context_create(eb->engine);
 			else
 				ce = intel_context_get(eb->context);
 			if (IS_ERR(ce))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 1e045c337044..432f2828b093 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -753,15 +753,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 		return -EFAULT;
 
 	if (args->flags & I915_USERPTR_READ_ONLY) {
-		struct i915_address_space *vm;
-
 		/*
 		 * On almost all of the older hw, we cannot tell the GPU that
 		 * a page is readonly.
 		 */
-		vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
-					       true); /* static vm */
-		if (!vm || !vm->has_read_only)
+		if (!dev_priv->gt.vm->has_read_only)
 			return -ENODEV;
 	}
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index c6e61564bb5e..6d174caa8f8b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -309,7 +309,7 @@ static int live_parallel_switch(void *arg)
 			if (!data[m].ce[0])
 				continue;
 
-			ce = intel_context_create(ctx, data[m].ce[0]->engine);
+			ce = intel_context_create(data[m].ce[0]->engine);
 			if (IS_ERR(ce))
 				goto out;
 
@@ -1230,8 +1230,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 			hweight32(engine->sseu.slice_mask),
 			hweight32(pg_sseu.slice_mask));
 
-		ce = intel_context_create(engine->kernel_context->gem_context,
-					  engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			ret = PTR_ERR(ce);
 			goto out_put;
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 29b8984f0e47..f472fc9240d5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -97,7 +97,16 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
 struct i915_gem_context *
 kernel_context(struct drm_i915_private *i915)
 {
-	return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL);
+	struct i915_gem_context *ctx;
+
+	ctx = i915_gem_create_context(i915, 0);
+	if (IS_ERR(ctx))
+		return ctx;
+
+	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+	i915_gem_context_clear_bannable(ctx);
+
+	return ctx;
 }
 
 void kernel_context_close(struct i915_gem_context *ctx)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 7075d03f508f..8b71b2ccbf27 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -31,8 +31,7 @@ void intel_context_free(struct intel_context *ce)
 }
 
 struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
-		     struct intel_engine_cs *engine)
+intel_context_create(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
 
@@ -40,7 +39,7 @@ intel_context_create(struct i915_gem_context *ctx,
 	if (!ce)
 		return ERR_PTR(-ENOMEM);
 
-	intel_context_init(ce, ctx, engine);
+	intel_context_init(ce, engine);
 	return ce;
 }
 
@@ -72,8 +71,6 @@ int __intel_context_do_pin(struct intel_context *ce)
 			  ce->engine->name, ce->timeline->fence_context,
 			  ce->ring->head, ce->ring->tail);
 
-		i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
-
 		smp_mb__before_atomic(); /* flush pin before it is visible */
 	}
 
@@ -103,7 +100,6 @@ void intel_context_unpin(struct intel_context *ce)
 
 		ce->ops->unpin(ce);
 
-		i915_gem_context_put(ce->gem_context);
 		intel_context_active_release(ce);
 	}
 
@@ -198,7 +194,7 @@ int intel_context_active_acquire(struct intel_context *ce)
 		return err;
 
 	/* Preallocate tracking nodes */
-	if (!i915_gem_context_is_kernel(ce->gem_context)) {
+	if (ce->gem_context) {
 		err = i915_active_acquire_preallocate_barrier(&ce->active,
 							      ce->engine);
 		if (err) {
@@ -219,33 +215,19 @@ void intel_context_active_release(struct intel_context *ce)
 
 void
 intel_context_init(struct intel_context *ce,
-		   struct i915_gem_context *ctx,
 		   struct intel_engine_cs *engine)
 {
-	struct i915_address_space *vm;
-
 	GEM_BUG_ON(!engine->cops);
+	GEM_BUG_ON(!engine->gt->vm);
 
 	kref_init(&ce->ref);
 
-	ce->gem_context = ctx;
-	rcu_read_lock();
-	vm = rcu_dereference(ctx->vm);
-	if (vm)
-		ce->vm = i915_vm_get(vm);
-	else
-		ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
-	rcu_read_unlock();
-	if (ctx->timeline)
-		ce->timeline = intel_timeline_get(ctx->timeline);
-	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
-	    intel_engine_has_semaphores(engine))
-		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
-
 	ce->engine = engine;
 	ce->ops = engine->cops;
 	ce->sseu = engine->sseu;
-	ce->ring = __intel_context_ring_size(SZ_16K);
+	ce->ring = __intel_context_ring_size(SZ_4K);
+
+	ce->vm = i915_vm_get(engine->gt->vm);
 
 	INIT_LIST_HEAD(&ce->signal_link);
 	INIT_LIST_HEAD(&ce->signals);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index d7b667a26e08..15bc46e1b308 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -18,13 +18,11 @@
 #include "intel_timeline_types.h"
 
 void intel_context_init(struct intel_context *ce,
-			struct i915_gem_context *ctx,
 			struct intel_engine_cs *engine);
 void intel_context_fini(struct intel_context *ce);
 
 struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
-		     struct intel_engine_cs *engine);
+intel_context_create(struct intel_engine_cs *engine);
 
 void intel_context_free(struct intel_context *ce);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index efde3f8e0559..b38ea44ab761 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -756,12 +756,10 @@ create_kernel_context(struct intel_engine_cs *engine)
 	struct intel_context *ce;
 	int err;
 
-	ce = intel_context_create(engine->i915->kernel_context, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return ce;
 
-	ce->ring = __intel_context_ring_size(SZ_4K);
-
 	err = intel_context_pin(ce);
 	if (err) {
 		intel_context_put(ce);
@@ -797,6 +795,12 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 
 	engine->set_default_submission(engine);
 
+	ret = measure_breadcrumb_dw(engine);
+	if (ret < 0)
+		return ret;
+
+	engine->emit_fini_breadcrumb_dw = ret;
+
 	/*
 	 * We may need to do things with the shrinker which
 	 * require us to immediately switch back to the default
@@ -811,18 +815,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 
 	engine->kernel_context = ce;
 
-	ret = measure_breadcrumb_dw(engine);
-	if (ret < 0)
-		goto err_unpin;
-
-	engine->emit_fini_breadcrumb_dw = ret;
-
 	return 0;
-
-err_unpin:
-	intel_context_unpin(ce);
-	intel_context_put(ce);
-	return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 4c26daf7ee46..f9fb0865fbb6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -73,7 +73,6 @@ int intel_gt_init_hw(struct intel_gt *gt)
 	struct intel_uncore *uncore = gt->uncore;
 	int ret;
 
-	BUG_ON(!i915->kernel_context);
 	ret = intel_gt_terminally_wedged(gt);
 	if (ret)
 		return ret;
@@ -364,6 +363,14 @@ static void intel_gt_fini_scratch(struct intel_gt *gt)
 	i915_vma_unpin_and_release(&gt->scratch, 0);
 }
 
+static struct i915_address_space *kernel_vm(struct intel_gt *gt)
+{
+	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
+		return &i915_ppgtt_create(gt->i915)->vm;
+	else
+		return i915_vm_get(&gt->ggtt->vm);
+}
+
 int intel_gt_init(struct intel_gt *gt)
 {
 	int err;
@@ -374,7 +381,17 @@ int intel_gt_init(struct intel_gt *gt)
 
 	intel_gt_pm_init(gt);
 
+	gt->vm = kernel_vm(gt);
+	if (!gt->vm) {
+		err = -ENOMEM;
+		goto err_scratch;
+	}
+
 	return 0;
+
+err_scratch:
+	intel_gt_fini_scratch(gt);
+	return err;
 }
 
 void intel_gt_driver_remove(struct intel_gt *gt)
@@ -389,6 +406,13 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
 
 void intel_gt_driver_release(struct intel_gt *gt)
 {
+	struct i915_address_space *vm;
+
+	vm = fetch_and_zero(&gt->vm);
+	if (!vm) /* FIXME being called twice on error paths :( */
+		return;
+
+	i915_vm_put(vm);
 	intel_gt_pm_fini(gt);
 	intel_gt_fini_scratch(gt);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index d4e14dbd172e..96890dd12b5f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -90,6 +90,13 @@ struct intel_gt {
 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
 					    [MAX_ENGINE_INSTANCE + 1];
+
+	/*
+	 * Default address space (either GGTT or ppGTT depending on arch).
+	 *
+	 * Reserved for exclusive use by the kernel.
+	 */
+	struct i915_address_space *vm;
 };
 
 enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 5b23aeb50385..242e6927675c 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -133,12 +133,11 @@
  */
 #include <linux/interrupt.h>
 
-#include "gem/i915_gem_context.h"
-
 #include "i915_drv.h"
 #include "i915_perf.h"
 #include "i915_trace.h"
 #include "i915_vgpu.h"
+#include "intel_context.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
@@ -4393,8 +4392,7 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 }
 
 struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
-			       struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 			       unsigned int count)
 {
 	struct virtual_engine *ve;
@@ -4405,13 +4403,13 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 		return ERR_PTR(-EINVAL);
 
 	if (count == 1)
-		return intel_context_create(ctx, siblings[0]);
+		return intel_context_create(siblings[0]);
 
 	ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
 	if (!ve)
 		return ERR_PTR(-ENOMEM);
 
-	ve->base.i915 = ctx->i915;
+	ve->base.i915 = siblings[0]->i915;
 	ve->base.gt = siblings[0]->gt;
 	ve->base.uncore = siblings[0]->uncore;
 	ve->base.id = -1;
@@ -4454,7 +4452,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 		     virtual_submission_tasklet,
 		     (unsigned long)ve);
 
-	intel_context_init(&ve->context, ctx, &ve->base);
+	intel_context_init(&ve->context, &ve->base);
 
 	for (n = 0; n < count; n++) {
 		struct intel_engine_cs *sibling = siblings[n];
@@ -4535,14 +4533,12 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 }
 
 struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
-			      struct intel_engine_cs *src)
+intel_execlists_clone_virtual(struct intel_engine_cs *src)
 {
 	struct virtual_engine *se = to_virtual_engine(src);
 	struct intel_context *dst;
 
-	dst = intel_execlists_create_virtual(ctx,
-					     se->siblings,
+	dst = intel_execlists_create_virtual(se->siblings,
 					     se->num_siblings);
 	if (IS_ERR(dst))
 		return dst;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index faa2d56c279b..faa496ee71bf 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -130,13 +130,11 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 				   unsigned int max);
 
 struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
-			       struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 			       unsigned int count);
 
 struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
-			      struct intel_engine_cs *src);
+intel_execlists_clone_virtual(struct intel_engine_cs *src);
 
 int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 				     const struct intel_engine_cs *master,
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index b4c0ad4a6081..98e7bba5979e 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -86,6 +86,9 @@ static bool mark_guilty(struct i915_request *rq)
 	bool banned;
 	int i;
 
+	if (!ctx)
+		return false;
+
 	atomic_inc(&ctx->guilty_count);
 
 	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
@@ -115,7 +118,8 @@ static bool mark_guilty(struct i915_request *rq)
 
 static void mark_innocent(struct i915_request *rq)
 {
-	atomic_inc(&rq->context->gem_context->active_count);
+	if (rq->context->gem_context)
+		atomic_inc(&rq->context->gem_context->active_count);
 }
 
 void __i915_request_reset(struct i915_request *rq, bool guilty)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 1c2f3c3a8840..6ddfe8a86dbe 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1558,7 +1558,7 @@ static int remap_l3(struct i915_request *rq)
 	struct i915_gem_context *ctx = rq->context->gem_context;
 	int i, err;
 
-	if (!ctx->remap_slice)
+	if (!ctx || !ctx->remap_slice)
 		return 0;
 
 	for (i = 0; i < MAX_L3_SLICES; i++) {
@@ -1601,7 +1601,7 @@ static int switch_context(struct i915_request *rq)
 		 * is purely used for flushing user contexts.
 		 */
 		hw_flags = 0;
-		if (i915_gem_context_is_kernel(rq->context->gem_context))
+		if (!rq->context->gem_context)
 			hw_flags = MI_RESTORE_INHIBIT;
 
 		ret = mi_set_context(rq, hw_flags);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index f63a26a3e620..6b7048636c43 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -67,15 +67,14 @@ static int context_sync(struct intel_context *ce)
 	return err;
 }
 
-static int __live_context_size(struct intel_engine_cs *engine,
-			       struct i915_gem_context *fixme)
+static int __live_context_size(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
 	struct i915_request *rq;
 	void *vaddr;
 	int err;
 
-	ce = intel_context_create(fixme, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
@@ -146,7 +145,6 @@ static int live_context_size(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	enum intel_engine_id id;
 	int err = 0;
 
@@ -155,10 +153,6 @@ static int live_context_size(void *arg)
 	 * HW tries to write past the end of one.
 	 */
 
-	fixme = kernel_context(gt->i915);
-	if (IS_ERR(fixme))
-		return PTR_ERR(fixme);
-
 	for_each_engine(engine, gt, id) {
 		struct {
 			struct drm_i915_gem_object *state;
@@ -183,7 +177,7 @@ static int live_context_size(void *arg)
 		/* Overlaps with the execlists redzone */
 		engine->context_size += I915_GTT_PAGE_SIZE;
 
-		err = __live_context_size(engine, fixme);
+		err = __live_context_size(engine);
 
 		engine->context_size -= I915_GTT_PAGE_SIZE;
 
@@ -196,12 +190,10 @@ static int live_context_size(void *arg)
 			break;
 	}
 
-	kernel_context_close(fixme);
 	return err;
 }
 
-static int __live_active_context(struct intel_engine_cs *engine,
-				 struct i915_gem_context *fixme)
+static int __live_active_context(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
 	int pass;
@@ -226,10 +218,17 @@ static int __live_active_context(struct intel_engine_cs *engine,
 		return -EINVAL;
 	}
 
-	ce = intel_context_create(fixme, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
+	/* Pretend to be a user context to partake in idle barriers */
+	ce->gem_context = kernel_context(engine->i915);
+	if (IS_ERR(ce->gem_context)) {
+		err = PTR_ERR(ce->gem_context);
+		goto err_ce;
+	}
+
 	for (pass = 0; pass <= 2; pass++) {
 		struct i915_request *rq;
 
@@ -282,6 +279,8 @@ static int __live_active_context(struct intel_engine_cs *engine,
 	}
 
 err:
+	kernel_context_close(ce->gem_context);
+err_ce:
 	intel_context_put(ce);
 	return err;
 }
@@ -290,7 +289,6 @@ static int live_active_context(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	enum intel_engine_id id;
 	struct drm_file *file;
 	int err = 0;
@@ -299,14 +297,8 @@ static int live_active_context(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	fixme = live_context(gt->i915, file);
-	if (IS_ERR(fixme)) {
-		err = PTR_ERR(fixme);
-		goto out_file;
-	}
-
 	for_each_engine(engine, gt, id) {
-		err = __live_active_context(engine, fixme);
+		err = __live_active_context(engine);
 		if (err)
 			break;
 
@@ -315,7 +307,6 @@ static int live_active_context(void *arg)
 			break;
 	}
 
-out_file:
 	mock_file_free(gt->i915, file);
 	return err;
 }
@@ -348,8 +339,7 @@ static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
 	return err;
 }
 
-static int __live_remote_context(struct intel_engine_cs *engine,
-				 struct i915_gem_context *fixme)
+static int __live_remote_context(struct intel_engine_cs *engine)
 {
 	struct intel_context *local, *remote;
 	int pass;
@@ -363,11 +353,18 @@ static int __live_remote_context(struct intel_engine_cs *engine,
 	 * clobber the idle-barrier.
 	 */
 
-	remote = intel_context_create(fixme, engine);
+	remote = intel_context_create(engine);
 	if (IS_ERR(remote))
 		return PTR_ERR(remote);
 
-	local = intel_context_create(fixme, engine);
+	/* Pretend to be a user context to partake in idle barriers */
+	remote->gem_context = kernel_context(engine->i915);
+	if (IS_ERR(remote->gem_context)) {
+		err = PTR_ERR(remote->gem_context);
+		goto err_remote_ce;
+	}
+
+	local = intel_context_create(engine);
 	if (IS_ERR(local)) {
 		err = PTR_ERR(local);
 		goto err_remote;
@@ -392,6 +387,8 @@ static int __live_remote_context(struct intel_engine_cs *engine,
 
 	intel_context_put(local);
 err_remote:
+	kernel_context_close(remote->gem_context);
+err_remote_ce:
 	intel_context_put(remote);
 	return err;
 }
@@ -400,7 +397,6 @@ static int live_remote_context(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	enum intel_engine_id id;
 	struct drm_file *file;
 	int err = 0;
@@ -409,14 +405,8 @@ static int live_remote_context(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	fixme = live_context(gt->i915, file);
-	if (IS_ERR(fixme)) {
-		err = PTR_ERR(fixme);
-		goto out_file;
-	}
-
 	for_each_engine(engine, gt, id) {
-		err = __live_remote_context(engine, fixme);
+		err = __live_remote_context(engine);
 		if (err)
 			break;
 
@@ -425,7 +415,6 @@ static int live_remote_context(void *arg)
 			break;
 	}
 
-out_file:
 	mock_file_free(gt->i915, file);
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 155c508024df..9e7376b592e5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -169,8 +169,7 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine)
 	int err;
 	int i;
 
-	ce = intel_context_create(engine->kernel_context->gem_context,
-				  engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 8abc0a1d692b..4f8cd3cbb8bd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -377,36 +377,30 @@ static int igt_reset_nop(void *arg)
 	struct intel_gt *gt = arg;
 	struct i915_gpu_error *global = &gt->i915->gpu_error;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
 	unsigned int reset_count, count;
 	enum intel_engine_id id;
-	struct drm_file *file;
 	IGT_TIMEOUT(end_time);
 	int err = 0;
 
 	/* Check that we can reset during non-user portions of requests */
 
-	file = mock_file(gt->i915);
-	if (IS_ERR(file))
-		return PTR_ERR(file);
-
-	ctx = live_context(gt->i915, file);
-	if (IS_ERR(ctx)) {
-		err = PTR_ERR(ctx);
-		goto out;
-	}
-
-	i915_gem_context_clear_bannable(ctx);
 	reset_count = i915_reset_count(global);
 	count = 0;
 	do {
 		for_each_engine(engine, gt, id) {
+			struct intel_context *ce;
 			int i;
 
+			ce = intel_context_create(engine);
+			if (IS_ERR(ce)) {
+				err = PTR_ERR(ce);
+				break;
+			}
+
 			for (i = 0; i < 16; i++) {
 				struct i915_request *rq;
 
-				rq = igt_request_alloc(ctx, engine);
+				rq = intel_context_create_request(ce);
 				if (IS_ERR(rq)) {
 					err = PTR_ERR(rq);
 					break;
@@ -414,6 +408,8 @@ static int igt_reset_nop(void *arg)
 
 				i915_request_add(rq);
 			}
+
+			intel_context_put(ce);
 		}
 
 		igt_global_reset_lock(gt);
@@ -437,10 +433,7 @@ static int igt_reset_nop(void *arg)
 	} while (time_before(jiffies, end_time));
 	pr_info("%s: %d resets\n", __func__, count);
 
-	err = igt_flush_test(gt->i915);
-out:
-	mock_file_free(gt->i915, file);
-	if (intel_gt_is_wedged(gt))
+	if (igt_flush_test(gt->i915))
 		err = -EIO;
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 95504c48cd99..04d0dd6a938a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -53,11 +53,10 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
 static int live_sanitycheck(void *arg)
 {
 	struct intel_gt *gt = arg;
-	struct i915_gem_engines_iter it;
-	struct i915_gem_context *ctx;
-	struct intel_context *ce;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	struct igt_spinner spin;
-	int err = -ENOMEM;
+	int err = 0;
 
 	if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
 		return 0;
@@ -65,17 +64,20 @@ static int live_sanitycheck(void *arg)
 	if (igt_spinner_init(&spin, gt))
 		return -ENOMEM;
 
-	ctx = kernel_context(gt->i915);
-	if (!ctx)
-		goto err_spin;
-
-	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
 		struct i915_request *rq;
 
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			break;
+		}
+
 		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
-			goto err_ctx;
+			goto out_ctx;
 		}
 
 		i915_request_add(rq);
@@ -84,21 +86,21 @@ static int live_sanitycheck(void *arg)
 			GEM_TRACE_DUMP();
 			intel_gt_set_wedged(gt);
 			err = -EIO;
-			goto err_ctx;
+			goto out_ctx;
 		}
 
 		igt_spinner_end(&spin);
 		if (igt_flush_test(gt->i915)) {
 			err = -EIO;
-			goto err_ctx;
+			goto out_ctx;
 		}
+
+out_ctx:
+		intel_context_put(ce);
+		if (err)
+			break;
 	}
 
-	err = 0;
-err_ctx:
-	i915_gem_context_unlock_engines(ctx);
-	kernel_context_close(ctx);
-err_spin:
 	igt_spinner_fini(&spin);
 	return err;
 }
@@ -144,7 +146,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
 		for (n = 0; n < ARRAY_SIZE(ce); n++) {
 			struct intel_context *tmp;
 
-			tmp = intel_context_create(ctx, engine);
+			tmp = intel_context_create(engine);
 			if (IS_ERR(tmp)) {
 				err = PTR_ERR(tmp);
 				goto err_ce;
@@ -309,17 +311,17 @@ emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
 static struct i915_request *
 semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
 {
-	struct i915_gem_context *ctx;
+	struct intel_context *ce;
 	struct i915_request *rq;
 	int err;
 
-	ctx = kernel_context(engine->i915);
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return ERR_CAST(ce);
 
-	rq = igt_request_alloc(ctx, engine);
+	rq = intel_context_create_request(ce);
 	if (IS_ERR(rq))
-		goto out_ctx;
+		goto out_ce;
 
 	err = 0;
 	if (rq->engine->emit_init_breadcrumb)
@@ -332,8 +334,8 @@ semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
 	if (err)
 		rq = ERR_PTR(err);
 
-out_ctx:
-	kernel_context_close(ctx);
+out_ce:
+	intel_context_put(ce);
 	return rq;
 }
 
@@ -2390,27 +2392,17 @@ static int nop_virtual_engine(struct intel_gt *gt,
 {
 	IGT_TIMEOUT(end_time);
 	struct i915_request *request[16];
-	struct i915_gem_context *ctx[16];
 	struct intel_context *ve[16];
 	unsigned long n, prime, nc;
 	struct igt_live_test t;
 	ktime_t times[2] = {};
 	int err;
 
-	GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
+	GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
 
 	for (n = 0; n < nctx; n++) {
-		ctx[n] = kernel_context(gt->i915);
-		if (!ctx[n]) {
-			err = -ENOMEM;
-			nctx = n;
-			goto out;
-		}
-
-		ve[n] = intel_execlists_create_virtual(ctx[n],
-						       siblings, nsibling);
+		ve[n] = intel_execlists_create_virtual(siblings, nsibling);
 		if (IS_ERR(ve[n])) {
-			kernel_context_close(ctx[n]);
 			err = PTR_ERR(ve[n]);
 			nctx = n;
 			goto out;
@@ -2419,7 +2411,6 @@ static int nop_virtual_engine(struct intel_gt *gt,
 		err = intel_context_pin(ve[n]);
 		if (err) {
 			intel_context_put(ve[n]);
-			kernel_context_close(ctx[n]);
 			nctx = n;
 			goto out;
 		}
@@ -2500,7 +2491,6 @@ static int nop_virtual_engine(struct intel_gt *gt,
 	for (nc = 0; nc < nctx; nc++) {
 		intel_context_unpin(ve[nc]);
 		intel_context_put(ve[nc]);
-		kernel_context_close(ctx[nc]);
 	}
 	return err;
 }
@@ -2559,7 +2549,6 @@ static int mask_virtual_engine(struct intel_gt *gt,
 			       unsigned int nsibling)
 {
 	struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
-	struct i915_gem_context *ctx;
 	struct intel_context *ve;
 	struct igt_live_test t;
 	unsigned int n;
@@ -2570,11 +2559,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
 	 * restrict it to our desired engine within the virtual engine.
 	 */
 
-	ctx = kernel_context(gt->i915);
-	if (!ctx)
-		return -ENOMEM;
-
-	ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+	ve = intel_execlists_create_virtual(siblings, nsibling);
 	if (IS_ERR(ve)) {
 		err = PTR_ERR(ve);
 		goto out_close;
@@ -2642,7 +2627,6 @@ static int mask_virtual_engine(struct intel_gt *gt,
 out_put:
 	intel_context_put(ve);
 out_close:
-	kernel_context_close(ctx);
 	return err;
 }
 
@@ -2682,7 +2666,6 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 				    unsigned int nsibling)
 {
 	struct i915_request *last = NULL;
-	struct i915_gem_context *ctx;
 	struct intel_context *ve;
 	struct i915_vma *scratch;
 	struct igt_live_test t;
@@ -2690,17 +2673,11 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 	int err = 0;
 	u32 *cs;
 
-	ctx = kernel_context(gt->i915);
-	if (!ctx)
-		return -ENOMEM;
-
 	scratch = create_scratch(siblings[0]->gt);
-	if (IS_ERR(scratch)) {
-		err = PTR_ERR(scratch);
-		goto out_close;
-	}
+	if (IS_ERR(scratch))
+		return PTR_ERR(scratch);
 
-	ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+	ve = intel_execlists_create_virtual(siblings, nsibling);
 	if (IS_ERR(ve)) {
 		err = PTR_ERR(ve);
 		goto out_scratch;
@@ -2783,8 +2760,6 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 	intel_context_put(ve);
 out_scratch:
 	i915_vma_unpin_and_release(&scratch, 0);
-out_close:
-	kernel_context_close(ctx);
 	return err;
 }
 
@@ -2878,9 +2853,7 @@ static int bond_virtual_engine(struct intel_gt *gt,
 		for (n = 0; n < nsibling; n++) {
 			struct intel_context *ve;
 
-			ve = intel_execlists_create_virtual(ctx,
-							    siblings,
-							    nsibling);
+			ve = intel_execlists_create_virtual(siblings, nsibling);
 			if (IS_ERR(ve)) {
 				err = PTR_ERR(ve);
 				onstack_fence_fini(&fence);
@@ -3268,8 +3241,7 @@ static int live_lrc_fixed(void *arg)
 	return err;
 }
 
-static int __live_lrc_state(struct i915_gem_context *fixme,
-			    struct intel_engine_cs *engine,
+static int __live_lrc_state(struct intel_engine_cs *engine,
 			    struct i915_vma *scratch)
 {
 	struct intel_context *ce;
@@ -3284,7 +3256,7 @@ static int __live_lrc_state(struct i915_gem_context *fixme,
 	int err;
 	int n;
 
-	ce = intel_context_create(fixme, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
@@ -3358,7 +3330,6 @@ static int live_lrc_state(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	struct i915_vma *scratch;
 	enum intel_engine_id id;
 	int err = 0;
@@ -3368,18 +3339,12 @@ static int live_lrc_state(void *arg)
 	 * intel_context.
 	 */
 
-	fixme = kernel_context(gt->i915);
-	if (!fixme)
-		return -ENOMEM;
-
 	scratch = create_scratch(gt);
-	if (IS_ERR(scratch)) {
-		err = PTR_ERR(scratch);
-		goto out_close;
-	}
+	if (IS_ERR(scratch))
+		return PTR_ERR(scratch);
 
 	for_each_engine(engine, gt, id) {
-		err = __live_lrc_state(fixme, engine, scratch);
+		err = __live_lrc_state(engine, scratch);
 		if (err)
 			break;
 	}
@@ -3388,8 +3353,6 @@ static int live_lrc_state(void *arg)
 		err = -EIO;
 
 	i915_vma_unpin_and_release(&scratch, 0);
-out_close:
-	kernel_context_close(fixme);
 	return err;
 }
 
@@ -3422,8 +3385,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
 	return 0;
 }
 
-static int __live_gpr_clear(struct i915_gem_context *fixme,
-			    struct intel_engine_cs *engine,
+static int __live_gpr_clear(struct intel_engine_cs *engine,
 			    struct i915_vma *scratch)
 {
 	struct intel_context *ce;
@@ -3439,7 +3401,7 @@ static int __live_gpr_clear(struct i915_gem_context *fixme,
 	if (err)
 		return err;
 
-	ce = intel_context_create(fixme, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
@@ -3501,7 +3463,6 @@ static int live_gpr_clear(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	struct i915_vma *scratch;
 	enum intel_engine_id id;
 	int err = 0;
@@ -3511,18 +3472,12 @@ static int live_gpr_clear(void *arg)
 	 * to avoid leaking any information from previous contexts.
 	 */
 
-	fixme = kernel_context(gt->i915);
-	if (!fixme)
-		return -ENOMEM;
-
 	scratch = create_scratch(gt);
-	if (IS_ERR(scratch)) {
-		err = PTR_ERR(scratch);
-		goto out_close;
-	}
+	if (IS_ERR(scratch))
+		return PTR_ERR(scratch);
 
 	for_each_engine(engine, gt, id) {
-		err = __live_gpr_clear(fixme, engine, scratch);
+		err = __live_gpr_clear(engine, scratch);
 		if (err)
 			break;
 	}
@@ -3531,8 +3486,6 @@ static int live_gpr_clear(void *arg)
 		err = -EIO;
 
 	i915_vma_unpin_and_release(&scratch, 0);
-out_close:
-	kernel_context_close(fixme);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 976d6950218e..f0785b9fe02c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -35,12 +35,12 @@
 
 #include <linux/kthread.h>
 
-#include "gem/i915_gem_context.h"
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
 #include "gt/intel_ring.h"
 
 #include "i915_drv.h"
+#include "i915_gem_gtt.h"
 #include "gvt.h"
 
 #define RING_CTX_OFF(x) \
@@ -1220,16 +1220,14 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
 	struct i915_ppgtt *ppgtt;
 	enum intel_engine_id i;
 	int ret;
 
-	ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
+	ppgtt = i915_ppgtt_create(i915);
+	if (IS_ERR(ppgtt))
+		return PTR_ERR(ppgtt);
 
-	ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
 	i915_context_ppgtt_root_save(s, ppgtt);
 
 	for_each_engine(engine, i915, i) {
@@ -1238,12 +1236,14 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		INIT_LIST_HEAD(&s->workload_q_head[i]);
 		s->shadow[i] = ERR_PTR(-EINVAL);
 
-		ce = intel_context_create(ctx, engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			ret = PTR_ERR(ce);
 			goto out_shadow_ctx;
 		}
 
+		i915_vm_put(ce->vm);
+		ce->vm = i915_vm_get(&ppgtt->vm);
 		intel_context_set_single_submission(ce);
 
 		if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
@@ -1278,7 +1278,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
 
 	i915_vm_put(&ppgtt->vm);
-	i915_gem_context_put(ctx);
 	return 0;
 
 out_shadow_ctx:
@@ -1291,7 +1290,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		intel_context_put(s->shadow[i]);
 	}
 	i915_vm_put(&ppgtt->vm);
-	i915_gem_context_put(ctx);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b5de8f928320..e308a9a71ab1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -956,9 +956,6 @@ struct drm_i915_private {
 
 	struct pci_dev *bridge_dev;
 
-	/* Context used internally to idle the GPU and setup initial state */
-	struct i915_gem_context *kernel_context;
-
 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
 	struct rb_root uabi_engines;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fe348ff7db4f..1063d4c74a7f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1063,8 +1063,7 @@ static int __intel_engines_record_defaults(struct intel_gt *gt)
 		GEM_BUG_ON(!engine->kernel_context);
 		engine->serial++; /* force the kernel context switch */
 
-		ce = intel_context_create(engine->kernel_context->gem_context,
-					  engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			err = PTR_ERR(ce);
 			goto out;
@@ -1219,6 +1218,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	}
 
 	intel_gt_init(&dev_priv->gt);
+	i915_gem_init_contexts(dev_priv);
 
 	ret = intel_engines_setup(&dev_priv->gt);
 	if (ret) {
@@ -1226,16 +1226,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		goto err_unlock;
 	}
 
-	ret = i915_gem_init_contexts(dev_priv);
-	if (ret) {
-		GEM_BUG_ON(ret == -EIO);
-		goto err_scratch;
-	}
-
 	ret = intel_engines_init(&dev_priv->gt);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
-		goto err_context;
+		goto err_scratch;
 	}
 
 	intel_uc_init(&dev_priv->gt.uc);
@@ -1299,9 +1293,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		intel_uc_fini(&dev_priv->gt.uc);
 		intel_engines_cleanup(&dev_priv->gt);
 	}
-err_context:
-	if (ret != -EIO)
-		i915_gem_driver_release__contexts(dev_priv);
 err_scratch:
 	intel_gt_driver_release(&dev_priv->gt);
 err_unlock:
@@ -1367,7 +1358,6 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 {
 	intel_engines_cleanup(&dev_priv->gt);
-	i915_gem_driver_release__contexts(dev_priv);
 	intel_gt_driver_release(&dev_priv->gt);
 
 	intel_wa_list_free(&dev_priv->gt_wa_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 88179202c556..5e207412a459 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1363,12 +1363,8 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 	 * If everybody agrees to not to write into the scratch page,
 	 * we can reuse it for all vm, keeping contexts and processes separate.
 	 */
-	if (vm->has_read_only &&
-	    vm->i915->kernel_context &&
-	    vm->i915->kernel_context->vm) {
-		struct i915_address_space *clone =
-			rcu_dereference_protected(vm->i915->kernel_context->vm,
-						  true); /* static */
+	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
+		struct i915_address_space *clone = vm->gt->vm;
 
 		GEM_BUG_ON(!clone->has_read_only);
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b40157cfae14..598425a2be62 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1231,9 +1231,11 @@ static void record_request(const struct i915_request *request,
 	erq->head = request->head;
 	erq->tail = request->tail;
 
-	rcu_read_lock();
-	erq->pid = ctx && ctx->pid ? pid_nr(ctx->pid) : 0;
-	rcu_read_unlock();
+	if (ctx) {
+		rcu_read_lock();
+		erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
+		rcu_read_unlock();
+	}
 }
 
 static void engine_record_requests(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 6e35b9255882..f1147fbb1dc0 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2340,9 +2340,6 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
 	 */
 	spin_lock(&i915->gem.contexts.lock);
 	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
-		if (ctx == i915->kernel_context)
-			continue;
-
 		if (!kref_get_unless_zero(&ctx->ref))
 			continue;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b9bccdb9ee6c..3b9c76dd08f0 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1227,8 +1227,8 @@ void __i915_request_queue(struct i915_request *rq,
 
 void i915_request_add(struct i915_request *rq)
 {
-	struct i915_sched_attr attr = rq->context->gem_context->sched;
 	struct intel_timeline * const tl = i915_request_timeline(rq);
+	struct i915_sched_attr attr = {};
 	struct i915_request *prev;
 
 	lockdep_assert_held(&tl->mutex);
@@ -1238,6 +1238,9 @@ void i915_request_add(struct i915_request *rq)
 
 	prev = __i915_request_commit(rq);
 
+	if (rq->context->gem_context)
+		attr = rq->context->gem_context->sched;
+
 	/*
 	 * Boost actual workloads past semaphores!
 	 *
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 30ae34f62176..9ddade4eacf8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -739,10 +739,8 @@ static int live_empty_request(void *arg)
 
 static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 {
-	struct i915_gem_context *ctx = i915->kernel_context;
 	struct drm_i915_gem_object *obj;
 	const int gen = INTEL_GEN(i915);
-	struct i915_address_space *vm;
 	struct i915_vma *vma;
 	u32 *cmd;
 	int err;
@@ -751,9 +749,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	vm = i915_gem_context_get_vm_rcu(ctx);
-	vma = i915_vma_instance(obj, vm, NULL);
-	i915_vm_put(vm);
+	vma = i915_vma_instance(obj, i915->gt.vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 27ed3cee6a9b..98a754faf222 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -63,7 +63,6 @@ static void mock_device_release(struct drm_device *dev)
 
 	for_each_engine(engine, &i915->gt, id)
 		mock_engine_free(engine);
-	i915_gem_driver_release__contexts(i915);
 
 	intel_timelines_fini(i915);
 
@@ -183,6 +182,7 @@ struct drm_i915_private *mock_gem_device(void)
 	intel_timelines_init(i915);
 
 	mock_init_ggtt(i915, &i915->ggtt);
+	i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
 
 	mkwrite_device_info(i915)->engine_mask = BIT(0);
 
@@ -190,10 +190,6 @@ struct drm_i915_private *mock_gem_device(void)
 	if (!i915->engine[RCS0])
 		goto err_unlock;
 
-	i915->kernel_context = mock_context(i915, NULL);
-	if (!i915->kernel_context)
-		goto err_engine;
-
 	if (mock_engine_init(i915->engine[RCS0]))
 		goto err_context;
 
@@ -202,8 +198,6 @@ struct drm_i915_private *mock_gem_device(void)
 	return i915;
 
 err_context:
-	i915_gem_driver_release__contexts(i915);
-err_engine:
 	mock_engine_free(i915->engine[RCS0]);
 err_unlock:
 	intel_timelines_fini(i915);
-- 
2.24.0.rc1

* [Intel-gfx] [PATCH 08/11] drm/i915: Remove i915->kernel_context
@ 2019-10-30 19:21   ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2019-10-30 19:21 UTC (permalink / raw)
  To: intel-gfx

Allocate only an internal intel_context for the kernel_context, forgoing
a global GEM context for internal use as we only require a separate
address space (for our own protection).

Now that the GT code has been weaned off ce->gem_context, we can stop
referencing it entirely. This also means we no longer have to create
superfluous GEM contexts purely for internal use.

GEM contexts are now used solely for tracking GEM clients, while
intel_context describes the execution environment on the GPU.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
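To illustrate the end state, a minimal sketch of kernel-internal submission
once no GEM context is involved: an intel_context is created directly on an
engine and inherits the GT-wide kernel address space, and the request path
tolerates ce->gem_context being NULL. The helper below is hypothetical and
only uses interfaces touched by this series; error handling is abbreviated.

/*
 * Hypothetical example: emit an empty request in the kernel-owned
 * address space without ever allocating a GEM context.
 */
static int submit_internal_nop(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ce = intel_context_create(engine); /* ce->vm comes from engine->gt->vm */
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out;
	}

	/*
	 * With ce->gem_context == NULL, i915_request_add() falls back to a
	 * default i915_sched_attr instead of a client's scheduling attributes.
	 */
	i915_request_add(rq);
out:
	intel_context_put(ce);
	return err;
}
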
 drivers/gpu/drm/i915/gem/i915_gem_context.c   |  91 +++++-------
 drivers/gpu/drm/i915/gem/i915_gem_context.h   |   6 +-
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |   3 +-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c   |   6 +-
 .../drm/i915/gem/selftests/i915_gem_context.c |   5 +-
 .../gpu/drm/i915/gem/selftests/mock_context.c |  11 +-
 drivers/gpu/drm/i915/gt/intel_context.c       |  32 +---
 drivers/gpu/drm/i915/gt/intel_context.h       |   4 +-
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  21 +--
 drivers/gpu/drm/i915/gt/intel_gt.c            |  26 +++-
 drivers/gpu/drm/i915/gt/intel_gt_types.h      |   7 +
 drivers/gpu/drm/i915/gt/intel_lrc.c           |  18 +--
 drivers/gpu/drm/i915/gt/intel_lrc.h           |   6 +-
 drivers/gpu/drm/i915/gt/intel_reset.c         |   6 +-
 .../gpu/drm/i915/gt/intel_ring_submission.c   |   4 +-
 drivers/gpu/drm/i915/gt/selftest_context.c    |  59 +++-----
 .../drm/i915/gt/selftest_engine_heartbeat.c   |   3 +-
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c  |  29 ++--
 drivers/gpu/drm/i915/gt/selftest_lrc.c        | 137 ++++++------------
 drivers/gpu/drm/i915/gvt/scheduler.c          |  16 +-
 drivers/gpu/drm/i915/i915_drv.h               |   3 -
 drivers/gpu/drm/i915/i915_gem.c               |  16 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c           |   8 +-
 drivers/gpu/drm/i915/i915_gpu_error.c         |   8 +-
 drivers/gpu/drm/i915/i915_perf.c              |   3 -
 drivers/gpu/drm/i915/i915_request.c           |   5 +-
 drivers/gpu/drm/i915/selftests/i915_request.c |   6 +-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   8 +-
 28 files changed, 216 insertions(+), 331 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 455690c80d42..ff9e64c513e7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -170,6 +170,26 @@ lookup_user_engine(struct i915_gem_context *ctx,
 	return i915_gem_context_get_engine(ctx, idx);
 }
 
+static void intel_context_set_gem(struct intel_context *ce,
+				  struct i915_gem_context *ctx)
+{
+	ce->gem_context = ctx;
+
+	ce->ring = __intel_context_ring_size(SZ_16K);
+
+	if (ctx->vm) {
+		i915_vm_put(ce->vm);
+		ce->vm = i915_vm_get(ctx->vm);
+	}
+
+	if (ctx->timeline)
+		ce->timeline = intel_timeline_get(ctx->timeline);
+
+	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+	    intel_engine_has_semaphores(ce->engine))
+		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+}
+
 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
 {
 	while (count--) {
@@ -212,12 +232,14 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
 		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
 		GEM_BUG_ON(e->engines[engine->legacy_idx]);
 
-		ce = intel_context_create(ctx, engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			__free_engines(e, e->num_engines + 1);
 			return ERR_CAST(ce);
 		}
 
+		intel_context_set_gem(ce, ctx);
+
 		e->engines[engine->legacy_idx] = ce;
 		e->num_engines = max(e->num_engines, engine->legacy_idx);
 	}
@@ -634,37 +656,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
 	return ctx;
 }
 
-static void
-destroy_kernel_context(struct i915_gem_context **ctxp)
-{
-	struct i915_gem_context *ctx;
-
-	/* Keep the context ref so that we can free it immediately ourselves */
-	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
-	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
-	context_close(ctx);
-	i915_gem_context_free(ctx);
-}
-
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
-{
-	struct i915_gem_context *ctx;
-
-	ctx = i915_gem_create_context(i915, 0);
-	if (IS_ERR(ctx))
-		return ctx;
-
-	i915_gem_context_clear_bannable(ctx);
-	i915_gem_context_set_persistence(ctx);
-	ctx->sched.priority = I915_USER_PRIORITY(prio);
-
-	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
-
-	return ctx;
-}
-
 static void init_contexts(struct i915_gem_contexts *gc)
 {
 	spin_lock_init(&gc->lock);
@@ -674,32 +665,12 @@ static void init_contexts(struct i915_gem_contexts *gc)
 	init_llist_head(&gc->free_list);
 }
 
-int i915_gem_init_contexts(struct drm_i915_private *i915)
+void i915_gem_init_contexts(struct drm_i915_private *i915)
 {
-	struct i915_gem_context *ctx;
-
-	/* Reassure ourselves we are only called once */
-	GEM_BUG_ON(i915->kernel_context);
-
 	init_contexts(&i915->gem.contexts);
-
-	/* lowest priority; idle task */
-	ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
-	if (IS_ERR(ctx)) {
-		DRM_ERROR("Failed to create default global context\n");
-		return PTR_ERR(ctx);
-	}
-	i915->kernel_context = ctx;
-
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
 			 DRIVER_CAPS(i915)->has_logical_contexts ?
 			 "logical" : "fake");
-	return 0;
-}
-
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
-{
-	destroy_kernel_context(&i915->kernel_context);
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
@@ -1457,12 +1428,14 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
 		}
 	}
 
-	ce = intel_execlists_create_virtual(set->ctx, siblings, n);
+	ce = intel_execlists_create_virtual(siblings, n);
 	if (IS_ERR(ce)) {
 		err = PTR_ERR(ce);
 		goto out_siblings;
 	}
 
+	intel_context_set_gem(ce, set->ctx);
+
 	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
 		intel_context_put(ce);
 		err = -EEXIST;
@@ -1632,12 +1605,14 @@ set_engines(struct i915_gem_context *ctx,
 			return -ENOENT;
 		}
 
-		ce = intel_context_create(ctx, engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			__free_engines(set.engines, n);
 			return PTR_ERR(ce);
 		}
 
+		intel_context_set_gem(ce, ctx);
+
 		set.engines->engines[n] = ce;
 	}
 	set.engines->num_engines = num_engines;
@@ -1938,13 +1913,15 @@ static int clone_engines(struct i915_gem_context *dst,
 		 */
 		if (intel_engine_is_virtual(engine))
 			clone->engines[n] =
-				intel_execlists_clone_virtual(dst, engine);
+				intel_execlists_clone_virtual(engine);
 		else
-			clone->engines[n] = intel_context_create(dst, engine);
+			clone->engines[n] = intel_context_create(engine);
 		if (IS_ERR_OR_NULL(clone->engines[n])) {
 			__free_engines(clone, n);
 			goto err_unlock;
 		}
+
+		intel_context_set_gem(clone->engines[n], dst);
 	}
 	clone->num_engines = n;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 69932899803e..d99fff481d59 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -115,8 +115,7 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 }
 
 /* i915_gem_context.c */
-int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
-void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
+void i915_gem_init_contexts(struct drm_i915_private *i915);
 
 int i915_gem_context_open(struct drm_i915_private *i915,
 			  struct drm_file *file);
@@ -140,9 +139,6 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
 				       struct drm_file *file);
 
-struct i915_gem_context *
-i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
-
 static inline struct i915_gem_context *
 i915_gem_context_get(struct i915_gem_context *ctx)
 {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 6dc86c129eee..4f6b847ea011 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1261,8 +1261,7 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
 			 * memory location.
 			 */
 			if (cache->gen >= 12)
-				ce = intel_context_create(eb->context->gem_context,
-							  eb->engine);
+				ce = intel_context_create(eb->engine);
 			else
 				ce = intel_context_get(eb->context);
 			if (IS_ERR(ce))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 1e045c337044..432f2828b093 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -753,15 +753,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 		return -EFAULT;
 
 	if (args->flags & I915_USERPTR_READ_ONLY) {
-		struct i915_address_space *vm;
-
 		/*
 		 * On almost all of the older hw, we cannot tell the GPU that
 		 * a page is readonly.
 		 */
-		vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
-					       true); /* static vm */
-		if (!vm || !vm->has_read_only)
+		if (!dev_priv->gt.vm->has_read_only)
 			return -ENODEV;
 	}
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index c6e61564bb5e..6d174caa8f8b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -309,7 +309,7 @@ static int live_parallel_switch(void *arg)
 			if (!data[m].ce[0])
 				continue;
 
-			ce = intel_context_create(ctx, data[m].ce[0]->engine);
+			ce = intel_context_create(data[m].ce[0]->engine);
 			if (IS_ERR(ce))
 				goto out;
 
@@ -1230,8 +1230,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
 			hweight32(engine->sseu.slice_mask),
 			hweight32(pg_sseu.slice_mask));
 
-		ce = intel_context_create(engine->kernel_context->gem_context,
-					  engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			ret = PTR_ERR(ce);
 			goto out_put;
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 29b8984f0e47..f472fc9240d5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -97,7 +97,16 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
 struct i915_gem_context *
 kernel_context(struct drm_i915_private *i915)
 {
-	return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL);
+	struct i915_gem_context *ctx;
+
+	ctx = i915_gem_create_context(i915, 0);
+	if (IS_ERR(ctx))
+		return ctx;
+
+	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+	i915_gem_context_clear_bannable(ctx);
+
+	return ctx;
 }
 
 void kernel_context_close(struct i915_gem_context *ctx)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 7075d03f508f..8b71b2ccbf27 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -31,8 +31,7 @@ void intel_context_free(struct intel_context *ce)
 }
 
 struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
-		     struct intel_engine_cs *engine)
+intel_context_create(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
 
@@ -40,7 +39,7 @@ intel_context_create(struct i915_gem_context *ctx,
 	if (!ce)
 		return ERR_PTR(-ENOMEM);
 
-	intel_context_init(ce, ctx, engine);
+	intel_context_init(ce, engine);
 	return ce;
 }
 
@@ -72,8 +71,6 @@ int __intel_context_do_pin(struct intel_context *ce)
 			  ce->engine->name, ce->timeline->fence_context,
 			  ce->ring->head, ce->ring->tail);
 
-		i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
-
 		smp_mb__before_atomic(); /* flush pin before it is visible */
 	}
 
@@ -103,7 +100,6 @@ void intel_context_unpin(struct intel_context *ce)
 
 		ce->ops->unpin(ce);
 
-		i915_gem_context_put(ce->gem_context);
 		intel_context_active_release(ce);
 	}
 
@@ -198,7 +194,7 @@ int intel_context_active_acquire(struct intel_context *ce)
 		return err;
 
 	/* Preallocate tracking nodes */
-	if (!i915_gem_context_is_kernel(ce->gem_context)) {
+	if (ce->gem_context) {
 		err = i915_active_acquire_preallocate_barrier(&ce->active,
 							      ce->engine);
 		if (err) {
@@ -219,33 +215,19 @@ void intel_context_active_release(struct intel_context *ce)
 
 void
 intel_context_init(struct intel_context *ce,
-		   struct i915_gem_context *ctx,
 		   struct intel_engine_cs *engine)
 {
-	struct i915_address_space *vm;
-
 	GEM_BUG_ON(!engine->cops);
+	GEM_BUG_ON(!engine->gt->vm);
 
 	kref_init(&ce->ref);
 
-	ce->gem_context = ctx;
-	rcu_read_lock();
-	vm = rcu_dereference(ctx->vm);
-	if (vm)
-		ce->vm = i915_vm_get(vm);
-	else
-		ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
-	rcu_read_unlock();
-	if (ctx->timeline)
-		ce->timeline = intel_timeline_get(ctx->timeline);
-	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
-	    intel_engine_has_semaphores(engine))
-		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
-
 	ce->engine = engine;
 	ce->ops = engine->cops;
 	ce->sseu = engine->sseu;
-	ce->ring = __intel_context_ring_size(SZ_16K);
+	ce->ring = __intel_context_ring_size(SZ_4K);
+
+	ce->vm = i915_vm_get(engine->gt->vm);
 
 	INIT_LIST_HEAD(&ce->signal_link);
 	INIT_LIST_HEAD(&ce->signals);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index d7b667a26e08..15bc46e1b308 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -18,13 +18,11 @@
 #include "intel_timeline_types.h"
 
 void intel_context_init(struct intel_context *ce,
-			struct i915_gem_context *ctx,
 			struct intel_engine_cs *engine);
 void intel_context_fini(struct intel_context *ce);
 
 struct intel_context *
-intel_context_create(struct i915_gem_context *ctx,
-		     struct intel_engine_cs *engine);
+intel_context_create(struct intel_engine_cs *engine);
 
 void intel_context_free(struct intel_context *ce);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index efde3f8e0559..b38ea44ab761 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -756,12 +756,10 @@ create_kernel_context(struct intel_engine_cs *engine)
 	struct intel_context *ce;
 	int err;
 
-	ce = intel_context_create(engine->i915->kernel_context, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return ce;
 
-	ce->ring = __intel_context_ring_size(SZ_4K);
-
 	err = intel_context_pin(ce);
 	if (err) {
 		intel_context_put(ce);
@@ -797,6 +795,12 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 
 	engine->set_default_submission(engine);
 
+	ret = measure_breadcrumb_dw(engine);
+	if (ret < 0)
+		return ret;
+
+	engine->emit_fini_breadcrumb_dw = ret;
+
 	/*
 	 * We may need to do things with the shrinker which
 	 * require us to immediately switch back to the default
@@ -811,18 +815,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 
 	engine->kernel_context = ce;
 
-	ret = measure_breadcrumb_dw(engine);
-	if (ret < 0)
-		goto err_unpin;
-
-	engine->emit_fini_breadcrumb_dw = ret;
-
 	return 0;
-
-err_unpin:
-	intel_context_unpin(ce);
-	intel_context_put(ce);
-	return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 4c26daf7ee46..f9fb0865fbb6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -73,7 +73,6 @@ int intel_gt_init_hw(struct intel_gt *gt)
 	struct intel_uncore *uncore = gt->uncore;
 	int ret;
 
-	BUG_ON(!i915->kernel_context);
 	ret = intel_gt_terminally_wedged(gt);
 	if (ret)
 		return ret;
@@ -364,6 +363,14 @@ static void intel_gt_fini_scratch(struct intel_gt *gt)
 	i915_vma_unpin_and_release(&gt->scratch, 0);
 }
 
+static struct i915_address_space *kernel_vm(struct intel_gt *gt)
+{
+	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
+		return &i915_ppgtt_create(gt->i915)->vm;
+	else
+		return i915_vm_get(&gt->ggtt->vm);
+}
+
 int intel_gt_init(struct intel_gt *gt)
 {
 	int err;
@@ -374,7 +381,17 @@ int intel_gt_init(struct intel_gt *gt)
 
 	intel_gt_pm_init(gt);
 
+	gt->vm = kernel_vm(gt);
+	if (!gt->vm) {
+		err = -ENOMEM;
+		goto err_scratch;
+	}
+
 	return 0;
+
+err_scratch:
+	intel_gt_fini_scratch(gt);
+	return err;
 }
 
 void intel_gt_driver_remove(struct intel_gt *gt)
@@ -389,6 +406,13 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
 
 void intel_gt_driver_release(struct intel_gt *gt)
 {
+	struct i915_address_space *vm;
+
+	vm = fetch_and_zero(&gt->vm);
+	if (!vm) /* FIXME being called twice on error paths :( */
+		return;
+
+	i915_vm_put(vm);
 	intel_gt_pm_fini(gt);
 	intel_gt_fini_scratch(gt);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index d4e14dbd172e..96890dd12b5f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -90,6 +90,13 @@ struct intel_gt {
 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
 					    [MAX_ENGINE_INSTANCE + 1];
+
+	/*
+	 * Default address space (either GGTT or ppGTT depending on arch).
+	 *
+	 * Reserved for exclusive use by the kernel.
+	 */
+	struct i915_address_space *vm;
 };
 
 enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 5b23aeb50385..242e6927675c 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -133,12 +133,11 @@
  */
 #include <linux/interrupt.h>
 
-#include "gem/i915_gem_context.h"
-
 #include "i915_drv.h"
 #include "i915_perf.h"
 #include "i915_trace.h"
 #include "i915_vgpu.h"
+#include "intel_context.h"
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
@@ -4393,8 +4392,7 @@ virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 }
 
 struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
-			       struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 			       unsigned int count)
 {
 	struct virtual_engine *ve;
@@ -4405,13 +4403,13 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 		return ERR_PTR(-EINVAL);
 
 	if (count == 1)
-		return intel_context_create(ctx, siblings[0]);
+		return intel_context_create(siblings[0]);
 
 	ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
 	if (!ve)
 		return ERR_PTR(-ENOMEM);
 
-	ve->base.i915 = ctx->i915;
+	ve->base.i915 = siblings[0]->i915;
 	ve->base.gt = siblings[0]->gt;
 	ve->base.uncore = siblings[0]->uncore;
 	ve->base.id = -1;
@@ -4454,7 +4452,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 		     virtual_submission_tasklet,
 		     (unsigned long)ve);
 
-	intel_context_init(&ve->context, ctx, &ve->base);
+	intel_context_init(&ve->context, &ve->base);
 
 	for (n = 0; n < count; n++) {
 		struct intel_engine_cs *sibling = siblings[n];
@@ -4535,14 +4533,12 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 }
 
 struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
-			      struct intel_engine_cs *src)
+intel_execlists_clone_virtual(struct intel_engine_cs *src)
 {
 	struct virtual_engine *se = to_virtual_engine(src);
 	struct intel_context *dst;
 
-	dst = intel_execlists_create_virtual(ctx,
-					     se->siblings,
+	dst = intel_execlists_create_virtual(se->siblings,
 					     se->num_siblings);
 	if (IS_ERR(dst))
 		return dst;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h
index faa2d56c279b..faa496ee71bf 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.h
@@ -130,13 +130,11 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 				   unsigned int max);
 
 struct intel_context *
-intel_execlists_create_virtual(struct i915_gem_context *ctx,
-			       struct intel_engine_cs **siblings,
+intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 			       unsigned int count);
 
 struct intel_context *
-intel_execlists_clone_virtual(struct i915_gem_context *ctx,
-			      struct intel_engine_cs *src);
+intel_execlists_clone_virtual(struct intel_engine_cs *src);
 
 int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 				     const struct intel_engine_cs *master,
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index b4c0ad4a6081..98e7bba5979e 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -86,6 +86,9 @@ static bool mark_guilty(struct i915_request *rq)
 	bool banned;
 	int i;
 
+	if (!ctx)
+		return false;
+
 	atomic_inc(&ctx->guilty_count);
 
 	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
@@ -115,7 +118,8 @@ static bool mark_guilty(struct i915_request *rq)
 
 static void mark_innocent(struct i915_request *rq)
 {
-	atomic_inc(&rq->context->gem_context->active_count);
+	if (rq->context->gem_context)
+		atomic_inc(&rq->context->gem_context->active_count);
 }
 
 void __i915_request_reset(struct i915_request *rq, bool guilty)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 1c2f3c3a8840..6ddfe8a86dbe 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -1558,7 +1558,7 @@ static int remap_l3(struct i915_request *rq)
 	struct i915_gem_context *ctx = rq->context->gem_context;
 	int i, err;
 
-	if (!ctx->remap_slice)
+	if (!ctx || !ctx->remap_slice)
 		return 0;
 
 	for (i = 0; i < MAX_L3_SLICES; i++) {
@@ -1601,7 +1601,7 @@ static int switch_context(struct i915_request *rq)
 		 * is purely used for flushing user contexts.
 		 */
 		hw_flags = 0;
-		if (i915_gem_context_is_kernel(rq->context->gem_context))
+		if (!rq->context->gem_context)
 			hw_flags = MI_RESTORE_INHIBIT;
 
 		ret = mi_set_context(rq, hw_flags);
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index f63a26a3e620..6b7048636c43 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -67,15 +67,14 @@ static int context_sync(struct intel_context *ce)
 	return err;
 }
 
-static int __live_context_size(struct intel_engine_cs *engine,
-			       struct i915_gem_context *fixme)
+static int __live_context_size(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
 	struct i915_request *rq;
 	void *vaddr;
 	int err;
 
-	ce = intel_context_create(fixme, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
@@ -146,7 +145,6 @@ static int live_context_size(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	enum intel_engine_id id;
 	int err = 0;
 
@@ -155,10 +153,6 @@ static int live_context_size(void *arg)
 	 * HW tries to write past the end of one.
 	 */
 
-	fixme = kernel_context(gt->i915);
-	if (IS_ERR(fixme))
-		return PTR_ERR(fixme);
-
 	for_each_engine(engine, gt, id) {
 		struct {
 			struct drm_i915_gem_object *state;
@@ -183,7 +177,7 @@ static int live_context_size(void *arg)
 		/* Overlaps with the execlists redzone */
 		engine->context_size += I915_GTT_PAGE_SIZE;
 
-		err = __live_context_size(engine, fixme);
+		err = __live_context_size(engine);
 
 		engine->context_size -= I915_GTT_PAGE_SIZE;
 
@@ -196,12 +190,10 @@ static int live_context_size(void *arg)
 			break;
 	}
 
-	kernel_context_close(fixme);
 	return err;
 }
 
-static int __live_active_context(struct intel_engine_cs *engine,
-				 struct i915_gem_context *fixme)
+static int __live_active_context(struct intel_engine_cs *engine)
 {
 	struct intel_context *ce;
 	int pass;
@@ -226,10 +218,17 @@ static int __live_active_context(struct intel_engine_cs *engine,
 		return -EINVAL;
 	}
 
-	ce = intel_context_create(fixme, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
+	/* Pretend to be a user context to partake in idle barriers */
+	ce->gem_context = kernel_context(engine->i915);
+	if (IS_ERR(ce->gem_context)) {
+		err = PTR_ERR(ce->gem_context);
+		goto err_ce;
+	}
+
 	for (pass = 0; pass <= 2; pass++) {
 		struct i915_request *rq;
 
@@ -282,6 +279,8 @@ static int __live_active_context(struct intel_engine_cs *engine,
 	}
 
 err:
+	kernel_context_close(ce->gem_context);
+err_ce:
 	intel_context_put(ce);
 	return err;
 }
@@ -290,7 +289,6 @@ static int live_active_context(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	enum intel_engine_id id;
 	struct drm_file *file;
 	int err = 0;
@@ -299,14 +297,8 @@ static int live_active_context(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	fixme = live_context(gt->i915, file);
-	if (IS_ERR(fixme)) {
-		err = PTR_ERR(fixme);
-		goto out_file;
-	}
-
 	for_each_engine(engine, gt, id) {
-		err = __live_active_context(engine, fixme);
+		err = __live_active_context(engine);
 		if (err)
 			break;
 
@@ -315,7 +307,6 @@ static int live_active_context(void *arg)
 			break;
 	}
 
-out_file:
 	mock_file_free(gt->i915, file);
 	return err;
 }
@@ -348,8 +339,7 @@ static int __remote_sync(struct intel_context *ce, struct intel_context *remote)
 	return err;
 }
 
-static int __live_remote_context(struct intel_engine_cs *engine,
-				 struct i915_gem_context *fixme)
+static int __live_remote_context(struct intel_engine_cs *engine)
 {
 	struct intel_context *local, *remote;
 	int pass;
@@ -363,11 +353,18 @@ static int __live_remote_context(struct intel_engine_cs *engine,
 	 * clobber the idle-barrier.
 	 */
 
-	remote = intel_context_create(fixme, engine);
+	remote = intel_context_create(engine);
 	if (IS_ERR(remote))
 		return PTR_ERR(remote);
 
-	local = intel_context_create(fixme, engine);
+	/* Pretend to be a user context to partake in idle barriers */
+	remote->gem_context = kernel_context(engine->i915);
+	if (IS_ERR(remote->gem_context)) {
+		err = PTR_ERR(remote->gem_context);
+		goto err_remote_ce;
+	}
+
+	local = intel_context_create(engine);
 	if (IS_ERR(local)) {
 		err = PTR_ERR(local);
 		goto err_remote;
@@ -392,6 +387,8 @@ static int __live_remote_context(struct intel_engine_cs *engine,
 
 	intel_context_put(local);
 err_remote:
+	kernel_context_close(remote->gem_context);
+err_remote_ce:
 	intel_context_put(remote);
 	return err;
 }
@@ -400,7 +397,6 @@ static int live_remote_context(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	enum intel_engine_id id;
 	struct drm_file *file;
 	int err = 0;
@@ -409,14 +405,8 @@ static int live_remote_context(void *arg)
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	fixme = live_context(gt->i915, file);
-	if (IS_ERR(fixme)) {
-		err = PTR_ERR(fixme);
-		goto out_file;
-	}
-
 	for_each_engine(engine, gt, id) {
-		err = __live_remote_context(engine, fixme);
+		err = __live_remote_context(engine);
 		if (err)
 			break;
 
@@ -425,7 +415,6 @@ static int live_remote_context(void *arg)
 			break;
 	}
 
-out_file:
 	mock_file_free(gt->i915, file);
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 155c508024df..9e7376b592e5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -169,8 +169,7 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine)
 	int err;
 	int i;
 
-	ce = intel_context_create(engine->kernel_context->gem_context,
-				  engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 8abc0a1d692b..4f8cd3cbb8bd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -377,36 +377,30 @@ static int igt_reset_nop(void *arg)
 	struct intel_gt *gt = arg;
 	struct i915_gpu_error *global = &gt->i915->gpu_error;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
 	unsigned int reset_count, count;
 	enum intel_engine_id id;
-	struct drm_file *file;
 	IGT_TIMEOUT(end_time);
 	int err = 0;
 
 	/* Check that we can reset during non-user portions of requests */
 
-	file = mock_file(gt->i915);
-	if (IS_ERR(file))
-		return PTR_ERR(file);
-
-	ctx = live_context(gt->i915, file);
-	if (IS_ERR(ctx)) {
-		err = PTR_ERR(ctx);
-		goto out;
-	}
-
-	i915_gem_context_clear_bannable(ctx);
 	reset_count = i915_reset_count(global);
 	count = 0;
 	do {
 		for_each_engine(engine, gt, id) {
+			struct intel_context *ce;
 			int i;
 
+			ce = intel_context_create(engine);
+			if (IS_ERR(ce)) {
+				err = PTR_ERR(ce);
+				break;
+			}
+
 			for (i = 0; i < 16; i++) {
 				struct i915_request *rq;
 
-				rq = igt_request_alloc(ctx, engine);
+				rq = intel_context_create_request(ce);
 				if (IS_ERR(rq)) {
 					err = PTR_ERR(rq);
 					break;
@@ -414,6 +408,8 @@ static int igt_reset_nop(void *arg)
 
 				i915_request_add(rq);
 			}
+
+			intel_context_put(ce);
 		}
 
 		igt_global_reset_lock(gt);
@@ -437,10 +433,7 @@ static int igt_reset_nop(void *arg)
 	} while (time_before(jiffies, end_time));
 	pr_info("%s: %d resets\n", __func__, count);
 
-	err = igt_flush_test(gt->i915);
-out:
-	mock_file_free(gt->i915, file);
-	if (intel_gt_is_wedged(gt))
+	if (igt_flush_test(gt->i915))
 		err = -EIO;
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 95504c48cd99..04d0dd6a938a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -53,11 +53,10 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
 static int live_sanitycheck(void *arg)
 {
 	struct intel_gt *gt = arg;
-	struct i915_gem_engines_iter it;
-	struct i915_gem_context *ctx;
-	struct intel_context *ce;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 	struct igt_spinner spin;
-	int err = -ENOMEM;
+	int err = 0;
 
 	if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
 		return 0;
@@ -65,17 +64,20 @@ static int live_sanitycheck(void *arg)
 	if (igt_spinner_init(&spin, gt))
 		return -ENOMEM;
 
-	ctx = kernel_context(gt->i915);
-	if (!ctx)
-		goto err_spin;
-
-	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
 		struct i915_request *rq;
 
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			break;
+		}
+
 		rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
-			goto err_ctx;
+			goto out_ctx;
 		}
 
 		i915_request_add(rq);
@@ -84,21 +86,21 @@ static int live_sanitycheck(void *arg)
 			GEM_TRACE_DUMP();
 			intel_gt_set_wedged(gt);
 			err = -EIO;
-			goto err_ctx;
+			goto out_ctx;
 		}
 
 		igt_spinner_end(&spin);
 		if (igt_flush_test(gt->i915)) {
 			err = -EIO;
-			goto err_ctx;
+			goto out_ctx;
 		}
+
+out_ctx:
+		intel_context_put(ce);
+		if (err)
+			break;
 	}
 
-	err = 0;
-err_ctx:
-	i915_gem_context_unlock_engines(ctx);
-	kernel_context_close(ctx);
-err_spin:
 	igt_spinner_fini(&spin);
 	return err;
 }
@@ -144,7 +146,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
 		for (n = 0; n < ARRAY_SIZE(ce); n++) {
 			struct intel_context *tmp;
 
-			tmp = intel_context_create(ctx, engine);
+			tmp = intel_context_create(engine);
 			if (IS_ERR(tmp)) {
 				err = PTR_ERR(tmp);
 				goto err_ce;
@@ -309,17 +311,17 @@ emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
 static struct i915_request *
 semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
 {
-	struct i915_gem_context *ctx;
+	struct intel_context *ce;
 	struct i915_request *rq;
 	int err;
 
-	ctx = kernel_context(engine->i915);
-	if (!ctx)
-		return ERR_PTR(-ENOMEM);
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce))
+		return ERR_CAST(ce);
 
-	rq = igt_request_alloc(ctx, engine);
+	rq = intel_context_create_request(ce);
 	if (IS_ERR(rq))
-		goto out_ctx;
+		goto out_ce;
 
 	err = 0;
 	if (rq->engine->emit_init_breadcrumb)
@@ -332,8 +334,8 @@ semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
 	if (err)
 		rq = ERR_PTR(err);
 
-out_ctx:
-	kernel_context_close(ctx);
+out_ce:
+	intel_context_put(ce);
 	return rq;
 }
 
@@ -2390,27 +2392,17 @@ static int nop_virtual_engine(struct intel_gt *gt,
 {
 	IGT_TIMEOUT(end_time);
 	struct i915_request *request[16];
-	struct i915_gem_context *ctx[16];
 	struct intel_context *ve[16];
 	unsigned long n, prime, nc;
 	struct igt_live_test t;
 	ktime_t times[2] = {};
 	int err;
 
-	GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
+	GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
 
 	for (n = 0; n < nctx; n++) {
-		ctx[n] = kernel_context(gt->i915);
-		if (!ctx[n]) {
-			err = -ENOMEM;
-			nctx = n;
-			goto out;
-		}
-
-		ve[n] = intel_execlists_create_virtual(ctx[n],
-						       siblings, nsibling);
+		ve[n] = intel_execlists_create_virtual(siblings, nsibling);
 		if (IS_ERR(ve[n])) {
-			kernel_context_close(ctx[n]);
 			err = PTR_ERR(ve[n]);
 			nctx = n;
 			goto out;
@@ -2419,7 +2411,6 @@ static int nop_virtual_engine(struct intel_gt *gt,
 		err = intel_context_pin(ve[n]);
 		if (err) {
 			intel_context_put(ve[n]);
-			kernel_context_close(ctx[n]);
 			nctx = n;
 			goto out;
 		}
@@ -2500,7 +2491,6 @@ static int nop_virtual_engine(struct intel_gt *gt,
 	for (nc = 0; nc < nctx; nc++) {
 		intel_context_unpin(ve[nc]);
 		intel_context_put(ve[nc]);
-		kernel_context_close(ctx[nc]);
 	}
 	return err;
 }
@@ -2559,7 +2549,6 @@ static int mask_virtual_engine(struct intel_gt *gt,
 			       unsigned int nsibling)
 {
 	struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
-	struct i915_gem_context *ctx;
 	struct intel_context *ve;
 	struct igt_live_test t;
 	unsigned int n;
@@ -2570,11 +2559,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
 	 * restrict it to our desired engine within the virtual engine.
 	 */
 
-	ctx = kernel_context(gt->i915);
-	if (!ctx)
-		return -ENOMEM;
-
-	ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+	ve = intel_execlists_create_virtual(siblings, nsibling);
 	if (IS_ERR(ve)) {
 		err = PTR_ERR(ve);
 		goto out_close;
@@ -2642,7 +2627,6 @@ static int mask_virtual_engine(struct intel_gt *gt,
 out_put:
 	intel_context_put(ve);
 out_close:
-	kernel_context_close(ctx);
 	return err;
 }
 
@@ -2682,7 +2666,6 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 				    unsigned int nsibling)
 {
 	struct i915_request *last = NULL;
-	struct i915_gem_context *ctx;
 	struct intel_context *ve;
 	struct i915_vma *scratch;
 	struct igt_live_test t;
@@ -2690,17 +2673,11 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 	int err = 0;
 	u32 *cs;
 
-	ctx = kernel_context(gt->i915);
-	if (!ctx)
-		return -ENOMEM;
-
 	scratch = create_scratch(siblings[0]->gt);
-	if (IS_ERR(scratch)) {
-		err = PTR_ERR(scratch);
-		goto out_close;
-	}
+	if (IS_ERR(scratch))
+		return PTR_ERR(scratch);
 
-	ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+	ve = intel_execlists_create_virtual(siblings, nsibling);
 	if (IS_ERR(ve)) {
 		err = PTR_ERR(ve);
 		goto out_scratch;
@@ -2783,8 +2760,6 @@ static int preserved_virtual_engine(struct intel_gt *gt,
 	intel_context_put(ve);
 out_scratch:
 	i915_vma_unpin_and_release(&scratch, 0);
-out_close:
-	kernel_context_close(ctx);
 	return err;
 }
 
@@ -2878,9 +2853,7 @@ static int bond_virtual_engine(struct intel_gt *gt,
 		for (n = 0; n < nsibling; n++) {
 			struct intel_context *ve;
 
-			ve = intel_execlists_create_virtual(ctx,
-							    siblings,
-							    nsibling);
+			ve = intel_execlists_create_virtual(siblings, nsibling);
 			if (IS_ERR(ve)) {
 				err = PTR_ERR(ve);
 				onstack_fence_fini(&fence);
@@ -3268,8 +3241,7 @@ static int live_lrc_fixed(void *arg)
 	return err;
 }
 
-static int __live_lrc_state(struct i915_gem_context *fixme,
-			    struct intel_engine_cs *engine,
+static int __live_lrc_state(struct intel_engine_cs *engine,
 			    struct i915_vma *scratch)
 {
 	struct intel_context *ce;
@@ -3284,7 +3256,7 @@ static int __live_lrc_state(struct i915_gem_context *fixme,
 	int err;
 	int n;
 
-	ce = intel_context_create(fixme, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
@@ -3358,7 +3330,6 @@ static int live_lrc_state(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	struct i915_vma *scratch;
 	enum intel_engine_id id;
 	int err = 0;
@@ -3368,18 +3339,12 @@ static int live_lrc_state(void *arg)
 	 * intel_context.
 	 */
 
-	fixme = kernel_context(gt->i915);
-	if (!fixme)
-		return -ENOMEM;
-
 	scratch = create_scratch(gt);
-	if (IS_ERR(scratch)) {
-		err = PTR_ERR(scratch);
-		goto out_close;
-	}
+	if (IS_ERR(scratch))
+		return PTR_ERR(scratch);
 
 	for_each_engine(engine, gt, id) {
-		err = __live_lrc_state(fixme, engine, scratch);
+		err = __live_lrc_state(engine, scratch);
 		if (err)
 			break;
 	}
@@ -3388,8 +3353,6 @@ static int live_lrc_state(void *arg)
 		err = -EIO;
 
 	i915_vma_unpin_and_release(&scratch, 0);
-out_close:
-	kernel_context_close(fixme);
 	return err;
 }
 
@@ -3422,8 +3385,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
 	return 0;
 }
 
-static int __live_gpr_clear(struct i915_gem_context *fixme,
-			    struct intel_engine_cs *engine,
+static int __live_gpr_clear(struct intel_engine_cs *engine,
 			    struct i915_vma *scratch)
 {
 	struct intel_context *ce;
@@ -3439,7 +3401,7 @@ static int __live_gpr_clear(struct i915_gem_context *fixme,
 	if (err)
 		return err;
 
-	ce = intel_context_create(fixme, engine);
+	ce = intel_context_create(engine);
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
@@ -3501,7 +3463,6 @@ static int live_gpr_clear(void *arg)
 {
 	struct intel_gt *gt = arg;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *fixme;
 	struct i915_vma *scratch;
 	enum intel_engine_id id;
 	int err = 0;
@@ -3511,18 +3472,12 @@ static int live_gpr_clear(void *arg)
 	 * to avoid leaking any information from previous contexts.
 	 */
 
-	fixme = kernel_context(gt->i915);
-	if (!fixme)
-		return -ENOMEM;
-
 	scratch = create_scratch(gt);
-	if (IS_ERR(scratch)) {
-		err = PTR_ERR(scratch);
-		goto out_close;
-	}
+	if (IS_ERR(scratch))
+		return PTR_ERR(scratch);
 
 	for_each_engine(engine, gt, id) {
-		err = __live_gpr_clear(fixme, engine, scratch);
+		err = __live_gpr_clear(engine, scratch);
 		if (err)
 			break;
 	}
@@ -3531,8 +3486,6 @@ static int live_gpr_clear(void *arg)
 		err = -EIO;
 
 	i915_vma_unpin_and_release(&scratch, 0);
-out_close:
-	kernel_context_close(fixme);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 976d6950218e..f0785b9fe02c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -35,12 +35,12 @@
 
 #include <linux/kthread.h>
 
-#include "gem/i915_gem_context.h"
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
 #include "gt/intel_ring.h"
 
 #include "i915_drv.h"
+#include "i915_gem_gtt.h"
 #include "gvt.h"
 
 #define RING_CTX_OFF(x) \
@@ -1220,16 +1220,14 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
 	struct i915_ppgtt *ppgtt;
 	enum intel_engine_id i;
 	int ret;
 
-	ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
+	ppgtt = i915_ppgtt_create(i915);
+	if (IS_ERR(ppgtt))
+		return PTR_ERR(ppgtt);
 
-	ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
 	i915_context_ppgtt_root_save(s, ppgtt);
 
 	for_each_engine(engine, i915, i) {
@@ -1238,12 +1236,14 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		INIT_LIST_HEAD(&s->workload_q_head[i]);
 		s->shadow[i] = ERR_PTR(-EINVAL);
 
-		ce = intel_context_create(ctx, engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			ret = PTR_ERR(ce);
 			goto out_shadow_ctx;
 		}
 
+		i915_vm_put(ce->vm);
+		ce->vm = i915_vm_get(&ppgtt->vm);
 		intel_context_set_single_submission(ce);
 
 		if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
@@ -1278,7 +1278,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
 
 	i915_vm_put(&ppgtt->vm);
-	i915_gem_context_put(ctx);
 	return 0;
 
 out_shadow_ctx:
@@ -1291,7 +1290,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		intel_context_put(s->shadow[i]);
 	}
 	i915_vm_put(&ppgtt->vm);
-	i915_gem_context_put(ctx);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b5de8f928320..e308a9a71ab1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -956,9 +956,6 @@ struct drm_i915_private {
 
 	struct pci_dev *bridge_dev;
 
-	/* Context used internally to idle the GPU and setup initial state */
-	struct i915_gem_context *kernel_context;
-
 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
 	struct rb_root uabi_engines;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fe348ff7db4f..1063d4c74a7f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1063,8 +1063,7 @@ static int __intel_engines_record_defaults(struct intel_gt *gt)
 		GEM_BUG_ON(!engine->kernel_context);
 		engine->serial++; /* force the kernel context switch */
 
-		ce = intel_context_create(engine->kernel_context->gem_context,
-					  engine);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			err = PTR_ERR(ce);
 			goto out;
@@ -1219,6 +1218,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	}
 
 	intel_gt_init(&dev_priv->gt);
+	i915_gem_init_contexts(dev_priv);
 
 	ret = intel_engines_setup(&dev_priv->gt);
 	if (ret) {
@@ -1226,16 +1226,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		goto err_unlock;
 	}
 
-	ret = i915_gem_init_contexts(dev_priv);
-	if (ret) {
-		GEM_BUG_ON(ret == -EIO);
-		goto err_scratch;
-	}
-
 	ret = intel_engines_init(&dev_priv->gt);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
-		goto err_context;
+		goto err_scratch;
 	}
 
 	intel_uc_init(&dev_priv->gt.uc);
@@ -1299,9 +1293,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		intel_uc_fini(&dev_priv->gt.uc);
 		intel_engines_cleanup(&dev_priv->gt);
 	}
-err_context:
-	if (ret != -EIO)
-		i915_gem_driver_release__contexts(dev_priv);
 err_scratch:
 	intel_gt_driver_release(&dev_priv->gt);
 err_unlock:
@@ -1367,7 +1358,6 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
 void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 {
 	intel_engines_cleanup(&dev_priv->gt);
-	i915_gem_driver_release__contexts(dev_priv);
 	intel_gt_driver_release(&dev_priv->gt);
 
 	intel_wa_list_free(&dev_priv->gt_wa_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 88179202c556..5e207412a459 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1363,12 +1363,8 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 	 * If everybody agrees to not to write into the scratch page,
 	 * we can reuse it for all vm, keeping contexts and processes separate.
 	 */
-	if (vm->has_read_only &&
-	    vm->i915->kernel_context &&
-	    vm->i915->kernel_context->vm) {
-		struct i915_address_space *clone =
-			rcu_dereference_protected(vm->i915->kernel_context->vm,
-						  true); /* static */
+	if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
+		struct i915_address_space *clone = vm->gt->vm;
 
 		GEM_BUG_ON(!clone->has_read_only);
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b40157cfae14..598425a2be62 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1231,9 +1231,11 @@ static void record_request(const struct i915_request *request,
 	erq->head = request->head;
 	erq->tail = request->tail;
 
-	rcu_read_lock();
-	erq->pid = ctx && ctx->pid ? pid_nr(ctx->pid) : 0;
-	rcu_read_unlock();
+	if (ctx) {
+		rcu_read_lock();
+		erq->pid = ctx && ctx->pid ? pid_nr(ctx->pid) : 0;
+		rcu_read_unlock();
+	}
 }
 
 static void engine_record_requests(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 6e35b9255882..f1147fbb1dc0 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2340,9 +2340,6 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
 	 */
 	spin_lock(&i915->gem.contexts.lock);
 	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
-		if (ctx == i915->kernel_context)
-			continue;
-
 		if (!kref_get_unless_zero(&ctx->ref))
 			continue;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b9bccdb9ee6c..3b9c76dd08f0 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1227,8 +1227,8 @@ void __i915_request_queue(struct i915_request *rq,
 
 void i915_request_add(struct i915_request *rq)
 {
-	struct i915_sched_attr attr = rq->context->gem_context->sched;
 	struct intel_timeline * const tl = i915_request_timeline(rq);
+	struct i915_sched_attr attr = {};
 	struct i915_request *prev;
 
 	lockdep_assert_held(&tl->mutex);
@@ -1238,6 +1238,9 @@ void i915_request_add(struct i915_request *rq)
 
 	prev = __i915_request_commit(rq);
 
+	if (rq->context->gem_context)
+		attr = rq->context->gem_context->sched;
+
 	/*
 	 * Boost actual workloads past semaphores!
 	 *
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 30ae34f62176..9ddade4eacf8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -739,10 +739,8 @@ static int live_empty_request(void *arg)
 
 static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 {
-	struct i915_gem_context *ctx = i915->kernel_context;
 	struct drm_i915_gem_object *obj;
 	const int gen = INTEL_GEN(i915);
-	struct i915_address_space *vm;
 	struct i915_vma *vma;
 	u32 *cmd;
 	int err;
@@ -751,9 +749,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	vm = i915_gem_context_get_vm_rcu(ctx);
-	vma = i915_vma_instance(obj, vm, NULL);
-	i915_vm_put(vm);
+	vma = i915_vma_instance(obj, i915->gt.vm, NULL);
 	if (IS_ERR(vma)) {
 		err = PTR_ERR(vma);
 		goto err;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 27ed3cee6a9b..98a754faf222 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -63,7 +63,6 @@ static void mock_device_release(struct drm_device *dev)
 
 	for_each_engine(engine, &i915->gt, id)
 		mock_engine_free(engine);
-	i915_gem_driver_release__contexts(i915);
 
 	intel_timelines_fini(i915);
 
@@ -183,6 +182,7 @@ struct drm_i915_private *mock_gem_device(void)
 	intel_timelines_init(i915);
 
 	mock_init_ggtt(i915, &i915->ggtt);
+	i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
 
 	mkwrite_device_info(i915)->engine_mask = BIT(0);
 
@@ -190,10 +190,6 @@ struct drm_i915_private *mock_gem_device(void)
 	if (!i915->engine[RCS0])
 		goto err_unlock;
 
-	i915->kernel_context = mock_context(i915, NULL);
-	if (!i915->kernel_context)
-		goto err_engine;
-
 	if (mock_engine_init(i915->engine[RCS0]))
 		goto err_context;
 
@@ -202,8 +198,6 @@ struct drm_i915_private *mock_gem_device(void)
 	return i915;
 
 err_context:
-	i915_gem_driver_release__contexts(i915);
-err_engine:
 	mock_engine_free(i915->engine[RCS0]);
 err_unlock:
 	intel_timelines_fini(i915);
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 28+ messages in thread

* [PATCH 09/11] drm/i915/gt: Defer engine registration until fully initialised
@ 2019-10-30 19:21   ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2019-10-30 19:21 UTC (permalink / raw)
  To: intel-gfx

Only add the engine to the available set of uabi engines once it has
been fully initialised and we know we want it in the public set.
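
As a rough sketch of the resulting ordering (simplified; engine_init() below is
a stand-in name for the per-backend init() hook picked in intel_engines_init(),
not a real i915 function):

	for_each_engine(engine, gt, id) {
		err = engine_init(engine);	/* fully construct the engine first */
		if (err)
			goto cleanup;		/* never published, so nothing uabi-visible to unwind */

		intel_engine_add_user(engine);	/* expose to userspace only as the last step */
	}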

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Michał Wajdeczko <michal.wajdeczko@intel.com>
Cc: Andi Shyti <andi.shyti@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index b38ea44ab761..5e40044478ff 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -344,7 +344,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	gt->engine_class[info->class][info->instance] = engine;
 	gt->engine[id] = engine;
 
-	intel_engine_add_user(engine);
 	gt->i915->engine[id] = engine;
 
 	return 0;
@@ -481,6 +480,8 @@ int intel_engines_init(struct intel_gt *gt)
 		err = init(engine);
 		if (err)
 			goto cleanup;
+
+		intel_engine_add_user(engine);
 	}
 
 	return 0;
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 28+ messages in thread

* [PATCH 10/11] drm/i915/gt: Pull timeline initialise to intel_gt_init_early
@ 2019-10-30 19:21   ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2019-10-30 19:21 UTC (permalink / raw)
  To: intel-gfx

Our timelines are currently contained within an intel_gt, and we only
need to perform list/spinlock initialisation, so we can pull the
intel_timelines_init() into our intel_gt_init_early().
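
For callers this is purely a rename plus a move of the call site; a minimal
before/after sketch (names as in the diff below):

	/* before: thin wrapper on the i915 device */
	intel_timelines_init(i915);		/* == timelines_init(&i915->gt) */

	/* after: the gt initialises its own timelines during early init */
	intel_gt_init_timelines(&i915->gt);	/* called from intel_gt_init_early() */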

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_gt.c               |  2 ++
 drivers/gpu/drm/i915/gt/intel_timeline.c         | 14 ++------------
 drivers/gpu/drm/i915/gt/intel_timeline.h         |  4 ++--
 drivers/gpu/drm/i915/i915_gem.c                  |  4 ----
 drivers/gpu/drm/i915/selftests/mock_gem_device.c |  5 -----
 5 files changed, 6 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f9fb0865fbb6..02484860caa8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -25,6 +25,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 
 	intel_gt_init_reset(gt);
 	intel_gt_init_requests(gt);
+	intel_gt_init_timelines(gt);
 	intel_gt_pm_init_early(gt);
 
 	intel_rps_init_early(&gt->rps);
@@ -421,4 +422,5 @@ void intel_gt_driver_late_release(struct intel_gt *gt)
 {
 	intel_uc_driver_late_release(&gt->uc);
 	intel_gt_fini_reset(gt);
+	intel_gt_fini_timelines(gt);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 14ad10acd548..16a9e88d93de 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -262,7 +262,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
 	return 0;
 }
 
-static void timelines_init(struct intel_gt *gt)
+void intel_gt_init_timelines(struct intel_gt *gt)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 
@@ -273,11 +273,6 @@ static void timelines_init(struct intel_gt *gt)
 	INIT_LIST_HEAD(&timelines->hwsp_free_list);
 }
 
-void intel_timelines_init(struct drm_i915_private *i915)
-{
-	timelines_init(&i915->gt);
-}
-
 void intel_timeline_fini(struct intel_timeline *timeline)
 {
 	GEM_BUG_ON(atomic_read(&timeline->pin_count));
@@ -562,7 +557,7 @@ void __intel_timeline_free(struct kref *kref)
 	kfree_rcu(timeline, rcu);
 }
 
-static void timelines_fini(struct intel_gt *gt)
+void intel_gt_fini_timelines(struct intel_gt *gt)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 
@@ -570,11 +565,6 @@ static void timelines_fini(struct intel_gt *gt)
 	GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
 }
 
-void intel_timelines_fini(struct drm_i915_private *i915)
-{
-	timelines_fini(&i915->gt);
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "gt/selftests/mock_timeline.c"
 #include "gt/selftest_timeline.c"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index f583af1ba18d..f5b7eade3809 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -88,7 +88,7 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 			     struct i915_request *until,
 			     u32 *hwsp_offset);
 
-void intel_timelines_init(struct drm_i915_private *i915);
-void intel_timelines_fini(struct drm_i915_private *i915);
+void intel_gt_init_timelines(struct intel_gt *gt);
+void intel_gt_fini_timelines(struct intel_gt *gt);
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1063d4c74a7f..b24825c691f7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1194,8 +1194,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		mkwrite_device_info(dev_priv)->page_sizes =
 			I915_GTT_PAGE_SIZE_4K;
 
-	intel_timelines_init(dev_priv);
-
 	ret = i915_gem_init_userptr(dev_priv);
 	if (ret)
 		return ret;
@@ -1301,7 +1299,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	if (ret != -EIO) {
 		intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
 		i915_gem_cleanup_userptr(dev_priv);
-		intel_timelines_fini(dev_priv);
 	}
 
 	if (ret == -EIO) {
@@ -1364,7 +1361,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
 
 	intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
 	i915_gem_cleanup_userptr(dev_priv);
-	intel_timelines_fini(dev_priv);
 
 	i915_gem_drain_freed_objects(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 98a754faf222..a539d8b39682 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -64,8 +64,6 @@ static void mock_device_release(struct drm_device *dev)
 	for_each_engine(engine, &i915->gt, id)
 		mock_engine_free(engine);
 
-	intel_timelines_fini(i915);
-
 	drain_workqueue(i915->wq);
 	i915_gem_drain_freed_objects(i915);
 
@@ -179,8 +177,6 @@ struct drm_i915_private *mock_gem_device(void)
 
 	mock_init_contexts(i915);
 
-	intel_timelines_init(i915);
-
 	mock_init_ggtt(i915, &i915->ggtt);
 	i915->gt.vm = i915_vm_get(&i915->ggtt.vm);
 
@@ -200,7 +196,6 @@ struct drm_i915_private *mock_gem_device(void)
 err_context:
 	mock_engine_free(i915->engine[RCS0]);
 err_unlock:
-	intel_timelines_fini(i915);
 	destroy_workqueue(i915->wq);
 err_drv:
 	intel_memory_regions_driver_release(i915);
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 28+ messages in thread

* [PATCH 11/11] drm/i915: Move i915_gem_init_contexts() earlier
@ 2019-10-30 19:21   ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2019-10-30 19:21 UTC (permalink / raw)
  To: intel-gfx

As the GEM global context setup is now independent of the GT state
(although the GT code does currently still depend upon the global
i915->kernel_context), we can move its init earlier, leaving the gt init
ready to be extracted.
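
A minimal sketch of the end state (taken from the diff below, error paths
omitted): the context bookkeeping is pure software state, so it can be set up
alongside the other early GEM init.

	void i915_gem_init_early(struct drm_i915_private *dev_priv)
	{
		i915_gem_init__mm(dev_priv);
		i915_gem_init__contexts(dev_priv);	/* lists/locks only, no GT dependency */

		spin_lock_init(&dev_priv->fb_tracking.lock);
	}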

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c |  2 +-
 drivers/gpu/drm/i915/gem/i915_gem_context.h |  2 +-
 drivers/gpu/drm/i915/i915_gem.c             | 23 +++++++++++----------
 3 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ff9e64c513e7..5361c81e2f68 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -665,7 +665,7 @@ static void init_contexts(struct i915_gem_contexts *gc)
 	init_llist_head(&gc->free_list);
 }
 
-void i915_gem_init_contexts(struct drm_i915_private *i915)
+void i915_gem_init__contexts(struct drm_i915_private *i915)
 {
 	init_contexts(&i915->gem.contexts);
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index d99fff481d59..80a90ead4108 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -115,7 +115,7 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 }
 
 /* i915_gem_context.c */
-void i915_gem_init_contexts(struct drm_i915_private *i915);
+void i915_gem_init__contexts(struct drm_i915_private *i915);
 
 int i915_gem_context_open(struct drm_i915_private *i915,
 			  struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b24825c691f7..04350e61c86c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1216,18 +1216,17 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	}
 
 	intel_gt_init(&dev_priv->gt);
-	i915_gem_init_contexts(dev_priv);
 
 	ret = intel_engines_setup(&dev_priv->gt);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
-		goto err_unlock;
+		goto err_gt_early;
 	}
 
 	ret = intel_engines_init(&dev_priv->gt);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
-		goto err_scratch;
+		goto err_engines;
 	}
 
 	intel_uc_init(&dev_priv->gt.uc);
@@ -1254,19 +1253,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
 	ret = intel_engines_verify_workarounds(&dev_priv->gt);
 	if (ret)
-		goto err_gt;
+		goto err_gt_late;
 
 	ret = __intel_engines_record_defaults(&dev_priv->gt);
 	if (ret)
-		goto err_gt;
+		goto err_gt_late;
 
 	ret = i915_inject_probe_error(dev_priv, -ENODEV);
 	if (ret)
-		goto err_gt;
+		goto err_gt_late;
 
 	ret = i915_inject_probe_error(dev_priv, -EIO);
 	if (ret)
-		goto err_gt;
+		goto err_gt_late;
 
 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
@@ -1278,7 +1277,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 * HW as irrevisibly wedged, but keep enough state around that the
 	 * driver doesn't explode during runtime.
 	 */
-err_gt:
+err_gt_late:
 	intel_gt_set_wedged_on_init(&dev_priv->gt);
 	i915_gem_suspend(dev_priv);
 	i915_gem_suspend_late(dev_priv);
@@ -1287,11 +1286,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 err_init_hw:
 	intel_uc_fini_hw(&dev_priv->gt.uc);
 err_uc_init:
-	if (ret != -EIO) {
+	if (ret != -EIO)
 		intel_uc_fini(&dev_priv->gt.uc);
+err_engines:
+	if (ret != -EIO)
 		intel_engines_cleanup(&dev_priv->gt);
-	}
-err_scratch:
+err_gt_early:
 	intel_gt_driver_release(&dev_priv->gt);
 err_unlock:
 	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1382,6 +1382,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
 void i915_gem_init_early(struct drm_i915_private *dev_priv)
 {
 	i915_gem_init__mm(dev_priv);
+	i915_gem_init__contexts(dev_priv);
 
 	spin_lock_init(&dev_priv->fb_tracking.lock);
 }
-- 
2.24.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 28+ messages in thread

* ✓ Fi.CI.BAT: success for series starting with [01/11] drm/i915: Split detaching and removing the vma
@ 2019-10-30 21:19   ` Patchwork
  0 siblings, 0 replies; 28+ messages in thread
From: Patchwork @ 2019-10-30 21:19 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/11] drm/i915: Split detaching and removing the vma
URL   : https://patchwork.freedesktop.org/series/68788/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7226 -> Patchwork_15078
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/index.html

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_15078:

### IGT changes ###

#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * {igt@i915_selftest@live_gt_pm}:
    - fi-kbl-guc:         [PASS][1] -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/fi-kbl-guc/igt@i915_selftest@live_gt_pm.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/fi-kbl-guc/igt@i915_selftest@live_gt_pm.html

  
Known issues
------------

  Here are the changes found in Patchwork_15078 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@kms_chamelium@dp-crc-fast:
    - fi-kbl-7500u:       [PASS][3] -> [FAIL][4] ([fdo#109635 ] / [fdo#110387])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/fi-kbl-7500u/igt@kms_chamelium@dp-crc-fast.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/fi-kbl-7500u/igt@kms_chamelium@dp-crc-fast.html

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
    - fi-cfl-guc:         [PASS][5] -> [INCOMPLETE][6] ([fdo#108126] / [fdo#108743])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/fi-cfl-guc/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/fi-cfl-guc/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
    - fi-skl-guc:         [PASS][7] -> [INCOMPLETE][8] ([fdo#104108] / [fdo#108743])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/fi-skl-guc/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/fi-skl-guc/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
    - fi-apl-guc:         [PASS][9] -> [INCOMPLETE][10] ([fdo#103927] / [fdo#108743])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/fi-apl-guc/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/fi-apl-guc/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html

  
#### Possible fixes ####

  * igt@gem_ctx_create@basic-files:
    - {fi-cml-s}:         [INCOMPLETE][11] ([fdo#110566]) -> [PASS][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/fi-cml-s/igt@gem_ctx_create@basic-files.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/fi-cml-s/igt@gem_ctx_create@basic-files.html
    - {fi-icl-guc}:       [INCOMPLETE][13] ([fdo#107713] / [fdo#109100]) -> [PASS][14]
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/fi-icl-guc/igt@gem_ctx_create@basic-files.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/fi-icl-guc/igt@gem_ctx_create@basic-files.html

  * igt@gem_ctx_switch@rcs0:
    - fi-icl-u2:          [INCOMPLETE][15] ([fdo#107713]) -> [PASS][16]
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/fi-icl-u2/igt@gem_ctx_switch@rcs0.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/fi-icl-u2/igt@gem_ctx_switch@rcs0.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#104108]: https://bugs.freedesktop.org/show_bug.cgi?id=104108
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#108126]: https://bugs.freedesktop.org/show_bug.cgi?id=108126
  [fdo#108569]: https://bugs.freedesktop.org/show_bug.cgi?id=108569
  [fdo#108743]: https://bugs.freedesktop.org/show_bug.cgi?id=108743
  [fdo#109100]: https://bugs.freedesktop.org/show_bug.cgi?id=109100
  [fdo#109635 ]: https://bugs.freedesktop.org/show_bug.cgi?id=109635 
  [fdo#110387]: https://bugs.freedesktop.org/show_bug.cgi?id=110387
  [fdo#110566]: https://bugs.freedesktop.org/show_bug.cgi?id=110566
  [fdo#111597]: https://bugs.freedesktop.org/show_bug.cgi?id=111597


Participating hosts (51 -> 42)
------------------------------

  Missing    (9): fi-icl-u4 fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-icl-u3 fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7226 -> Patchwork_15078

  CI-20190529: 20190529
  CI_DRM_7226: 6190cecae6196d219ea9a19c16a81598141c68af @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5253: d46ccb32cf693e8d8253543e9a4fbe5eaef4aa41 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_15078: f098be6a0a429431e2ea042f9035b455f73807d1 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

f098be6a0a42 drm/i915: Move i915_gem_init_contexts() earlier
36e4324cec0e drm/i915/gt: Pull timeline initialise to intel_gt_init_early
5421c66c785c drm/i915/gt: Defer engine registration until fully initialised
1e8364d9d7cf drm/i915: Remove i915->kernel_context
2903d30de5b8 drm/i915: Push the use-semaphore marker onto the intel_context
960644550592 drm/i915: Drop GEM context as a direct link from i915_request
04a118042203 drm/i915: Defer rc6 shutdown to suspend_late
f0a6dc75fb6d drm/i915/gt: Move user_forcewake application to GT
1c594e380a29 drm/i915/gem: Leave reloading kernel context on resume to GT
b683559aa67b drm/i915/gt: Call intel_gt_sanitize() directly
d0ab0266b94a drm/i915: Split detaching and removing the vma

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH 01/11] drm/i915: Split detaching and removing the vma
@ 2019-10-31 13:49   ` Matthew Auld
  0 siblings, 0 replies; 28+ messages in thread
From: Matthew Auld @ 2019-10-31 13:49 UTC (permalink / raw)
  To: Chris Wilson; +Cc: Intel Graphics Development, Matthew Auld

On Wed, 30 Oct 2019 at 19:22, Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> In order to keep the assert_bind_count() valid, we need to hold the vma
> page reference until after we drop the bind count. However, we must also
> keep the drm_mm_remove_node() as the last action of i915_vma_unbind() so
> that it serialises with the unlocked check inside i915_vma_destroy(). So
> we need to split up i915_vma_remove() so that we order the detach, drop
> pages and remove as required during unbind.
>
> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=112067
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 28+ messages in thread

* ✗ Fi.CI.IGT: failure for series starting with [01/11] drm/i915: Split detaching and removing the vma
@ 2019-11-01  1:22   ` Patchwork
  0 siblings, 0 replies; 28+ messages in thread
From: Patchwork @ 2019-11-01  1:22 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/11] drm/i915: Split detaching and removing the vma
URL   : https://patchwork.freedesktop.org/series/68788/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_7226_full -> Patchwork_15078_full
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_15078_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_15078_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_15078_full:

### IGT changes ###

#### Possible regressions ####

  * igt@gem_ctx_param@invalid-ctx-get:
    - shard-skl:          [PASS][1] -> [DMESG-WARN][2] +3 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-skl9/igt@gem_ctx_param@invalid-ctx-get.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-skl5/igt@gem_ctx_param@invalid-ctx-get.html

  * igt@gem_exec_schedule@out-order-vebox:
    - shard-skl:          [PASS][3] -> [INCOMPLETE][4] +22 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-skl4/igt@gem_exec_schedule@out-order-vebox.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-skl1/igt@gem_exec_schedule@out-order-vebox.html

  * igt@gem_exec_schedule@preempt-hang-bsd:
    - shard-glk:          [PASS][5] -> [DMESG-WARN][6] +3 similar issues
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-glk4/igt@gem_exec_schedule@preempt-hang-bsd.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk7/igt@gem_exec_schedule@preempt-hang-bsd.html

  * igt@gem_exec_schedule@preempt-hang-bsd2:
    - shard-kbl:          [PASS][7] -> [DMESG-WARN][8] +1 similar issue
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-kbl7/igt@gem_exec_schedule@preempt-hang-bsd2.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl2/igt@gem_exec_schedule@preempt-hang-bsd2.html

  * igt@gem_exec_schedule@preempt-hang-render:
    - shard-apl:          [PASS][9] -> [DMESG-WARN][10] +4 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-apl6/igt@gem_exec_schedule@preempt-hang-render.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-apl4/igt@gem_exec_schedule@preempt-hang-render.html
    - shard-iclb:         [PASS][11] -> [DMESG-WARN][12] +2 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-iclb6/igt@gem_exec_schedule@preempt-hang-render.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb3/igt@gem_exec_schedule@preempt-hang-render.html

  * igt@gem_exec_schedule@preempt-hang-vebox:
    - shard-iclb:         NOTRUN -> [DMESG-WARN][13]
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb3/igt@gem_exec_schedule@preempt-hang-vebox.html

  * igt@runner@aborted:
    - shard-hsw:          NOTRUN -> [FAIL][14]
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-hsw8/igt@runner@aborted.html
    - shard-kbl:          NOTRUN -> ([FAIL][15], [FAIL][16], [FAIL][17], [FAIL][18], [FAIL][19], [FAIL][20], [FAIL][21], [FAIL][22], [FAIL][23], [FAIL][24], [FAIL][25], [FAIL][26], [FAIL][27], [FAIL][28])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl2/igt@runner@aborted.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl3/igt@runner@aborted.html
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl2/igt@runner@aborted.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl3/igt@runner@aborted.html
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl2/igt@runner@aborted.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl2/igt@runner@aborted.html
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl3/igt@runner@aborted.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl2/igt@runner@aborted.html
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl2/igt@runner@aborted.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl3/igt@runner@aborted.html
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl7/igt@runner@aborted.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl6/igt@runner@aborted.html
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl6/igt@runner@aborted.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl7/igt@runner@aborted.html
    - shard-apl:          NOTRUN -> ([FAIL][29], [FAIL][30], [FAIL][31], [FAIL][32], [FAIL][33])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-apl3/igt@runner@aborted.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-apl1/igt@runner@aborted.html
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-apl1/igt@runner@aborted.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-apl4/igt@runner@aborted.html
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-apl2/igt@runner@aborted.html
    - shard-glk:          NOTRUN -> ([FAIL][34], [FAIL][35], [FAIL][36], [FAIL][37], [FAIL][38], [FAIL][39], [FAIL][40], [FAIL][41], [FAIL][42], [FAIL][43]) ([k.org#202321])
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk7/igt@runner@aborted.html
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk1/igt@runner@aborted.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk7/igt@runner@aborted.html
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk7/igt@runner@aborted.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk8/igt@runner@aborted.html
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk6/igt@runner@aborted.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk9/igt@runner@aborted.html
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk5/igt@runner@aborted.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk6/igt@runner@aborted.html
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk6/igt@runner@aborted.html

  
#### Warnings ####

  * igt@gem_exec_schedule@preempt-hang-bsd2:
    - shard-iclb:         [SKIP][44] ([fdo#109276]) -> [DMESG-WARN][45]
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-iclb5/igt@gem_exec_schedule@preempt-hang-bsd2.html
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb4/igt@gem_exec_schedule@preempt-hang-bsd2.html

  
#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * {igt@gem_ctx_exec@basic-nohangcheck}:
    - {shard-tglb}:       [PASS][46] -> [INCOMPLETE][47] +10 similar issues
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-tglb1/igt@gem_ctx_exec@basic-nohangcheck.html
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb8/igt@gem_ctx_exec@basic-nohangcheck.html

  * igt@gem_ctx_param@vm:
    - {shard-tglb}:       [PASS][48] -> [DMESG-WARN][49] +3 similar issues
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-tglb1/igt@gem_ctx_param@vm.html
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb2/igt@gem_ctx_param@vm.html

  * {igt@gem_ctx_persistence@bcs0-hostile}:
    - shard-skl:          [PASS][50] -> [FAIL][51] +1 similar issue
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-skl8/igt@gem_ctx_persistence@bcs0-hostile.html
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-skl6/igt@gem_ctx_persistence@bcs0-hostile.html
    - shard-apl:          [PASS][52] -> [FAIL][53] +2 similar issues
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-apl1/igt@gem_ctx_persistence@bcs0-hostile.html
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-apl2/igt@gem_ctx_persistence@bcs0-hostile.html

  * {igt@gem_ctx_persistence@bcs0-hostile-preempt}:
    - shard-skl:          [PASS][54] -> [DMESG-WARN][55]
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-skl8/igt@gem_ctx_persistence@bcs0-hostile-preempt.html
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-skl10/igt@gem_ctx_persistence@bcs0-hostile-preempt.html

  * {igt@gem_ctx_persistence@hangcheck}:
    - shard-kbl:          [PASS][56] -> [FAIL][57] +3 similar issues
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-kbl3/igt@gem_ctx_persistence@hangcheck.html
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl1/igt@gem_ctx_persistence@hangcheck.html
    - shard-iclb:         [PASS][58] -> [FAIL][59] +5 similar issues
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-iclb8/igt@gem_ctx_persistence@hangcheck.html
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb4/igt@gem_ctx_persistence@hangcheck.html

  * {igt@gem_ctx_persistence@rcs0-mixed-process}:
    - shard-skl:          [PASS][60] -> [INCOMPLETE][61] +1 similar issue
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-skl2/igt@gem_ctx_persistence@rcs0-mixed-process.html
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-skl1/igt@gem_ctx_persistence@rcs0-mixed-process.html

  * {igt@gem_ctx_persistence@vcs0-hostile-preempt}:
    - shard-glk:          [PASS][62] -> [DMESG-WARN][63]
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-glk1/igt@gem_ctx_persistence@vcs0-hostile-preempt.html
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk8/igt@gem_ctx_persistence@vcs0-hostile-preempt.html

  * {igt@gem_ctx_persistence@vcs1-cleanup}:
    - {shard-tglb}:       [PASS][64] -> [FAIL][65] +2 similar issues
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-tglb7/igt@gem_ctx_persistence@vcs1-cleanup.html
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb8/igt@gem_ctx_persistence@vcs1-cleanup.html

  * {igt@gem_ctx_persistence@vcs1-hostile-preempt}:
    - shard-kbl:          [PASS][66] -> [DMESG-WARN][67] +1 similar issue
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-kbl7/igt@gem_ctx_persistence@vcs1-hostile-preempt.html
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl2/igt@gem_ctx_persistence@vcs1-hostile-preempt.html
    - shard-iclb:         [SKIP][68] ([fdo#109276] / [fdo#112080]) -> [DMESG-WARN][69]
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-iclb6/igt@gem_ctx_persistence@vcs1-hostile-preempt.html
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb1/igt@gem_ctx_persistence@vcs1-hostile-preempt.html

  * {igt@gem_ctx_persistence@vecs0-cleanup}:
    - shard-glk:          [PASS][70] -> [FAIL][71] +1 similar issue
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-glk4/igt@gem_ctx_persistence@vecs0-cleanup.html
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk3/igt@gem_ctx_persistence@vecs0-cleanup.html

  * igt@kms_cursor_crc@pipe-a-cursor-64x21-onscreen:
    - {shard-tglb}:       [FAIL][72] ([fdo#111703]) -> [INCOMPLETE][73]
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-tglb3/igt@kms_cursor_crc@pipe-a-cursor-64x21-onscreen.html
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb2/igt@kms_cursor_crc@pipe-a-cursor-64x21-onscreen.html

  * igt@runner@aborted:
    - {shard-tglb}:       NOTRUN -> ([FAIL][74], [FAIL][75], [FAIL][76], [FAIL][77], [FAIL][78], [FAIL][79], [FAIL][80], [FAIL][81], [FAIL][82], [FAIL][83], [FAIL][84], [FAIL][85], [FAIL][86]) ([fdo#103375] / [fdo#112184])
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb2/igt@runner@aborted.html
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb4/igt@runner@aborted.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb7/igt@runner@aborted.html
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb6/igt@runner@aborted.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb1/igt@runner@aborted.html
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb7/igt@runner@aborted.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb3/igt@runner@aborted.html
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb1/igt@runner@aborted.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb2/igt@runner@aborted.html
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb7/igt@runner@aborted.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb2/igt@runner@aborted.html
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb5/igt@runner@aborted.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-tglb8/igt@runner@aborted.html

  
Known issues
------------

  Here are the changes found in Patchwork_15078_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_isolation@rcs0-s3:
    - shard-apl:          [PASS][87] -> [DMESG-WARN][88] ([fdo#108566]) +1 similar issue
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-apl4/igt@gem_ctx_isolation@rcs0-s3.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-apl4/igt@gem_ctx_isolation@rcs0-s3.html

  * igt@gem_ctx_shared@q-in-order-vebox:
    - shard-glk:          [PASS][89] -> [INCOMPLETE][90] ([fdo#103359] / [k.org#198133]) +25 similar issues
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-glk8/igt@gem_ctx_shared@q-in-order-vebox.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-glk3/igt@gem_ctx_shared@q-in-order-vebox.html

  * igt@gem_ctx_shared@q-out-order-bsd1:
    - shard-apl:          [PASS][91] -> [INCOMPLETE][92] ([fdo#103927]) +25 similar issues
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-apl7/igt@gem_ctx_shared@q-out-order-bsd1.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-apl3/igt@gem_ctx_shared@q-out-order-bsd1.html

  * igt@gem_exec_balancer@bonded-imm:
    - shard-iclb:         [PASS][93] -> [INCOMPLETE][94] ([fdo#107713]) +9 similar issues
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-iclb1/igt@gem_exec_balancer@bonded-imm.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb2/igt@gem_exec_balancer@bonded-imm.html

  * igt@gem_exec_parallel@vcs1-fds:
    - shard-iclb:         [PASS][95] -> [SKIP][96] ([fdo#112080]) +2 similar issues
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-iclb4/igt@gem_exec_parallel@vcs1-fds.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb6/igt@gem_exec_parallel@vcs1-fds.html

  * igt@gem_exec_schedule@in-order-bsd:
    - shard-iclb:         [PASS][97] -> [SKIP][98] ([fdo#112146]) +1 similar issue
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-iclb5/igt@gem_exec_schedule@in-order-bsd.html
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb2/igt@gem_exec_schedule@in-order-bsd.html

  * igt@gem_exec_schedule@reorder-wide-bsd:
    - shard-skl:          [PASS][99] -> [INCOMPLETE][100] ([fdo#108972])
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-skl4/igt@gem_exec_schedule@reorder-wide-bsd.html
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-skl8/igt@gem_exec_schedule@reorder-wide-bsd.html

  * igt@gem_exec_schedule@reorder-wide-vebox:
    - shard-kbl:          [PASS][101] -> [INCOMPLETE][102] ([fdo#103665]) +26 similar issues
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-kbl7/igt@gem_exec_schedule@reorder-wide-vebox.html
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl6/igt@gem_exec_schedule@reorder-wide-vebox.html

  * igt@gem_userptr_blits@sync-unmap:
    - shard-snb:          [PASS][103] -> [DMESG-WARN][104] ([fdo#111870])
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-snb4/igt@gem_userptr_blits@sync-unmap.html
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-snb4/igt@gem_userptr_blits@sync-unmap.html

  * igt@gem_userptr_blits@sync-unmap-cycles:
    - shard-hsw:          [PASS][105] -> [DMESG-WARN][106] ([fdo#111870])
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-hsw2/igt@gem_userptr_blits@sync-unmap-cycles.html
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-hsw7/igt@gem_userptr_blits@sync-unmap-cycles.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-blt:
    - shard-iclb:         [PASS][107] -> [FAIL][108] ([fdo#103167]) +1 similar issue
   [107]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-blt.html
   [108]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb2/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-blt.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes:
    - shard-kbl:          [PASS][109] -> [DMESG-WARN][110] ([fdo#108566])
   [109]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-kbl4/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html
   [110]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-kbl1/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html

  * igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min:
    - shard-skl:          [PASS][111] -> [FAIL][112] ([fdo#108145])
   [111]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-skl6/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min.html
   [112]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-skl2/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min.html

  * igt@kms_psr@psr2_primary_mmap_cpu:
    - shard-iclb:         [PASS][113] -> [SKIP][114] ([fdo#109441]) +1 similar issue
   [113]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-iclb2/igt@kms_psr@psr2_primary_mmap_cpu.html
   [114]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb8/igt@kms_psr@psr2_primary_mmap_cpu.html

  * igt@prime_vgem@wait-bsd2:
    - shard-iclb:         [PASS][115] -> [SKIP][116] ([fdo#109276]) +8 similar issues
   [115]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-iclb4/igt@prime_vgem@wait-bsd2.html
   [116]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb6/igt@prime_vgem@wait-bsd2.html

  
#### Possible fixes ####

  * igt@gem_exec_blt@dumb-buf:
    - shard-iclb:         [INCOMPLETE][117] ([fdo#107713]) -> [PASS][118] +1 similar issue
   [117]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-iclb4/igt@gem_exec_blt@dumb-buf.html
   [118]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb3/igt@gem_exec_blt@dumb-buf.html

  * igt@gem_exec_schedule@preempt-self-bsd:
    - shard-iclb:         [SKIP][119] ([fdo#112146]) -> [PASS][120] +1 similar issue
   [119]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7226/shard-iclb2/igt@gem_exec_schedule@preempt-self-bsd.html
   [120]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/shard-iclb8/igt@gem_exec_schedule@preempt-self-bsd.html

  * igt@gem_persistent_relocs@forked-interruptible-fau

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15078/index.html

end of thread, other threads:[~2019-11-01  1:22 UTC | newest]

Thread overview: 28+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-10-30 19:21 [PATCH 01/11] drm/i915: Split detaching and removing the vma Chris Wilson
2019-10-30 19:21 ` [Intel-gfx] " Chris Wilson
2019-10-30 19:21 ` [PATCH 02/11] drm/i915/gt: Call intel_gt_sanitize() directly Chris Wilson
2019-10-30 19:21   ` [Intel-gfx] " Chris Wilson
2019-10-30 19:21 ` [PATCH 03/11] drm/i915/gem: Leave reloading kernel context on resume to GT Chris Wilson
2019-10-30 19:21   ` [Intel-gfx] " Chris Wilson
2019-10-30 19:21 ` [PATCH 04/11] drm/i915/gt: Move user_forcewake application " Chris Wilson
2019-10-30 19:21   ` [Intel-gfx] " Chris Wilson
2019-10-30 19:21 ` [PATCH 05/11] drm/i915: Defer rc6 shutdown to suspend_late Chris Wilson
2019-10-30 19:21   ` [Intel-gfx] " Chris Wilson
2019-10-30 19:21 ` [PATCH 06/11] drm/i915: Drop GEM context as a direct link from i915_request Chris Wilson
2019-10-30 19:21   ` [Intel-gfx] " Chris Wilson
2019-10-30 19:21 ` [PATCH 07/11] drm/i915: Push the use-semaphore marker onto the intel_context Chris Wilson
2019-10-30 19:21   ` [Intel-gfx] " Chris Wilson
2019-10-30 19:21 ` [PATCH 08/11] drm/i915: Remove i915->kernel_context Chris Wilson
2019-10-30 19:21   ` [Intel-gfx] " Chris Wilson
2019-10-30 19:21 ` [PATCH 09/11] drm/i915/gt: Defer engine registration until fully initialised Chris Wilson
2019-10-30 19:21   ` [Intel-gfx] " Chris Wilson
2019-10-30 19:21 ` [PATCH 10/11] drm/i915/gt: Pull timeline initialise to intel_gt_init_early Chris Wilson
2019-10-30 19:21   ` [Intel-gfx] " Chris Wilson
2019-10-30 19:21 ` [PATCH 11/11] drm/i915: Move i915_gem_init_contexts() earlier Chris Wilson
2019-10-30 19:21   ` [Intel-gfx] " Chris Wilson
2019-10-30 21:19 ` ✓ Fi.CI.BAT: success for series starting with [01/11] drm/i915: Split detaching and removing the vma Patchwork
2019-10-30 21:19   ` [Intel-gfx] " Patchwork
2019-10-31 13:49 ` [PATCH 01/11] " Matthew Auld
2019-10-31 13:49   ` [Intel-gfx] " Matthew Auld
2019-11-01  1:22 ` ✗ Fi.CI.IGT: failure for series starting with [01/11] " Patchwork
2019-11-01  1:22   ` [Intel-gfx] " Patchwork
